diff --git a/Documentation/ABI/testing/sysfs-timecard b/Documentation/ABI/testing/sysfs-timecard new file mode 100644 index 00000000000000..97f6773794a511 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-timecard @@ -0,0 +1,174 @@ +What: /sys/class/timecard/ +Date: September 2021 +Contact: Jonathan Lemon +Description: This directory contains files and directories + providing a standardized interface to the ancillary + features of the OpenCompute timecard. + +What: /sys/class/timecard/ocpN/ +Date: September 2021 +Contact: Jonathan Lemon +Description: This directory contains the attributes of the Nth timecard + registered. + +What: /sys/class/timecard/ocpN/available_clock_sources +Date: September 2021 +Contact: Jonathan Lemon +Description: (RO) The list of available time sources that the PHC + uses for clock adjustments. + + ==== ================================================= + NONE no adjustments + PPS adjustments come from the PPS1 selector (default) + TOD adjustments from the GNSS/TOD module + IRIG adjustments from external IRIG-B signal + DCF adjustments from external DCF signal + ==== ================================================= + +What: /sys/class/timecard/ocpN/available_sma_inputs +Date: September 2021 +Contact: Jonathan Lemon +Description: (RO) Set of available destinations (sinks) for a SMA + input signal. + + ===== ================================================ + 10Mhz signal is used as the 10Mhz reference clock + PPS1 signal is sent to the PPS1 selector + PPS2 signal is sent to the PPS2 selector + TS1 signal is sent to timestamper 1 + TS2 signal is sent to timestamper 2 + IRIG signal is sent to the IRIG-B module + DCF signal is sent to the DCF module + ===== ================================================ + +What: /sys/class/timecard/ocpN/available_sma_outputs +Date: May 2021 +Contact: Jonathan Lemon +Description: (RO) Set of available sources for a SMA output signal. + + ===== ================================================ + 10Mhz output is from the 10Mhz reference clock + PHC output PPS is from the PHC clock + MAC output PPS is from the Miniature Atomic Clock + GNSS output PPS is from the GNSS module + GNSS2 output PPS is from the second GNSS module + IRIG output is from the PHC, in IRIG-B format + DCF output is from the PHC, in DCF format + ===== ================================================ + +What: /sys/class/timecard/ocpN/clock_source +Date: September 2021 +Contact: Jonathan Lemon +Description: (RW) Contains the current synchronization source used by + the PHC. May be changed by writing one of the listed + values from the available_clock_sources attribute set. + +What: /sys/class/timecard/ocpN/gnss_sync +Date: September 2021 +Contact: Jonathan Lemon +Description: (RO) Indicates whether a valid GNSS signal is received, + or when the signal was lost. + +What: /sys/class/timecard/ocpN/i2c +Date: September 2021 +Contact: Jonathan Lemon +Description: This optional attribute links to the associated i2c device. + +What: /sys/class/timecard/ocpN/irig_b_mode +Date: September 2021 +Contact: Jonathan Lemon +Description: (RW) An integer from 0-7 indicating the timecode format + of the IRIG-B output signal: B00 + +What: /sys/class/timecard/ocpN/pps +Date: September 2021 +Contact: Jonathan Lemon +Description: This optional attribute links to the associated PPS device. + +What: /sys/class/timecard/ocpN/ptp +Date: September 2021 +Contact: Jonathan Lemon +Description: This attribute links to the associated PTP device. 
+ +What: /sys/class/timecard/ocpN/serialnum +Date: September 2021 +Contact: Jonathan Lemon +Description: (RO) Provides the serial number of the timecard. + +What: /sys/class/timecard/ocpN/sma1 +What: /sys/class/timecard/ocpN/sma2 +What: /sys/class/timecard/ocpN/sma3 +What: /sys/class/timecard/ocpN/sma4 +Date: September 2021 +Contact: Jonathan Lemon +Description: (RW) These attributes specify the direction of the signal + on the associated SMA connectors, and also the signal sink + or source. + + The display format of the attribute is a space separated + list of signals, prefixed by the input/output direction. + + The signal direction may be changed (if supported) by + prefixing the signal list with either "in:" or "out:". + If neither prefix is present, then the direction is unchanged. + + The output signal may be changed by writing one of the listed + values from the available_sma_outputs attribute set. + + The input destinations may be changed by writing multiple + values from the available_sma_inputs attribute set, + separated by spaces. If there are duplicated input + destinations between connectors, the lowest numbered SMA + connector is given priority. + + Note that not all input combinations may make sense. + + The 10Mhz reference clock input is currently only valid + on SMA1 and may not be combined with other destination sinks. + +What: /sys/class/timecard/ocpN/ts_window_adjust +Date: September 2021 +Contact: Jonathan Lemon +Description: (RW) When retrieving the PHC with the PTP SYS_OFFSET_EXTENDED + ioctl, a system timestamp is made before and after the PHC + time is retrieved. The midpoint between the two system + timestamps is usually taken to be the SYS time associated + with the PHC time. This estimate may be wrong, as it depends + on PCI latencies, and when the PHC time was latched + + The attribute value reduces the end timestamp by the given + number of nanoseconds, so the computed midpoint matches the + retrieved PHC time. + + The initial value is set based on measured PCI latency and + the estimated point where the FPGA latches the PHC time. This + value may be changed by writing an unsigned integer. + +What: /sys/class/timecard/ocpN/ttyGNSS +What: /sys/class/timecard/ocpN/ttyGNSS2 +Date: September 2021 +Contact: Jonathan Lemon +Description: These optional attributes link to the TTY serial ports + associated with the GNSS devices. + +What: /sys/class/timecard/ocpN/ttyMAC +Date: September 2021 +Contact: Jonathan Lemon +Description: This optional attribute links to the TTY serial port + associated with the Miniature Atomic Clock. + +What: /sys/class/timecard/ocpN/ttyNMEA +Date: September 2021 +Contact: Jonathan Lemon +Description: This optional attribute links to the TTY serial port + which outputs the PHC time in NMEA ZDA format. + +What: /sys/class/timecard/ocpN/utc_tai_offset +Date: September 2021 +Contact: Jonathan Lemon +Description: (RW) The DCF and IRIG output signals are in UTC, while the + TimeCard operates on TAI. This attribute allows setting the + offset in seconds, which is added to the TAI timebase for + these formats. + + The offset may be changed by writing an unsigned integer. 
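The attributes above are ordinary sysfs files, so they can be driven from user space with plain reads and writes. Below is a minimal C sketch (not part of the patch; the device index "ocp0" and the value "PPS" are assumptions taken from the descriptions above) that queries the current clock source and then selects the PPS input::

    /*
     * Hypothetical example: query and change the timecard clock source
     * through the sysfs attributes documented above.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd;

            fd = open("/sys/class/timecard/ocp0/clock_source", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);     /* current source */
            if (n > 0) {
                    buf[n] = '\0';
                    printf("clock_source: %s", buf);
            }
            close(fd);

            fd = open("/sys/class/timecard/ocp0/clock_source", O_WRONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Must be one of the values in available_clock_sources. */
            if (write(fd, "PPS", strlen("PPS")) < 0)
                    perror("write");
            close(fd);

            return 0;
    }

Values that do not appear in available_clock_sources are expected to be rejected by the driver.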
diff --git a/Documentation/bpf/bpf_licensing.rst b/Documentation/bpf/bpf_licensing.rst new file mode 100644 index 00000000000000..b19c433f41d2c5 --- /dev/null +++ b/Documentation/bpf/bpf_licensing.rst @@ -0,0 +1,92 @@ +============= +BPF licensing +============= + +Background +========== + +* Classic BPF was BSD licensed + +"BPF" was originally introduced as BSD Packet Filter in +http://www.tcpdump.org/papers/bpf-usenix93.pdf. The corresponding instruction +set and its implementation came from BSD with BSD license. That original +instruction set is now known as "classic BPF". + +However an instruction set is a specification for machine-language interaction, +similar to a programming language. It is not a code. Therefore, the +application of a BSD license may be misleading in a certain context, as the +instruction set may enjoy no copyright protection. + +* eBPF (extended BPF) instruction set continues to be BSD + +In 2014, the classic BPF instruction set was significantly extended. We +typically refer to this instruction set as eBPF to disambiguate it from cBPF. +The eBPF instruction set is still BSD licensed. + +Implementations of eBPF +======================= + +Using the eBPF instruction set requires implementing code in both kernel space +and user space. + +In Linux Kernel +--------------- + +The reference implementations of the eBPF interpreter and various just-in-time +compilers are part of Linux and are GPLv2 licensed. The implementation of +eBPF helper functions is also GPLv2 licensed. Interpreters, JITs, helpers, +and verifiers are called eBPF runtime. + +In User Space +------------- + +There are also implementations of eBPF runtime (interpreter, JITs, helper +functions) under +Apache2 (https://github.com/iovisor/ubpf), +MIT (https://github.com/qmonnet/rbpf), and +BSD (https://github.com/DPDK/dpdk/blob/main/lib/librte_bpf). + +In HW +----- + +The HW can choose to execute eBPF instruction natively and provide eBPF runtime +in HW or via the use of implementing firmware with a proprietary license. + +In other operating systems +-------------------------- + +Other kernels or user space implementations of eBPF instruction set and runtime +can have proprietary licenses. + +Using BPF programs in the Linux kernel +====================================== + +Linux Kernel (while being GPLv2) allows linking of proprietary kernel modules +under these rules: +Documentation/process/license-rules.rst + +When a kernel module is loaded, the linux kernel checks which functions it +intends to use. If any function is marked as "GPL only," the corresponding +module or program has to have GPL compatible license. + +Loading BPF program into the Linux kernel is similar to loading a kernel +module. BPF is loaded at run time and not statically linked to the Linux +kernel. BPF program loading follows the same license checking rules as kernel +modules. BPF programs can be proprietary if they don't use "GPL only" BPF +helper functions. + +Further, some BPF program types - Linux Security Modules (LSM) and TCP +Congestion Control (struct_ops), as of Aug 2021 - are required to be GPL +compatible even if they don't use "GPL only" helper functions directly. The +registration step of LSM and TCP congestion control modules of the Linux +kernel is done through EXPORT_SYMBOL_GPL kernel functions. In that sense LSM +and struct_ops BPF programs are implicitly calling "GPL only" functions. +The same restriction applies to BPF programs that call kernel functions +directly via unstable interface also known as "kfunc". 
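As a concrete illustration of the load-time check described above, a BPF program carries its license string in a dedicated "license" ELF section, which the kernel examines before allowing the program to use "GPL only" helpers. A minimal sketch (illustrative only; the attach point and helper are arbitrary choices, not taken from this document)::

    /*
     * Hypothetical libbpf-style program: bpf_trace_printk() is a
     * "GPL only" helper, so loading this object succeeds only because
     * the "license" section below carries a GPL-compatible string.
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("kprobe/do_sys_openat2")
    int trace_open(void *ctx)
    {
            const char fmt[] = "openat2 called\n";

            bpf_trace_printk(fmt, sizeof(fmt));
            return 0;
    }

    char _license[] SEC("license") = "GPL";

With a license string that is not GPL-compatible, the verifier rejects the load because the helper is GPL-restricted; a program that avoids all "GPL only" helpers may instead declare a proprietary license, as described above.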
+ +Packaging BPF programs with user space applications +==================================================== + +Generally, proprietary-licensed applications and GPL licensed BPF programs +written for the Linux kernel in the same package can co-exist because they are +separate executable processes. This applies to both cBPF and eBPF programs. diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 846354cd2d69eb..9ad4218a751f56 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -85,6 +85,7 @@ sequentially and type id is assigned to each recognized type starting from id #define BTF_KIND_VAR 14 /* Variable */ #define BTF_KIND_DATASEC 15 /* Section */ #define BTF_KIND_FLOAT 16 /* Floating point */ + #define BTF_KIND_DECL_TAG 17 /* Decl Tag */ Note that the type section encodes debug info, not just pure types. ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram. @@ -106,7 +107,7 @@ Each type contains the following common data:: * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC and FUNC_PROTO. + * FUNC, FUNC_PROTO and DECL_TAG. * "type" is a type_id referring to another type. */ union { @@ -465,6 +466,32 @@ map definition. No additional type data follow ``btf_type``. +2.2.17 BTF_KIND_DECL_TAG +~~~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: offset to a non-empty string + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_DECL_TAG + * ``info.vlen``: 0 + * ``type``: ``struct``, ``union``, ``func``, ``var`` or ``typedef`` + +``btf_type`` is followed by ``struct btf_decl_tag``.:: + + struct btf_decl_tag { + __u32 component_idx; + }; + +The ``name_off`` encodes btf_decl_tag attribute string. +The ``type`` should be ``struct``, ``union``, ``func``, ``var`` or ``typedef``. +For ``var`` or ``typedef`` type, ``btf_decl_tag.component_idx`` must be ``-1``. +For the other three types, if the btf_decl_tag attribute is +applied to the ``struct``, ``union`` or ``func`` itself, +``btf_decl_tag.component_idx`` must be ``-1``. Otherwise, +the attribute is applied to a ``struct``/``union`` member or +a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a +valid index (starting from 0) pointing to a member or an argument. + 3. BTF Kernel API ***************** diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 1ceb5d704a9780..37f273a7e8b65c 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -82,6 +82,15 @@ Testing and debugging BPF s390 +Licensing +========= + +.. toctree:: + :maxdepth: 1 + + bpf_licensing + + Other ===== diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst index 9c68d5014ff1bd..f86360f734a848 100644 --- a/Documentation/bpf/libbpf/libbpf_naming_convention.rst +++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst @@ -150,6 +150,46 @@ mirror of the mainline's version of libbpf for a stand-alone build. However, all changes to libbpf's code base must be upstreamed through the mainline kernel tree. + +API documentation convention +============================ + +The libbpf API is documented via comments above definitions in +header files. These comments can be rendered by doxygen and sphinx +for well organized html output. This section describes the +convention in which these comments should be formated. + +Here is an example from btf.h: + +.. 
code-block:: c + + /** + * @brief **btf__new()** creates a new instance of a BTF object from the raw + * bytes of an ELF's BTF section + * @param data raw bytes + * @param size number of bytes passed in `data` + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ + +The comment must start with a block comment of the form '/\*\*'. + +The documentation always starts with a @brief directive. This line is a short +description about this API. It starts with the name of the API, denoted in bold +like so: **api_name**. Please include an open and close parenthesis if this is a +function. Follow with the short description of the API. A longer form description +can be added below the last directive, at the bottom of the comment. + +Parameters are denoted with the @param directive, there should be one for each +parameter. If this is a function with a non-void return, use the @return directive +to document it. + License ------------------- diff --git a/Documentation/devicetree/bindings/net/asix,ax88796c.yaml b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml new file mode 100644 index 00000000000000..699ebf452479be --- /dev/null +++ b/Documentation/devicetree/bindings/net/asix,ax88796c.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/asix,ax88796c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ASIX AX88796C SPI Ethernet Adapter + +maintainers: + - Ɓukasz Stelmach + +description: | + ASIX AX88796C is an Ethernet controller with a built in PHY. This + describes SPI mode of the chip. + + The node for this driver must be a child node of an SPI controller, + hence all mandatory properties described in + ../spi/spi-controller.yaml must be specified. + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + const: asix,ax88796c + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 40000000 + + interrupts: + maxItems: 1 + + reset-gpios: + description: + A GPIO line handling reset of the chip. As the line is active low, + it should be marked GPIO_ACTIVE_LOW. 
+ maxItems: 1 + + local-mac-address: true + + mac-address: true + +required: + - compatible + - reg + - spi-max-frequency + - interrupts + - reset-gpios + +additionalProperties: false + +examples: + # Artik5 eval board + - | + #include + #include + spi0 { + #address-cells = <1>; + #size-cells = <0>; + + ethernet@0 { + compatible = "asix,ax88796c"; + reg = <0x0>; + local-mac-address = [00 00 00 00 00 00]; /* Filled in by a bootloader */ + interrupt-parent = <&gpx2>; + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + spi-max-frequency = <40000000>; + reset-gpios = <&gpe0 2 GPIO_ACTIVE_LOW>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt index 33a0d67e4ce5e1..0b5994fba35fab 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt @@ -2,7 +2,8 @@ Required properties: - compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2", - "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5", "brcm,bcm2711-genet-v5". + "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5", "brcm,bcm2711-genet-v5" or + "brcm,bcm7712-genet-v5". - reg: address and length of the register set for the device - interrupts and/or interrupts-extended: must be two cells, the first cell is the general purpose interrupt line, while the second cell is the diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml index 16aa192c118e39..2ad7f79ad371af 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.yaml +++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml @@ -46,6 +46,9 @@ patternProperties: type: object description: Ethernet switch ports + allOf: + - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#" + properties: reg: description: Port number @@ -73,11 +76,14 @@ patternProperties: dsa-tag-protocol: description: Instead of the default, the switch will use this tag protocol if - possible. Useful when a device supports multiple protcols and + possible. Useful when a device supports multiple protocols and the default is incompatible with the Ethernet device. enum: - dsa - edsa + - ocelot + - ocelot-8021q + - seville phy-handle: true @@ -91,6 +97,10 @@ patternProperties: managed: true + rx-internal-delay-ps: true + + tx-internal-delay-ps: true + required: - reg diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml index f978f8719d8e87..24cd733c11d103 100644 --- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml +++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml @@ -74,10 +74,42 @@ properties: - compatible - reg +patternProperties: + "^(ethernet-)?ports$": + patternProperties: + "^(ethernet-)?port@[0-9]+$": + allOf: + - if: + properties: + phy-mode: + contains: + enum: + - rgmii + - rgmii-rxid + - rgmii-txid + - rgmii-id + then: + properties: + rx-internal-delay-ps: + $ref: "#/$defs/internal-delay-ps" + tx-internal-delay-ps: + $ref: "#/$defs/internal-delay-ps" + required: - compatible - reg +$defs: + internal-delay-ps: + description: + Disable tunable delay lines using 0 ps, or enable them and select + the phase between 1640 ps (73.8 degree shift at 1Gbps) and 2260 ps + (101.7 degree shift) in increments of 0.9 degrees (20 ps). 
+ enum: + [0, 1640, 1660, 1680, 1700, 1720, 1740, 1760, 1780, 1800, 1820, 1840, + 1860, 1880, 1900, 1920, 1940, 1960, 1980, 2000, 2020, 2040, 2060, 2080, + 2100, 2120, 2140, 2160, 2180, 2200, 2220, 2240, 2260] + unevaluatedProperties: false examples: @@ -97,29 +129,40 @@ examples: port@0 { phy-handle = <&rgmii_phy6>; phy-mode = "rgmii-id"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; reg = <0>; }; port@1 { phy-handle = <&rgmii_phy3>; phy-mode = "rgmii-id"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; reg = <1>; }; port@2 { phy-handle = <&rgmii_phy4>; phy-mode = "rgmii-id"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; reg = <2>; }; port@3 { + phy-handle = <&rgmii_phy4>; phy-mode = "rgmii-id"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; reg = <3>; }; port@4 { ethernet = <&enet2>; phy-mode = "rgmii"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; reg = <4>; fixed-link { diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt deleted file mode 100644 index 8c73f67c43cabe..00000000000000 --- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt +++ /dev/null @@ -1,215 +0,0 @@ -* Qualcomm Atheros QCA8xxx switch family - -Required properties: - -- compatible: should be one of: - "qca,qca8327" - "qca,qca8334" - "qca,qca8337" - -- #size-cells: must be 0 -- #address-cells: must be 1 - -Optional properties: - -- reset-gpios: GPIO to be used to reset the whole device - -Subnodes: - -The integrated switch subnode should be specified according to the binding -described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external -mdio-bus each subnode describing a port needs to have a valid phandle -referencing the internal PHY it is connected to. This is because there's no -N:N mapping of port and PHY id. -To declare the internal mdio-bus configuration, declare a mdio node in the -switch node and declare the phandle for the port referencing the internal -PHY is connected to. In this config a internal mdio-bus is registered and -the mdio MASTER is used as communication. - -Don't use mixed external and internal mdio-bus configurations, as this is -not supported by the hardware. - -The CPU port of this switch is always port 0. - -A CPU port node has the following optional node: - -- fixed-link : Fixed-link subnode describing a link to a non-MDIO - managed entity. See - Documentation/devicetree/bindings/net/fixed-link.txt - for details. - -For QCA8K the 'fixed-link' sub-node supports only the following properties: - -- 'speed' (integer, mandatory), to indicate the link speed. Accepted - values are 10, 100 and 1000 -- 'full-duplex' (boolean, optional), to indicate that full duplex is - used. When absent, half duplex is assumed. 
- -Examples: - -for the external mdio-bus configuration: - - &mdio0 { - phy_port1: phy@0 { - reg = <0>; - }; - - phy_port2: phy@1 { - reg = <1>; - }; - - phy_port3: phy@2 { - reg = <2>; - }; - - phy_port4: phy@3 { - reg = <3>; - }; - - phy_port5: phy@4 { - reg = <4>; - }; - - switch@10 { - compatible = "qca,qca8337"; - #address-cells = <1>; - #size-cells = <0>; - - reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; - reg = <0x10>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - label = "cpu"; - ethernet = <&gmac1>; - phy-mode = "rgmii"; - fixed-link { - speed = 1000; - full-duplex; - }; - }; - - port@1 { - reg = <1>; - label = "lan1"; - phy-handle = <&phy_port1>; - }; - - port@2 { - reg = <2>; - label = "lan2"; - phy-handle = <&phy_port2>; - }; - - port@3 { - reg = <3>; - label = "lan3"; - phy-handle = <&phy_port3>; - }; - - port@4 { - reg = <4>; - label = "lan4"; - phy-handle = <&phy_port4>; - }; - - port@5 { - reg = <5>; - label = "wan"; - phy-handle = <&phy_port5>; - }; - }; - }; - }; - -for the internal master mdio-bus configuration: - - &mdio0 { - switch@10 { - compatible = "qca,qca8337"; - #address-cells = <1>; - #size-cells = <0>; - - reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; - reg = <0x10>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - label = "cpu"; - ethernet = <&gmac1>; - phy-mode = "rgmii"; - fixed-link { - speed = 1000; - full-duplex; - }; - }; - - port@1 { - reg = <1>; - label = "lan1"; - phy-mode = "internal"; - phy-handle = <&phy_port1>; - }; - - port@2 { - reg = <2>; - label = "lan2"; - phy-mode = "internal"; - phy-handle = <&phy_port2>; - }; - - port@3 { - reg = <3>; - label = "lan3"; - phy-mode = "internal"; - phy-handle = <&phy_port3>; - }; - - port@4 { - reg = <4>; - label = "lan4"; - phy-mode = "internal"; - phy-handle = <&phy_port4>; - }; - - port@5 { - reg = <5>; - label = "wan"; - phy-mode = "internal"; - phy-handle = <&phy_port5>; - }; - }; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - - phy_port1: phy@0 { - reg = <0>; - }; - - phy_port2: phy@1 { - reg = <1>; - }; - - phy_port3: phy@2 { - reg = <2>; - }; - - phy_port4: phy@3 { - reg = <3>; - }; - - phy_port5: phy@4 { - reg = <4>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml new file mode 100644 index 00000000000000..48de0ace265da9 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml @@ -0,0 +1,362 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/dsa/qca8k.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Atheros QCA83xx switch family + +maintainers: + - John Crispin + +description: + If the QCA8K switch is connect to an SoC's external mdio-bus, each subnode + describing a port needs to have a valid phandle referencing the internal PHY + it is connected to. This is because there is no N:N mapping of port and PHY + ID. To declare the internal mdio-bus configuration, declare an MDIO node in + the switch node and declare the phandle for the port, referencing the internal + PHY it is connected to. In this config, an internal mdio-bus is registered and + the MDIO master is used for communication. Mixed external and internal + mdio-bus configurations are not supported by the hardware. 
+ +properties: + compatible: + oneOf: + - enum: + - qca,qca8327 + - qca,qca8328 + - qca,qca8334 + - qca,qca8337 + description: | + qca,qca8328: referenced as AR8328(N)-AK1(A/B) QFN 176 pin package + qca,qca8327: referenced as AR8327(N)-AL1A DR-QFN 148 pin package + qca,qca8334: referenced as QCA8334-AL3C QFN 88 pin package + qca,qca8337: referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package + + reg: + maxItems: 1 + + reset-gpios: + description: + GPIO to be used to reset the whole device + maxItems: 1 + + qca,ignore-power-on-sel: + $ref: /schemas/types.yaml#/definitions/flag + description: + Ignore power-on pin strapping to configure LED open-drain or EEPROM + presence. This is needed for devices with incorrect configuration or when + the OEM has decided not to use pin strapping and falls back to SW regs. + + qca,led-open-drain: + $ref: /schemas/types.yaml#/definitions/flag + description: + Set LEDs to open-drain mode. This requires the qca,ignore-power-on-sel to + be set, otherwise the driver will fail at probe. This is required if the + OEM does not use pin strapping to set this mode and prefers to set it + using SW regs. The pin strappings related to LED open-drain mode are + B68 on the QCA832x and B49 on the QCA833x. + + mdio: + type: object + description: Qca8k switch have an internal mdio to access switch port. + If this is not present, the legacy mapping is used and the + internal mdio access is used. + With the legacy mapping the reg corresponding to the internal + mdio is the switch reg with an offset of -1. + + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + + patternProperties: + "^(ethernet-)?phy@[0-4]$": + type: object + + allOf: + - $ref: "http://devicetree.org/schemas/net/mdio.yaml#" + + properties: + reg: + maxItems: 1 + + required: + - reg + +patternProperties: + "^(ethernet-)?ports$": + type: object + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + + patternProperties: + "^(ethernet-)?port@[0-6]$": + type: object + description: Ethernet switch ports + + properties: + reg: + description: Port number + + label: + description: + Describes the label associated with this port, which will become + the netdev name + $ref: /schemas/types.yaml#/definitions/string + + link: + description: + Should be a list of phandles to other switch's DSA port. This + port is used as the outgoing port towards the phandle ports. The + full routing information must be given, not just the one hop + routes to neighbouring switches + $ref: /schemas/types.yaml#/definitions/phandle-array + + ethernet: + description: + Should be a phandle to a valid Ethernet device node. This host + device is what the switch port is connected to + $ref: /schemas/types.yaml#/definitions/phandle + + phy-handle: true + + phy-mode: true + + fixed-link: true + + mac-address: true + + sfp: true + + qca,sgmii-rxclk-falling-edge: + $ref: /schemas/types.yaml#/definitions/flag + description: + Set the receive clock phase to falling edge. Mostly commonly used on + the QCA8327 with CPU port 0 set to SGMII. + + qca,sgmii-txclk-falling-edge: + $ref: /schemas/types.yaml#/definitions/flag + description: + Set the transmit clock phase to falling edge. + + qca,sgmii-enable-pll: + $ref: /schemas/types.yaml#/definitions/flag + description: + For SGMII CPU port, explicitly enable PLL, TX and RX chain along with + Signal Detection. On the QCA8327 this should not be enabled, otherwise + the SGMII port will not initialize. 
When used on the QCA8337, revision 3 + or greater, a warning will be displayed. When the CPU port is set to + SGMII on the QCA8337, it is advised to set this unless a communication + issue is observed. + + required: + - reg + + additionalProperties: false + +oneOf: + - required: + - ports + - required: + - ethernet-ports + +required: + - compatible + - reg + +additionalProperties: true + +examples: + - | + #include + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + external_phy_port1: ethernet-phy@0 { + reg = <0>; + }; + + external_phy_port2: ethernet-phy@1 { + reg = <1>; + }; + + external_phy_port3: ethernet-phy@2 { + reg = <2>; + }; + + external_phy_port4: ethernet-phy@3 { + reg = <3>; + }; + + external_phy_port5: ethernet-phy@4 { + reg = <4>; + }; + + switch@10 { + compatible = "qca,qca8337"; + #address-cells = <1>; + #size-cells = <0>; + reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; + reg = <0x10>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "rgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + port@1 { + reg = <1>; + label = "lan1"; + phy-handle = <&external_phy_port1>; + }; + + port@2 { + reg = <2>; + label = "lan2"; + phy-handle = <&external_phy_port2>; + }; + + port@3 { + reg = <3>; + label = "lan3"; + phy-handle = <&external_phy_port3>; + }; + + port@4 { + reg = <4>; + label = "lan4"; + phy-handle = <&external_phy_port4>; + }; + + port@5 { + reg = <5>; + label = "wan"; + phy-handle = <&external_phy_port5>; + }; + }; + }; + }; + - | + #include + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch@10 { + compatible = "qca,qca8337"; + #address-cells = <1>; + #size-cells = <0>; + reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; + reg = <0x10>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "rgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + port@1 { + reg = <1>; + label = "lan1"; + phy-mode = "internal"; + phy-handle = <&internal_phy_port1>; + }; + + port@2 { + reg = <2>; + label = "lan2"; + phy-mode = "internal"; + phy-handle = <&internal_phy_port2>; + }; + + port@3 { + reg = <3>; + label = "lan3"; + phy-mode = "internal"; + phy-handle = <&internal_phy_port3>; + }; + + port@4 { + reg = <4>; + label = "lan4"; + phy-mode = "internal"; + phy-handle = <&internal_phy_port4>; + }; + + port@5 { + reg = <5>; + label = "wan"; + phy-mode = "internal"; + phy-handle = <&internal_phy_port5>; + }; + + port@6 { + reg = <0>; + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "sgmii"; + + qca,sgmii-rxclk-falling-edge; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + internal_phy_port1: ethernet-phy@0 { + reg = <0>; + }; + + internal_phy_port2: ethernet-phy@1 { + reg = <1>; + }; + + internal_phy_port3: ethernet-phy@2 { + reg = <2>; + }; + + internal_phy_port4: ethernet-phy@3 { + reg = <3>; + }; + + internal_phy_port5: ethernet-phy@4 { + reg = <4>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt index b6ae8541bd5598..7959ec237983be 100644 --- a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt +++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt @@ -9,6 +9,7 @@ SMI-based Realtek devices. 
Required properties: - compatible: must be exactly one of: + "realtek,rtl8365mb" (4+1 ports) "realtek,rtl8366" "realtek,rtl8366rb" (4+1 ports) "realtek,rtl8366s" (4+1 ports) @@ -62,6 +63,8 @@ and subnodes of DSA switches. Examples: +An example for the RTL8366RB: + switch { compatible = "realtek,rtl8366rb"; /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */ @@ -151,3 +154,87 @@ switch { }; }; }; + +An example for the RTL8365MB-VC: + +switch { + compatible = "realtek,rtl8365mb"; + mdc-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>; + mdio-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + + switch_intc: interrupt-controller { + interrupt-parent = <&gpio5>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + port@0 { + reg = <0>; + label = "swp0"; + phy-handle = <ðphy0>; + }; + port@1 { + reg = <1>; + label = "swp1"; + phy-handle = <ðphy1>; + }; + port@2 { + reg = <2>; + label = "swp2"; + phy-handle = <ðphy2>; + }; + port@3 { + reg = <3>; + label = "swp3"; + phy-handle = <ðphy3>; + }; + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&fec1>; + phy-mode = "rgmii"; + tx-internal-delay-ps = <2000>; + rx-internal-delay-ps = <2000>; + + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; + + mdio { + compatible = "realtek,smi-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: phy@0 { + reg = <0>; + interrupt-parent = <&switch_intc>; + interrupts = <0>; + }; + ethphy1: phy@1 { + reg = <1>; + interrupt-parent = <&switch_intc>; + interrupts = <1>; + }; + ethphy2: phy@2 { + reg = <2>; + interrupt-parent = <&switch_intc>; + interrupts = <2>; + }; + ethphy3: phy@3 { + reg = <3>; + interrupt-parent = <&switch_intc>; + interrupts = <3>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml new file mode 100644 index 00000000000000..437502c5ca9626 --- /dev/null +++ b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/lantiq,etop-xway.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lantiq Xway ETOP Ethernet driver + +maintainers: + - John Crispin + +properties: + $nodename: + pattern: "^ethernet@[0-9a-f]+$" + + compatible: + const: lantiq,etop-xway + + reg: + maxItems: 1 + + interrupts: + items: + - description: TX interrupt + - description: RX interrupt + + interrupt-names: + items: + - const: tx + - const: rx + + lantiq,tx-burst-length: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + TX programmable burst length. + enum: [2, 4, 8] + + lantiq,rx-burst-length: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + RX programmable burst length. 
+ enum: [2, 4, 8] + + phy-mode: true + +required: + - compatible + - reg + - interrupt-parent + - interrupts + - interrupt-names + - lantiq,tx-burst-length + - lantiq,rx-burst-length + - phy-mode + +additionalProperties: false + +examples: + - | + ethernet@e180000 { + compatible = "lantiq,etop-xway"; + reg = <0xe180000 0x40000>; + interrupt-parent = <&icu0>; + interrupts = <73>, <78>; + interrupt-names = "tx", "rx"; + lantiq,tx-burst-length = <8>; + lantiq,rx-burst-length = <8>; + phy-mode = "rmii"; + }; diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt deleted file mode 100644 index 5ff5e68bbbb691..00000000000000 --- a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt +++ /dev/null @@ -1,21 +0,0 @@ -Lantiq xRX200 GSWIP PMAC Ethernet driver -================================== - -Required properties: - -- compatible : "lantiq,xrx200-net" for the PMAC of the embedded - : GSWIP in the xXR200 -- reg : memory range of the PMAC core inside of the GSWIP core -- interrupts : TX and RX DMA interrupts. Use interrupt-names "tx" for - : the TX interrupt and "rx" for the RX interrupt. - -Example: - -ethernet@e10b308 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "lantiq,xrx200-net"; - reg = <0xe10b308 0xcf8>; - interrupts = <73>, <72>; - interrupt-names = "tx", "rx"; -}; diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml new file mode 100644 index 00000000000000..7bc074a4236963 --- /dev/null +++ b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/lantiq,xrx200-net.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lantiq xRX200 GSWIP PMAC Ethernet driver + +maintainers: + - Hauke Mehrtens + +properties: + $nodename: + pattern: "^ethernet@[0-9a-f]+$" + + compatible: + const: lantiq,xrx200-net + + reg: + maxItems: 1 + + interrupts: + items: + - description: TX interrupt + - description: RX interrupt + + interrupt-names: + items: + - const: tx + - const: rx + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +required: + - compatible + - reg + - interrupt-parent + - interrupts + - interrupt-names + - "#address-cells" + - "#size-cells" + +additionalProperties: false + +examples: + - | + ethernet@e10b308 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-net"; + reg = <0xe10b308 0xcf8>; + interrupt-parent = <&icu0>; + interrupts = <73>, <72>; + interrupt-names = "tx", "rx"; + }; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index af9df2f01a1ce7..a1b06fd1962e4d 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -30,6 +30,10 @@ Required properties: Optional elements: 'tsu_clk' - clocks: Phandles to input clocks. +Optional properties: +- mdio: node containing PHY children. If this node is not present, then PHYs + will be direct children. + The MAC address will be determined using the optional properties defined in ethernet.txt. 
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml index 948677ade6d16c..d7748dd331994b 100644 --- a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml +++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml @@ -51,6 +51,9 @@ examples: switch@10 { compatible = "qca,qca8337"; reg = <0x10>; - /* ... */ + + ports { + /* ... */ + }; }; }; diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml index c101a1ec846ea8..06b38c9bc6ec38 100644 --- a/Documentation/devicetree/bindings/net/renesas,ether.yaml +++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml @@ -100,15 +100,18 @@ additionalProperties: false examples: # Lager board - | - #include - #include + #include + #include + #include + #include ethernet@ee700000 { compatible = "renesas,ether-r8a7790", "renesas,rcar-gen2-ether"; reg = <0xee700000 0x400>; - interrupt-parent = <&gic>; - interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&mstp8_clks R8A7790_CLK_ETHER>; + interrupts = ; + clocks = <&cpg CPG_MOD 813>; + power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; + resets = <&cpg 813>; phy-mode = "rmii"; phy-handle = <&phy1>; renesas,ether-link-active-low; @@ -116,8 +119,12 @@ examples: #size-cells = <0>; phy1: ethernet-phy@1 { + compatible = "ethernet-phy-id0022.1537", + "ethernet-phy-ieee802.3-c22"; reg = <1>; interrupt-parent = <&irqc0>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + micrel,led-mode = <1>; + reset-gpios = <&gpio5 31 GPIO_ACTIVE_LOW>; }; }; diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml index 4c927d2c17d35d..bda821065a2b63 100644 --- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml +++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml @@ -287,6 +287,7 @@ examples: "ch13", "ch14", "ch15", "ch16", "ch17", "ch18", "ch19", "ch20", "ch21", "ch22", "ch23", "ch24"; clocks = <&cpg CPG_MOD 812>; + clock-names = "fck"; iommus = <&ipmmu_ds0 16>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 812>; @@ -298,6 +299,8 @@ examples: #size-cells = <0>; phy0: ethernet-phy@0 { + compatible = "ethernet-phy-id0022.1622", + "ethernet-phy-ieee802.3-c22"; rxc-skew-ps = <1500>; reg = <0>; interrupt-parent = <&gpio2>; diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml index 8a03a24a2019b9..6bc61c42418f70 100644 --- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml +++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml @@ -24,6 +24,7 @@ properties: - socionext,uniphier-ld11-ave4 - socionext,uniphier-ld20-ave4 - socionext,uniphier-pxs3-ave4 + - socionext,uniphier-nx1-ave4 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml index 3e2c2e43175e5a..1489d3c1cd6ecc 100644 --- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml +++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml @@ -47,6 +47,11 @@ properties: ieee80211-freq-limit: true + mediatek,eeprom-data: + $ref: /schemas/types.yaml#/definitions/uint32-array + description: + EEPROM data embedded as array. 
+ mediatek,mtd-eeprom: $ref: /schemas/types.yaml#/definitions/phandle-array description: diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt deleted file mode 100644 index aaaeeb5f935bc8..00000000000000 --- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt +++ /dev/null @@ -1,48 +0,0 @@ -* Qualcomm Atheros ath9k wireless devices - -This node provides properties for configuring the ath9k wireless device. The -node is expected to be specified as a child node of the PCI controller to -which the wireless chip is connected. - -Required properties: -- compatible: For PCI and PCIe devices this should be an identifier following - the format as defined in "PCI Bus Binding to Open Firmware" - Revision 2.1. One of the possible formats is "pciVVVV,DDDD" - where VVVV is the PCI vendor ID and DDDD is PCI device ID. - Typically QCA's PCI vendor ID 168c is used while the PCI device - ID depends on the chipset - see the following (possibly - incomplete) list: - - 0023 for AR5416 - - 0024 for AR5418 - - 0027 for AR9160 - - 0029 for AR9220 and AR9223 - - 002a for AR9280 and AR9283 - - 002b for AR9285 - - 002c for AR2427 - - 002d for AR9227 - - 002e for AR9287 - - 0030 for AR9380, AR9381 and AR9382 - - 0032 for AR9485 - - 0033 for AR9580 and AR9590 - - 0034 for AR9462 - - 0036 for AR9565 - - 0037 for AR9485 -- reg: Address and length of the register set for the device. - -Optional properties: -- qca,no-eeprom: Indicates that there is no physical EEPROM connected to the - ath9k wireless chip (in this case the calibration / - EEPROM data will be loaded from userspace using the - kernel firmware loader). - -The MAC address will be determined using the optional properties defined in -net/ethernet.txt. - -In this example, the node is defined as child node of the PCI controller: -&pci0 { - wifi@168c,002d { - compatible = "pci168c,002d"; - reg = <0x7000 0 0 0 0x1000>; - qca,no-eeprom; - }; -}; diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml new file mode 100644 index 00000000000000..8cd0adbf7021f8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/wireless/qca,ath9k.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Atheros ath9k wireless devices Generic Binding + +maintainers: + - Kalle Valo + +description: | + This node provides properties for configuring the ath9k wireless device. + The node is expected to be specified as a child node of the PCI controller + to which the wireless chip is connected. 
+ +allOf: + - $ref: ieee80211.yaml# + +properties: + compatible: + enum: + - pci168c,0023 # AR5416 + - pci168c,0024 # AR5418 + - pci168c,0027 # AR9160 + - pci168c,0029 # AR9220 and AR9223 + - pci168c,002a # AR9280 and AR9283 + - pci168c,002b # AR9285 + - pci168c,002c # AR2427 - 802.11n bonded out + - pci168c,002d # AR9227 + - pci168c,002e # AR9287 + - pci168c,0030 # AR9380, AR9381 and AR9382 + - pci168c,0032 # AR9485 + - pci168c,0033 # AR9580 and AR9590 + - pci168c,0034 # AR9462 + - pci168c,0036 # AR9565 + - pci168c,0037 # AR1111 and AR9485 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + ieee80211-freq-limit: true + + qca,no-eeprom: + $ref: /schemas/types.yaml#/definitions/flag + description: + Indicates that there is no physical EEPROM connected + + nvmem-cells: + items: + - description: Reference to an nvmem node for the MAC address + - description: Reference to an nvmem node for calibration data + + nvmem-cell-names: + items: + - const: mac-address + - const: calibration + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + pcie0 { + #address-cells = <3>; + #size-cells = <2>; + wifi@0,0 { + compatible = "pci168c,002d"; + reg = <0 0 0 0 0>; + interrupts = <3>; + qca,no-eeprom; + }; + }; + - | + pci0 { + #address-cells = <3>; + #size-cells = <2>; + wifi@0,11 { + compatible = "pci168c,0029"; + reg = <0x8800 0 0 0 0>; + nvmem-cells = <&macaddr_art_c>, <&cal_art_1000>; + nvmem-cell-names = "mac-address", "calibration"; + }; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 952553759ac42d..0aa9e7676fcff7 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -131,6 +131,8 @@ patternProperties: description: Asahi Kasei Corp. "^asc,.*": description: All Sensors Corporation + "^asix,.*": + description: ASIX Electronics Corporation "^aspeed,.*": description: ASPEED Technology Inc. "^asus,.*": diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst index 4b59cf2c599f79..5edf50d7dbd527 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst @@ -543,6 +543,8 @@ The CR-space dump uses vsc interface which is valid even if the FW command interface is not functional, which is the case in most FW fatal errors. The recover function runs recover flow which reloads the driver and triggers fw reset if needed. +On firmware error, the health buffer is dumped into the dmesg. The log +level is derived from the error's severity (given in health buffer). User commands examples: @@ -700,3 +702,61 @@ Eswitch QoS tracepoints: $ cat /sys/kernel/debug/tracing/trace ... <...>-27418 [006] .... 76547.187258: mlx5_esw_group_qos_destroy: (0000:82:00.0) group=000000007b576bb3 tsar_ix=1 + +SF tracepoints: + +- mlx5_sf_add: trace addition of the SF port:: + + $ echo mlx5:mlx5_sf_add >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + devlink-9363 [031] ..... 24610.188722: mlx5_sf_add: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000 sfnum=88 + +- mlx5_sf_free: trace freeing of the SF port:: + + $ echo mlx5:mlx5_sf_free >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + devlink-9830 [038] ..... 
26300.404749: mlx5_sf_free: (0000:06:00.0) port_index=32768 controller=0 hw_id=0x8000 + +- mlx5_sf_hwc_alloc: trace allocating of the hardware SF context:: + + $ echo mlx5:mlx5_sf_hwc_alloc >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + devlink-9775 [031] ..... 26296.385259: mlx5_sf_hwc_alloc: (0000:06:00.0) controller=0 hw_id=0x8000 sfnum=88 + +- mlx5_sf_hwc_free: trace freeing of the hardware SF context:: + + $ echo mlx5:mlx5_sf_hwc_free >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + kworker/u128:3-9093 [046] ..... 24625.365771: mlx5_sf_hwc_free: (0000:06:00.0) hw_id=0x8000 + +- mlx5_sf_hwc_deferred_free : trace deferred freeing of the hardware SF context:: + + $ echo mlx5:mlx5_sf_hwc_deferred_free >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + devlink-9519 [046] ..... 24624.400271: mlx5_sf_hwc_deferred_free: (0000:06:00.0) hw_id=0x8000 + +- mlx5_sf_vhca_event: trace SF vhca event and state:: + + $ echo mlx5:mlx5_sf_vhca_event >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + kworker/u128:3-9093 [046] ..... 24625.365525: mlx5_sf_vhca_event: (0000:06:00.0) hw_id=0x8000 sfnum=88 vhca_state=1 + +- mlx5_sf_dev_add : trace SF device add event:: + + $ echo mlx5:mlx5_sf_dev_add>> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + kworker/u128:3-9093 [000] ..... 24616.524495: mlx5_sf_dev_add: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88 + +- mlx5_sf_dev_del : trace SF device delete event:: + + $ echo mlx5:mlx5_sf_dev_del >> /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace + ... + kworker/u128:3-9093 [044] ..... 24624.400749: mlx5_sf_dev_del: (0000:06:00.0) sfdev=00000000fc5d96fd aux_id=4 hw_id=0x8000 sfnum=88 diff --git a/Documentation/networking/devlink/bnxt.rst b/Documentation/networking/devlink/bnxt.rst index 3dfd84ccb1c7b2..a4fb27663cd6b1 100644 --- a/Documentation/networking/devlink/bnxt.rst +++ b/Documentation/networking/devlink/bnxt.rst @@ -22,6 +22,8 @@ Parameters - Permanent * - ``msix_vec_per_pf_min`` - Permanent + * - ``enable_remote_dev_reset`` + - Runtime The ``bnxt`` driver also implements the following driver-specific parameters. diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst index 58fe95e9a49dac..f06dca9a1eb641 100644 --- a/Documentation/networking/devlink/devlink-region.rst +++ b/Documentation/networking/devlink/devlink-region.rst @@ -44,8 +44,8 @@ example usage # Show all of the exposed regions with region sizes: $ devlink region show - pci/0000:00:05.0/cr-space: size 1048576 snapshot [1 2] - pci/0000:00:05.0/fw-health: size 64 snapshot [1 2] + pci/0000:00:05.0/cr-space: size 1048576 snapshot [1 2] max 8 + pci/0000:00:05.0/fw-health: size 64 snapshot [1 2] max 8 # Delete a snapshot using: $ devlink region del pci/0000:00:05.0/cr-space snapshot 1 diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst index 5d97cee9457be5..59c78e9717d2bd 100644 --- a/Documentation/networking/devlink/ice.rst +++ b/Documentation/networking/devlink/ice.rst @@ -142,6 +142,10 @@ Users can request an immediate capture of a snapshot via the .. 
code:: shell + $ devlink region show + pci/0000:01:00.0/nvm-flash: size 10485760 snapshot [] max 1 + pci/0000:01:00.0/device-caps: size 4096 snapshot [] max 10 + $ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1 $ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1 diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst index 45b5f8b341df1b..443123772f44d5 100644 --- a/Documentation/networking/devlink/index.rst +++ b/Documentation/networking/devlink/index.rst @@ -47,3 +47,5 @@ parameters, info versions, and other features it supports. ti-cpsw-switch am65-nuss-cpsw-switch prestera + iosm + octeontx2 diff --git a/Documentation/networking/devlink/iosm.rst b/Documentation/networking/devlink/iosm.rst new file mode 100644 index 00000000000000..6136181339aad3 --- /dev/null +++ b/Documentation/networking/devlink/iosm.rst @@ -0,0 +1,162 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================== +iosm devlink support +==================== + +This document describes the devlink features implemented by the ``iosm`` +device driver. + +Parameters +========== + +The ``iosm`` driver implements the following driver-specific parameters. + +.. list-table:: Driver-specific parameters implemented + :widths: 5 5 5 85 + + * - Name + - Type + - Mode + - Description + * - ``erase_full_flash`` + - u8 + - runtime + - erase_full_flash parameter is used to check if full erase is required for + the device during firmware flashing. + If set, Full nand erase command will be sent to the device. By default, + only conditional erase support is enabled. + + +Flash Update +============ + +The ``iosm`` driver implements support for flash update using the +``devlink-flash`` interface. + +It supports updating the device flash using a combined flash image which contains +the Bootloader images and other modem software images. + +The driver uses DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT to identify type of +firmware image that need to be flashed as requested by user space application. +Supported firmware image types. + +.. list-table:: Firmware Image types + :widths: 15 85 + + * - Name + - Description + * - ``PSI RAM`` + - Primary Signed Image + * - ``EBL`` + - External Bootloader + * - ``FLS`` + - Modem Software Image + +PSI RAM and EBL are the RAM images which are injected to the device when the +device is in BOOT ROM stage. Once this is successful, the actual modem firmware +image is flashed to the device. The modem software image contains multiple files +each having one secure bin file and at least one Loadmap/Region file. For flashing +these files, appropriate commands are sent to the modem device along with the +data required for flashing. The data like region count and address of each region +has to be passed to the driver using the devlink param command. + +If the device has to be fully erased before firmware flashing, user application +need to set the erase_full_flash parameter using devlink param command. +By default, conditional erase feature is supported. + +Flash Commands: +=============== +1) When modem is in Boot ROM stage, user can use below command to inject PSI RAM +image using devlink flash command. + +$ devlink dev flash pci/0000:02:00.0 file + +2) If user want to do a full erase, below command need to be issued to set the +erase full flash param (To be set only if full erase required). + +$ devlink dev param set pci/0000:02:00.0 name erase_full_flash value true cmode runtime + +3) Inject EBL after the modem is in PSI stage. 
+ +$ devlink dev flash pci/0000:02:00.0 file + +4) Once EBL is injected successfully, then the actual firmware flashing takes +place. Below is the sequence of commands used for each of the firmware images. + +a) Flash secure bin file. + +$ devlink dev flash pci/0000:02:00.0 file + +b) Flashing the Loadmap/Region file + +$ devlink dev flash pci/0000:02:00.0 file + +Regions +======= + +The ``iosm`` driver supports dumping the coredump logs. + +In case a firmware encounters an exception, a snapshot will be taken by the +driver. Following regions are accessed for device internal data. + +.. list-table:: Regions implemented + :widths: 15 85 + + * - Name + - Description + * - ``report.json`` + - The summary of exception details logged as part of this region. + * - ``coredump.fcd`` + - This region contains the details related to the exception occurred in the + device (RAM dump). + * - ``cdd.log`` + - This region contains the logs related to the modem CDD driver. + * - ``eeprom.bin`` + - This region contains the eeprom logs. + * - ``bootcore_trace.bin`` + - This region contains the current instance of bootloader logs. + * - ``bootcore_prev_trace.bin`` + - This region contains the previous instance of bootloader logs. + + +Region commands +=============== + +$ devlink region show + +$ devlink region new pci/0000:02:00.0/report.json + +$ devlink region dump pci/0000:02:00.0/report.json snapshot 0 + +$ devlink region del pci/0000:02:00.0/report.json snapshot 0 + +$ devlink region new pci/0000:02:00.0/coredump.fcd + +$ devlink region dump pci/0000:02:00.0/coredump.fcd snapshot 1 + +$ devlink region del pci/0000:02:00.0/coredump.fcd snapshot 1 + +$ devlink region new pci/0000:02:00.0/cdd.log + +$ devlink region dump pci/0000:02:00.0/cdd.log snapshot 2 + +$ devlink region del pci/0000:02:00.0/cdd.log snapshot 2 + +$ devlink region new pci/0000:02:00.0/eeprom.bin + +$ devlink region dump pci/0000:02:00.0/eeprom.bin snapshot 3 + +$ devlink region del pci/0000:02:00.0/eeprom.bin snapshot 3 + +$ devlink region new pci/0000:02:00.0/bootcore_trace.bin + +$ devlink region dump pci/0000:02:00.0/bootcore_trace.bin snapshot 4 + +$ devlink region del pci/0000:02:00.0/bootcore_trace.bin snapshot 4 + +$ devlink region new pci/0000:02:00.0/bootcore_prev_trace.bin + +$ devlink region dump pci/0000:02:00.0/bootcore_prev_trace.bin snapshot 5 + +$ devlink region del pci/0000:02:00.0/bootcore_prev_trace.bin snapshot 5 diff --git a/Documentation/networking/devlink/octeontx2.rst b/Documentation/networking/devlink/octeontx2.rst new file mode 100644 index 00000000000000..610de99b728a8a --- /dev/null +++ b/Documentation/networking/devlink/octeontx2.rst @@ -0,0 +1,42 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================= +octeontx2 devlink support +========================= + +This document describes the devlink features implemented by the ``octeontx2 AF, PF and VF`` +device drivers. + +Parameters +========== + +The ``octeontx2 PF and VF`` drivers implement the following driver-specific parameters. + +.. list-table:: Driver-specific parameters implemented + :widths: 5 5 5 85 + + * - Name + - Type + - Mode + - Description + * - ``mcam_count`` + - u16 + - runtime + - Select number of match CAM entries to be allocated for an interface. + The same is used for ntuple filters of the interface. Supported by + PF and VF drivers. + +The ``octeontx2 AF`` driver implements the following driver-specific parameters. + +.. 
list-table:: Driver-specific parameters implemented + :widths: 5 5 5 85 + + * - Name + - Type + - Mode + - Description + * - ``dwrr_mtu`` + - u32 + - runtime + - Use to set the quantum which hardware uses for scheduling among transmit queues. + Hardware uses weighted DWRR algorithm to schedule among all transmit queues. diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index d9b55b7a1a4d33..7b598c7e391299 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -41,6 +41,11 @@ In the message structure descriptions below, if an attribute name is suffixed with "+", parent nest can contain multiple attributes of the same type. This implements an array of entries. +Attributes that need to be filled-in by device drivers and that are dumped to +user space based on whether they are valid or not should not use zero as a +valid value. This avoids the need to explicitly signal the validity of the +attribute in the device driver API. + Request header ============== @@ -179,7 +184,7 @@ according to message purpose: Userspace to kernel: - ===================================== ================================ + ===================================== ================================= ``ETHTOOL_MSG_STRSET_GET`` get string set ``ETHTOOL_MSG_LINKINFO_GET`` get link settings ``ETHTOOL_MSG_LINKINFO_SET`` set link settings @@ -213,7 +218,9 @@ Userspace to kernel: ``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET`` get standard statistics ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info - ===================================== ================================ + ``ETHTOOL_MSG_MODULE_SET`` set transceiver module parameters + ``ETHTOOL_MSG_MODULE_GET`` get transceiver module parameters + ===================================== ================================= Kernel to userspace: @@ -252,6 +259,7 @@ Kernel to userspace: ``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info + ``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters ======================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -520,6 +528,8 @@ Link extended states: power required from cable or module ``ETHTOOL_LINK_EXT_STATE_OVERHEAT`` The module is overheated + + ``ETHTOOL_LINK_EXT_STATE_MODULE`` Transceiver module issue ================================================ ============================================ Link extended substates: @@ -613,6 +623,14 @@ Link extended substates: ``ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE`` Cable test failure =================================================== ============================================ + Transceiver module issue substates: + + =================================================== ============================================ + ``ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY`` The CMIS Module State Machine did not reach + the ModuleReady state. 
For example, if the + module is stuck at ModuleFault state + =================================================== ============================================ + DEBUG_GET ========= @@ -1521,6 +1539,63 @@ Kernel response contents: ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array ==================================== ====== ========================== +MODULE_GET +========== + +Gets transceiver module parameters. + +Request contents: + + ===================================== ====== ========================== + ``ETHTOOL_A_MODULE_HEADER`` nested request header + ===================================== ====== ========================== + +Kernel response contents: + + ====================================== ====== ========================== + ``ETHTOOL_A_MODULE_HEADER`` nested reply header + ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` u8 power mode policy + ``ETHTOOL_A_MODULE_POWER_MODE`` u8 operational power mode + ====================================== ====== ========================== + +The optional ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` attribute encodes the +transceiver module power mode policy enforced by the host. The default policy +is driver-dependent, but "auto" is the recommended default and it should be +implemented by new drivers and drivers where conformance to a legacy behavior +is not critical. + +The optional ``ETHTHOOL_A_MODULE_POWER_MODE`` attribute encodes the operational +power mode policy of the transceiver module. It is only reported when a module +is plugged-in. Possible values are: + +.. kernel-doc:: include/uapi/linux/ethtool.h + :identifiers: ethtool_module_power_mode + +MODULE_SET +========== + +Sets transceiver module parameters. + +Request contents: + + ====================================== ====== ========================== + ``ETHTOOL_A_MODULE_HEADER`` nested request header + ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` u8 power mode policy + ====================================== ====== ========================== + +When set, the optional ``ETHTOOL_A_MODULE_POWER_MODE_POLICY`` attribute is used +to set the transceiver module power policy enforced by the host. Possible +values are: + +.. kernel-doc:: include/uapi/linux/ethtool.h + :identifiers: ethtool_module_power_mode_policy + +For SFF-8636 modules, low power mode is forced by the host according to table +6-10 in revision 2.10a of the specification. + +For CMIS modules, low power mode is forced by the host according to table 6-12 +in revision 5.0 of the specification. + Request translation =================== @@ -1620,4 +1695,6 @@ are netlink only. n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET`` n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` + n/a ``ETHTOOL_MSG_MODULE_GET`` + n/a ``ETHTOOL_MSG_MODULE_SET`` =================================== ===================================== diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index d91ab28718d493..c61cc0219f4c1c 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -989,14 +989,6 @@ tcp_challenge_ack_limit - INTEGER in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) Default: 1000 -tcp_rx_skb_cache - BOOLEAN - Controls a per TCP socket cache of one skb, that might help - performance of some workloads. This might be dangerous - on systems with a lot of TCP sockets, since it increases - memory usage. 
- - Default: 0 (disabled) - UDP variables ============= @@ -1619,6 +1611,15 @@ arp_accept - BOOLEAN gratuitous arp frame, the arp table will be updated regardless if this setting is on or off. +arp_evict_nocarrier - BOOLEAN + Clears the ARP cache on NOCARRIER events. This option is important for + wireless devices where the ARP cache should not be cleared when roaming + between access points on the same network. In most cases this should + remain as the default (1). + + - 1 - (default): Clear the ARP cache on NOCARRIER events + - 0 - Do not clear ARP cache on NOCARRIER events + mcast_solicit - INTEGER The maximum number of multicast probes in INCOMPLETE state, when the associated hardware address is unknown. Defaults @@ -2349,6 +2350,15 @@ ndisc_tclass - INTEGER * 0 - (default) +ndisc_evict_nocarrier - BOOLEAN + Clears the neighbor discovery table on NOCARRIER events. This option is + important for wireless devices where the neighbor discovery cache should + not be cleared when roaming between access points on the same network. + In most cases this should remain as the default (1). + + - 1 - (default): Clear neighbor discover cache on NOCARRIER events. + - 0 - Do not clear neighbor discovery cache on NOCARRIER events. + mldv1_unsolicited_report_interval - INTEGER The interval in milliseconds in which the next unsolicited MLDv1 report retransmit will take place. diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst index 2afccc63856ee0..95ef56d62077f7 100644 --- a/Documentation/networking/ipvs-sysctl.rst +++ b/Documentation/networking/ipvs-sysctl.rst @@ -300,3 +300,14 @@ sync_version - INTEGER Kernels with this sync_version entry are able to receive messages of both version 1 and version 2 of the synchronisation protocol. + +run_estimation - BOOLEAN + 0 - disabled + not 0 - enabled (default) + + If disabled, the estimation will be stop, and you can't see + any update on speed estimation data. + + You can always re-enable estimation by setting this value to 1. + But be careful, the first estimation after re-enable is not + accurate. diff --git a/Documentation/networking/mctp.rst b/Documentation/networking/mctp.rst index fa7730dbf7b914..46f74bffce0f4a 100644 --- a/Documentation/networking/mctp.rst +++ b/Documentation/networking/mctp.rst @@ -211,3 +211,62 @@ remote address is already known, or the message does not require a reply. Like the send calls, sockets will only receive responses to requests they have sent (TO=1) and may only respond (TO=0) to requests they have received. + +Kernel internals +================ + +There are a few possible packet flows in the MCTP stack: + +1. local TX to remote endpoint, message <= MTU:: + + sendmsg() + -> mctp_local_output() + : route lookup + -> rt->output() (== mctp_route_output) + -> dev_queue_xmit() + +2. local TX to remote endpoint, message > MTU:: + + sendmsg() + -> mctp_local_output() + -> mctp_do_fragment_route() + : creates packet-sized skbs. For each new skb: + -> rt->output() (== mctp_route_output) + -> dev_queue_xmit() + +3. remote TX to local endpoint, single-packet message:: + + mctp_pkttype_receive() + : route lookup + -> rt->output() (== mctp_route_input) + : sk_key lookup + -> sock_queue_rcv_skb() + +4. 
remote TX to local endpoint, multiple-packet message:: + + mctp_pkttype_receive() + : route lookup + -> rt->output() (== mctp_route_input) + : sk_key lookup + : stores skb in struct sk_key->reasm_head + + mctp_pkttype_receive() + : route lookup + -> rt->output() (== mctp_route_input) + : sk_key lookup + : finds existing reassembly in sk_key->reasm_head + : appends new fragment + -> sock_queue_rcv_skb() + +Key refcounts +------------- + + * keys are refed by: + + - a skb: during route output, stored in ``skb->cb``. + + - netns and sock lists. + + * keys can be associated with a device, in which case they hold a + reference to the dev (set through ``key->dev``, counted through + ``dev->key_count``). Multiple keys can reference the device. diff --git a/MAINTAINERS b/MAINTAINERS index dcc1819ec84b4e..f96aa662ee324d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1026,6 +1026,14 @@ S: Maintained F: Documentation/devicetree/bindings/iio/light/ams,as73211.yaml F: drivers/iio/light/as73211.c +AMT (Automatic Multicast Tunneling) +M: Taehee Yoo +L: netdev@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git +F: drivers/net/amt.c + ANALOG DEVICES INC AD7192 DRIVER M: Alexandru Tachici L: linux-iio@vger.kernel.org @@ -2908,6 +2916,12 @@ S: Maintained F: Documentation/hwmon/asc7621.rst F: drivers/hwmon/asc7621.c +ASIX AX88796C SPI ETHERNET ADAPTER +M: Ɓukasz Stelmach +S: Maintained +F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml +F: drivers/net/ethernet/asix/ax88796c_* + ASPEED PINCTRL DRIVERS M: Andrew Jeffery L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) @@ -3438,6 +3452,7 @@ S: Supported F: arch/arm64/net/ BPF JIT for MIPS (32-BIT AND 64-BIT) +M: Johan Almbladh M: Paul Burton L: netdev@vger.kernel.org L: bpf@vger.kernel.org @@ -7034,7 +7049,6 @@ F: drivers/net/mdio/fwnode_mdio.c F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ -F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h F: include/linux/mdio/*.h @@ -7046,6 +7060,7 @@ F: include/linux/platform_data/mdio-gpio.h F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h +F: net/core/of_net.c EXFAT FILE SYSTEM M: Namjae Jeon @@ -11878,7 +11893,9 @@ F: drivers/mmc/host/mtk-sd.c MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau M: Lorenzo Bianconi -R: Ryder Lee +M: Ryder Lee +R: Shayne Chen +R: Sean Wang L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ @@ -13109,6 +13126,7 @@ F: include/linux/dsa/ F: include/linux/platform_data/dsa.h F: include/net/dsa.h F: net/dsa/ +F: tools/testing/selftests/drivers/net/dsa/ NETWORKING [GENERAL] M: "David S. 
Miller" @@ -15530,6 +15548,7 @@ M: ath9k-devel@qca.qualcomm.com L: linux-wireless@vger.kernel.org S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k +F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml F: drivers/net/wireless/ath/ath9k/ QUALCOMM CAMERA SUBSYSTEM DRIVER @@ -15951,6 +15970,12 @@ L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/realtek/rtw88/ +REALTEK WIRELESS DRIVER (rtw89) +M: Ping-Ke Shih +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/realtek/rtw89/ + REDPINE WIRELESS DRIVER M: Amitkumar Karwar M: Siva Rebbagondla diff --git a/Makefile b/Makefile index d6520b79776888..8cf2afe005cca1 100644 --- a/Makefile +++ b/Makefile @@ -480,6 +480,8 @@ LZ4 = lz4c XZ = xz ZSTD = zstd +PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh) + CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) NOSTDINC_FLAGS := @@ -534,6 +536,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL +export PAHOLE_FLAGS # Files to ignore in find ... statements diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 1dd9baf4a6c218..284d28755b8de6 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -131,6 +131,8 @@ #define SO_BUF_LOCK 72 +#define SO_RESERVE_MEM 73 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a903b26cde4097..eeb6dc0ecf4639 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1882,11 +1882,6 @@ static int validate_code(struct jit_ctx *ctx) return 0; } -void bpf_jit_compile(struct bpf_prog *prog) -{ - /* Nothing to do here. We support Internal BPF. */ -} - bool bpf_jit_needs_zext(void) { return true; diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index 79e55421cfb18a..1a5d1e8eb4c808 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -200,7 +200,7 @@ static struct net_device * __init nfeth_probe(int unit) dev->irq = nfEtherIRQ; dev->netdev_ops = &nfeth_netdev_ops; - memcpy(dev->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(dev, mac); priv = netdev_priv(dev); priv->ethX = unit; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 95fc3566986067..def1844d10d3d1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -56,7 +56,6 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS select HAVE_CONTEXT_TRACKING select HAVE_TIF_NOHZ select HAVE_C_RECORDMCOUNT @@ -64,7 +63,10 @@ config MIPS select HAVE_DEBUG_STACKOVERFLOW select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE - select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 + select HAVE_EBPF_JIT if !CPU_MICROMIPS && \ + !CPU_DADDI_WORKAROUNDS && \ + !CPU_R4000_WORKAROUNDS && \ + !CPU_R4400_WORKAROUNDS select HAVE_EXIT_THREAD select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD @@ -1211,15 +1213,6 @@ config SYS_SUPPORTS_RELOCATABLE The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF to allow access to command line and entropy sources. 
-config MIPS_CBPF_JIT - def_bool y - depends on BPF_JIT && HAVE_CBPF_JIT - -config MIPS_EBPF_JIT - def_bool y - depends on BPF_JIT && HAVE_EBPF_JIT - - # # Endianness selection. Sufficiently obscure so many users don't know what to # answer,so we try hard to limit the available choices. Also the use of a diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h index 8218a1356bd8bf..31ca9151b539d5 100644 --- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h +++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h @@ -45,6 +45,6 @@ extern void ltq_dma_close(struct ltq_dma_channel *ch); extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch); extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch); extern void ltq_dma_free(struct ltq_dma_channel *ch); -extern void ltq_dma_init_port(int p); +extern void ltq_dma_init_port(int p, int tx_burst, int rx_burst); #endif diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index f7effca791a50f..296bcf31abb571 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -145,6 +145,7 @@ Ip_u1(_mtlo); Ip_u3u1u2(_mul); Ip_u1u2(_multu); Ip_u3u1u2(_mulu); +Ip_u3u1u2(_muhu); Ip_u3u1u2(_nor); Ip_u3u1u2(_or); Ip_u2u1u3(_ori); @@ -248,7 +249,11 @@ static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \ #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off) #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3) #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b) +#ifdef CONFIG_CPU_NOP_WORKAROUNDS +#define uasm_i_nop(buf) uasm_i_or(buf, 1, 1, 0) +#else #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0) +#endif #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1) static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 1eaf6a1ca56105..24e0efb360f672 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -142,6 +142,8 @@ #define SO_BUF_LOCK 72 +#define SO_RESERVE_MEM 73 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 63dccb2ed08b28..f8eedeb15f1829 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,7 @@ #define LTQ_DMA_PCTRL 0x44 #define LTQ_DMA_IRNEN 0xf4 +#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */ #define DMA_DESCPT BIT(3) /* descriptor complete irq */ #define DMA_TX BIT(8) /* TX channel direction */ #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ @@ -39,8 +41,11 @@ #define DMA_IRQ_ACK 0x7e /* IRQ status register */ #define DMA_POLL BIT(31) /* turn on channel polling */ #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ -#define DMA_2W_BURST BIT(1) /* 2 word burst length */ -#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ +#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */ +#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */ +#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */ +#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */ +#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */ #define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */ #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ @@ -177,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch) EXPORT_SYMBOL_GPL(ltq_dma_free); void -ltq_dma_init_port(int p) +ltq_dma_init_port(int p, int 
tx_burst, int rx_burst) { ltq_dma_w32(p, LTQ_DMA_PS); switch (p) { @@ -186,15 +191,44 @@ ltq_dma_init_port(int p) * Tell the DMA engine to swap the endianness of data frames and * drop packets if the channel arbitration fails. */ - ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN, + ltq_dma_w32_mask(0, (DMA_ETOP_ENDIANNESS | DMA_PDEN), LTQ_DMA_PCTRL); break; - case DMA_PORT_DEU: - ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), + default: + break; + } + + switch (rx_burst) { + case 8: + ltq_dma_w32_mask(0x0c, (DMA_PCTRL_8W_BURST << DMA_RX_BURST_SHIFT), + LTQ_DMA_PCTRL); + break; + case 4: + ltq_dma_w32_mask(0x0c, (DMA_PCTRL_4W_BURST << DMA_RX_BURST_SHIFT), + LTQ_DMA_PCTRL); + break; + case 2: + ltq_dma_w32_mask(0x0c, (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT), LTQ_DMA_PCTRL); break; + default: + break; + } + switch (tx_burst) { + case 8: + ltq_dma_w32_mask(0x30, (DMA_PCTRL_8W_BURST << DMA_TX_BURST_SHIFT), + LTQ_DMA_PCTRL); + break; + case 4: + ltq_dma_w32_mask(0x30, (DMA_PCTRL_4W_BURST << DMA_TX_BURST_SHIFT), + LTQ_DMA_PCTRL); + break; + case 2: + ltq_dma_w32_mask(0x30, (DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT), + LTQ_DMA_PCTRL); + break; default: break; } @@ -206,7 +240,7 @@ ltq_dma_init(struct platform_device *pdev) { struct clk *clk; struct resource *res; - unsigned id; + unsigned int id, nchannels; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -222,21 +256,24 @@ ltq_dma_init(struct platform_device *pdev) clk_enable(clk); ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); + usleep_range(1, 10); + /* disable all interrupts */ ltq_dma_w32(0, LTQ_DMA_IRNEN); /* reset/configure each channel */ - for (i = 0; i < DMA_MAX_CHANNEL; i++) { + id = ltq_dma_r32(LTQ_DMA_ID); + nchannels = ((id & DMA_ID_CHNR) >> 20); + for (i = 0; i < nchannels; i++) { ltq_dma_w32(i, LTQ_DMA_CS); ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); } - id = ltq_dma_r32(LTQ_DMA_ID); dev_info(&pdev->dev, "Init done - hw rev: %X, ports: %d, channels: %d\n", - id & 0x1f, (id >> 16) & 0xf, id >> 20); + id & 0x1f, (id >> 16) & 0xf, nchannels); return 0; } diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 7154a1d99aad61..e15c6700cd0884 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -90,7 +90,7 @@ static const struct insn insn_table[insn_invalid] = { RS | RT | RD}, [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, - [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), + [insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op), RS | RT | RD}, [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, @@ -150,6 +150,8 @@ static const struct insn insn_table[insn_invalid] = { [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), RS | RT | RD}, + [insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op), + RS | RT | RD}, #ifndef CONFIG_CPU_MIPSR6 [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, #else diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 81dd226d6b6bf0..125140979d62c5 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -59,7 +59,7 @@ enum opcode { insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, 
insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, - insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor, + insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, @@ -344,6 +344,7 @@ I_u1(_mtlo) I_u3u1u2(_mul) I_u1u2(_multu) I_u3u1u2(_mulu) +I_u3u1u2(_muhu) I_u3u1u2(_nor) I_u3u1u2(_or) I_u2u1u3(_ori) diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile index d55912349039ca..e3e6ae6514e84b 100644 --- a/arch/mips/net/Makefile +++ b/arch/mips/net/Makefile @@ -1,5 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only # MIPS networking code -obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o -obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o + +ifeq ($(CONFIG_32BIT),y) + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o +else + obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o +endif diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c deleted file mode 100644 index cb6d22439f71b0..00000000000000 --- a/arch/mips/net/bpf_jit.c +++ /dev/null @@ -1,1299 +0,0 @@ -/* - * Just-In-Time compiler for BPF filters on MIPS - * - * Copyright (c) 2014 Imagination Technologies Ltd. - * Author: Markos Chandras - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2 of the License. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bpf_jit.h" - -/* ABI - * r_skb_hl SKB header length - * r_data SKB data pointer - * r_off Offset - * r_A BPF register A - * r_X BPF register X - * r_skb *skb - * r_M *scratch memory - * r_skb_len SKB length - * - * On entry (*bpf_func)(*skb, *filter) - * a0 = MIPS_R_A0 = skb; - * a1 = MIPS_R_A1 = filter; - * - * Stack - * ... - * M[15] - * M[14] - * M[13] - * ... - * M[0] <-- r_M - * saved reg k-1 - * saved reg k-2 - * ... 
- * saved reg 0 <-- r_sp - * - * - * Packet layout - * - * <--------------------- len ------------------------> - * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------> - * ---------------------------------------------------- - * | skb->data | - * ---------------------------------------------------- - */ - -#define ptr typeof(unsigned long) - -#define SCRATCH_OFF(k) (4 * (k)) - -/* JIT flags */ -#define SEEN_CALL (1 << BPF_MEMWORDS) -#define SEEN_SREG_SFT (BPF_MEMWORDS + 1) -#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT) -#define SEEN_SREG(x) (SEEN_SREG_BASE << (x)) -#define SEEN_OFF SEEN_SREG(2) -#define SEEN_A SEEN_SREG(3) -#define SEEN_X SEEN_SREG(4) -#define SEEN_SKB SEEN_SREG(5) -#define SEEN_MEM SEEN_SREG(6) -/* SEEN_SK_DATA also implies skb_hl an skb_len */ -#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0)) - -/* Arguments used by JIT */ -#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ - -#define SBIT(x) (1 << (x)) /* Signed version of BIT() */ - -/** - * struct jit_ctx - JIT context - * @skf: The sk_filter - * @prologue_bytes: Number of bytes for prologue - * @idx: Instruction index - * @flags: JIT flags - * @offsets: Instruction offsets - * @target: Memory location for the compiled filter - */ -struct jit_ctx { - const struct bpf_prog *skf; - unsigned int prologue_bytes; - u32 idx; - u32 flags; - u32 *offsets; - u32 *target; -}; - - -static inline int optimize_div(u32 *k) -{ - /* power of 2 divides can be implemented with right shift */ - if (!(*k & (*k-1))) { - *k = ilog2(*k); - return 1; - } - - return 0; -} - -static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx); - -/* Simply emit the instruction if the JIT memory space has been allocated */ -#define emit_instr(ctx, func, ...) \ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - uasm_i_##func(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ -} while (0) - -/* - * Similar to emit_instr but it must be used when we need to emit - * 32-bit or 64-bit instructions - */ -#define emit_long_instr(ctx, func, ...) 
\ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - UASM_i_##func(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ -} while (0) - -/* Determine if immediate is within the 16-bit signed range */ -static inline bool is_range16(s32 imm) -{ - return !(imm >= SBIT(15) || imm < -SBIT(15)); -} - -static inline void emit_addu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, addu, dst, src1, src2); -} - -static inline void emit_nop(struct jit_ctx *ctx) -{ - emit_instr(ctx, nop); -} - -/* Load a u32 immediate to a register */ -static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - /* addiu can only handle s16 */ - if (!is_range16(imm)) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); - p = &ctx->target[ctx->idx + 1]; - uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff); - } else { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_addiu(&p, dst, r_zero, imm); - } - } - ctx->idx++; - - if (!is_range16(imm)) - ctx->idx++; -} - -static inline void emit_or(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, or, dst, src1, src2); -} - -static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, - struct jit_ctx *ctx) -{ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_or(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, ori, dst, src, imm); - } -} - -static inline void emit_daddiu(unsigned int dst, unsigned int src, - int imm, struct jit_ctx *ctx) -{ - /* - * Only used for stack, so the imm is relatively small - * and it fits in 15-bits - */ - emit_instr(ctx, daddiu, dst, src, imm); -} - -static inline void emit_addiu(unsigned int dst, unsigned int src, - u32 imm, struct jit_ctx *ctx) -{ - if (!is_range16(imm)) { - emit_load_imm(r_tmp, imm, ctx); - emit_addu(dst, r_tmp, src, ctx); - } else { - emit_instr(ctx, addiu, dst, src, imm); - } -} - -static inline void emit_and(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, and, dst, src1, src2); -} - -static inline void emit_andi(unsigned int dst, unsigned int src, - u32 imm, struct jit_ctx *ctx) -{ - /* If imm does not fit in u16 then load it to register */ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_and(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, andi, dst, src, imm); - } -} - -static inline void emit_xor(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, xor, dst, src1, src2); -} - -static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) -{ - /* If imm does not fit in u16 then load it to register */ - if (imm >= BIT(16)) { - emit_load_imm(r_tmp, imm, ctx); - emit_xor(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, xori, dst, src, imm); - } -} - -static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) -{ - emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); -} - -static inline void emit_subu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, subu, dst, src1, src2); -} - -static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) -{ - emit_subu(reg, r_zero, reg, ctx); -} - -static inline void emit_sllv(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, sllv, dst, src, sa); -} - -static inline void emit_sll(unsigned int dst, unsigned int src, - unsigned int sa, 
struct jit_ctx *ctx) -{ - /* sa is 5-bits long */ - if (sa >= BIT(5)) - /* Shifting >= 32 results in zero */ - emit_jit_reg_move(dst, r_zero, ctx); - else - emit_instr(ctx, sll, dst, src, sa); -} - -static inline void emit_srlv(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, srlv, dst, src, sa); -} - -static inline void emit_srl(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - /* sa is 5-bits long */ - if (sa >= BIT(5)) - /* Shifting >= 32 results in zero */ - emit_jit_reg_move(dst, r_zero, ctx); - else - emit_instr(ctx, srl, dst, src, sa); -} - -static inline void emit_slt(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, slt, dst, src1, src2); -} - -static inline void emit_sltu(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, sltu, dst, src1, src2); -} - -static inline void emit_sltiu(unsigned dst, unsigned int src, - unsigned int imm, struct jit_ctx *ctx) -{ - /* 16 bit immediate */ - if (!is_range16((s32)imm)) { - emit_load_imm(r_tmp, imm, ctx); - emit_sltu(dst, src, r_tmp, ctx); - } else { - emit_instr(ctx, sltiu, dst, src, imm); - } - -} - -/* Store register on the stack */ -static inline void emit_store_stack_reg(ptr reg, ptr base, - unsigned int offset, - struct jit_ctx *ctx) -{ - emit_long_instr(ctx, SW, reg, offset, base); -} - -static inline void emit_store(ptr reg, ptr base, unsigned int offset, - struct jit_ctx *ctx) -{ - emit_instr(ctx, sw, reg, offset, base); -} - -static inline void emit_load_stack_reg(ptr reg, ptr base, - unsigned int offset, - struct jit_ctx *ctx) -{ - emit_long_instr(ctx, LW, reg, offset, base); -} - -static inline void emit_load(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lw, reg, offset, base); -} - -static inline void emit_load_byte(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lb, reg, offset, base); -} - -static inline void emit_half_load(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lh, reg, offset, base); -} - -static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base, - unsigned int offset, struct jit_ctx *ctx) -{ - emit_instr(ctx, lhu, reg, offset, base); -} - -static inline void emit_mul(unsigned int dst, unsigned int src1, - unsigned int src2, struct jit_ctx *ctx) -{ - emit_instr(ctx, mul, dst, src1, src2); -} - -static inline void emit_div(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_divu(&p, dst, src); - p = &ctx->target[ctx->idx + 1]; - uasm_i_mflo(&p, dst); - } - ctx->idx += 2; /* 2 insts */ -} - -static inline void emit_mod(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - uasm_i_divu(&p, dst, src); - p = &ctx->target[ctx->idx + 1]; - uasm_i_mfhi(&p, dst); - } - ctx->idx += 2; /* 2 insts */ -} - -static inline void emit_dsll(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, dsll, dst, src, sa); -} - -static inline void emit_dsrl32(unsigned int dst, unsigned int src, - unsigned int sa, struct jit_ctx *ctx) -{ - emit_instr(ctx, dsrl32, dst, src, sa); -} - -static inline void emit_wsbh(unsigned int dst, unsigned int src, - struct jit_ctx *ctx) -{ - emit_instr(ctx, 
wsbh, dst, src); -} - -/* load pointer to register */ -static inline void emit_load_ptr(unsigned int dst, unsigned int src, - int imm, struct jit_ctx *ctx) -{ - /* src contains the base addr of the 32/64-pointer */ - emit_long_instr(ctx, LW, dst, imm, src); -} - -/* load a function pointer to register */ -static inline void emit_load_func(unsigned int reg, ptr imm, - struct jit_ctx *ctx) -{ - if (IS_ENABLED(CONFIG_64BIT)) { - /* At this point imm is always 64-bit */ - emit_load_imm(r_tmp, (u64)imm >> 32, ctx); - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ - emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); - emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ - emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); - } else { - emit_load_imm(reg, imm, ctx); - } -} - -/* Move to real MIPS register */ -static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -{ - emit_long_instr(ctx, ADDU, dst, src, r_zero); -} - -/* Move to JIT (32-bit) register */ -static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -{ - emit_addu(dst, src, r_zero, ctx); -} - -/* Compute the immediate value for PC-relative branches. */ -static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) -{ - if (ctx->target == NULL) - return 0; - - /* - * We want a pc-relative branch. We only do forward branches - * so tgt is always after pc. tgt is the instruction offset - * we want to jump to. - - * Branch on MIPS: - * I: target_offset <- sign_extend(offset) - * I+1: PC += target_offset (delay slot) - * - * ctx->idx currently points to the branch instruction - * but the offset is added to the delay slot so we need - * to subtract 4. - */ - return ctx->offsets[tgt] - - (ctx->idx * 4 - ctx->prologue_bytes) - 4; -} - -static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2, - unsigned int imm, struct jit_ctx *ctx) -{ - if (ctx->target != NULL) { - u32 *p = &ctx->target[ctx->idx]; - - switch (cond) { - case MIPS_COND_EQ: - uasm_i_beq(&p, reg1, reg2, imm); - break; - case MIPS_COND_NE: - uasm_i_bne(&p, reg1, reg2, imm); - break; - case MIPS_COND_ALL: - uasm_i_b(&p, imm); - break; - default: - pr_warn("%s: Unhandled branch conditional: %d\n", - __func__, cond); - } - } - ctx->idx++; -} - -static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) -{ - emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); -} - -static inline void emit_jalr(unsigned int link, unsigned int reg, - struct jit_ctx *ctx) -{ - emit_instr(ctx, jalr, link, reg); -} - -static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) -{ - emit_instr(ctx, jr, reg); -} - -static inline u16 align_sp(unsigned int num) -{ - /* Double word alignment for 32-bit, quadword for 64-bit */ - unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 
16 : 8; - num = (num + (align - 1)) & -align; - return num; -} - -static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) -{ - int i = 0, real_off = 0; - u32 sflags, tmp_flags; - - /* Adjust the stack pointer */ - if (offset) - emit_stack_offset(-align_sp(offset), ctx); - - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; - /* sflags is essentially a bitmap */ - while (tmp_flags) { - if ((sflags >> i) & 0x1) { - emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off, - ctx); - real_off += SZREG; - } - i++; - tmp_flags >>= 1; - } - - /* save return address */ - if (ctx->flags & SEEN_CALL) { - emit_store_stack_reg(r_ra, r_sp, real_off, ctx); - real_off += SZREG; - } - - /* Setup r_M leaving the alignment gap if necessary */ - if (ctx->flags & SEEN_MEM) { - if (real_off % (SZREG * 2)) - real_off += SZREG; - emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); - } -} - -static void restore_bpf_jit_regs(struct jit_ctx *ctx, - unsigned int offset) -{ - int i, real_off = 0; - u32 sflags, tmp_flags; - - tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; - /* sflags is a bitmap */ - i = 0; - while (tmp_flags) { - if ((sflags >> i) & 0x1) { - emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off, - ctx); - real_off += SZREG; - } - i++; - tmp_flags >>= 1; - } - - /* restore return address */ - if (ctx->flags & SEEN_CALL) - emit_load_stack_reg(r_ra, r_sp, real_off, ctx); - - /* Restore the sp and discard the scrach memory */ - if (offset) - emit_stack_offset(align_sp(offset), ctx); -} - -static unsigned int get_stack_depth(struct jit_ctx *ctx) -{ - int sp_off = 0; - - - /* How may s* regs do we need to preserved? */ - sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG; - - if (ctx->flags & SEEN_MEM) - sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */ - - if (ctx->flags & SEEN_CALL) - sp_off += SZREG; /* Space for our ra register */ - - return sp_off; -} - -static void build_prologue(struct jit_ctx *ctx) -{ - int sp_off; - - /* Calculate the total offset for the stack pointer */ - sp_off = get_stack_depth(ctx); - save_bpf_jit_regs(ctx, sp_off); - - if (ctx->flags & SEEN_SKB) - emit_reg_move(r_skb, MIPS_R_A0, ctx); - - if (ctx->flags & SEEN_SKB_DATA) { - /* Load packet length */ - emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len), - ctx); - emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len), - ctx); - /* Load the data pointer */ - emit_load_ptr(r_skb_data, r_skb, - offsetof(struct sk_buff, data), ctx); - /* Load the header length */ - emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx); - } - - if (ctx->flags & SEEN_X) - emit_jit_reg_move(r_X, r_zero, ctx); - - /* - * Do not leak kernel data to userspace, we only need to clear - * r_A if it is ever used. In fact if it is never used, we - * will not save/restore it, so clearing it in this case would - * corrupt the state of the caller. - */ - if (bpf_needs_clear_a(&ctx->skf->insns[0]) && - (ctx->flags & SEEN_A)) - emit_jit_reg_move(r_A, r_zero, ctx); -} - -static void build_epilogue(struct jit_ctx *ctx) -{ - unsigned int sp_off; - - /* Calculate the total offset for the stack pointer */ - - sp_off = get_stack_depth(ctx); - restore_bpf_jit_regs(ctx, sp_off); - - /* Return */ - emit_jr(r_ra, ctx); - emit_nop(ctx); -} - -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative : func) : \ - func##_positive) - -static bool is_bad_offset(int b_off) -{ - return b_off > 0x1ffff || b_off < -0x20000; -} - -static int build_body(struct jit_ctx *ctx) -{ - const struct bpf_prog *prog = ctx->skf; - const struct sock_filter *inst; - unsigned int i, off, condt; - u32 k, b_off __maybe_unused; - u8 (*sk_load_func)(unsigned long *skb, int offset); - - for (i = 0; i < prog->len; i++) { - u16 code; - - inst = &(prog->insns[i]); - pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", - __func__, inst->code, inst->jt, inst->jf, inst->k); - k = inst->k; - code = bpf_anc_helper(inst); - - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - switch (code) { - case BPF_LD | BPF_IMM: - /* A <- k ==> li r_A, k */ - ctx->flags |= SEEN_A; - emit_load_imm(r_A, k, ctx); - break; - case BPF_LD | BPF_W | BPF_LEN: - BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4); - /* A <- len ==> lw r_A, offset(skb) */ - ctx->flags |= SEEN_SKB | SEEN_A; - off = offsetof(struct sk_buff, len); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_LD | BPF_MEM: - /* A <- M[k] ==> lw r_A, offset(M) */ - ctx->flags |= SEEN_MEM | SEEN_A; - emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_LD | BPF_W | BPF_ABS: - /* A <- P[k:4] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word); - goto load; - case BPF_LD | BPF_H | BPF_ABS: - /* A <- P[k:2] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half); - goto load; - case BPF_LD | BPF_B | BPF_ABS: - /* A <- P[k:1] */ - sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte); -load: - emit_load_imm(r_off, k, ctx); -load_common: - ctx->flags |= SEEN_CALL | SEEN_OFF | - SEEN_SKB | SEEN_A | SEEN_SKB_DATA; - - emit_load_func(r_s0, (ptr)sk_load_func, ctx); - emit_reg_move(MIPS_R_A0, r_skb, ctx); - emit_jalr(MIPS_R_RA, r_s0, ctx); - /* Load second argument to delay slot */ - emit_reg_move(MIPS_R_A1, r_off, ctx); - /* Check the error value */ - emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx), - ctx); - /* Load return register on DS for failures */ - emit_reg_move(r_ret, r_zero, ctx); - /* Return with error */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_LD | BPF_W | BPF_IND: - /* A <- P[X + k:4] */ - sk_load_func = sk_load_word; - goto load_ind; - case BPF_LD | BPF_H | BPF_IND: - /* A <- P[X + k:2] */ - sk_load_func = sk_load_half; - goto load_ind; - case BPF_LD | BPF_B | BPF_IND: - /* A <- P[X + k:1] */ - sk_load_func = sk_load_byte; -load_ind: - ctx->flags |= SEEN_OFF | SEEN_X; - emit_addiu(r_off, r_X, k, ctx); - goto load_common; - case BPF_LDX | BPF_IMM: - /* X <- k */ - ctx->flags |= SEEN_X; - emit_load_imm(r_X, k, ctx); - break; - case BPF_LDX | BPF_MEM: - /* X <- M[k] */ - ctx->flags |= SEEN_X | SEEN_MEM; - emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_LDX | BPF_W | BPF_LEN: - /* X <- len */ - ctx->flags |= SEEN_X | SEEN_SKB; - off = offsetof(struct sk_buff, len); - emit_load(r_X, r_skb, off, ctx); - break; - case BPF_LDX | BPF_B | BPF_MSH: - /* X <- 4 * (P[k:1] & 0xf) */ - ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB; - /* Load offset to a1 */ - emit_load_func(r_s0, (ptr)sk_load_byte, ctx); - /* - * This may emit two instructions so it may not fit - * in the delay slot. So use a0 in the delay slot. 
- */ - emit_load_imm(MIPS_R_A1, k, ctx); - emit_jalr(MIPS_R_RA, r_s0, ctx); - emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ - /* Check the error value */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx); - emit_reg_move(r_ret, r_zero, ctx); - /* We are good */ - /* X <- P[1:K] & 0xf */ - emit_andi(r_X, r_A, 0xf, ctx); - /* X << 2 */ - emit_b(b_imm(i + 1, ctx), ctx); - emit_sll(r_X, r_X, 2, ctx); /* delay slot */ - break; - case BPF_ST: - /* M[k] <- A */ - ctx->flags |= SEEN_MEM | SEEN_A; - emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_STX: - /* M[k] <- X */ - ctx->flags |= SEEN_MEM | SEEN_X; - emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); - break; - case BPF_ALU | BPF_ADD | BPF_K: - /* A += K */ - ctx->flags |= SEEN_A; - emit_addiu(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_ADD | BPF_X: - /* A += X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_addu(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_K: - /* A -= K */ - ctx->flags |= SEEN_A; - emit_addiu(r_A, r_A, -k, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_X: - /* A -= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_subu(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_MUL | BPF_K: - /* A *= K */ - /* Load K to scratch register before MUL */ - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_mul(r_A, r_A, r_s0, ctx); - break; - case BPF_ALU | BPF_MUL | BPF_X: - /* A *= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_mul(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_DIV | BPF_K: - /* A /= k */ - if (k == 1) - break; - if (optimize_div(&k)) { - ctx->flags |= SEEN_A; - emit_srl(r_A, r_A, k, ctx); - break; - } - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_div(r_A, r_s0, ctx); - break; - case BPF_ALU | BPF_MOD | BPF_K: - /* A %= k */ - if (k == 1) { - ctx->flags |= SEEN_A; - emit_jit_reg_move(r_A, r_zero, ctx); - } else { - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - emit_mod(r_A, r_s0, ctx); - } - break; - case BPF_ALU | BPF_DIV | BPF_X: - /* A /= X */ - ctx->flags |= SEEN_X | SEEN_A; - /* Check if r_X is zero */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); - emit_load_imm(r_ret, 0, ctx); /* delay slot */ - emit_div(r_A, r_X, ctx); - break; - case BPF_ALU | BPF_MOD | BPF_X: - /* A %= X */ - ctx->flags |= SEEN_X | SEEN_A; - /* Check if r_X is zero */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); - emit_load_imm(r_ret, 0, ctx); /* delay slot */ - emit_mod(r_A, r_X, ctx); - break; - case BPF_ALU | BPF_OR | BPF_K: - /* A |= K */ - ctx->flags |= SEEN_A; - emit_ori(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_OR | BPF_X: - /* A |= X */ - ctx->flags |= SEEN_A; - emit_ori(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_XOR | BPF_K: - /* A ^= k */ - ctx->flags |= SEEN_A; - emit_xori(r_A, r_A, k, ctx); - break; - case BPF_ANC | SKF_AD_ALU_XOR_X: - case BPF_ALU | BPF_XOR | BPF_X: - /* A ^= X */ - ctx->flags |= SEEN_A; - emit_xor(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_AND | BPF_K: - /* A &= K */ - ctx->flags |= SEEN_A; - emit_andi(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_AND | BPF_X: - /* A &= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_and(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_LSH | BPF_K: - /* A <<= K */ - ctx->flags |= SEEN_A; - emit_sll(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_LSH 
| BPF_X: - /* A <<= X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_sllv(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_RSH | BPF_K: - /* A >>= K */ - ctx->flags |= SEEN_A; - emit_srl(r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_RSH | BPF_X: - ctx->flags |= SEEN_A | SEEN_X; - emit_srlv(r_A, r_A, r_X, ctx); - break; - case BPF_ALU | BPF_NEG: - /* A = -A */ - ctx->flags |= SEEN_A; - emit_neg(r_A, ctx); - break; - case BPF_JMP | BPF_JA: - /* pc += K */ - b_off = b_imm(i + k + 1, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_JMP | BPF_JEQ | BPF_K: - /* pc += ( A == K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_EQ | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JEQ | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A == X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_EQ | MIPS_COND_X; - goto jmp_cmp; - case BPF_JMP | BPF_JGE | BPF_K: - /* pc += ( A >= K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GE | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JGE | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A >= X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GE | MIPS_COND_X; - goto jmp_cmp; - case BPF_JMP | BPF_JGT | BPF_K: - /* pc += ( A > K ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GT | MIPS_COND_K; - goto jmp_cmp; - case BPF_JMP | BPF_JGT | BPF_X: - ctx->flags |= SEEN_X; - /* pc += ( A > X ) ? pc->jt : pc->jf */ - condt = MIPS_COND_GT | MIPS_COND_X; -jmp_cmp: - /* Greater or Equal */ - if ((condt & MIPS_COND_GE) || - (condt & MIPS_COND_GT)) { - if (condt & MIPS_COND_K) { /* K */ - ctx->flags |= SEEN_A; - emit_sltiu(r_s0, r_A, k, ctx); - } else { /* X */ - ctx->flags |= SEEN_A | - SEEN_X; - emit_sltu(r_s0, r_A, r_X, ctx); - } - /* A < (K|X) ? r_scrach = 1 */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, - ctx); - emit_nop(ctx); - /* A > (K|X) ? scratch = 0 */ - if (condt & MIPS_COND_GT) { - /* Checking for equality */ - ctx->flags |= SEEN_A | SEEN_X; - if (condt & MIPS_COND_K) - emit_load_imm(r_s0, k, ctx); - else - emit_jit_reg_move(r_s0, r_X, - ctx); - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - /* Finally, A > K|X */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - } else { - /* A >= (K|X) so jump */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - } - } else { - /* A == K|X */ - if (condt & MIPS_COND_K) { /* K */ - ctx->flags |= SEEN_A; - emit_load_imm(r_s0, k, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, - ctx); - emit_bcond(MIPS_COND_NE, r_A, r_s0, - b_off, ctx); - emit_nop(ctx); - } else { /* X */ - /* jump true */ - ctx->flags |= SEEN_A | SEEN_X; - b_off = b_imm(i + inst->jt + 1, - ctx); - emit_bcond(MIPS_COND_EQ, r_A, r_X, - b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_NE, r_A, r_X, - b_off, ctx); - emit_nop(ctx); - } - } - break; - case BPF_JMP | BPF_JSET | BPF_K: - ctx->flags |= SEEN_A; - /* pc += (A & K) ? 
pc -> jt : pc -> jf */ - emit_load_imm(r_s1, k, ctx); - emit_and(r_s0, r_A, r_s1, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_JMP | BPF_JSET | BPF_X: - ctx->flags |= SEEN_X | SEEN_A; - /* pc += (A & X) ? pc -> jt : pc -> jf */ - emit_and(r_s0, r_A, r_X, ctx); - /* jump true */ - b_off = b_imm(i + inst->jt + 1, ctx); - emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); - emit_nop(ctx); - /* jump false */ - b_off = b_imm(i + inst->jf + 1, ctx); - emit_b(b_off, ctx); - emit_nop(ctx); - break; - case BPF_RET | BPF_A: - ctx->flags |= SEEN_A; - if (i != prog->len - 1) { - /* - * If this is not the last instruction - * then jump to the epilogue - */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_b(b_off, ctx); - } - emit_reg_move(r_ret, r_A, ctx); /* delay slot */ - break; - case BPF_RET | BPF_K: - /* - * It can emit two instructions so it does not fit on - * the delay slot. - */ - emit_load_imm(r_ret, k, ctx); - if (i != prog->len - 1) { - /* - * If this is not the last instruction - * then jump to the epilogue - */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_b(b_off, ctx); - emit_nop(ctx); - } - break; - case BPF_MISC | BPF_TAX: - /* X = A */ - ctx->flags |= SEEN_X | SEEN_A; - emit_jit_reg_move(r_X, r_A, ctx); - break; - case BPF_MISC | BPF_TXA: - /* A = X */ - ctx->flags |= SEEN_A | SEEN_X; - emit_jit_reg_move(r_A, r_X, ctx); - break; - /* AUX */ - case BPF_ANC | SKF_AD_PROTOCOL: - /* A = ntohs(skb->protocol */ - ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; - BUILD_BUG_ON(sizeof_field(struct sk_buff, - protocol) != 2); - off = offsetof(struct sk_buff, protocol); - emit_half_load(r_A, r_skb, off, ctx); -#ifdef CONFIG_CPU_LITTLE_ENDIAN - /* This needs little endian fixup */ - if (cpu_has_wsbh) { - /* R2 and later have the wsbh instruction */ - emit_wsbh(r_A, r_A, ctx); - } else { - /* Get first byte */ - emit_andi(r_tmp_imm, r_A, 0xff, ctx); - /* Shift it */ - emit_sll(r_tmp, r_tmp_imm, 8, ctx); - /* Get second byte */ - emit_srl(r_tmp_imm, r_A, 8, ctx); - emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); - /* Put everyting together in r_A */ - emit_or(r_A, r_tmp, r_tmp_imm, ctx); - } -#endif - break; - case BPF_ANC | SKF_AD_CPU: - ctx->flags |= SEEN_A | SEEN_OFF; - /* A = current_thread_info()->cpu */ - BUILD_BUG_ON(sizeof_field(struct thread_info, - cpu) != 4); - off = offsetof(struct thread_info, cpu); - /* $28/gp points to the thread_info struct */ - emit_load(r_A, 28, off, ctx); - break; - case BPF_ANC | SKF_AD_IFINDEX: - /* A = skb->dev->ifindex */ - case BPF_ANC | SKF_AD_HATYPE: - /* A = skb->dev->type */ - ctx->flags |= SEEN_SKB | SEEN_A; - off = offsetof(struct sk_buff, dev); - /* Load *dev pointer */ - emit_load_ptr(r_s0, r_skb, off, ctx); - /* error (0) in the delay slot */ - b_off = b_imm(prog->len, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx); - emit_reg_move(r_ret, r_zero, ctx); - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { - BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); - off = offsetof(struct net_device, ifindex); - emit_load(r_A, r_s0, off, ctx); - } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */ - BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); - off = offsetof(struct net_device, type); - emit_half_load_unsigned(r_A, 
r_s0, off, ctx); - } - break; - case BPF_ANC | SKF_AD_MARK: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); - off = offsetof(struct sk_buff, mark); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_RXHASH: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); - off = offsetof(struct sk_buff, hash); - emit_load(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_VLAN_TAG: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(sizeof_field(struct sk_buff, - vlan_tci) != 2); - off = offsetof(struct sk_buff, vlan_tci); - emit_half_load_unsigned(r_A, r_skb, off, ctx); - break; - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: - ctx->flags |= SEEN_SKB | SEEN_A; - emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx); - if (PKT_VLAN_PRESENT_BIT) - emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx); - if (PKT_VLAN_PRESENT_BIT < 7) - emit_andi(r_A, r_A, 1, ctx); - break; - case BPF_ANC | SKF_AD_PKTTYPE: - ctx->flags |= SEEN_SKB; - - emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); - /* Keep only the last 3 bits */ - emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); -#ifdef __BIG_ENDIAN_BITFIELD - /* Get the actual packet type to the lower 3 bits */ - emit_srl(r_A, r_A, 5, ctx); -#endif - break; - case BPF_ANC | SKF_AD_QUEUE: - ctx->flags |= SEEN_SKB | SEEN_A; - BUILD_BUG_ON(sizeof_field(struct sk_buff, - queue_mapping) != 2); - BUILD_BUG_ON(offsetof(struct sk_buff, - queue_mapping) > 0xff); - off = offsetof(struct sk_buff, queue_mapping); - emit_half_load_unsigned(r_A, r_skb, off, ctx); - break; - default: - pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__, - inst->code); - return -1; - } - } - - /* compute offsets only during the first pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - return 0; -} - -void bpf_jit_compile(struct bpf_prog *fp) -{ - struct jit_ctx ctx; - unsigned int alloc_size, tmp_idx; - - if (!bpf_jit_enable) - return; - - memset(&ctx, 0, sizeof(ctx)); - - ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); - if (ctx.offsets == NULL) - return; - - ctx.skf = fp; - - if (build_body(&ctx)) - goto out; - - tmp_idx = ctx.idx; - build_prologue(&ctx); - ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; - /* just to complete the ctx.idx count */ - build_epilogue(&ctx); - - alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(alloc_size); - if (ctx.target == NULL) - goto out; - - /* Clean it */ - memset(ctx.target, 0, alloc_size); - - ctx.idx = 0; - - /* Generate the actual JIT code */ - build_prologue(&ctx); - if (build_body(&ctx)) { - module_memfree(ctx.target); - goto out; - } - build_epilogue(&ctx); - - /* Update the icache */ - flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); - - if (bpf_jit_enable > 1) - /* Dump JIT code */ - bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); - - fp->bpf_func = (void *)ctx.target; - fp->jited = 1; - -out: - kfree(ctx.offsets); -} - -void bpf_jit_free(struct bpf_prog *fp) -{ - if (fp->jited) - module_memfree(fp->bpf_func); - - bpf_prog_unlock_free(fp); -} diff --git a/arch/mips/net/bpf_jit.h b/arch/mips/net/bpf_jit.h deleted file mode 100644 index 166ca06c9da9c9..00000000000000 --- a/arch/mips/net/bpf_jit.h +++ /dev/null @@ -1,81 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Just-In-Time compiler for BPF filters on MIPS - * - * Copyright (c) 2014 Imagination Technologies Ltd. 
- * Author: Markos Chandras - */ - -#ifndef BPF_JIT_MIPS_OP_H -#define BPF_JIT_MIPS_OP_H - -/* Registers used by JIT */ -#define MIPS_R_ZERO 0 -#define MIPS_R_V0 2 -#define MIPS_R_A0 4 -#define MIPS_R_A1 5 -#define MIPS_R_T4 12 -#define MIPS_R_T5 13 -#define MIPS_R_T6 14 -#define MIPS_R_T7 15 -#define MIPS_R_S0 16 -#define MIPS_R_S1 17 -#define MIPS_R_S2 18 -#define MIPS_R_S3 19 -#define MIPS_R_S4 20 -#define MIPS_R_S5 21 -#define MIPS_R_S6 22 -#define MIPS_R_S7 23 -#define MIPS_R_SP 29 -#define MIPS_R_RA 31 - -/* Conditional codes */ -#define MIPS_COND_EQ 0x1 -#define MIPS_COND_GE (0x1 << 1) -#define MIPS_COND_GT (0x1 << 2) -#define MIPS_COND_NE (0x1 << 3) -#define MIPS_COND_ALL (0x1 << 4) -/* Conditionals on X register or K immediate */ -#define MIPS_COND_X (0x1 << 5) -#define MIPS_COND_K (0x1 << 6) - -#define r_ret MIPS_R_V0 - -/* - * Use 2 scratch registers to avoid pipeline interlocks. - * There is no overhead during epilogue and prologue since - * any of the $s0-$s6 registers will only be preserved if - * they are going to actually be used. - */ -#define r_skb_hl MIPS_R_S0 /* skb header length */ -#define r_skb_data MIPS_R_S1 /* skb actual data */ -#define r_off MIPS_R_S2 -#define r_A MIPS_R_S3 -#define r_X MIPS_R_S4 -#define r_skb MIPS_R_S5 -#define r_M MIPS_R_S6 -#define r_skb_len MIPS_R_S7 -#define r_s0 MIPS_R_T4 /* scratch reg 1 */ -#define r_s1 MIPS_R_T5 /* scratch reg 2 */ -#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */ -#define r_tmp MIPS_R_T7 /* No need to preserve this */ -#define r_zero MIPS_R_ZERO -#define r_sp MIPS_R_SP -#define r_ra MIPS_R_RA - -#ifndef __ASSEMBLY__ - -/* Declare ASM helpers */ - -#define DECLARE_LOAD_FUNC(func) \ - extern u8 func(unsigned long *skb, int offset); \ - extern u8 func##_negative(unsigned long *skb, int offset); \ - extern u8 func##_positive(unsigned long *skb, int offset) - -DECLARE_LOAD_FUNC(sk_load_word); -DECLARE_LOAD_FUNC(sk_load_half); -DECLARE_LOAD_FUNC(sk_load_byte); - -#endif - -#endif /* BPF_JIT_MIPS_OP_H */ diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S deleted file mode 100644 index 57154c5883b6f8..00000000000000 --- a/arch/mips/net/bpf_jit_asm.S +++ /dev/null @@ -1,285 +0,0 @@ -/* - * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF - * compiler. - * - * Copyright (C) 2015 Imagination Technologies Ltd. - * Author: Markos Chandras - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2 of the License. - */ - -#include -#include -#include -#include "bpf_jit.h" - -/* ABI - * - * r_skb_hl skb header length - * r_skb_data skb data - * r_off(a1) offset register - * r_A BPF register A - * r_X PF register X - * r_skb(a0) *skb - * r_M *scratch memory - * r_skb_le skb length - * r_s0 Scratch register 0 - * r_s1 Scratch register 1 - * - * On entry: - * a0: *skb - * a1: offset (imm or imm + X) - * - * All non-BPF-ABI registers are free for use. On return, we only - * care about r_ret. The BPF-ABI registers are assumed to remain - * unmodified during the entire filter operation. 
- */ - -#define skb a0 -#define offset a1 -#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ - - /* We know better :) so prevent assembler reordering etc */ - .set noreorder - -#define is_offset_negative(TYPE) \ - /* If offset is negative we have more work to do */ \ - slti t0, offset, 0; \ - bgtz t0, bpf_slow_path_##TYPE##_neg; \ - /* Be careful what follows in DS. */ - -#define is_offset_in_header(SIZE, TYPE) \ - /* Reading from header? */ \ - addiu $r_s0, $r_skb_hl, -SIZE; \ - slt t0, $r_s0, offset; \ - bgtz t0, bpf_slow_path_##TYPE; \ - -LEAF(sk_load_word) - is_offset_negative(word) -FEXPORT(sk_load_word_positive) - is_offset_in_header(4, word) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - .set reorder - lw $r_A, 0(t1) - .set noreorder -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh t0, $r_A - rotr $r_A, t0, 16 -# else - sll t0, $r_A, 24 - srl t1, $r_A, 24 - srl t2, $r_A, 8 - or t0, t0, t1 - andi t2, t2, 0xff00 - andi t1, $r_A, 0xff00 - or t0, t0, t2 - sll t1, t1, 8 - or $r_A, t0, t1 -# endif -#endif - jr $r_ra - move $r_ret, zero - END(sk_load_word) - -LEAF(sk_load_half) - is_offset_negative(half) -FEXPORT(sk_load_half_positive) - is_offset_in_header(2, half) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - lhu $r_A, 0(t1) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh $r_A, $r_A -# else - sll t0, $r_A, 8 - srl t1, $r_A, 8 - andi t0, t0, 0xff00 - or $r_A, t0, t1 -# endif -#endif - jr $r_ra - move $r_ret, zero - END(sk_load_half) - -LEAF(sk_load_byte) - is_offset_negative(byte) -FEXPORT(sk_load_byte_positive) - is_offset_in_header(1, byte) - /* Offset within header boundaries */ - PTR_ADDU t1, $r_skb_data, offset - lbu $r_A, 0(t1) - jr $r_ra - move $r_ret, zero - END(sk_load_byte) - -/* - * call skb_copy_bits: - * (prototype in linux/skbuff.h) - * - * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) - * - * o32 mandates we leave 4 spaces for argument registers in case - * the callee needs to use them. Even though we don't care about - * the argument registers ourselves, we need to allocate that space - * to remain ABI compliant since the callee may want to use that space. - * We also allocate 2 more spaces for $r_ra and our return register (*to). - * - * n64 is a bit different. The *caller* will allocate the space to preserve - * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no - * good reason but it does not matter that much really. - * - * (void *to) is returned in r_s0 - * - */ -#ifdef CONFIG_CPU_LITTLE_ENDIAN -#define DS_OFFSET(SIZE) (4 * SZREG) -#else -#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) -#endif -#define bpf_slow_path_common(SIZE) \ - /* Quick check. Are we within reasonable boundaries? */ \ - LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ - sltu $r_s0, offset, $r_s1; \ - beqz $r_s0, fault; \ - /* Load 4th argument in DS */ \ - LONG_ADDIU a3, zero, SIZE; \ - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ - PTR_LA t0, skb_copy_bits; \ - PTR_S $r_ra, (5 * SZREG)($r_sp); \ - /* Assign low slot to a2 */ \ - PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ - jalr t0; \ - /* Reset our destination slot (DS but it's ok) */ \ - INT_S zero, (4 * SZREG)($r_sp); \ - /* \ - * skb_copy_bits returns 0 on success and -EFAULT \ - * on error. Our data live in a2. Do not bother with \ - * our data if an error has been returned. 
\ - */ \ - /* Restore our frame */ \ - PTR_L $r_ra, (5 * SZREG)($r_sp); \ - INT_L $r_s0, (4 * SZREG)($r_sp); \ - bltz v0, fault; \ - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ - move $r_ret, zero; \ - -NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) - bpf_slow_path_common(4) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - wsbh t0, $r_s0 - jr $r_ra - rotr $r_A, t0, 16 -# else - sll t0, $r_s0, 24 - srl t1, $r_s0, 24 - srl t2, $r_s0, 8 - or t0, t0, t1 - andi t2, t2, 0xff00 - andi t1, $r_s0, 0xff00 - or t0, t0, t2 - sll t1, t1, 8 - jr $r_ra - or $r_A, t0, t1 -# endif -#else - jr $r_ra - move $r_A, $r_s0 -#endif - - END(bpf_slow_path_word) - -NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) - bpf_slow_path_common(2) -#ifdef CONFIG_CPU_LITTLE_ENDIAN -# if MIPS_ISA_REV >= 2 - jr $r_ra - wsbh $r_A, $r_s0 -# else - sll t0, $r_s0, 8 - andi t1, $r_s0, 0xff00 - andi t0, t0, 0xff00 - srl t1, t1, 8 - jr $r_ra - or $r_A, t0, t1 -# endif -#else - jr $r_ra - move $r_A, $r_s0 -#endif - - END(bpf_slow_path_half) - -NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) - bpf_slow_path_common(1) - jr $r_ra - move $r_A, $r_s0 - - END(bpf_slow_path_byte) - -/* - * Negative entry points - */ - .macro bpf_is_end_of_data - li t0, SKF_LL_OFF - /* Reading link layer data? */ - slt t1, offset, t0 - bgtz t1, fault - /* Be careful what follows in DS. */ - .endm -/* - * call skb_copy_bits: - * (prototype in linux/filter.h) - * - * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, - * int k, unsigned int size) - * - * see above (bpf_slow_path_common) for ABI restrictions - */ -#define bpf_negative_common(SIZE) \ - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ - PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ - PTR_S $r_ra, (5 * SZREG)($r_sp); \ - jalr t0; \ - li a2, SIZE; \ - PTR_L $r_ra, (5 * SZREG)($r_sp); \ - /* Check return pointer */ \ - beqz v0, fault; \ - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ - /* Preserve our pointer */ \ - move $r_s0, v0; \ - /* Set return value */ \ - move $r_ret, zero; \ - -bpf_slow_path_word_neg: - bpf_is_end_of_data -NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) - bpf_negative_common(4) - jr $r_ra - lw $r_A, 0($r_s0) - END(sk_load_word_negative) - -bpf_slow_path_half_neg: - bpf_is_end_of_data -NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) - bpf_negative_common(2) - jr $r_ra - lhu $r_A, 0($r_s0) - END(sk_load_half_negative) - -bpf_slow_path_byte_neg: - bpf_is_end_of_data -NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) - bpf_negative_common(1) - jr $r_ra - lbu $r_A, 0($r_s0) - END(sk_load_byte_negative) - -fault: - jr $r_ra - addiu $r_ret, zero, 1 diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c new file mode 100644 index 00000000000000..b17130d510d49a --- /dev/null +++ b/arch/mips/net/bpf_jit_comp.c @@ -0,0 +1,1034 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Just-In-Time compiler for eBPF bytecode on MIPS. + * Implementation of JIT functions common to 32-bit and 64-bit CPUs. + * + * Copyright (c) 2021 Anyfi Networks AB. + * Author: Johan Almbladh + * + * Based on code and ideas from + * Copyright (c) 2017 Cavium, Inc. + * Copyright (c) 2017 Shubham Bansal + * Copyright (c) 2011 Mircea Gherzan + */ + +/* + * Code overview + * ============= + * + * - bpf_jit_comp.h + * Common definitions and utilities. + * + * - bpf_jit_comp.c + * Implementation of JIT top-level logic and exported JIT API functions. + * Implementation of internal operations shared by 32-bit and 64-bit code. 
+ * JMP and ALU JIT control code, register control code, shared ALU and
+ * JMP/JMP32 JIT operations.
+ *
+ * - bpf_jit_comp32.c
+ * Implementation of functions to JIT prologue, epilogue and a single eBPF
+ * instruction for 32-bit MIPS CPUs. The functions use shared operations
+ * where possible, and implement the rest for 32-bit MIPS such as ALU64
+ * operations.
+ *
+ * - bpf_jit_comp64.c
+ * Ditto, for 64-bit MIPS CPUs.
+ *
+ * Zero and sign extension
+ * =======================
+ * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension,
+ * but the eBPF instruction set mandates zero extension. We let the verifier
+ * insert explicit zero-extensions after 32-bit ALU operations, both for
+ * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPS
+ * are JITed with sign extensions inserted when so expected.
+ *
+ * ALU operations
+ * ==============
+ * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are
+ * JITed in the following steps. ALU64 operations on 32-bit MIPS are more
+ * complicated and therefore only processed by special implementations in
+ * step (3).
+ *
+ * 1) valid_alu_i:
+ * Determine if an immediate operation can be emitted as such, or if
+ * we must fall back to the register version.
+ *
+ * 2) rewrite_alu_i:
+ * Convert BPF operation and immediate value to a canonical form for
+ * JITing. In some degenerate cases this form may be a no-op.
+ *
+ * 3) emit_alu_{i,i64,r,r64}:
+ * Emit instructions for an ALU or ALU64 immediate or register operation.
+ *
+ * JMP operations
+ * ==============
+ * JMP and JMP32 operations require a JIT instruction offset table for
+ * translating the jump offset. This table is computed by dry-running the
+ * JIT without actually emitting anything. However, the computed PC-relative
+ * offset may overflow the 18-bit offset field width of the native MIPS
+ * branch instruction. In such cases, the long jump is converted into the
+ * following sequence.
+ *
+ *   <branch> !<cond> +2    Inverted PC-relative branch
+ *   nop                    Delay slot
+ *   j <offset>             Unconditional absolute long jump
+ *   nop                    Delay slot
+ *
+ * Since this converted sequence alters the offset table, all offsets must
+ * be re-calculated. This may in turn trigger new branch conversions, so
+ * the process is repeated until no further changes are made. Normally it
+ * completes in 1-2 iterations. If JIT_MAX_ITERATIONS is reached, we
+ * fall back to converting every remaining jump operation. The branch
+ * conversion is independent of how the JMP or JMP32 condition is JITed.
+ *
+ * JMP32 and JMP operations are JITed as follows.
+ *
+ * 1) setup_jmp_{i,r}:
+ * Convert jump conditional and offset into a form that can be JITed.
+ * This form may be a no-op, a canonical form, or an inverted PC-relative
+ * jump if branch conversion is necessary.
+ *
+ * 2) valid_jmp_i:
+ * Determine if an immediate operation can be emitted as such, or if
+ * we must fall back to the register version. Applies to JMP32 for 32-bit
+ * MIPS, and both JMP and JMP32 for 64-bit MIPS.
+ *
+ * 3) emit_jmp_{i,i64,r,r64}:
+ * Emit instructions for a JMP or JMP32 immediate or register operation.
+ *
+ * 4) finish_jmp_{i,r}:
+ * Emit any instructions needed to finish the jump. This includes a nop
+ * for the delay slot if a branch was emitted, and a long absolute jump
+ * if the branch was converted.
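+ *
+ * As a worked example of the ALU steps above: a BPF "dst %= 8" (BPF_MOD
+ * with immediate 8) passes valid_alu_i since 8 is a power of two, is
+ * rewritten by rewrite_alu_i into BPF_AND with immediate 7, and emit_alu_i
+ * then emits a single "andi dst, dst, 7" instruction. Similarly for jumps,
+ * a PC-relative conditional branch can only reach about +/-128 KiB of
+ * JITed code (18-bit signed offset); anything further away is converted
+ * into the inverted-branch-plus-absolute-jump sequence shown above.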
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_jit_comp.h" + +/* Convenience macros for descriptor access */ +#define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT) +#define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT) + +/* + * Push registers on the stack, starting at a given depth from the stack + * pointer and increasing. The next depth to be written is returned. + */ +int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) +{ + int reg; + + for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) + if (mask & BIT(reg)) { + if ((excl & BIT(reg)) == 0) { + if (sizeof(long) == 4) + emit(ctx, sw, reg, depth, MIPS_R_SP); + else /* sizeof(long) == 8 */ + emit(ctx, sd, reg, depth, MIPS_R_SP); + } + depth += sizeof(long); + } + + ctx->stack_used = max((int)ctx->stack_used, depth); + return depth; +} + +/* + * Pop registers from the stack, starting at a given depth from the stack + * pointer and increasing. The next depth to be read is returned. + */ +int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) +{ + int reg; + + for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) + if (mask & BIT(reg)) { + if ((excl & BIT(reg)) == 0) { + if (sizeof(long) == 4) + emit(ctx, lw, reg, depth, MIPS_R_SP); + else /* sizeof(long) == 8 */ + emit(ctx, ld, reg, depth, MIPS_R_SP); + } + depth += sizeof(long); + } + + return depth; +} + +/* Compute the 28-bit jump target address from a BPF program location */ +int get_target(struct jit_context *ctx, u32 loc) +{ + u32 index = INDEX(ctx->descriptors[loc]); + unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index]; + unsigned long addr = (unsigned long)&ctx->target[index]; + + if (!ctx->target) + return 0; + + if ((addr ^ pc) & ~MIPS_JMP_MASK) + return -1; + + return addr & MIPS_JMP_MASK; +} + +/* Compute the PC-relative offset to relative BPF program offset */ +int get_offset(const struct jit_context *ctx, int off) +{ + return (INDEX(ctx->descriptors[ctx->bpf_index + off]) - + ctx->jit_index - 1) * sizeof(u32); +} + +/* dst = imm (register width) */ +void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm) +{ + if (imm >= -0x8000 && imm <= 0x7fff) { + emit(ctx, addiu, dst, MIPS_R_ZERO, imm); + } else { + emit(ctx, lui, dst, (s16)((u32)imm >> 16)); + emit(ctx, ori, dst, dst, (u16)(imm & 0xffff)); + } + clobber_reg(ctx, dst); +} + +/* dst = src (register width) */ +void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src) +{ + emit(ctx, ori, dst, src, 0); + clobber_reg(ctx, dst); +} + +/* Validate ALU immediate range */ +bool valid_alu_i(u8 op, s32 imm) +{ + switch (BPF_OP(op)) { + case BPF_NEG: + case BPF_LSH: + case BPF_RSH: + case BPF_ARSH: + /* All legal eBPF values are valid */ + return true; + case BPF_ADD: + /* imm must be 16 bits */ + return imm >= -0x8000 && imm <= 0x7fff; + case BPF_SUB: + /* -imm must be 16 bits */ + return imm >= -0x7fff && imm <= 0x8000; + case BPF_AND: + case BPF_OR: + case BPF_XOR: + /* imm must be 16 bits unsigned */ + return imm >= 0 && imm <= 0xffff; + case BPF_MUL: + /* imm must be zero or a positive power of two */ + return imm == 0 || (imm > 0 && is_power_of_2(imm)); + case BPF_DIV: + case BPF_MOD: + /* imm must be an 17-bit power of two */ + return (u32)imm <= 0x10000 && is_power_of_2((u32)imm); + } + return false; +} + +/* Rewrite ALU immediate operation */ +bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val) +{ + bool act = true; + + switch (BPF_OP(op)) { + case BPF_LSH: + case BPF_RSH: + case BPF_ARSH: + 
case BPF_ADD: + case BPF_SUB: + case BPF_OR: + case BPF_XOR: + /* imm == 0 is a no-op */ + act = imm != 0; + break; + case BPF_MUL: + if (imm == 1) { + /* dst * 1 is a no-op */ + act = false; + } else if (imm == 0) { + /* dst * 0 is dst & 0 */ + op = BPF_AND; + } else { + /* dst * (1 << n) is dst << n */ + op = BPF_LSH; + imm = ilog2(abs(imm)); + } + break; + case BPF_DIV: + if (imm == 1) { + /* dst / 1 is a no-op */ + act = false; + } else { + /* dst / (1 << n) is dst >> n */ + op = BPF_RSH; + imm = ilog2(imm); + } + break; + case BPF_MOD: + /* dst % (1 << n) is dst & ((1 << n) - 1) */ + op = BPF_AND; + imm--; + break; + } + + *alu = op; + *val = imm; + return act; +} + +/* ALU immediate operation (32-bit) */ +void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op) +{ + switch (BPF_OP(op)) { + /* dst = -dst */ + case BPF_NEG: + emit(ctx, subu, dst, MIPS_R_ZERO, dst); + break; + /* dst = dst & imm */ + case BPF_AND: + emit(ctx, andi, dst, dst, (u16)imm); + break; + /* dst = dst | imm */ + case BPF_OR: + emit(ctx, ori, dst, dst, (u16)imm); + break; + /* dst = dst ^ imm */ + case BPF_XOR: + emit(ctx, xori, dst, dst, (u16)imm); + break; + /* dst = dst << imm */ + case BPF_LSH: + emit(ctx, sll, dst, dst, imm); + break; + /* dst = dst >> imm */ + case BPF_RSH: + emit(ctx, srl, dst, dst, imm); + break; + /* dst = dst >> imm (arithmetic) */ + case BPF_ARSH: + emit(ctx, sra, dst, dst, imm); + break; + /* dst = dst + imm */ + case BPF_ADD: + emit(ctx, addiu, dst, dst, imm); + break; + /* dst = dst - imm */ + case BPF_SUB: + emit(ctx, addiu, dst, dst, -imm); + break; + } + clobber_reg(ctx, dst); +} + +/* ALU register operation (32-bit) */ +void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op) +{ + switch (BPF_OP(op)) { + /* dst = dst & src */ + case BPF_AND: + emit(ctx, and, dst, dst, src); + break; + /* dst = dst | src */ + case BPF_OR: + emit(ctx, or, dst, dst, src); + break; + /* dst = dst ^ src */ + case BPF_XOR: + emit(ctx, xor, dst, dst, src); + break; + /* dst = dst << src */ + case BPF_LSH: + emit(ctx, sllv, dst, dst, src); + break; + /* dst = dst >> src */ + case BPF_RSH: + emit(ctx, srlv, dst, dst, src); + break; + /* dst = dst >> src (arithmetic) */ + case BPF_ARSH: + emit(ctx, srav, dst, dst, src); + break; + /* dst = dst + src */ + case BPF_ADD: + emit(ctx, addu, dst, dst, src); + break; + /* dst = dst - src */ + case BPF_SUB: + emit(ctx, subu, dst, dst, src); + break; + /* dst = dst * src */ + case BPF_MUL: + if (cpu_has_mips32r1 || cpu_has_mips32r6) { + emit(ctx, mul, dst, dst, src); + } else { + emit(ctx, multu, dst, src); + emit(ctx, mflo, dst); + } + break; + /* dst = dst / src */ + case BPF_DIV: + if (cpu_has_mips32r6) { + emit(ctx, divu_r6, dst, dst, src); + } else { + emit(ctx, divu, dst, src); + emit(ctx, mflo, dst); + } + break; + /* dst = dst % src */ + case BPF_MOD: + if (cpu_has_mips32r6) { + emit(ctx, modu, dst, dst, src); + } else { + emit(ctx, divu, dst, src); + emit(ctx, mfhi, dst); + } + break; + } + clobber_reg(ctx, dst); +} + +/* Atomic read-modify-write (32-bit) */ +void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code) +{ + LLSC_sync(ctx); + emit(ctx, ll, MIPS_R_T9, off, dst); + switch (code) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src); + break; + case BPF_AND: + case BPF_AND | BPF_FETCH: + emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src); + break; + case BPF_OR: + case BPF_OR | BPF_FETCH: + emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src); + break; + case BPF_XOR: + case BPF_XOR | BPF_FETCH: 
+ emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src); + break; + case BPF_XCHG: + emit(ctx, move, MIPS_R_T8, src); + break; + } + emit(ctx, sc, MIPS_R_T8, off, dst); + emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset); + emit(ctx, nop); /* Delay slot */ + + if (code & BPF_FETCH) { + emit(ctx, move, src, MIPS_R_T9); + clobber_reg(ctx, src); + } +} + +/* Atomic compare-and-exchange (32-bit) */ +void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off) +{ + LLSC_sync(ctx); + emit(ctx, ll, MIPS_R_T9, off, dst); + emit(ctx, bne, MIPS_R_T9, res, 12); + emit(ctx, move, MIPS_R_T8, src); /* Delay slot */ + emit(ctx, sc, MIPS_R_T8, off, dst); + emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset); + emit(ctx, move, res, MIPS_R_T9); /* Delay slot */ + clobber_reg(ctx, res); +} + +/* Swap bytes and truncate a register word or half word */ +void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width) +{ + u8 tmp = MIPS_R_T8; + u8 msk = MIPS_R_T9; + + switch (width) { + /* Swap bytes in a word */ + case 32: + if (cpu_has_mips32r2 || cpu_has_mips32r6) { + emit(ctx, wsbh, dst, dst); + emit(ctx, rotr, dst, dst, 16); + } else { + emit(ctx, sll, tmp, dst, 16); /* tmp = dst << 16 */ + emit(ctx, srl, dst, dst, 16); /* dst = dst >> 16 */ + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ + + emit(ctx, lui, msk, 0xff); /* msk = 0x00ff0000 */ + emit(ctx, ori, msk, msk, 0xff); /* msk = msk | 0xff */ + + emit(ctx, and, tmp, dst, msk); /* tmp = dst & msk */ + emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ + emit(ctx, srl, dst, dst, 8); /* dst = dst >> 8 */ + emit(ctx, and, dst, dst, msk); /* dst = dst & msk */ + emit(ctx, or, dst, dst, tmp); /* reg = dst | tmp */ + } + break; + /* Swap bytes in a half word */ + case 16: + if (cpu_has_mips32r2 || cpu_has_mips32r6) { + emit(ctx, wsbh, dst, dst); + emit(ctx, andi, dst, dst, 0xffff); + } else { + emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */ + emit(ctx, srl, tmp, tmp, 8); /* t = t >> 8 */ + emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */ + emit(ctx, sll, dst, dst, 8); /* d = d << 8 */ + emit(ctx, or, dst, dst, tmp); /* d = d | t */ + } + break; + } + clobber_reg(ctx, dst); +} + +/* Validate jump immediate range */ +bool valid_jmp_i(u8 op, s32 imm) +{ + switch (op) { + case JIT_JNOP: + /* Immediate value not used */ + return true; + case BPF_JEQ: + case BPF_JNE: + /* No immediate operation */ + return false; + case BPF_JSET: + case JIT_JNSET: + /* imm must be 16 bits unsigned */ + return imm >= 0 && imm <= 0xffff; + case BPF_JGE: + case BPF_JLT: + case BPF_JSGE: + case BPF_JSLT: + /* imm must be 16 bits */ + return imm >= -0x8000 && imm <= 0x7fff; + case BPF_JGT: + case BPF_JLE: + case BPF_JSGT: + case BPF_JSLE: + /* imm + 1 must be 16 bits */ + return imm >= -0x8001 && imm <= 0x7ffe; + } + return false; +} + +/* Invert a conditional jump operation */ +static u8 invert_jmp(u8 op) +{ + switch (op) { + case BPF_JA: return JIT_JNOP; + case BPF_JEQ: return BPF_JNE; + case BPF_JNE: return BPF_JEQ; + case BPF_JSET: return JIT_JNSET; + case BPF_JGT: return BPF_JLE; + case BPF_JGE: return BPF_JLT; + case BPF_JLT: return BPF_JGE; + case BPF_JLE: return BPF_JGT; + case BPF_JSGT: return BPF_JSLE; + case BPF_JSGE: return BPF_JSLT; + case BPF_JSLT: return BPF_JSGE; + case BPF_JSLE: return BPF_JSGT; + } + return 0; +} + +/* Prepare a PC-relative jump operation */ +static void setup_jmp(struct jit_context *ctx, u8 bpf_op, + s16 bpf_off, u8 *jit_op, s32 *jit_off) +{ + u32 *descp = &ctx->descriptors[ctx->bpf_index]; + int op = bpf_op; + int offset = 0; 
+ + /* Do not compute offsets on the first pass */ + if (INDEX(*descp) == 0) + goto done; + + /* Skip jumps never taken */ + if (bpf_op == JIT_JNOP) + goto done; + + /* Convert jumps always taken */ + if (bpf_op == BPF_JA) + *descp |= JIT_DESC_CONVERT; + + /* + * Current ctx->jit_index points to the start of the branch preamble. + * Since the preamble differs among different branch conditionals, + * the current index cannot be used to compute the branch offset. + * Instead, we use the offset table value for the next instruction, + * which gives the index immediately after the branch delay slot. + */ + if (!CONVERTED(*descp)) { + int target = ctx->bpf_index + bpf_off + 1; + int origin = ctx->bpf_index + 1; + + offset = (INDEX(ctx->descriptors[target]) - + INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32); + } + + /* + * The PC-relative branch offset field on MIPS is 18 bits signed, + * so if the computed offset is larger than this we generate a an + * absolute jump that we skip with an inverted conditional branch. + */ + if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) { + offset = 3 * sizeof(u32); + op = invert_jmp(bpf_op); + ctx->changes += !CONVERTED(*descp); + *descp |= JIT_DESC_CONVERT; + } + +done: + *jit_off = offset; + *jit_op = op; +} + +/* Prepare a PC-relative jump operation with immediate conditional */ +void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) +{ + bool always = false; + bool never = false; + + switch (bpf_op) { + case BPF_JEQ: + case BPF_JNE: + break; + case BPF_JSET: + case BPF_JLT: + never = imm == 0; + break; + case BPF_JGE: + always = imm == 0; + break; + case BPF_JGT: + never = (u32)imm == U32_MAX; + break; + case BPF_JLE: + always = (u32)imm == U32_MAX; + break; + case BPF_JSGT: + never = imm == S32_MAX && width == 32; + break; + case BPF_JSGE: + always = imm == S32_MIN && width == 32; + break; + case BPF_JSLT: + never = imm == S32_MIN && width == 32; + break; + case BPF_JSLE: + always = imm == S32_MAX && width == 32; + break; + } + + if (never) + bpf_op = JIT_JNOP; + if (always) + bpf_op = BPF_JA; + setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); +} + +/* Prepare a PC-relative jump operation with register conditional */ +void setup_jmp_r(struct jit_context *ctx, bool same_reg, + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) +{ + switch (bpf_op) { + case BPF_JSET: + break; + case BPF_JEQ: + case BPF_JGE: + case BPF_JLE: + case BPF_JSGE: + case BPF_JSLE: + if (same_reg) + bpf_op = BPF_JA; + break; + case BPF_JNE: + case BPF_JLT: + case BPF_JGT: + case BPF_JSGT: + case BPF_JSLT: + if (same_reg) + bpf_op = JIT_JNOP; + break; + } + setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); +} + +/* Finish a PC-relative jump operation */ +int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off) +{ + /* Emit conditional branch delay slot */ + if (jit_op != JIT_JNOP) + emit(ctx, nop); + /* + * Emit an absolute long jump with delay slot, + * if the PC-relative branch was converted. 
+ */ + if (CONVERTED(ctx->descriptors[ctx->bpf_index])) { + int target = get_target(ctx, ctx->bpf_index + bpf_off + 1); + + if (target < 0) + return -1; + emit(ctx, j, target); + emit(ctx, nop); + } + return 0; +} + +/* Jump immediate (32-bit) */ +void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op) +{ + switch (op) { + /* No-op, used internally for branch optimization */ + case JIT_JNOP: + break; + /* PC += off if dst & imm */ + case BPF_JSET: + emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ + case JIT_JNSET: + emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst > imm */ + case BPF_JGT: + emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst >= imm */ + case BPF_JGE: + emit(ctx, sltiu, MIPS_R_T9, dst, imm); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst < imm */ + case BPF_JLT: + emit(ctx, sltiu, MIPS_R_T9, dst, imm); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst <= imm */ + case BPF_JLE: + emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst > imm (signed) */ + case BPF_JSGT: + emit(ctx, slti, MIPS_R_T9, dst, imm + 1); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst >= imm (signed) */ + case BPF_JSGE: + emit(ctx, slti, MIPS_R_T9, dst, imm); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst < imm (signed) */ + case BPF_JSLT: + emit(ctx, slti, MIPS_R_T9, dst, imm); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst <= imm (signed) */ + case BPF_JSLE: + emit(ctx, slti, MIPS_R_T9, dst, imm + 1); + emit(ctx, bnez, MIPS_R_T9, off); + break; + } +} + +/* Jump register (32-bit) */ +void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op) +{ + switch (op) { + /* No-op, used internally for branch optimization */ + case JIT_JNOP: + break; + /* PC += off if dst == src */ + case BPF_JEQ: + emit(ctx, beq, dst, src, off); + break; + /* PC += off if dst != src */ + case BPF_JNE: + emit(ctx, bne, dst, src, off); + break; + /* PC += off if dst & src */ + case BPF_JSET: + emit(ctx, and, MIPS_R_T9, dst, src); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ + case JIT_JNSET: + emit(ctx, and, MIPS_R_T9, dst, src); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst > src */ + case BPF_JGT: + emit(ctx, sltu, MIPS_R_T9, src, dst); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst >= src */ + case BPF_JGE: + emit(ctx, sltu, MIPS_R_T9, dst, src); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst < src */ + case BPF_JLT: + emit(ctx, sltu, MIPS_R_T9, dst, src); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst <= src */ + case BPF_JLE: + emit(ctx, sltu, MIPS_R_T9, src, dst); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst > src (signed) */ + case BPF_JSGT: + emit(ctx, slt, MIPS_R_T9, src, dst); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst >= src (signed) */ + case BPF_JSGE: + emit(ctx, slt, MIPS_R_T9, dst, src); + emit(ctx, beqz, MIPS_R_T9, off); + break; + /* PC += off if dst < src (signed) */ + case BPF_JSLT: + emit(ctx, slt, MIPS_R_T9, dst, src); + emit(ctx, bnez, MIPS_R_T9, off); + break; + /* PC += off if dst <= src (signed) */ + case BPF_JSLE: + 
emit(ctx, slt, MIPS_R_T9, src, dst); + emit(ctx, beqz, MIPS_R_T9, off); + break; + } +} + +/* Jump always */ +int emit_ja(struct jit_context *ctx, s16 off) +{ + int target = get_target(ctx, ctx->bpf_index + off + 1); + + if (target < 0) + return -1; + emit(ctx, j, target); + emit(ctx, nop); + return 0; +} + +/* Jump to epilogue */ +int emit_exit(struct jit_context *ctx) +{ + int target = get_target(ctx, ctx->program->len); + + if (target < 0) + return -1; + emit(ctx, j, target); + emit(ctx, nop); + return 0; +} + +/* Build the program body from eBPF bytecode */ +static int build_body(struct jit_context *ctx) +{ + const struct bpf_prog *prog = ctx->program; + unsigned int i; + + ctx->stack_used = 0; + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + u32 *descp = &ctx->descriptors[i]; + int ret; + + access_reg(ctx, insn->src_reg); + access_reg(ctx, insn->dst_reg); + + ctx->bpf_index = i; + if (ctx->target == NULL) { + ctx->changes += INDEX(*descp) != ctx->jit_index; + *descp &= JIT_DESC_CONVERT; + *descp |= ctx->jit_index; + } + + ret = build_insn(insn, ctx); + if (ret < 0) + return ret; + + if (ret > 0) { + i++; + if (ctx->target == NULL) + descp[1] = ctx->jit_index; + } + } + + /* Store the end offset, where the epilogue begins */ + ctx->descriptors[prog->len] = ctx->jit_index; + return 0; +} + +/* Set the branch conversion flag on all instructions */ +static void set_convert_flag(struct jit_context *ctx, bool enable) +{ + const struct bpf_prog *prog = ctx->program; + u32 flag = enable ? JIT_DESC_CONVERT : 0; + unsigned int i; + + for (i = 0; i <= prog->len; i++) + ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag; +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + u32 *p; + + /* We are guaranteed to have aligned memory. */ + for (p = area; size >= sizeof(u32); size -= sizeof(u32)) + uasm_i_break(&p, BRK_BUG); /* Increments p */ +} + +bool bpf_jit_needs_zext(void) +{ + return true; +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header = NULL; + struct jit_context ctx; + bool tmp_blinded = false; + unsigned int tmp_idx; + unsigned int image_size; + u8 *image_ptr; + int tries; + + /* + * If BPF JIT was not enabled then we must fall back to + * the interpreter. + */ + if (!prog->jit_requested) + return orig_prog; + /* + * If constant blinding was enabled and we failed during blinding + * then we must fall back to the interpreter. Otherwise, we save + * the new JITed code. + */ + tmp = bpf_jit_blind_constants(prog); + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + memset(&ctx, 0, sizeof(ctx)); + ctx.program = prog; + + /* + * Not able to allocate memory for descriptors[], then + * we must fall back to the interpreter + */ + ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors), + GFP_KERNEL); + if (ctx.descriptors == NULL) + goto out_err; + + /* First pass discovers used resources */ + if (build_body(&ctx) < 0) + goto out_err; + /* + * Second pass computes instruction offsets. + * If any PC-relative branches are out of range, a sequence of + * a PC-relative branch + a jump is generated, and we have to + * try again from the beginning to generate the new offsets. + * This is done until no additional conversions are necessary. + * The last two iterations are done with all branches being + * converted, to guarantee offset table convergence within a + * fixed number of iterations. 
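+	 *
+	 * For example, converting one out-of-range branch inserts two extra
+	 * instructions (the absolute jump and its delay slot), which shifts
+	 * all later code downwards; a branch that was previously just within
+	 * range may then overflow as well and be converted on a later pass.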
+ */ + ctx.jit_index = 0; + build_prologue(&ctx); + tmp_idx = ctx.jit_index; + + tries = JIT_MAX_ITERATIONS; + do { + ctx.jit_index = tmp_idx; + ctx.changes = 0; + if (tries == 2) + set_convert_flag(&ctx, true); + if (build_body(&ctx) < 0) + goto out_err; + } while (ctx.changes > 0 && --tries > 0); + + if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge")) + goto out_err; + + build_epilogue(&ctx, MIPS_R_RA); + + /* Now we know the size of the structure to make */ + image_size = sizeof(u32) * ctx.jit_index; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + /* + * Not able to allocate memory for the structure then + * we must fall back to the interpretation + */ + if (header == NULL) + goto out_err; + + /* Actual pass to generate final JIT code */ + ctx.target = (u32 *)image_ptr; + ctx.jit_index = 0; + + /* + * If building the JITed code fails somehow, + * we fall back to the interpretation. + */ + build_prologue(&ctx); + if (build_body(&ctx) < 0) + goto out_err; + build_epilogue(&ctx, MIPS_R_RA); + + /* Populate line info meta data */ + set_convert_flag(&ctx, false); + bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]); + + /* Set as read-only exec and flush instruction cache */ + bpf_jit_binary_lock_ro(header); + flush_icache_range((unsigned long)header, + (unsigned long)&ctx.target[ctx.jit_index]); + + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx.target); + + prog->bpf_func = (void *)ctx.target; + prog->jited = 1; + prog->jited_len = image_size; + +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + kfree(ctx.descriptors); + return prog; + +out_err: + prog = orig_prog; + if (header) + bpf_jit_binary_free(header); + goto out; +} diff --git a/arch/mips/net/bpf_jit_comp.h b/arch/mips/net/bpf_jit_comp.h new file mode 100644 index 00000000000000..6f3a7b07294b83 --- /dev/null +++ b/arch/mips/net/bpf_jit_comp.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS. + * + * Copyright (c) 2021 Anyfi Networks AB. + * Author: Johan Almbladh + * + * Based on code and ideas from + * Copyright (c) 2017 Cavium, Inc. 
+ * Copyright (c) 2017 Shubham Bansal + * Copyright (c) 2011 Mircea Gherzan + */ + +#ifndef _BPF_JIT_COMP_H +#define _BPF_JIT_COMP_H + +/* MIPS registers */ +#define MIPS_R_ZERO 0 /* Const zero */ +#define MIPS_R_AT 1 /* Asm temp */ +#define MIPS_R_V0 2 /* Result */ +#define MIPS_R_V1 3 /* Result */ +#define MIPS_R_A0 4 /* Argument */ +#define MIPS_R_A1 5 /* Argument */ +#define MIPS_R_A2 6 /* Argument */ +#define MIPS_R_A3 7 /* Argument */ +#define MIPS_R_A4 8 /* Arg (n64) */ +#define MIPS_R_A5 9 /* Arg (n64) */ +#define MIPS_R_A6 10 /* Arg (n64) */ +#define MIPS_R_A7 11 /* Arg (n64) */ +#define MIPS_R_T0 8 /* Temp (o32) */ +#define MIPS_R_T1 9 /* Temp (o32) */ +#define MIPS_R_T2 10 /* Temp (o32) */ +#define MIPS_R_T3 11 /* Temp (o32) */ +#define MIPS_R_T4 12 /* Temporary */ +#define MIPS_R_T5 13 /* Temporary */ +#define MIPS_R_T6 14 /* Temporary */ +#define MIPS_R_T7 15 /* Temporary */ +#define MIPS_R_S0 16 /* Saved */ +#define MIPS_R_S1 17 /* Saved */ +#define MIPS_R_S2 18 /* Saved */ +#define MIPS_R_S3 19 /* Saved */ +#define MIPS_R_S4 20 /* Saved */ +#define MIPS_R_S5 21 /* Saved */ +#define MIPS_R_S6 22 /* Saved */ +#define MIPS_R_S7 23 /* Saved */ +#define MIPS_R_T8 24 /* Temporary */ +#define MIPS_R_T9 25 /* Temporary */ +/* MIPS_R_K0 26 Reserved */ +/* MIPS_R_K1 27 Reserved */ +#define MIPS_R_GP 28 /* Global ptr */ +#define MIPS_R_SP 29 /* Stack ptr */ +#define MIPS_R_FP 30 /* Frame ptr */ +#define MIPS_R_RA 31 /* Return */ + +/* + * Jump address mask for immediate jumps. The four most significant bits + * must be equal to PC. + */ +#define MIPS_JMP_MASK 0x0fffffffUL + +/* Maximum number of iterations in offset table computation */ +#define JIT_MAX_ITERATIONS 8 + +/* + * Jump pseudo-instructions used internally + * for branch conversion and branch optimization. + */ +#define JIT_JNSET 0xe0 +#define JIT_JNOP 0xf0 + +/* Descriptor flag for PC-relative branch conversion */ +#define JIT_DESC_CONVERT BIT(31) + +/* JIT context for an eBPF program */ +struct jit_context { + struct bpf_prog *program; /* The eBPF program being JITed */ + u32 *descriptors; /* eBPF to JITed CPU insn descriptors */ + u32 *target; /* JITed code buffer */ + u32 bpf_index; /* Index of current BPF program insn */ + u32 jit_index; /* Index of current JIT target insn */ + u32 changes; /* Number of PC-relative branch conv */ + u32 accessed; /* Bit mask of read eBPF registers */ + u32 clobbered; /* Bit mask of modified CPU registers */ + u32 stack_size; /* Total allocated stack size in bytes */ + u32 saved_size; /* Size of callee-saved registers */ + u32 stack_used; /* Stack size used for function calls */ +}; + +/* Emit the instruction if the JIT memory space has been allocated */ +#define __emit(ctx, func, ...) \ +do { \ + if ((ctx)->target != NULL) { \ + u32 *p = &(ctx)->target[ctx->jit_index]; \ + uasm_i_##func(&p, ##__VA_ARGS__); \ + } \ + (ctx)->jit_index++; \ +} while (0) +#define emit(...) 
__emit(__VA_ARGS__) + +/* Workaround for R10000 ll/sc errata */ +#ifdef CONFIG_WAR_R10000 +#define LLSC_beqz beqzl +#else +#define LLSC_beqz beqz +#endif + +/* Workaround for Loongson-3 ll/sc errata */ +#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS +#define LLSC_sync(ctx) emit(ctx, sync, 0) +#define LLSC_offset 4 +#else +#define LLSC_sync(ctx) +#define LLSC_offset 0 +#endif + +/* Workaround for Loongson-2F jump errata */ +#ifdef CONFIG_CPU_JUMP_WORKAROUNDS +#define JALR_MASK 0xffffffffcfffffffULL +#else +#define JALR_MASK (~0ULL) +#endif + +/* + * Mark a BPF register as accessed, it needs to be + * initialized by the program if expected, e.g. FP. + */ +static inline void access_reg(struct jit_context *ctx, u8 reg) +{ + ctx->accessed |= BIT(reg); +} + +/* + * Mark a CPU register as clobbered, it needs to be + * saved/restored by the program if callee-saved. + */ +static inline void clobber_reg(struct jit_context *ctx, u8 reg) +{ + ctx->clobbered |= BIT(reg); +} + +/* + * Push registers on the stack, starting at a given depth from the stack + * pointer and increasing. The next depth to be written is returned. + */ +int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); + +/* + * Pop registers from the stack, starting at a given depth from the stack + * pointer and increasing. The next depth to be read is returned. + */ +int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); + +/* Compute the 28-bit jump target address from a BPF program location */ +int get_target(struct jit_context *ctx, u32 loc); + +/* Compute the PC-relative offset to relative BPF program offset */ +int get_offset(const struct jit_context *ctx, int off); + +/* dst = imm (32-bit) */ +void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm); + +/* dst = src (32-bit) */ +void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src); + +/* Validate ALU/ALU64 immediate range */ +bool valid_alu_i(u8 op, s32 imm); + +/* Rewrite ALU/ALU64 immediate operation */ +bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val); + +/* ALU immediate operation (32-bit) */ +void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op); + +/* ALU register operation (32-bit) */ +void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op); + +/* Atomic read-modify-write (32-bit) */ +void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code); + +/* Atomic compare-and-exchange (32-bit) */ +void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off); + +/* Swap bytes and truncate a register word or half word */ +void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width); + +/* Validate JMP/JMP32 immediate range */ +bool valid_jmp_i(u8 op, s32 imm); + +/* Prepare a PC-relative jump operation with immediate conditional */ +void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); + +/* Prepare a PC-relative jump operation with register conditional */ +void setup_jmp_r(struct jit_context *ctx, bool same_reg, + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); + +/* Finish a PC-relative jump operation */ +int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off); + +/* Conditional JMP/JMP32 immediate */ +void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op); + +/* Conditional JMP/JMP32 register */ +void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op); + +/* Jump always */ +int emit_ja(struct jit_context *ctx, s16 off); + +/* Jump to epilogue */ +int emit_exit(struct jit_context *ctx); + 
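+/*
+ * Usage sketch (illustrative only, not part of the JIT API): a helper
+ * that emits "dst = dst + 1" on top of the shared primitives would do
+ * roughly
+ *
+ *	emit(ctx, addiu, dst, dst, 1);
+ *	clobber_reg(ctx, dst);
+ *
+ * During the sizing passes (ctx->target == NULL) emit() only advances
+ * jit_index; in the final pass it also writes the instruction word into
+ * the allocated code buffer.
+ */
+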
+/* + * Build program prologue to set up the stack and registers. + * This function is implemented separately for 32-bit and 64-bit JITs. + */ +void build_prologue(struct jit_context *ctx); + +/* + * Build the program epilogue to restore the stack and registers. + * This function is implemented separately for 32-bit and 64-bit JITs. + */ +void build_epilogue(struct jit_context *ctx, int dest_reg); + +/* + * Convert an eBPF instruction to native instruction, i.e + * JITs an eBPF instruction. + * Returns : + * 0 - Successfully JITed an 8-byte eBPF instruction + * >0 - Successfully JITed a 16-byte eBPF instruction + * <0 - Failed to JIT. + * This function is implemented separately for 32-bit and 64-bit JITs. + */ +int build_insn(const struct bpf_insn *insn, struct jit_context *ctx); + +#endif /* _BPF_JIT_COMP_H */ diff --git a/arch/mips/net/bpf_jit_comp32.c b/arch/mips/net/bpf_jit_comp32.c new file mode 100644 index 00000000000000..bd996ede12f8e1 --- /dev/null +++ b/arch/mips/net/bpf_jit_comp32.c @@ -0,0 +1,1899 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Just-In-Time compiler for eBPF bytecode on MIPS. + * Implementation of JIT functions for 32-bit CPUs. + * + * Copyright (c) 2021 Anyfi Networks AB. + * Author: Johan Almbladh + * + * Based on code and ideas from + * Copyright (c) 2017 Cavium, Inc. + * Copyright (c) 2017 Shubham Bansal + * Copyright (c) 2011 Mircea Gherzan + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_jit_comp.h" + +/* MIPS a4-a7 are not available in the o32 ABI */ +#undef MIPS_R_A4 +#undef MIPS_R_A5 +#undef MIPS_R_A6 +#undef MIPS_R_A7 + +/* Stack is 8-byte aligned in o32 ABI */ +#define MIPS_STACK_ALIGNMENT 8 + +/* + * The top 16 bytes of a stack frame is reserved for the callee in O32 ABI. + * This corresponds to stack space for register arguments a0-a3. + */ +#define JIT_RESERVED_STACK 16 + +/* Temporary 64-bit register used by JIT */ +#define JIT_REG_TMP MAX_BPF_JIT_REG + +/* + * Number of prologue bytes to skip when doing a tail call. + * Tail call count (TCC) initialization (8 bytes) always, plus + * R0-to-v0 assignment (4 bytes) if big endian. + */ +#ifdef __BIG_ENDIAN +#define JIT_TCALL_SKIP 12 +#else +#define JIT_TCALL_SKIP 8 +#endif + +/* CPU registers holding the callee return value */ +#define JIT_RETURN_REGS \ + (BIT(MIPS_R_V0) | \ + BIT(MIPS_R_V1)) + +/* CPU registers arguments passed to callee directly */ +#define JIT_ARG_REGS \ + (BIT(MIPS_R_A0) | \ + BIT(MIPS_R_A1) | \ + BIT(MIPS_R_A2) | \ + BIT(MIPS_R_A3)) + +/* CPU register arguments passed to callee on stack */ +#define JIT_STACK_REGS \ + (BIT(MIPS_R_T0) | \ + BIT(MIPS_R_T1) | \ + BIT(MIPS_R_T2) | \ + BIT(MIPS_R_T3) | \ + BIT(MIPS_R_T4) | \ + BIT(MIPS_R_T5)) + +/* Caller-saved CPU registers */ +#define JIT_CALLER_REGS \ + (JIT_RETURN_REGS | \ + JIT_ARG_REGS | \ + JIT_STACK_REGS) + +/* Callee-saved CPU registers */ +#define JIT_CALLEE_REGS \ + (BIT(MIPS_R_S0) | \ + BIT(MIPS_R_S1) | \ + BIT(MIPS_R_S2) | \ + BIT(MIPS_R_S3) | \ + BIT(MIPS_R_S4) | \ + BIT(MIPS_R_S5) | \ + BIT(MIPS_R_S6) | \ + BIT(MIPS_R_S7) | \ + BIT(MIPS_R_GP) | \ + BIT(MIPS_R_FP) | \ + BIT(MIPS_R_RA)) + +/* + * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers. + * + * 1) Native register pairs are ordered according to CPU endiannes, following + * the MIPS convention for passing 64-bit arguments and return values. + * 2) The eBPF return value, arguments and callee-saved registers are mapped + * to their native MIPS equivalents. 
+ * 3) Since the 32 highest bits in the eBPF FP register are always zero, + * only one general-purpose register is actually needed for the mapping. + * We use the fp register for this purpose, and map the highest bits to + * the MIPS register r0 (zero). + * 4) We use the MIPS gp and at registers as internal temporary registers + * for constant blinding. The gp register is callee-saved. + * 5) One 64-bit temporary register is mapped for use when sign-extending + * immediate operands. MIPS registers t6-t9 are available to the JIT + * for as temporaries when implementing complex 64-bit operations. + * + * With this scheme all eBPF registers are being mapped to native MIPS + * registers without having to use any stack scratch space. The direct + * register mapping (2) simplifies the handling of function calls. + */ +static const u8 bpf2mips32[][2] = { + /* Return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = {MIPS_R_V1, MIPS_R_V0}, + /* Arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = {MIPS_R_A1, MIPS_R_A0}, + [BPF_REG_2] = {MIPS_R_A3, MIPS_R_A2}, + /* Remaining arguments, to be passed on the stack per O32 ABI */ + [BPF_REG_3] = {MIPS_R_T1, MIPS_R_T0}, + [BPF_REG_4] = {MIPS_R_T3, MIPS_R_T2}, + [BPF_REG_5] = {MIPS_R_T5, MIPS_R_T4}, + /* Callee-saved registers that in-kernel function will preserve */ + [BPF_REG_6] = {MIPS_R_S1, MIPS_R_S0}, + [BPF_REG_7] = {MIPS_R_S3, MIPS_R_S2}, + [BPF_REG_8] = {MIPS_R_S5, MIPS_R_S4}, + [BPF_REG_9] = {MIPS_R_S7, MIPS_R_S6}, + /* Read-only frame pointer to access the eBPF stack */ +#ifdef __BIG_ENDIAN + [BPF_REG_FP] = {MIPS_R_FP, MIPS_R_ZERO}, +#else + [BPF_REG_FP] = {MIPS_R_ZERO, MIPS_R_FP}, +#endif + /* Temporary register for blinding constants */ + [BPF_REG_AX] = {MIPS_R_GP, MIPS_R_AT}, + /* Temporary register for internal JIT use */ + [JIT_REG_TMP] = {MIPS_R_T7, MIPS_R_T6}, +}; + +/* Get low CPU register for a 64-bit eBPF register mapping */ +static inline u8 lo(const u8 reg[]) +{ +#ifdef __BIG_ENDIAN + return reg[0]; +#else + return reg[1]; +#endif +} + +/* Get high CPU register for a 64-bit eBPF register mapping */ +static inline u8 hi(const u8 reg[]) +{ +#ifdef __BIG_ENDIAN + return reg[1]; +#else + return reg[0]; +#endif +} + +/* + * Mark a 64-bit CPU register pair as clobbered, it needs to be + * saved/restored by the program if callee-saved. + */ +static void clobber_reg64(struct jit_context *ctx, const u8 reg[]) +{ + clobber_reg(ctx, reg[0]); + clobber_reg(ctx, reg[1]); +} + +/* dst = imm (sign-extended) */ +static void emit_mov_se_i64(struct jit_context *ctx, const u8 dst[], s32 imm) +{ + emit_mov_i(ctx, lo(dst), imm); + if (imm < 0) + emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); + else + emit(ctx, move, hi(dst), MIPS_R_ZERO); + clobber_reg64(ctx, dst); +} + +/* Zero extension, if verifier does not do it for us */ +static void emit_zext_ver(struct jit_context *ctx, const u8 dst[]) +{ + if (!ctx->program->aux->verifier_zext) { + emit(ctx, move, hi(dst), MIPS_R_ZERO); + clobber_reg(ctx, hi(dst)); + } +} + +/* Load delay slot, if ISA mandates it */ +static void emit_load_delay(struct jit_context *ctx) +{ + if (!cpu_has_mips_2_3_4_5_r) + emit(ctx, nop); +} + +/* ALU immediate operation (64-bit) */ +static void emit_alu_i64(struct jit_context *ctx, + const u8 dst[], s32 imm, u8 op) +{ + u8 src = MIPS_R_T6; + + /* + * ADD/SUB with all but the max negative imm can be handled by + * inverting the operation and the imm value, saving one insn. 
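+	 * For example, "dst += -4" is emitted as "dst -= 4", which avoids
+	 * the extra high-word adjustment that a sign-extended negative
+	 * immediate would otherwise require.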
+ */ + if (imm > S32_MIN && imm < 0) + switch (op) { + case BPF_ADD: + op = BPF_SUB; + imm = -imm; + break; + case BPF_SUB: + op = BPF_ADD; + imm = -imm; + break; + } + + /* Move immediate to temporary register */ + emit_mov_i(ctx, src, imm); + + switch (op) { + /* dst = dst + imm */ + case BPF_ADD: + emit(ctx, addu, lo(dst), lo(dst), src); + emit(ctx, sltu, MIPS_R_T9, lo(dst), src); + emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); + if (imm < 0) + emit(ctx, addiu, hi(dst), hi(dst), -1); + break; + /* dst = dst - imm */ + case BPF_SUB: + emit(ctx, sltu, MIPS_R_T9, lo(dst), src); + emit(ctx, subu, lo(dst), lo(dst), src); + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); + if (imm < 0) + emit(ctx, addiu, hi(dst), hi(dst), 1); + break; + /* dst = dst | imm */ + case BPF_OR: + emit(ctx, or, lo(dst), lo(dst), src); + if (imm < 0) + emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); + break; + /* dst = dst & imm */ + case BPF_AND: + emit(ctx, and, lo(dst), lo(dst), src); + if (imm >= 0) + emit(ctx, move, hi(dst), MIPS_R_ZERO); + break; + /* dst = dst ^ imm */ + case BPF_XOR: + emit(ctx, xor, lo(dst), lo(dst), src); + if (imm < 0) { + emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); + emit(ctx, addiu, hi(dst), hi(dst), -1); + } + break; + } + clobber_reg64(ctx, dst); +} + +/* ALU register operation (64-bit) */ +static void emit_alu_r64(struct jit_context *ctx, + const u8 dst[], const u8 src[], u8 op) +{ + switch (BPF_OP(op)) { + /* dst = dst + src */ + case BPF_ADD: + if (src == dst) { + emit(ctx, srl, MIPS_R_T9, lo(dst), 31); + emit(ctx, addu, lo(dst), lo(dst), lo(dst)); + } else { + emit(ctx, addu, lo(dst), lo(dst), lo(src)); + emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); + } + emit(ctx, addu, hi(dst), hi(dst), hi(src)); + emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); + break; + /* dst = dst - src */ + case BPF_SUB: + emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); + emit(ctx, subu, lo(dst), lo(dst), lo(src)); + emit(ctx, subu, hi(dst), hi(dst), hi(src)); + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); + break; + /* dst = dst | src */ + case BPF_OR: + emit(ctx, or, lo(dst), lo(dst), lo(src)); + emit(ctx, or, hi(dst), hi(dst), hi(src)); + break; + /* dst = dst & src */ + case BPF_AND: + emit(ctx, and, lo(dst), lo(dst), lo(src)); + emit(ctx, and, hi(dst), hi(dst), hi(src)); + break; + /* dst = dst ^ src */ + case BPF_XOR: + emit(ctx, xor, lo(dst), lo(dst), lo(src)); + emit(ctx, xor, hi(dst), hi(dst), hi(src)); + break; + } + clobber_reg64(ctx, dst); +} + +/* ALU invert (64-bit) */ +static void emit_neg_i64(struct jit_context *ctx, const u8 dst[]) +{ + emit(ctx, sltu, MIPS_R_T9, MIPS_R_ZERO, lo(dst)); + emit(ctx, subu, lo(dst), MIPS_R_ZERO, lo(dst)); + emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); + + clobber_reg64(ctx, dst); +} + +/* ALU shift immediate (64-bit) */ +static void emit_shift_i64(struct jit_context *ctx, + const u8 dst[], u32 imm, u8 op) +{ + switch (BPF_OP(op)) { + /* dst = dst << imm */ + case BPF_LSH: + if (imm < 32) { + emit(ctx, srl, MIPS_R_T9, lo(dst), 32 - imm); + emit(ctx, sll, lo(dst), lo(dst), imm); + emit(ctx, sll, hi(dst), hi(dst), imm); + emit(ctx, or, hi(dst), hi(dst), MIPS_R_T9); + } else { + emit(ctx, sll, hi(dst), lo(dst), imm - 32); + emit(ctx, move, lo(dst), MIPS_R_ZERO); + } + break; + /* dst = dst >> imm */ + case BPF_RSH: + if (imm < 32) { + emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); + emit(ctx, srl, lo(dst), lo(dst), imm); + emit(ctx, srl, hi(dst), hi(dst), imm); + emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); + } else { + 
emit(ctx, srl, lo(dst), hi(dst), imm - 32); + emit(ctx, move, hi(dst), MIPS_R_ZERO); + } + break; + /* dst = dst >> imm (arithmetic) */ + case BPF_ARSH: + if (imm < 32) { + emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); + emit(ctx, srl, lo(dst), lo(dst), imm); + emit(ctx, sra, hi(dst), hi(dst), imm); + emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); + } else { + emit(ctx, sra, lo(dst), hi(dst), imm - 32); + emit(ctx, sra, hi(dst), hi(dst), 31); + } + break; + } + clobber_reg64(ctx, dst); +} + +/* ALU shift register (64-bit) */ +static void emit_shift_r64(struct jit_context *ctx, + const u8 dst[], u8 src, u8 op) +{ + u8 t1 = MIPS_R_T8; + u8 t2 = MIPS_R_T9; + + emit(ctx, andi, t1, src, 32); /* t1 = src & 32 */ + emit(ctx, beqz, t1, 16); /* PC += 16 if t1 == 0 */ + emit(ctx, nor, t2, src, MIPS_R_ZERO); /* t2 = ~src (delay slot) */ + + switch (BPF_OP(op)) { + /* dst = dst << src */ + case BPF_LSH: + /* Next: shift >= 32 */ + emit(ctx, sllv, hi(dst), lo(dst), src); /* dh = dl << src */ + emit(ctx, move, lo(dst), MIPS_R_ZERO); /* dl = 0 */ + emit(ctx, b, 20); /* PC += 20 */ + /* +16: shift < 32 */ + emit(ctx, srl, t1, lo(dst), 1); /* t1 = dl >> 1 */ + emit(ctx, srlv, t1, t1, t2); /* t1 = t1 >> t2 */ + emit(ctx, sllv, lo(dst), lo(dst), src); /* dl = dl << src */ + emit(ctx, sllv, hi(dst), hi(dst), src); /* dh = dh << src */ + emit(ctx, or, hi(dst), hi(dst), t1); /* dh = dh | t1 */ + break; + /* dst = dst >> src */ + case BPF_RSH: + /* Next: shift >= 32 */ + emit(ctx, srlv, lo(dst), hi(dst), src); /* dl = dh >> src */ + emit(ctx, move, hi(dst), MIPS_R_ZERO); /* dh = 0 */ + emit(ctx, b, 20); /* PC += 20 */ + /* +16: shift < 32 */ + emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ + emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ + emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >> src */ + emit(ctx, srlv, hi(dst), hi(dst), src); /* dh = dh >> src */ + emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ + break; + /* dst = dst >> src (arithmetic) */ + case BPF_ARSH: + /* Next: shift >= 32 */ + emit(ctx, srav, lo(dst), hi(dst), src); /* dl = dh >>a src */ + emit(ctx, sra, hi(dst), hi(dst), 31); /* dh = dh >>a 31 */ + emit(ctx, b, 20); /* PC += 20 */ + /* +16: shift < 32 */ + emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ + emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ + emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >>a src */ + emit(ctx, srav, hi(dst), hi(dst), src); /* dh = dh >> src */ + emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ + break; + } + + /* +20: Done */ + clobber_reg64(ctx, dst); +} + +/* ALU mul immediate (64x32-bit) */ +static void emit_mul_i64(struct jit_context *ctx, const u8 dst[], s32 imm) +{ + u8 src = MIPS_R_T6; + u8 tmp = MIPS_R_T9; + + switch (imm) { + /* dst = dst * 1 is a no-op */ + case 1: + break; + /* dst = dst * -1 */ + case -1: + emit_neg_i64(ctx, dst); + break; + case 0: + emit_mov_r(ctx, lo(dst), MIPS_R_ZERO); + emit_mov_r(ctx, hi(dst), MIPS_R_ZERO); + break; + /* Full 64x32 multiply */ + default: + /* hi(dst) = hi(dst) * src(imm) */ + emit_mov_i(ctx, src, imm); + if (cpu_has_mips32r1 || cpu_has_mips32r6) { + emit(ctx, mul, hi(dst), hi(dst), src); + } else { + emit(ctx, multu, hi(dst), src); + emit(ctx, mflo, hi(dst)); + } + + /* hi(dst) = hi(dst) - lo(dst) */ + if (imm < 0) + emit(ctx, subu, hi(dst), hi(dst), lo(dst)); + + /* tmp = lo(dst) * src(imm) >> 32 */ + /* lo(dst) = lo(dst) * src(imm) */ + if (cpu_has_mips32r6) { + emit(ctx, muhu, tmp, lo(dst), src); + emit(ctx, mulu, lo(dst), lo(dst), src); + } else { + emit(ctx, multu, lo(dst), 
src); + emit(ctx, mflo, lo(dst)); + emit(ctx, mfhi, tmp); + } + + /* hi(dst) += tmp */ + emit(ctx, addu, hi(dst), hi(dst), tmp); + clobber_reg64(ctx, dst); + break; + } +} + +/* ALU mul register (64x64-bit) */ +static void emit_mul_r64(struct jit_context *ctx, + const u8 dst[], const u8 src[]) +{ + u8 acc = MIPS_R_T8; + u8 tmp = MIPS_R_T9; + + /* acc = hi(dst) * lo(src) */ + if (cpu_has_mips32r1 || cpu_has_mips32r6) { + emit(ctx, mul, acc, hi(dst), lo(src)); + } else { + emit(ctx, multu, hi(dst), lo(src)); + emit(ctx, mflo, acc); + } + + /* tmp = lo(dst) * hi(src) */ + if (cpu_has_mips32r1 || cpu_has_mips32r6) { + emit(ctx, mul, tmp, lo(dst), hi(src)); + } else { + emit(ctx, multu, lo(dst), hi(src)); + emit(ctx, mflo, tmp); + } + + /* acc += tmp */ + emit(ctx, addu, acc, acc, tmp); + + /* tmp = lo(dst) * lo(src) >> 32 */ + /* lo(dst) = lo(dst) * lo(src) */ + if (cpu_has_mips32r6) { + emit(ctx, muhu, tmp, lo(dst), lo(src)); + emit(ctx, mulu, lo(dst), lo(dst), lo(src)); + } else { + emit(ctx, multu, lo(dst), lo(src)); + emit(ctx, mflo, lo(dst)); + emit(ctx, mfhi, tmp); + } + + /* hi(dst) = acc + tmp */ + emit(ctx, addu, hi(dst), acc, tmp); + clobber_reg64(ctx, dst); +} + +/* Helper function for 64-bit modulo */ +static u64 jit_mod64(u64 a, u64 b) +{ + u64 rem; + + div64_u64_rem(a, b, &rem); + return rem; +} + +/* ALU div/mod register (64-bit) */ +static void emit_divmod_r64(struct jit_context *ctx, + const u8 dst[], const u8 src[], u8 op) +{ + const u8 *r0 = bpf2mips32[BPF_REG_0]; /* Mapped to v0-v1 */ + const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ + const u8 *r2 = bpf2mips32[BPF_REG_2]; /* Mapped to a2-a3 */ + int exclude, k; + u32 addr = 0; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + 0, JIT_RESERVED_STACK); + + /* Put 64-bit arguments 1 and 2 in registers a0-a3 */ + for (k = 0; k < 2; k++) { + emit(ctx, move, MIPS_R_T9, src[k]); + emit(ctx, move, r1[k], dst[k]); + emit(ctx, move, r2[k], MIPS_R_T9); + } + + /* Emit function call */ + switch (BPF_OP(op)) { + /* dst = dst / src */ + case BPF_DIV: + addr = (u32)&div64_u64; + break; + /* dst = dst % src */ + case BPF_MOD: + addr = (u32)&jit_mod64; + break; + } + emit_mov_i(ctx, MIPS_R_T9, addr); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + + /* Store the 64-bit result in dst */ + emit(ctx, move, dst[0], r0[0]); + emit(ctx, move, dst[1], r0[1]); + + /* Restore caller-saved registers, excluding the computed result */ + exclude = BIT(lo(dst)) | BIT(hi(dst)); + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + exclude, JIT_RESERVED_STACK); + emit_load_delay(ctx); + + clobber_reg64(ctx, dst); + clobber_reg(ctx, MIPS_R_V0); + clobber_reg(ctx, MIPS_R_V1); + clobber_reg(ctx, MIPS_R_RA); +} + +/* Swap bytes in a register word */ +static void emit_swap8_r(struct jit_context *ctx, u8 dst, u8 src, u8 mask) +{ + u8 tmp = MIPS_R_T9; + + emit(ctx, and, tmp, src, mask); /* tmp = src & 0x00ff00ff */ + emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ + emit(ctx, srl, dst, src, 8); /* dst = src >> 8 */ + emit(ctx, and, dst, dst, mask); /* dst = dst & 0x00ff00ff */ + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ +} + +/* Swap half words in a register word */ +static void emit_swap16_r(struct jit_context *ctx, u8 dst, u8 src) +{ + u8 tmp = MIPS_R_T9; + + emit(ctx, sll, tmp, src, 16); /* tmp = src << 16 */ + emit(ctx, srl, dst, src, 16); /* dst = src >> 16 */ + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ +} + +/* Swap bytes and truncate a register 
double word, word or half word */ +static void emit_bswap_r64(struct jit_context *ctx, const u8 dst[], u32 width) +{ + u8 tmp = MIPS_R_T8; + + switch (width) { + /* Swap bytes in a double word */ + case 64: + if (cpu_has_mips32r2 || cpu_has_mips32r6) { + emit(ctx, rotr, tmp, hi(dst), 16); + emit(ctx, rotr, hi(dst), lo(dst), 16); + emit(ctx, wsbh, lo(dst), tmp); + emit(ctx, wsbh, hi(dst), hi(dst)); + } else { + emit_swap16_r(ctx, tmp, lo(dst)); + emit_swap16_r(ctx, lo(dst), hi(dst)); + emit(ctx, move, hi(dst), tmp); + + emit(ctx, lui, tmp, 0xff); /* tmp = 0x00ff0000 */ + emit(ctx, ori, tmp, tmp, 0xff); /* tmp = 0x00ff00ff */ + emit_swap8_r(ctx, lo(dst), lo(dst), tmp); + emit_swap8_r(ctx, hi(dst), hi(dst), tmp); + } + break; + /* Swap bytes in a word */ + /* Swap bytes in a half word */ + case 32: + case 16: + emit_bswap_r(ctx, lo(dst), width); + emit(ctx, move, hi(dst), MIPS_R_ZERO); + break; + } + clobber_reg64(ctx, dst); +} + +/* Truncate a register double word, word or half word */ +static void emit_trunc_r64(struct jit_context *ctx, const u8 dst[], u32 width) +{ + switch (width) { + case 64: + break; + /* Zero-extend a word */ + case 32: + emit(ctx, move, hi(dst), MIPS_R_ZERO); + clobber_reg(ctx, hi(dst)); + break; + /* Zero-extend a half word */ + case 16: + emit(ctx, move, hi(dst), MIPS_R_ZERO); + emit(ctx, andi, lo(dst), lo(dst), 0xffff); + clobber_reg64(ctx, dst); + break; + } +} + +/* Load operation: dst = *(size*)(src + off) */ +static void emit_ldx(struct jit_context *ctx, + const u8 dst[], u8 src, s16 off, u8 size) +{ + switch (size) { + /* Load a byte */ + case BPF_B: + emit(ctx, lbu, lo(dst), off, src); + emit(ctx, move, hi(dst), MIPS_R_ZERO); + break; + /* Load a half word */ + case BPF_H: + emit(ctx, lhu, lo(dst), off, src); + emit(ctx, move, hi(dst), MIPS_R_ZERO); + break; + /* Load a word */ + case BPF_W: + emit(ctx, lw, lo(dst), off, src); + emit(ctx, move, hi(dst), MIPS_R_ZERO); + break; + /* Load a double word */ + case BPF_DW: + if (dst[1] == src) { + emit(ctx, lw, dst[0], off + 4, src); + emit(ctx, lw, dst[1], off, src); + } else { + emit(ctx, lw, dst[1], off, src); + emit(ctx, lw, dst[0], off + 4, src); + } + emit_load_delay(ctx); + break; + } + clobber_reg64(ctx, dst); +} + +/* Store operation: *(size *)(dst + off) = src */ +static void emit_stx(struct jit_context *ctx, + const u8 dst, const u8 src[], s16 off, u8 size) +{ + switch (size) { + /* Store a byte */ + case BPF_B: + emit(ctx, sb, lo(src), off, dst); + break; + /* Store a half word */ + case BPF_H: + emit(ctx, sh, lo(src), off, dst); + break; + /* Store a word */ + case BPF_W: + emit(ctx, sw, lo(src), off, dst); + break; + /* Store a double word */ + case BPF_DW: + emit(ctx, sw, src[1], off, dst); + emit(ctx, sw, src[0], off + 4, dst); + break; + } +} + +/* Atomic read-modify-write (32-bit, non-ll/sc fallback) */ +static void emit_atomic_r32(struct jit_context *ctx, + u8 dst, u8 src, s16 off, u8 code) +{ + u32 exclude = 0; + u32 addr = 0; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + 0, JIT_RESERVED_STACK); + /* + * Argument 1: dst+off if xchg, otherwise src, passed in register a0 + * Argument 2: src if xchg, othersize dst+off, passed in register a1 + */ + emit(ctx, move, MIPS_R_T9, dst); + if (code == BPF_XCHG) { + emit(ctx, move, MIPS_R_A1, src); + emit(ctx, addiu, MIPS_R_A0, MIPS_R_T9, off); + } else { + emit(ctx, move, MIPS_R_A0, src); + emit(ctx, addiu, MIPS_R_A1, MIPS_R_T9, off); + } + + /* Emit function call */ + switch (code) { + case BPF_ADD: + 
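/*
 * For reference (illustrative sketch, not part of the patch): what the
 * helpers selected below do to the word at dst + off, shown for the
 * BPF_ADD case. ref_atomic_add32 is a hypothetical name; the atomicity
 * comes from the kernel atomic_*() functions the JIT actually calls.
 */
static u32 ref_atomic_add32(u32 *addr, u32 src)
{
        u32 old = *addr;        /* read-modify-write done atomically */

        *addr = old + src;
        return old;             /* copied back to src only for BPF_FETCH */
}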
addr = (u32)&atomic_add; + break; + case BPF_ADD | BPF_FETCH: + addr = (u32)&atomic_fetch_add; + break; + case BPF_SUB: + addr = (u32)&atomic_sub; + break; + case BPF_SUB | BPF_FETCH: + addr = (u32)&atomic_fetch_sub; + break; + case BPF_OR: + addr = (u32)&atomic_or; + break; + case BPF_OR | BPF_FETCH: + addr = (u32)&atomic_fetch_or; + break; + case BPF_AND: + addr = (u32)&atomic_and; + break; + case BPF_AND | BPF_FETCH: + addr = (u32)&atomic_fetch_and; + break; + case BPF_XOR: + addr = (u32)&atomic_xor; + break; + case BPF_XOR | BPF_FETCH: + addr = (u32)&atomic_fetch_xor; + break; + case BPF_XCHG: + addr = (u32)&atomic_xchg; + break; + } + emit_mov_i(ctx, MIPS_R_T9, addr); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + + /* Update src register with old value, if specified */ + if (code & BPF_FETCH) { + emit(ctx, move, src, MIPS_R_V0); + exclude = BIT(src); + clobber_reg(ctx, src); + } + + /* Restore caller-saved registers, except any fetched value */ + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + exclude, JIT_RESERVED_STACK); + emit_load_delay(ctx); + clobber_reg(ctx, MIPS_R_RA); +} + +/* Helper function for 64-bit atomic exchange */ +static s64 jit_xchg64(s64 a, atomic64_t *v) +{ + return atomic64_xchg(v, a); +} + +/* Atomic read-modify-write (64-bit) */ +static void emit_atomic_r64(struct jit_context *ctx, + u8 dst, const u8 src[], s16 off, u8 code) +{ + const u8 *r0 = bpf2mips32[BPF_REG_0]; /* Mapped to v0-v1 */ + const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ + u32 exclude = 0; + u32 addr = 0; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + 0, JIT_RESERVED_STACK); + /* + * Argument 1: 64-bit src, passed in registers a0-a1 + * Argument 2: 32-bit dst+off, passed in register a2 + */ + emit(ctx, move, MIPS_R_T9, dst); + emit(ctx, move, r1[0], src[0]); + emit(ctx, move, r1[1], src[1]); + emit(ctx, addiu, MIPS_R_A2, MIPS_R_T9, off); + + /* Emit function call */ + switch (code) { + case BPF_ADD: + addr = (u32)&atomic64_add; + break; + case BPF_ADD | BPF_FETCH: + addr = (u32)&atomic64_fetch_add; + break; + case BPF_SUB: + addr = (u32)&atomic64_sub; + break; + case BPF_SUB | BPF_FETCH: + addr = (u32)&atomic64_fetch_sub; + break; + case BPF_OR: + addr = (u32)&atomic64_or; + break; + case BPF_OR | BPF_FETCH: + addr = (u32)&atomic64_fetch_or; + break; + case BPF_AND: + addr = (u32)&atomic64_and; + break; + case BPF_AND | BPF_FETCH: + addr = (u32)&atomic64_fetch_and; + break; + case BPF_XOR: + addr = (u32)&atomic64_xor; + break; + case BPF_XOR | BPF_FETCH: + addr = (u32)&atomic64_fetch_xor; + break; + case BPF_XCHG: + addr = (u32)&jit_xchg64; + break; + } + emit_mov_i(ctx, MIPS_R_T9, addr); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + + /* Update src register with old value, if specified */ + if (code & BPF_FETCH) { + emit(ctx, move, lo(src), lo(r0)); + emit(ctx, move, hi(src), hi(r0)); + exclude = BIT(src[0]) | BIT(src[1]); + clobber_reg64(ctx, src); + } + + /* Restore caller-saved registers, except any fetched value */ + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + exclude, JIT_RESERVED_STACK); + emit_load_delay(ctx); + clobber_reg(ctx, MIPS_R_RA); +} + +/* Atomic compare-and-exchange (32-bit, non-ll/sc fallback) */ +static void emit_cmpxchg_r32(struct jit_context *ctx, u8 dst, u8 src, s16 off) +{ + const u8 *r0 = bpf2mips32[BPF_REG_0]; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + JIT_RETURN_REGS, 
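/*
 * For reference (illustrative sketch, not part of the patch): the
 * semantics of the 32-bit BPF_CMPXCHG that emit_cmpxchg_r32() implements
 * by calling atomic_cmpxchg(). ref_cmpxchg32 is a hypothetical name.
 */
static u32 ref_cmpxchg32(u32 *addr, u32 r0, u32 src)
{
        u32 old = *addr;        /* compare-and-swap done atomically */

        if (old == r0)
                *addr = src;
        return old;             /* new value of R0; the verifier zero-extends it */
}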
JIT_RESERVED_STACK + 2 * sizeof(u32)); + /* + * Argument 1: 32-bit dst+off, passed in register a0 + * Argument 2: 32-bit r0, passed in register a1 + * Argument 3: 32-bit src, passed in register a2 + */ + emit(ctx, addiu, MIPS_R_T9, dst, off); + emit(ctx, move, MIPS_R_T8, src); + emit(ctx, move, MIPS_R_A1, lo(r0)); + emit(ctx, move, MIPS_R_A0, MIPS_R_T9); + emit(ctx, move, MIPS_R_A2, MIPS_R_T8); + + /* Emit function call */ + emit_mov_i(ctx, MIPS_R_T9, (u32)&atomic_cmpxchg); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + +#ifdef __BIG_ENDIAN + emit(ctx, move, lo(r0), MIPS_R_V0); +#endif + /* Restore caller-saved registers, except the return value */ + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + JIT_RETURN_REGS, JIT_RESERVED_STACK + 2 * sizeof(u32)); + emit_load_delay(ctx); + clobber_reg(ctx, MIPS_R_V0); + clobber_reg(ctx, MIPS_R_V1); + clobber_reg(ctx, MIPS_R_RA); +} + +/* Atomic compare-and-exchange (64-bit) */ +static void emit_cmpxchg_r64(struct jit_context *ctx, + u8 dst, const u8 src[], s16 off) +{ + const u8 *r0 = bpf2mips32[BPF_REG_0]; + const u8 *r2 = bpf2mips32[BPF_REG_2]; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + JIT_RETURN_REGS, JIT_RESERVED_STACK + 2 * sizeof(u32)); + /* + * Argument 1: 32-bit dst+off, passed in register a0 (a1 unused) + * Argument 2: 64-bit r0, passed in registers a2-a3 + * Argument 3: 64-bit src, passed on stack + */ + push_regs(ctx, BIT(src[0]) | BIT(src[1]), 0, JIT_RESERVED_STACK); + emit(ctx, addiu, MIPS_R_T9, dst, off); + emit(ctx, move, r2[0], r0[0]); + emit(ctx, move, r2[1], r0[1]); + emit(ctx, move, MIPS_R_A0, MIPS_R_T9); + + /* Emit function call */ + emit_mov_i(ctx, MIPS_R_T9, (u32)&atomic64_cmpxchg); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + + /* Restore caller-saved registers, except the return value */ + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, + JIT_RETURN_REGS, JIT_RESERVED_STACK + 2 * sizeof(u32)); + emit_load_delay(ctx); + clobber_reg(ctx, MIPS_R_V0); + clobber_reg(ctx, MIPS_R_V1); + clobber_reg(ctx, MIPS_R_RA); +} + +/* + * Conditional movz or an emulated equivalent. + * Note that the rs register may be modified. + */ +static void emit_movz_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) +{ + if (cpu_has_mips_2) { + emit(ctx, movz, rd, rs, rt); /* rd = rt ? rd : rs */ + } else if (cpu_has_mips32r6) { + if (rs != MIPS_R_ZERO) + emit(ctx, seleqz, rs, rs, rt); /* rs = 0 if rt == 0 */ + emit(ctx, selnez, rd, rd, rt); /* rd = 0 if rt != 0 */ + if (rs != MIPS_R_ZERO) + emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ + } else { + emit(ctx, bnez, rt, 8); /* PC += 8 if rd != 0 */ + emit(ctx, nop); /* +0: delay slot */ + emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ + } + clobber_reg(ctx, rd); + clobber_reg(ctx, rs); +} + +/* + * Conditional movn or an emulated equivalent. + * Note that the rs register may be modified. + */ +static void emit_movn_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) +{ + if (cpu_has_mips_2) { + emit(ctx, movn, rd, rs, rt); /* rd = rt ? 
rs : rd */ + } else if (cpu_has_mips32r6) { + if (rs != MIPS_R_ZERO) + emit(ctx, selnez, rs, rs, rt); /* rs = 0 if rt == 0 */ + emit(ctx, seleqz, rd, rd, rt); /* rd = 0 if rt != 0 */ + if (rs != MIPS_R_ZERO) + emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ + } else { + emit(ctx, beqz, rt, 8); /* PC += 8 if rd == 0 */ + emit(ctx, nop); /* +0: delay slot */ + emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ + } + clobber_reg(ctx, rd); + clobber_reg(ctx, rs); +} + +/* Emulation of 64-bit sltiu rd, rs, imm, where imm may be S32_MAX + 1 */ +static void emit_sltiu_r64(struct jit_context *ctx, u8 rd, + const u8 rs[], s64 imm) +{ + u8 tmp = MIPS_R_T9; + + if (imm < 0) { + emit_mov_i(ctx, rd, imm); /* rd = imm */ + emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ + emit(ctx, sltiu, tmp, hi(rs), -1); /* tmp = rsh < ~0U */ + emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ + } else { /* imm >= 0 */ + if (imm > 0x7fff) { + emit_mov_i(ctx, rd, (s32)imm); /* rd = imm */ + emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ + } else { + emit(ctx, sltiu, rd, lo(rs), imm); /* rd = rsl < imm */ + } + emit_movn_r(ctx, rd, MIPS_R_ZERO, hi(rs)); /* rd = 0 if rsh */ + } +} + +/* Emulation of 64-bit sltu rd, rs, rt */ +static void emit_sltu_r64(struct jit_context *ctx, u8 rd, + const u8 rs[], const u8 rt[]) +{ + u8 tmp = MIPS_R_T9; + + emit(ctx, sltu, rd, lo(rs), lo(rt)); /* rd = rsl < rtl */ + emit(ctx, subu, tmp, hi(rs), hi(rt)); /* tmp = rsh - rth */ + emit_movn_r(ctx, rd, MIPS_R_ZERO, tmp); /* rd = 0 if tmp != 0 */ + emit(ctx, sltu, tmp, hi(rs), hi(rt)); /* tmp = rsh < rth */ + emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ +} + +/* Emulation of 64-bit slti rd, rs, imm, where imm may be S32_MAX + 1 */ +static void emit_slti_r64(struct jit_context *ctx, u8 rd, + const u8 rs[], s64 imm) +{ + u8 t1 = MIPS_R_T8; + u8 t2 = MIPS_R_T9; + u8 cmp; + + /* + * if ((rs < 0) ^ (imm < 0)) t1 = imm >u rsl + * else t1 = rsl > 31 */ + if (imm < 0) + emit_movz_r(ctx, t1, t2, rd); /* t1 = rd ? t1 : t2 */ + else + emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? t2 : t1 */ + /* + * if ((imm < 0 && rsh != 0xffffffff) || + * (imm >= 0 && rsh != 0)) + * t1 = 0 + */ + if (imm < 0) { + emit(ctx, addiu, rd, hi(rs), 1); /* rd = rsh + 1 */ + cmp = rd; + } else { /* imm >= 0 */ + cmp = hi(rs); + } + emit_movn_r(ctx, t1, MIPS_R_ZERO, cmp); /* t1 = 0 if cmp != 0 */ + + /* + * if (imm < 0) rd = rsh < -1 + * else rd = rsh != 0 + * rd = rd | t1 + */ + emit(ctx, slti, rd, hi(rs), imm < 0 ? -1 : 0); /* rd = rsh < hi(imm) */ + emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */ +} + +/* Emulation of 64-bit(slt rd, rs, rt) */ +static void emit_slt_r64(struct jit_context *ctx, u8 rd, + const u8 rs[], const u8 rt[]) +{ + u8 t1 = MIPS_R_T7; + u8 t2 = MIPS_R_T8; + u8 t3 = MIPS_R_T9; + + /* + * if ((rs < 0) ^ (rt < 0)) t1 = rtl > 31 */ + emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? 
t2 : t1 */ + emit_movn_r(ctx, t1, MIPS_R_ZERO, t3); /* t1 = 0 if t3 != 0 */ + + /* rd = (rsh < rth) | t1 */ + emit(ctx, slt, rd, hi(rs), hi(rt)); /* rd = rsh = -0x7fff && imm <= 0x8000) { + emit(ctx, addiu, tmp, lo(dst), -imm); + } else if ((u32)imm <= 0xffff) { + emit(ctx, xori, tmp, lo(dst), imm); + } else { /* Register fallback */ + emit_mov_i(ctx, tmp, imm); + emit(ctx, xor, tmp, lo(dst), tmp); + } + if (imm < 0) { /* Compare sign extension */ + emit(ctx, addu, MIPS_R_T9, hi(dst), 1); + emit(ctx, or, tmp, tmp, MIPS_R_T9); + } else { /* Compare zero extension */ + emit(ctx, or, tmp, tmp, hi(dst)); + } + if (op == BPF_JEQ) + emit(ctx, beqz, tmp, off); + else /* BPF_JNE */ + emit(ctx, bnez, tmp, off); + break; + /* PC += off if dst & imm */ + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ + case BPF_JSET: + case JIT_JNSET: + if ((u32)imm <= 0xffff) { + emit(ctx, andi, tmp, lo(dst), imm); + } else { /* Register fallback */ + emit_mov_i(ctx, tmp, imm); + emit(ctx, and, tmp, lo(dst), tmp); + } + if (imm < 0) /* Sign-extension pulls in high word */ + emit(ctx, or, tmp, tmp, hi(dst)); + if (op == BPF_JSET) + emit(ctx, bnez, tmp, off); + else /* JIT_JNSET */ + emit(ctx, beqz, tmp, off); + break; + /* PC += off if dst > imm */ + case BPF_JGT: + emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); + emit(ctx, beqz, tmp, off); + break; + /* PC += off if dst >= imm */ + case BPF_JGE: + emit_sltiu_r64(ctx, tmp, dst, imm); + emit(ctx, beqz, tmp, off); + break; + /* PC += off if dst < imm */ + case BPF_JLT: + emit_sltiu_r64(ctx, tmp, dst, imm); + emit(ctx, bnez, tmp, off); + break; + /* PC += off if dst <= imm */ + case BPF_JLE: + emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); + emit(ctx, bnez, tmp, off); + break; + /* PC += off if dst > imm (signed) */ + case BPF_JSGT: + emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); + emit(ctx, beqz, tmp, off); + break; + /* PC += off if dst >= imm (signed) */ + case BPF_JSGE: + emit_slti_r64(ctx, tmp, dst, imm); + emit(ctx, beqz, tmp, off); + break; + /* PC += off if dst < imm (signed) */ + case BPF_JSLT: + emit_slti_r64(ctx, tmp, dst, imm); + emit(ctx, bnez, tmp, off); + break; + /* PC += off if dst <= imm (signed) */ + case BPF_JSLE: + emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); + emit(ctx, bnez, tmp, off); + break; + } +} + +/* Jump register (64-bit) */ +static void emit_jmp_r64(struct jit_context *ctx, + const u8 dst[], const u8 src[], s32 off, u8 op) +{ + u8 t1 = MIPS_R_T6; + u8 t2 = MIPS_R_T7; + + switch (op) { + /* No-op, used internally for branch optimization */ + case JIT_JNOP: + break; + /* PC += off if dst == src */ + /* PC += off if dst != src */ + case BPF_JEQ: + case BPF_JNE: + emit(ctx, subu, t1, lo(dst), lo(src)); + emit(ctx, subu, t2, hi(dst), hi(src)); + emit(ctx, or, t1, t1, t2); + if (op == BPF_JEQ) + emit(ctx, beqz, t1, off); + else /* BPF_JNE */ + emit(ctx, bnez, t1, off); + break; + /* PC += off if dst & src */ + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ + case BPF_JSET: + case JIT_JNSET: + emit(ctx, and, t1, lo(dst), lo(src)); + emit(ctx, and, t2, hi(dst), hi(src)); + emit(ctx, or, t1, t1, t2); + if (op == BPF_JSET) + emit(ctx, bnez, t1, off); + else /* JIT_JNSET */ + emit(ctx, beqz, t1, off); + break; + /* PC += off if dst > src */ + case BPF_JGT: + emit_sltu_r64(ctx, t1, src, dst); + emit(ctx, bnez, t1, off); + break; + /* PC += off if dst >= src */ + case BPF_JGE: + emit_sltu_r64(ctx, t1, dst, src); + emit(ctx, beqz, t1, off); + break; + /* PC += off if dst < src */ + case BPF_JLT: + 
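/*
 * For reference (illustrative sketch, not part of the patch): the 64-bit
 * unsigned less-than that emit_sltu_r64() builds from 32-bit compares,
 * and that the JGT/JGE/JLT/JLE cases here branch on. ref_sltu64 is a
 * hypothetical name; using subu + movn instead of a branch keeps the
 * emitted sequence branch-free.
 */
static u32 ref_sltu64(u32 rs_hi, u32 rs_lo, u32 rt_hi, u32 rt_lo)
{
        u32 rd = rs_lo < rt_lo;         /* decides only when high words match */

        if (rs_hi != rt_hi)             /* emitted as subu + movn */
                rd = 0;
        return rd | (rs_hi < rt_hi);    /* high words decide otherwise */
}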
emit_sltu_r64(ctx, t1, dst, src); + emit(ctx, bnez, t1, off); + break; + /* PC += off if dst <= src */ + case BPF_JLE: + emit_sltu_r64(ctx, t1, src, dst); + emit(ctx, beqz, t1, off); + break; + /* PC += off if dst > src (signed) */ + case BPF_JSGT: + emit_slt_r64(ctx, t1, src, dst); + emit(ctx, bnez, t1, off); + break; + /* PC += off if dst >= src (signed) */ + case BPF_JSGE: + emit_slt_r64(ctx, t1, dst, src); + emit(ctx, beqz, t1, off); + break; + /* PC += off if dst < src (signed) */ + case BPF_JSLT: + emit_slt_r64(ctx, t1, dst, src); + emit(ctx, bnez, t1, off); + break; + /* PC += off if dst <= src (signed) */ + case BPF_JSLE: + emit_slt_r64(ctx, t1, src, dst); + emit(ctx, beqz, t1, off); + break; + } +} + +/* Function call */ +static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) +{ + bool fixed; + u64 addr; + + /* Decode the call address */ + if (bpf_jit_get_func_addr(ctx->program, insn, false, + &addr, &fixed) < 0) + return -1; + if (!fixed) + return -1; + + /* Push stack arguments */ + push_regs(ctx, JIT_STACK_REGS, 0, JIT_RESERVED_STACK); + + /* Emit function call */ + emit_mov_i(ctx, MIPS_R_T9, addr); + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); + emit(ctx, nop); /* Delay slot */ + + clobber_reg(ctx, MIPS_R_RA); + clobber_reg(ctx, MIPS_R_V0); + clobber_reg(ctx, MIPS_R_V1); + return 0; +} + +/* Function tail call */ +static int emit_tail_call(struct jit_context *ctx) +{ + u8 ary = lo(bpf2mips32[BPF_REG_2]); + u8 ind = lo(bpf2mips32[BPF_REG_3]); + u8 t1 = MIPS_R_T8; + u8 t2 = MIPS_R_T9; + int off; + + /* + * Tail call: + * eBPF R1 - function argument (context ptr), passed in a0-a1 + * eBPF R2 - ptr to object with array of function entry points + * eBPF R3 - array index of function to be called + * stack[sz] - remaining tail call count, initialized in prologue + */ + + /* if (ind >= ary->map.max_entries) goto out */ + off = offsetof(struct bpf_array, map.max_entries); + if (off > 0x7fff) + return -1; + emit(ctx, lw, t1, off, ary); /* t1 = ary->map.max_entries*/ + emit_load_delay(ctx); /* Load delay slot */ + emit(ctx, sltu, t1, ind, t1); /* t1 = ind < t1 */ + emit(ctx, beqz, t1, get_offset(ctx, 1)); /* PC += off(1) if t1 == 0 */ + /* (next insn delay slot) */ + /* if (TCC-- <= 0) goto out */ + emit(ctx, lw, t2, ctx->stack_size, MIPS_R_SP); /* t2 = *(SP + size) */ + emit_load_delay(ctx); /* Load delay slot */ + emit(ctx, blez, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 <= 0 */ + emit(ctx, addiu, t2, t2, -1); /* t2-- (delay slot) */ + emit(ctx, sw, t2, ctx->stack_size, MIPS_R_SP); /* *(SP + size) = t2 */ + + /* prog = ary->ptrs[ind] */ + off = offsetof(struct bpf_array, ptrs); + if (off > 0x7fff) + return -1; + emit(ctx, sll, t1, ind, 2); /* t1 = ind << 2 */ + emit(ctx, addu, t1, t1, ary); /* t1 += ary */ + emit(ctx, lw, t2, off, t1); /* t2 = *(t1 + off) */ + emit_load_delay(ctx); /* Load delay slot */ + + /* if (prog == 0) goto out */ + emit(ctx, beqz, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 == 0 */ + emit(ctx, nop); /* Delay slot */ + + /* func = prog->bpf_func + 8 (prologue skip offset) */ + off = offsetof(struct bpf_prog, bpf_func); + if (off > 0x7fff) + return -1; + emit(ctx, lw, t1, off, t2); /* t1 = *(t2 + off) */ + emit_load_delay(ctx); /* Load delay slot */ + emit(ctx, addiu, t1, t1, JIT_TCALL_SKIP); /* t1 += skip (8 or 12) */ + + /* goto func */ + build_epilogue(ctx, t1); + return 0; +} + +/* + * Stack frame layout for a JITed program (stack grows down). 
+ * + * Higher address : Caller's stack frame : + * :----------------------------: + * : 64-bit eBPF args r3-r5 : + * :----------------------------: + * : Reserved / tail call count : + * +============================+ <--- MIPS sp before call + * | Callee-saved registers, | + * | including RA and FP | + * +----------------------------+ <--- eBPF FP (MIPS zero,fp) + * | Local eBPF variables | + * | allocated by program | + * +----------------------------+ + * | Reserved for caller-saved | + * | registers | + * +----------------------------+ + * | Reserved for 64-bit eBPF | + * | args r3-r5 & args passed | + * | on stack in kernel calls | + * Lower address +============================+ <--- MIPS sp + */ + +/* Build program prologue to set up the stack and registers */ +void build_prologue(struct jit_context *ctx) +{ + const u8 *r1 = bpf2mips32[BPF_REG_1]; + const u8 *fp = bpf2mips32[BPF_REG_FP]; + int stack, saved, locals, reserved; + + /* + * The first two instructions initialize TCC in the reserved (for us) + * 16-byte area in the parent's stack frame. On a tail call, the + * calling function jumps into the prologue after these instructions. + */ + emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, + min(MAX_TAIL_CALL_CNT + 1, 0xffff)); + emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP); + + /* + * Register eBPF R1 contains the 32-bit context pointer argument. + * A 32-bit argument is always passed in MIPS register a0, regardless + * of CPU endianness. Initialize R1 accordingly and zero-extend. + */ +#ifdef __BIG_ENDIAN + emit(ctx, move, lo(r1), MIPS_R_A0); +#endif + + /* === Entry-point for tail calls === */ + + /* Zero-extend the 32-bit argument */ + emit(ctx, move, hi(r1), MIPS_R_ZERO); + + /* If the eBPF frame pointer was accessed it must be saved */ + if (ctx->accessed & BIT(BPF_REG_FP)) + clobber_reg64(ctx, fp); + + /* Compute the stack space needed for callee-saved registers */ + saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u32); + saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); + + /* Stack space used by eBPF program local data */ + locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); + + /* + * If we are emitting function calls, reserve extra stack space for + * caller-saved registers and function arguments passed on the stack. + * The required space is computed automatically during resource + * usage discovery (pass 1). + */ + reserved = ctx->stack_used; + + /* Allocate the stack frame */ + stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); + emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, -stack); + + /* Store callee-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); + + /* Initialize the eBPF frame pointer if accessed */ + if (ctx->accessed & BIT(BPF_REG_FP)) + emit(ctx, addiu, lo(fp), MIPS_R_SP, stack - saved); + + ctx->saved_size = saved; + ctx->stack_size = stack; +} + +/* Build the program epilogue to restore the stack and registers */ +void build_epilogue(struct jit_context *ctx, int dest_reg) +{ + /* Restore callee-saved registers from stack */ + pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, + ctx->stack_size - ctx->saved_size); + /* + * A 32-bit return value is always passed in MIPS register v0, + * but on big-endian targets the low part of R0 is mapped to v1. 
+ */ +#ifdef __BIG_ENDIAN + emit(ctx, move, MIPS_R_V0, MIPS_R_V1); +#endif + + /* Jump to the return address and adjust the stack pointer */ + emit(ctx, jr, dest_reg); + emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); +} + +/* Build one eBPF instruction */ +int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) +{ + const u8 *dst = bpf2mips32[insn->dst_reg]; + const u8 *src = bpf2mips32[insn->src_reg]; + const u8 *res = bpf2mips32[BPF_REG_0]; + const u8 *tmp = bpf2mips32[JIT_REG_TMP]; + u8 code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + s32 val, rel; + u8 alu, jmp; + + switch (code) { + /* ALU operations */ + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + emit_mov_i(ctx, lo(dst), imm); + emit_zext_ver(ctx, dst); + break; + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ + emit_mov_i(ctx, hi(dst), 0); + } else { + emit_mov_r(ctx, lo(dst), lo(src)); + emit_zext_ver(ctx, dst); + } + break; + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + emit_alu_i(ctx, lo(dst), 0, BPF_NEG); + emit_zext_ver(ctx, dst); + break; + /* dst = dst & imm */ + /* dst = dst | imm */ + /* dst = dst ^ imm */ + /* dst = dst << imm */ + /* dst = dst >> imm */ + /* dst = dst >> imm (arithmetic) */ + /* dst = dst + imm */ + /* dst = dst - imm */ + /* dst = dst * imm */ + /* dst = dst / imm */ + /* dst = dst % imm */ + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + if (!valid_alu_i(BPF_OP(code), imm)) { + emit_mov_i(ctx, MIPS_R_T6, imm); + emit_alu_r(ctx, lo(dst), MIPS_R_T6, BPF_OP(code)); + } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { + emit_alu_i(ctx, lo(dst), val, alu); + } + emit_zext_ver(ctx, dst); + break; + /* dst = dst & src */ + /* dst = dst | src */ + /* dst = dst ^ src */ + /* dst = dst << src */ + /* dst = dst >> src */ + /* dst = dst >> src (arithmetic) */ + /* dst = dst + src */ + /* dst = dst - src */ + /* dst = dst * src */ + /* dst = dst / src */ + /* dst = dst % src */ + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_X: + emit_alu_r(ctx, lo(dst), lo(src), BPF_OP(code)); + emit_zext_ver(ctx, dst); + break; + /* dst = imm (64-bit) */ + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_mov_se_i64(ctx, dst, imm); + break; + /* dst = src (64-bit) */ + case BPF_ALU64 | BPF_MOV | BPF_X: + emit_mov_r(ctx, lo(dst), lo(src)); + emit_mov_r(ctx, hi(dst), hi(src)); + break; + /* dst = -dst (64-bit) */ + case BPF_ALU64 | BPF_NEG: + emit_neg_i64(ctx, dst); + break; + /* dst = dst & imm (64-bit) */ + case BPF_ALU64 | BPF_AND | BPF_K: + emit_alu_i64(ctx, dst, imm, BPF_OP(code)); + break; + /* dst = dst | imm (64-bit) */ + /* dst = dst ^ imm (64-bit) */ + /* dst = dst + imm (64-bit) */ + /* dst = dst - imm (64-bit) */ + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (imm) + emit_alu_i64(ctx, 
dst, imm, BPF_OP(code)); + break; + /* dst = dst << imm (64-bit) */ + /* dst = dst >> imm (64-bit) */ + /* dst = dst >> imm (64-bit, arithmetic) */ + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm) + emit_shift_i64(ctx, dst, imm, BPF_OP(code)); + break; + /* dst = dst * imm (64-bit) */ + case BPF_ALU64 | BPF_MUL | BPF_K: + emit_mul_i64(ctx, dst, imm); + break; + /* dst = dst / imm (64-bit) */ + /* dst = dst % imm (64-bit) */ + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + /* + * Sign-extend the immediate value into a temporary register, + * and then do the operation on this register. + */ + emit_mov_se_i64(ctx, tmp, imm); + emit_divmod_r64(ctx, dst, tmp, BPF_OP(code)); + break; + /* dst = dst & src (64-bit) */ + /* dst = dst | src (64-bit) */ + /* dst = dst ^ src (64-bit) */ + /* dst = dst + src (64-bit) */ + /* dst = dst - src (64-bit) */ + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit_alu_r64(ctx, dst, src, BPF_OP(code)); + break; + /* dst = dst << src (64-bit) */ + /* dst = dst >> src (64-bit) */ + /* dst = dst >> src (64-bit, arithmetic) */ + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_shift_r64(ctx, dst, lo(src), BPF_OP(code)); + break; + /* dst = dst * src (64-bit) */ + case BPF_ALU64 | BPF_MUL | BPF_X: + emit_mul_r64(ctx, dst, src); + break; + /* dst = dst / src (64-bit) */ + /* dst = dst % src (64-bit) */ + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_divmod_r64(ctx, dst, src, BPF_OP(code)); + break; + /* dst = htole(dst) */ + /* dst = htobe(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: + if (BPF_SRC(code) == +#ifdef __BIG_ENDIAN + BPF_FROM_LE +#else + BPF_FROM_BE +#endif + ) + emit_bswap_r64(ctx, dst, imm); + else + emit_trunc_r64(ctx, dst, imm); + break; + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + emit_mov_i(ctx, lo(dst), imm); + emit_mov_i(ctx, hi(dst), insn[1].imm); + return 1; + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + emit_ldx(ctx, dst, lo(src), off, BPF_SIZE(code)); + break; + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_DW: + /* Sign-extend immediate value into temporary reg */ + emit_mov_se_i64(ctx, tmp, imm); + break; + case BPF_W: + case BPF_H: + case BPF_B: + emit_mov_i(ctx, lo(tmp), imm); + break; + } + emit_stx(ctx, lo(dst), tmp, off, BPF_SIZE(code)); + break; + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: + emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code)); + break; + /* Speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + /* Atomics */ + case BPF_STX | BPF_ATOMIC | BPF_W: + switch (imm) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + case BPF_AND: + case BPF_AND | BPF_FETCH: + case BPF_OR: + case BPF_OR | BPF_FETCH: + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + case BPF_XCHG: + if (cpu_has_llsc) + emit_atomic_r(ctx, lo(dst), lo(src), off, imm); + else /* 
Non-ll/sc fallback */ + emit_atomic_r32(ctx, lo(dst), lo(src), + off, imm); + if (imm & BPF_FETCH) + emit_zext_ver(ctx, src); + break; + case BPF_CMPXCHG: + if (cpu_has_llsc) + emit_cmpxchg_r(ctx, lo(dst), lo(src), + lo(res), off); + else /* Non-ll/sc fallback */ + emit_cmpxchg_r32(ctx, lo(dst), lo(src), off); + /* Result zero-extension inserted by verifier */ + break; + default: + goto notyet; + } + break; + /* Atomics (64-bit) */ + case BPF_STX | BPF_ATOMIC | BPF_DW: + switch (imm) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + case BPF_AND: + case BPF_AND | BPF_FETCH: + case BPF_OR: + case BPF_OR | BPF_FETCH: + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + case BPF_XCHG: + emit_atomic_r64(ctx, lo(dst), src, off, imm); + break; + case BPF_CMPXCHG: + emit_cmpxchg_r64(ctx, lo(dst), src, off); + break; + default: + goto notyet; + } + break; + /* PC += off if dst == src */ + /* PC += off if dst != src */ + /* PC += off if dst & src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + if (off == 0) + break; + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); + emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp); + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == imm */ + /* PC += off if dst != imm */ + /* PC += off if dst & imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + if (off == 0) + break; + setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); + if (valid_jmp_i(jmp, imm)) { + emit_jmp_i(ctx, lo(dst), imm, rel, jmp); + } else { + /* Move large immediate to register */ + emit_mov_i(ctx, MIPS_R_T6, imm); + emit_jmp_r(ctx, lo(dst), MIPS_R_T6, rel, jmp); + } + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == src */ + /* PC += off if dst != src */ + /* PC += off if dst & src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLT 
| BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + if (off == 0) + break; + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); + emit_jmp_r64(ctx, dst, src, rel, jmp); + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == imm */ + /* PC += off if dst != imm */ + /* PC += off if dst & imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (off == 0) + break; + setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); + emit_jmp_i64(ctx, dst, imm, rel, jmp); + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off */ + case BPF_JMP | BPF_JA: + if (off == 0) + break; + if (emit_ja(ctx, off) < 0) + goto toofar; + break; + /* Tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_tail_call(ctx) < 0) + goto invalid; + break; + /* Function call */ + case BPF_JMP | BPF_CALL: + if (emit_call(ctx, insn) < 0) + goto invalid; + break; + /* Function return */ + case BPF_JMP | BPF_EXIT: + /* + * Optimization: when last instruction is EXIT + * simply continue to epilogue. + */ + if (ctx->bpf_index == ctx->program->len - 1) + break; + if (emit_exit(ctx) < 0) + goto toofar; + break; + + default: +invalid: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; +notyet: + pr_info_once("*** NOT YET: opcode %02x ***\n", code); + return -EFAULT; +toofar: + pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", + ctx->bpf_index, code); + return -E2BIG; + } + return 0; +} diff --git a/arch/mips/net/bpf_jit_comp64.c b/arch/mips/net/bpf_jit_comp64.c new file mode 100644 index 00000000000000..815ade7242278a --- /dev/null +++ b/arch/mips/net/bpf_jit_comp64.c @@ -0,0 +1,1060 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Just-In-Time compiler for eBPF bytecode on MIPS. + * Implementation of JIT functions for 64-bit CPUs. + * + * Copyright (c) 2021 Anyfi Networks AB. + * Author: Johan Almbladh + * + * Based on code and ideas from + * Copyright (c) 2017 Cavium, Inc. 
+ * Copyright (c) 2017 Shubham Bansal + * Copyright (c) 2011 Mircea Gherzan + */ + +#include +#include +#include +#include +#include +#include + +#include "bpf_jit_comp.h" + +/* MIPS t0-t3 are not available in the n64 ABI */ +#undef MIPS_R_T0 +#undef MIPS_R_T1 +#undef MIPS_R_T2 +#undef MIPS_R_T3 + +/* Stack is 16-byte aligned in n64 ABI */ +#define MIPS_STACK_ALIGNMENT 16 + +/* Extra 64-bit eBPF registers used by JIT */ +#define JIT_REG_TC (MAX_BPF_JIT_REG + 0) +#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1) + +/* Number of prologue bytes to skip when doing a tail call */ +#define JIT_TCALL_SKIP 4 + +/* Callee-saved CPU registers that the JIT must preserve */ +#define JIT_CALLEE_REGS \ + (BIT(MIPS_R_S0) | \ + BIT(MIPS_R_S1) | \ + BIT(MIPS_R_S2) | \ + BIT(MIPS_R_S3) | \ + BIT(MIPS_R_S4) | \ + BIT(MIPS_R_S5) | \ + BIT(MIPS_R_S6) | \ + BIT(MIPS_R_S7) | \ + BIT(MIPS_R_GP) | \ + BIT(MIPS_R_FP) | \ + BIT(MIPS_R_RA)) + +/* Caller-saved CPU registers available for JIT use */ +#define JIT_CALLER_REGS \ + (BIT(MIPS_R_A5) | \ + BIT(MIPS_R_A6) | \ + BIT(MIPS_R_A7)) +/* + * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers. + * MIPS registers t4 - t7 may be used by the JIT as temporary registers. + * MIPS registers t8 - t9 are reserved for single-register common functions. + */ +static const u8 bpf2mips64[] = { + /* Return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = MIPS_R_V0, + /* Arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = MIPS_R_A0, + [BPF_REG_2] = MIPS_R_A1, + [BPF_REG_3] = MIPS_R_A2, + [BPF_REG_4] = MIPS_R_A3, + [BPF_REG_5] = MIPS_R_A4, + /* Callee-saved registers that in-kernel function will preserve */ + [BPF_REG_6] = MIPS_R_S0, + [BPF_REG_7] = MIPS_R_S1, + [BPF_REG_8] = MIPS_R_S2, + [BPF_REG_9] = MIPS_R_S3, + /* Read-only frame pointer to access the eBPF stack */ + [BPF_REG_FP] = MIPS_R_FP, + /* Temporary register for blinding constants */ + [BPF_REG_AX] = MIPS_R_AT, + /* Tail call count register, caller-saved */ + [JIT_REG_TC] = MIPS_R_A5, + /* Constant for register zero-extension */ + [JIT_REG_ZX] = MIPS_R_V1, +}; + +/* + * MIPS 32-bit operations on 64-bit registers generate a sign-extended + * result. However, the eBPF ISA mandates zero-extension, so we rely on the + * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic + * operations, right shift and byte swap require properly sign-extended + * operands or the result is unpredictable. We emit explicit sign-extensions + * in those cases. 
+ */ + +/* Sign extension */ +static void emit_sext(struct jit_context *ctx, u8 dst, u8 src) +{ + emit(ctx, sll, dst, src, 0); + clobber_reg(ctx, dst); +} + +/* Zero extension */ +static void emit_zext(struct jit_context *ctx, u8 dst) +{ + if (cpu_has_mips64r2 || cpu_has_mips64r6) { + emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); + } else { + emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]); + access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */ + } + clobber_reg(ctx, dst); +} + +/* Zero extension, if verifier does not do it for us */ +static void emit_zext_ver(struct jit_context *ctx, u8 dst) +{ + if (!ctx->program->aux->verifier_zext) + emit_zext(ctx, dst); +} + +/* dst = imm (64-bit) */ +static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64) +{ + if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) { + emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64); + } else if (imm64 >= 0xffffffff80000000ULL || + (imm64 < 0x80000000 && imm64 > 0xffff)) { + emit(ctx, lui, dst, (s16)(imm64 >> 16)); + emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff); + } else { + u8 acc = MIPS_R_ZERO; + int shift = 0; + int k; + + for (k = 0; k < 4; k++) { + u16 half = imm64 >> (48 - 16 * k); + + if (acc == dst) + shift += 16; + + if (half) { + if (shift) + emit(ctx, dsll_safe, dst, dst, shift); + emit(ctx, ori, dst, acc, half); + acc = dst; + shift = 0; + } + } + if (shift) + emit(ctx, dsll_safe, dst, dst, shift); + } + clobber_reg(ctx, dst); +} + +/* ALU immediate operation (64-bit) */ +static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op) +{ + switch (BPF_OP(op)) { + /* dst = dst | imm */ + case BPF_OR: + emit(ctx, ori, dst, dst, (u16)imm); + break; + /* dst = dst ^ imm */ + case BPF_XOR: + emit(ctx, xori, dst, dst, (u16)imm); + break; + /* dst = -dst */ + case BPF_NEG: + emit(ctx, dsubu, dst, MIPS_R_ZERO, dst); + break; + /* dst = dst << imm */ + case BPF_LSH: + emit(ctx, dsll_safe, dst, dst, imm); + break; + /* dst = dst >> imm */ + case BPF_RSH: + emit(ctx, dsrl_safe, dst, dst, imm); + break; + /* dst = dst >> imm (arithmetic) */ + case BPF_ARSH: + emit(ctx, dsra_safe, dst, dst, imm); + break; + /* dst = dst + imm */ + case BPF_ADD: + emit(ctx, daddiu, dst, dst, imm); + break; + /* dst = dst - imm */ + case BPF_SUB: + emit(ctx, daddiu, dst, dst, -imm); + break; + default: + /* Width-generic operations */ + emit_alu_i(ctx, dst, imm, op); + } + clobber_reg(ctx, dst); +} + +/* ALU register operation (64-bit) */ +static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op) +{ + switch (BPF_OP(op)) { + /* dst = dst << src */ + case BPF_LSH: + emit(ctx, dsllv, dst, dst, src); + break; + /* dst = dst >> src */ + case BPF_RSH: + emit(ctx, dsrlv, dst, dst, src); + break; + /* dst = dst >> src (arithmetic) */ + case BPF_ARSH: + emit(ctx, dsrav, dst, dst, src); + break; + /* dst = dst + src */ + case BPF_ADD: + emit(ctx, daddu, dst, dst, src); + break; + /* dst = dst - src */ + case BPF_SUB: + emit(ctx, dsubu, dst, dst, src); + break; + /* dst = dst * src */ + case BPF_MUL: + if (cpu_has_mips64r6) { + emit(ctx, dmulu, dst, dst, src); + } else { + emit(ctx, dmultu, dst, src); + emit(ctx, mflo, dst); + } + break; + /* dst = dst / src */ + case BPF_DIV: + if (cpu_has_mips64r6) { + emit(ctx, ddivu_r6, dst, dst, src); + } else { + emit(ctx, ddivu, dst, src); + emit(ctx, mflo, dst); + } + break; + /* dst = dst % src */ + case BPF_MOD: + if (cpu_has_mips64r6) { + emit(ctx, dmodu, dst, dst, src); + } else { + emit(ctx, ddivu, dst, src); + emit(ctx, mfhi, dst); + } + break; + default: 
+ /* Width-generic operations */ + emit_alu_r(ctx, dst, src, op); + } + clobber_reg(ctx, dst); +} + +/* Swap sub words in a register double word */ +static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits) +{ + u8 tmp = MIPS_R_T9; + + emit(ctx, and, tmp, dst, mask); /* tmp = dst & mask */ + emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */ + emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */ + emit(ctx, and, dst, dst, mask); /* dst = dst & mask */ + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ +} + +/* Swap bytes and truncate a register double word, word or half word */ +static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width) +{ + switch (width) { + /* Swap bytes in a double word */ + case 64: + if (cpu_has_mips64r2 || cpu_has_mips64r6) { + emit(ctx, dsbh, dst, dst); + emit(ctx, dshd, dst, dst); + } else { + u8 t1 = MIPS_R_T6; + u8 t2 = MIPS_R_T7; + + emit(ctx, dsll32, t2, dst, 0); /* t2 = dst << 32 */ + emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32 */ + emit(ctx, or, dst, dst, t2); /* dst = dst | t2 */ + + emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff); + emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ + emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ + emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */ + + emit(ctx, lui, t2, 0xff); /* t2 = 0x00ff0000 */ + emit(ctx, ori, t2, t2, 0xff); /* t2 = t2 | 0x00ff */ + emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ + emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ + emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */ + } + break; + /* Swap bytes in a half word */ + /* Swap bytes in a word */ + case 32: + case 16: + emit_sext(ctx, dst, dst); + emit_bswap_r(ctx, dst, width); + if (cpu_has_mips64r2 || cpu_has_mips64r6) + emit_zext(ctx, dst); + break; + } + clobber_reg(ctx, dst); +} + +/* Truncate a register double word, word or half word */ +static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width) +{ + switch (width) { + case 64: + break; + /* Zero-extend a word */ + case 32: + emit_zext(ctx, dst); + break; + /* Zero-extend a half word */ + case 16: + emit(ctx, andi, dst, dst, 0xffff); + break; + } + clobber_reg(ctx, dst); +} + +/* Load operation: dst = *(size*)(src + off) */ +static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) +{ + switch (size) { + /* Load a byte */ + case BPF_B: + emit(ctx, lbu, dst, off, src); + break; + /* Load a half word */ + case BPF_H: + emit(ctx, lhu, dst, off, src); + break; + /* Load a word */ + case BPF_W: + emit(ctx, lwu, dst, off, src); + break; + /* Load a double word */ + case BPF_DW: + emit(ctx, ld, dst, off, src); + break; + } + clobber_reg(ctx, dst); +} + +/* Store operation: *(size *)(dst + off) = src */ +static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) +{ + switch (size) { + /* Store a byte */ + case BPF_B: + emit(ctx, sb, src, off, dst); + break; + /* Store a half word */ + case BPF_H: + emit(ctx, sh, src, off, dst); + break; + /* Store a word */ + case BPF_W: + emit(ctx, sw, src, off, dst); + break; + /* Store a double word */ + case BPF_DW: + emit(ctx, sd, src, off, dst); + break; + } +} + +/* Atomic read-modify-write */ +static void emit_atomic_r64(struct jit_context *ctx, + u8 dst, u8 src, s16 off, u8 code) +{ + u8 t1 = MIPS_R_T6; + u8 t2 = MIPS_R_T7; + + LLSC_sync(ctx); + emit(ctx, lld, t1, off, dst); + switch (code) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + emit(ctx, daddu, t2, t1, src); + break; + case BPF_AND: + case BPF_AND | BPF_FETCH: + emit(ctx, and, t2, t1, 
src); + break; + case BPF_OR: + case BPF_OR | BPF_FETCH: + emit(ctx, or, t2, t1, src); + break; + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + emit(ctx, xor, t2, t1, src); + break; + case BPF_XCHG: + emit(ctx, move, t2, src); + break; + } + emit(ctx, scd, t2, off, dst); + emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset); + emit(ctx, nop); /* Delay slot */ + + if (code & BPF_FETCH) { + emit(ctx, move, src, t1); + clobber_reg(ctx, src); + } +} + +/* Atomic compare-and-exchange */ +static void emit_cmpxchg_r64(struct jit_context *ctx, u8 dst, u8 src, s16 off) +{ + u8 r0 = bpf2mips64[BPF_REG_0]; + u8 t1 = MIPS_R_T6; + u8 t2 = MIPS_R_T7; + + LLSC_sync(ctx); + emit(ctx, lld, t1, off, dst); + emit(ctx, bne, t1, r0, 12); + emit(ctx, move, t2, src); /* Delay slot */ + emit(ctx, scd, t2, off, dst); + emit(ctx, LLSC_beqz, t2, -20 - LLSC_offset); + emit(ctx, move, r0, t1); /* Delay slot */ + + clobber_reg(ctx, r0); +} + +/* Function call */ +static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) +{ + u8 zx = bpf2mips64[JIT_REG_ZX]; + u8 tmp = MIPS_R_T6; + bool fixed; + u64 addr; + + /* Decode the call address */ + if (bpf_jit_get_func_addr(ctx->program, insn, false, + &addr, &fixed) < 0) + return -1; + if (!fixed) + return -1; + + /* Push caller-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); + + /* Emit function call */ + emit_mov_i64(ctx, tmp, addr & JALR_MASK); + emit(ctx, jalr, MIPS_R_RA, tmp); + emit(ctx, nop); /* Delay slot */ + + /* Restore caller-saved registers */ + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); + + /* Re-initialize the JIT zero-extension register if accessed */ + if (ctx->accessed & BIT(JIT_REG_ZX)) { + emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); + emit(ctx, dsrl32, zx, zx, 0); + } + + clobber_reg(ctx, MIPS_R_RA); + clobber_reg(ctx, MIPS_R_V0); + clobber_reg(ctx, MIPS_R_V1); + return 0; +} + +/* Function tail call */ +static int emit_tail_call(struct jit_context *ctx) +{ + u8 ary = bpf2mips64[BPF_REG_2]; + u8 ind = bpf2mips64[BPF_REG_3]; + u8 tcc = bpf2mips64[JIT_REG_TC]; + u8 tmp = MIPS_R_T6; + int off; + + /* + * Tail call: + * eBPF R1 - function argument (context ptr), passed in a0-a1 + * eBPF R2 - ptr to object with array of function entry points + * eBPF R3 - array index of function to be called + */ + + /* if (ind >= ary->map.max_entries) goto out */ + off = offsetof(struct bpf_array, map.max_entries); + if (off > 0x7fff) + return -1; + emit(ctx, lwu, tmp, off, ary); /* tmp = ary->map.max_entrs*/ + emit(ctx, sltu, tmp, ind, tmp); /* tmp = ind < t1 */ + emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ + + /* if (--TCC < 0) goto out */ + emit(ctx, daddiu, tcc, tcc, -1); /* tcc-- (delay slot) */ + emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */ + /* (next insn delay slot) */ + /* prog = ary->ptrs[ind] */ + off = offsetof(struct bpf_array, ptrs); + if (off > 0x7fff) + return -1; + emit(ctx, dsll, tmp, ind, 3); /* tmp = ind << 3 */ + emit(ctx, daddu, tmp, tmp, ary); /* tmp += ary */ + emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ + + /* if (prog == 0) goto out */ + emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ + emit(ctx, nop); /* Delay slot */ + + /* func = prog->bpf_func + 8 (prologue skip offset) */ + off = offsetof(struct bpf_prog, bpf_func); + if (off > 0x7fff) + return -1; + emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ + emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4) */ + + /* goto func */ + 
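/*
 * For reference (illustrative sketch, not part of the patch): the checks
 * that emit_tail_call() open-codes above, as plain C. The name
 * ref_tail_call_target is hypothetical; a NULL result means "fall
 * through to the next instruction", otherwise the JIT jumps to the
 * returned address via the epilogue emitted below.
 */
static void *ref_tail_call_target(struct bpf_array *ary, u64 ind, long *tcc)
{
        struct bpf_prog *prog;

        if (ind >= ary->map.max_entries)
                return NULL;
        if (--(*tcc) < 0)
                return NULL;
        prog = ary->ptrs[ind];
        if (!prog)
                return NULL;
        /* Skip the callee's prologue (JIT_TCALL_SKIP bytes) */
        return (u8 *)prog->bpf_func + JIT_TCALL_SKIP;
}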
build_epilogue(ctx, tmp); + access_reg(ctx, JIT_REG_TC); + return 0; +} + +/* + * Stack frame layout for a JITed program (stack grows down). + * + * Higher address : Previous stack frame : + * +===========================+ <--- MIPS sp before call + * | Callee-saved registers, | + * | including RA and FP | + * +---------------------------+ <--- eBPF FP (MIPS fp) + * | Local eBPF variables | + * | allocated by program | + * +---------------------------+ + * | Reserved for caller-saved | + * | registers | + * Lower address +===========================+ <--- MIPS sp + */ + +/* Build program prologue to set up the stack and registers */ +void build_prologue(struct jit_context *ctx) +{ + u8 fp = bpf2mips64[BPF_REG_FP]; + u8 tc = bpf2mips64[JIT_REG_TC]; + u8 zx = bpf2mips64[JIT_REG_ZX]; + int stack, saved, locals, reserved; + + /* + * The first instruction initializes the tail call count register. + * On a tail call, the calling function jumps into the prologue + * after this instruction. + */ + emit(ctx, addiu, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT + 1, 0xffff)); + + /* === Entry-point for tail calls === */ + + /* + * If the eBPF frame pointer and tail call count registers were + * accessed they must be preserved. Mark them as clobbered here + * to save and restore them on the stack as needed. + */ + if (ctx->accessed & BIT(BPF_REG_FP)) + clobber_reg(ctx, fp); + if (ctx->accessed & BIT(JIT_REG_TC)) + clobber_reg(ctx, tc); + if (ctx->accessed & BIT(JIT_REG_ZX)) + clobber_reg(ctx, zx); + + /* Compute the stack space needed for callee-saved registers */ + saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64); + saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); + + /* Stack space used by eBPF program local data */ + locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); + + /* + * If we are emitting function calls, reserve extra stack space for + * caller-saved registers needed by the JIT. The required space is + * computed automatically during resource usage discovery (pass 1). 
+ */ + reserved = ctx->stack_used; + + /* Allocate the stack frame */ + stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); + if (stack) + emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack); + + /* Store callee-saved registers on stack */ + push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); + + /* Initialize the eBPF frame pointer if accessed */ + if (ctx->accessed & BIT(BPF_REG_FP)) + emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved); + + /* Initialize the ePF JIT zero-extension register if accessed */ + if (ctx->accessed & BIT(JIT_REG_ZX)) { + emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); + emit(ctx, dsrl32, zx, zx, 0); + } + + ctx->saved_size = saved; + ctx->stack_size = stack; +} + +/* Build the program epilogue to restore the stack and registers */ +void build_epilogue(struct jit_context *ctx, int dest_reg) +{ + /* Restore callee-saved registers from stack */ + pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, + ctx->stack_size - ctx->saved_size); + + /* Release the stack frame */ + if (ctx->stack_size) + emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); + + /* Jump to return address and sign-extend the 32-bit return value */ + emit(ctx, jr, dest_reg); + emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */ +} + +/* Build one eBPF instruction */ +int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) +{ + u8 dst = bpf2mips64[insn->dst_reg]; + u8 src = bpf2mips64[insn->src_reg]; + u8 res = bpf2mips64[BPF_REG_0]; + u8 code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + s32 val, rel; + u8 alu, jmp; + + switch (code) { + /* ALU operations */ + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + emit_mov_i(ctx, dst, imm); + emit_zext_ver(ctx, dst); + break; + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ + emit_zext(ctx, dst); + } else { + emit_mov_r(ctx, dst, src); + emit_zext_ver(ctx, dst); + } + break; + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + emit_sext(ctx, dst, dst); + emit_alu_i(ctx, dst, 0, BPF_NEG); + emit_zext_ver(ctx, dst); + break; + /* dst = dst & imm */ + /* dst = dst | imm */ + /* dst = dst ^ imm */ + /* dst = dst << imm */ + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: + if (!valid_alu_i(BPF_OP(code), imm)) { + emit_mov_i(ctx, MIPS_R_T4, imm); + emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); + } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { + emit_alu_i(ctx, dst, val, alu); + } + emit_zext_ver(ctx, dst); + break; + /* dst = dst >> imm */ + /* dst = dst >> imm (arithmetic) */ + /* dst = dst + imm */ + /* dst = dst - imm */ + /* dst = dst * imm */ + /* dst = dst / imm */ + /* dst = dst % imm */ + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + if (!valid_alu_i(BPF_OP(code), imm)) { + emit_sext(ctx, dst, dst); + emit_mov_i(ctx, MIPS_R_T4, imm); + emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); + } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { + emit_sext(ctx, dst, dst); + emit_alu_i(ctx, dst, val, alu); + } + emit_zext_ver(ctx, dst); + break; + /* dst = dst & src */ + /* dst = dst | src */ + /* dst = dst ^ src */ + /* dst = dst << src */ + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_LSH | 
BPF_X: + emit_alu_r(ctx, dst, src, BPF_OP(code)); + emit_zext_ver(ctx, dst); + break; + /* dst = dst >> src */ + /* dst = dst >> src (arithmetic) */ + /* dst = dst + src */ + /* dst = dst - src */ + /* dst = dst * src */ + /* dst = dst / src */ + /* dst = dst % src */ + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_X: + emit_sext(ctx, dst, dst); + emit_sext(ctx, MIPS_R_T4, src); + emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); + emit_zext_ver(ctx, dst); + break; + /* dst = imm (64-bit) */ + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_mov_i(ctx, dst, imm); + break; + /* dst = src (64-bit) */ + case BPF_ALU64 | BPF_MOV | BPF_X: + emit_mov_r(ctx, dst, src); + break; + /* dst = -dst (64-bit) */ + case BPF_ALU64 | BPF_NEG: + emit_alu_i64(ctx, dst, 0, BPF_NEG); + break; + /* dst = dst & imm (64-bit) */ + /* dst = dst | imm (64-bit) */ + /* dst = dst ^ imm (64-bit) */ + /* dst = dst << imm (64-bit) */ + /* dst = dst >> imm (64-bit) */ + /* dst = dst >> imm (64-bit, arithmetic) */ + /* dst = dst + imm (64-bit) */ + /* dst = dst - imm (64-bit) */ + /* dst = dst * imm (64-bit) */ + /* dst = dst / imm (64-bit) */ + /* dst = dst % imm (64-bit) */ + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + if (!valid_alu_i(BPF_OP(code), imm)) { + emit_mov_i(ctx, MIPS_R_T4, imm); + emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code)); + } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { + emit_alu_i64(ctx, dst, val, alu); + } + break; + /* dst = dst & src (64-bit) */ + /* dst = dst | src (64-bit) */ + /* dst = dst ^ src (64-bit) */ + /* dst = dst << src (64-bit) */ + /* dst = dst >> src (64-bit) */ + /* dst = dst >> src (64-bit, arithmetic) */ + /* dst = dst + src (64-bit) */ + /* dst = dst - src (64-bit) */ + /* dst = dst * src (64-bit) */ + /* dst = dst / src (64-bit) */ + /* dst = dst % src (64-bit) */ + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_alu_r64(ctx, dst, src, BPF_OP(code)); + break; + /* dst = htole(dst) */ + /* dst = htobe(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: + if (BPF_SRC(code) == +#ifdef __BIG_ENDIAN + BPF_FROM_LE +#else + BPF_FROM_BE +#endif + ) + emit_bswap_r64(ctx, dst, imm); + else + emit_trunc_r64(ctx, dst, imm); + break; + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32)); + return 1; + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + emit_ldx(ctx, dst, src, off, BPF_SIZE(code)); + break; + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST |
BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + emit_mov_i(ctx, MIPS_R_T4, imm); + emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code)); + break; + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: + emit_stx(ctx, dst, src, off, BPF_SIZE(code)); + break; + /* Speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + /* Atomics */ + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + switch (imm) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + case BPF_AND: + case BPF_AND | BPF_FETCH: + case BPF_OR: + case BPF_OR | BPF_FETCH: + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + case BPF_XCHG: + if (BPF_SIZE(code) == BPF_DW) { + emit_atomic_r64(ctx, dst, src, off, imm); + } else if (imm & BPF_FETCH) { + u8 tmp = dst; + + if (src == dst) { /* Don't overwrite dst */ + emit_mov_r(ctx, MIPS_R_T4, dst); + tmp = MIPS_R_T4; + } + emit_sext(ctx, src, src); + emit_atomic_r(ctx, tmp, src, off, imm); + emit_zext_ver(ctx, src); + } else { /* 32-bit, no fetch */ + emit_sext(ctx, MIPS_R_T4, src); + emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm); + } + break; + case BPF_CMPXCHG: + if (BPF_SIZE(code) == BPF_DW) { + emit_cmpxchg_r64(ctx, dst, src, off); + } else { + u8 tmp = res; + + if (res == dst) /* Don't overwrite dst */ + tmp = MIPS_R_T4; + emit_sext(ctx, tmp, res); + emit_sext(ctx, MIPS_R_T5, src); + emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off); + if (res == dst) /* Restore result */ + emit_mov_r(ctx, res, MIPS_R_T4); + /* Result zext inserted by verifier */ + } + break; + default: + goto notyet; + } + break; + /* PC += off if dst == src */ + /* PC += off if dst != src */ + /* PC += off if dst & src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + if (off == 0) + break; + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); + emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ + emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */ + emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == imm */ + /* PC += off if dst != imm */ + /* PC += off if dst & imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + 
case BPF_JMP32 | BPF_JSLE | BPF_K: + if (off == 0) + break; + setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); + emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ + if (valid_jmp_i(jmp, imm)) { + emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp); + } else { + /* Move large immediate to register, sign-extended */ + emit_mov_i(ctx, MIPS_R_T5, imm); + emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); + } + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == src */ + /* PC += off if dst != src */ + /* PC += off if dst & src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + if (off == 0) + break; + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); + emit_jmp_r(ctx, dst, src, rel, jmp); + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off if dst == imm */ + /* PC += off if dst != imm */ + /* PC += off if dst & imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (off == 0) + break; + setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); + if (valid_jmp_i(jmp, imm)) { + emit_jmp_i(ctx, dst, imm, rel, jmp); + } else { + /* Move large immediate to register */ + emit_mov_i(ctx, MIPS_R_T4, imm); + emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp); + } + if (finish_jmp(ctx, jmp, off) < 0) + goto toofar; + break; + /* PC += off */ + case BPF_JMP | BPF_JA: + if (off == 0) + break; + if (emit_ja(ctx, off) < 0) + goto toofar; + break; + /* Tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_tail_call(ctx) < 0) + goto invalid; + break; + /* Function call */ + case BPF_JMP | BPF_CALL: + if (emit_call(ctx, insn) < 0) + goto invalid; + break; + /* Function return */ + case BPF_JMP | BPF_EXIT: + /* + * Optimization: when last instruction is EXIT + * simply continue to epilogue. 
+ */ + if (ctx->bpf_index == ctx->program->len - 1) + break; + if (emit_exit(ctx) < 0) + goto toofar; + break; + + default: +invalid: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; +notyet: + pr_info_once("*** NOT YET: opcode %02x ***\n", code); + return -EFAULT; +toofar: + pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", + ctx->bpf_index, code); + return -E2BIG; + } + return 0; +} diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c deleted file mode 100644 index 3a73e937571217..00000000000000 --- a/arch/mips/net/ebpf_jit.c +++ /dev/null @@ -1,1938 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Just-In-Time compiler for eBPF filters on MIPS - * - * Copyright (c) 2017 Cavium, Inc. - * - * Based on code from: - * - * Copyright (c) 2014 Imagination Technologies Ltd. - * Author: Markos Chandras - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Registers used by JIT */ -#define MIPS_R_ZERO 0 -#define MIPS_R_AT 1 -#define MIPS_R_V0 2 /* BPF_R0 */ -#define MIPS_R_V1 3 -#define MIPS_R_A0 4 /* BPF_R1 */ -#define MIPS_R_A1 5 /* BPF_R2 */ -#define MIPS_R_A2 6 /* BPF_R3 */ -#define MIPS_R_A3 7 /* BPF_R4 */ -#define MIPS_R_A4 8 /* BPF_R5 */ -#define MIPS_R_T4 12 /* BPF_AX */ -#define MIPS_R_T5 13 -#define MIPS_R_T6 14 -#define MIPS_R_T7 15 -#define MIPS_R_S0 16 /* BPF_R6 */ -#define MIPS_R_S1 17 /* BPF_R7 */ -#define MIPS_R_S2 18 /* BPF_R8 */ -#define MIPS_R_S3 19 /* BPF_R9 */ -#define MIPS_R_S4 20 /* BPF_TCC */ -#define MIPS_R_S5 21 -#define MIPS_R_S6 22 -#define MIPS_R_S7 23 -#define MIPS_R_T8 24 -#define MIPS_R_T9 25 -#define MIPS_R_SP 29 -#define MIPS_R_RA 31 - -/* eBPF flags */ -#define EBPF_SAVE_S0 BIT(0) -#define EBPF_SAVE_S1 BIT(1) -#define EBPF_SAVE_S2 BIT(2) -#define EBPF_SAVE_S3 BIT(3) -#define EBPF_SAVE_S4 BIT(4) -#define EBPF_SAVE_RA BIT(5) -#define EBPF_SEEN_FP BIT(6) -#define EBPF_SEEN_TC BIT(7) -#define EBPF_TCC_IN_V1 BIT(8) - -/* - * For the mips64 ISA, we need to track the value range or type for - * each JIT register. The BPF machine requires zero extended 32-bit - * values, but the mips64 ISA requires sign extended 32-bit values. - * At each point in the BPF program we track the state of every - * register so that we can zero extend or sign extend as the BPF - * semantics require. - */ -enum reg_val_type { - /* uninitialized */ - REG_UNKNOWN, - /* not known to be 32-bit compatible. */ - REG_64BIT, - /* 32-bit compatible, no truncation needed for 64-bit ops. */ - REG_64BIT_32BIT, - /* 32-bit compatible, need truncation for 64-bit ops. */ - REG_32BIT, - /* 32-bit no sign/zero extension needed. */ - REG_32BIT_POS -}; - -/* - * high bit of offsets indicates if long branch conversion done at - * this insn. - */ -#define OFFSETS_B_CONV BIT(31) - -/** - * struct jit_ctx - JIT context - * @skf: The sk_filter - * @stack_size: eBPF stack size - * @idx: Instruction index - * @flags: JIT flags - * @offsets: Instruction offsets - * @target: Memory location for the compiled filter - * @reg_val_types Packed enum reg_val_type for each register. 
- */ -struct jit_ctx { - const struct bpf_prog *skf; - int stack_size; - u32 idx; - u32 flags; - u32 *offsets; - u32 *target; - u64 *reg_val_types; - unsigned int long_b_conversion:1; - unsigned int gen_b_offsets:1; - unsigned int use_bbit_insns:1; -}; - -static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) -{ - *rvt &= ~(7ull << (reg * 3)); - *rvt |= ((u64)type << (reg * 3)); -} - -static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, - int index, int reg) -{ - return (ctx->reg_val_types[index] >> (reg * 3)) & 7; -} - -/* Simply emit the instruction if the JIT memory space has been allocated */ -#define emit_instr_long(ctx, func64, func32, ...) \ -do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->idx]; \ - if (IS_ENABLED(CONFIG_64BIT)) \ - uasm_i_##func64(&p, ##__VA_ARGS__); \ - else \ - uasm_i_##func32(&p, ##__VA_ARGS__); \ - } \ - (ctx)->idx++; \ -} while (0) - -#define emit_instr(ctx, func, ...) \ - emit_instr_long(ctx, func, func, ##__VA_ARGS__) - -static unsigned int j_target(struct jit_ctx *ctx, int target_idx) -{ - unsigned long target_va, base_va; - unsigned int r; - - if (!ctx->target) - return 0; - - base_va = (unsigned long)ctx->target; - target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV); - - if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful)) - return (unsigned int)-1; - r = target_va & 0x0ffffffful; - return r; -} - -/* Compute the immediate value for PC-relative branches. */ -static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) -{ - if (!ctx->gen_b_offsets) - return 0; - - /* - * We want a pc-relative branch. tgt is the instruction offset - * we want to jump to. - - * Branch on MIPS: - * I: target_offset <- sign_extend(offset) - * I+1: PC += target_offset (delay slot) - * - * ctx->idx currently points to the branch instruction - * but the offset is added to the delay slot so we need - * to subtract 4. - */ - return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) - - (ctx->idx * 4) - 4; -} - -enum which_ebpf_reg { - src_reg, - src_reg_no_fp, - dst_reg, - dst_reg_fp_ok -}; - -/* - * For eBPF, the register mapping naturally falls out of the - * requirements of eBPF and the MIPS n64 ABI. We don't maintain a - * separate frame pointer, so BPF_REG_10 relative accesses are - * adjusted to be $sp relative. - */ -static int ebpf_to_mips_reg(struct jit_ctx *ctx, - const struct bpf_insn *insn, - enum which_ebpf_reg w) -{ - int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ? - insn->src_reg : insn->dst_reg; - - switch (ebpf_reg) { - case BPF_REG_0: - return MIPS_R_V0; - case BPF_REG_1: - return MIPS_R_A0; - case BPF_REG_2: - return MIPS_R_A1; - case BPF_REG_3: - return MIPS_R_A2; - case BPF_REG_4: - return MIPS_R_A3; - case BPF_REG_5: - return MIPS_R_A4; - case BPF_REG_6: - ctx->flags |= EBPF_SAVE_S0; - return MIPS_R_S0; - case BPF_REG_7: - ctx->flags |= EBPF_SAVE_S1; - return MIPS_R_S1; - case BPF_REG_8: - ctx->flags |= EBPF_SAVE_S2; - return MIPS_R_S2; - case BPF_REG_9: - ctx->flags |= EBPF_SAVE_S3; - return MIPS_R_S3; - case BPF_REG_10: - if (w == dst_reg || w == src_reg_no_fp) - goto bad_reg; - ctx->flags |= EBPF_SEEN_FP; - /* - * Needs special handling, return something that - * cannot be clobbered just in case. 
- */ - return MIPS_R_ZERO; - case BPF_REG_AX: - return MIPS_R_T4; - default: -bad_reg: - WARN(1, "Illegal bpf reg: %d\n", ebpf_reg); - return -EINVAL; - } -} -/* - * eBPF stack frame will be something like: - * - * Entry $sp ------> +--------------------------------+ - * | $ra (optional) | - * +--------------------------------+ - * | $s0 (optional) | - * +--------------------------------+ - * | $s1 (optional) | - * +--------------------------------+ - * | $s2 (optional) | - * +--------------------------------+ - * | $s3 (optional) | - * +--------------------------------+ - * | $s4 (optional) | - * +--------------------------------+ - * | tmp-storage (if $ra saved) | - * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10 - * | BPF_REG_10 relative storage | - * | MAX_BPF_STACK (optional) | - * | . | - * | . | - * | . | - * $sp --------> +--------------------------------+ - * - * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized - * area is not allocated. - */ -static int gen_int_prologue(struct jit_ctx *ctx) -{ - int stack_adjust = 0; - int store_offset; - int locals_size; - - if (ctx->flags & EBPF_SAVE_RA) - /* - * If RA we are doing a function call and may need - * extra 8-byte tmp area. - */ - stack_adjust += 2 * sizeof(long); - if (ctx->flags & EBPF_SAVE_S0) - stack_adjust += sizeof(long); - if (ctx->flags & EBPF_SAVE_S1) - stack_adjust += sizeof(long); - if (ctx->flags & EBPF_SAVE_S2) - stack_adjust += sizeof(long); - if (ctx->flags & EBPF_SAVE_S3) - stack_adjust += sizeof(long); - if (ctx->flags & EBPF_SAVE_S4) - stack_adjust += sizeof(long); - - BUILD_BUG_ON(MAX_BPF_STACK & 7); - locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; - - stack_adjust += locals_size; - - ctx->stack_size = stack_adjust; - - /* - * First instruction initializes the tail call count (TCC). - * On tail call we skip this instruction, and the TCC is - * passed in $v1 from the caller. - */ - emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); - if (stack_adjust) - emit_instr_long(ctx, daddiu, addiu, - MIPS_R_SP, MIPS_R_SP, -stack_adjust); - else - return 0; - - store_offset = stack_adjust - sizeof(long); - - if (ctx->flags & EBPF_SAVE_RA) { - emit_instr_long(ctx, sd, sw, - MIPS_R_RA, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S0) { - emit_instr_long(ctx, sd, sw, - MIPS_R_S0, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S1) { - emit_instr_long(ctx, sd, sw, - MIPS_R_S1, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S2) { - emit_instr_long(ctx, sd, sw, - MIPS_R_S2, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S3) { - emit_instr_long(ctx, sd, sw, - MIPS_R_S3, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S4) { - emit_instr_long(ctx, sd, sw, - MIPS_R_S4, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - - if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) - emit_instr_long(ctx, daddu, addu, - MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); - - return 0; -} - -static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) -{ - const struct bpf_prog *prog = ctx->skf; - int stack_adjust = ctx->stack_size; - int store_offset = stack_adjust - sizeof(long); - enum reg_val_type td; - int r0 = MIPS_R_V0; - - if (dest_reg == MIPS_R_RA) { - /* Don't let zero extended value escape. 
*/ - td = get_reg_val_type(ctx, prog->len, BPF_REG_0); - if (td == REG_64BIT) - emit_instr(ctx, sll, r0, r0, 0); - } - - if (ctx->flags & EBPF_SAVE_RA) { - emit_instr_long(ctx, ld, lw, - MIPS_R_RA, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S0) { - emit_instr_long(ctx, ld, lw, - MIPS_R_S0, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S1) { - emit_instr_long(ctx, ld, lw, - MIPS_R_S1, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S2) { - emit_instr_long(ctx, ld, lw, - MIPS_R_S2, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S3) { - emit_instr_long(ctx, ld, lw, - MIPS_R_S3, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - if (ctx->flags & EBPF_SAVE_S4) { - emit_instr_long(ctx, ld, lw, - MIPS_R_S4, store_offset, MIPS_R_SP); - store_offset -= sizeof(long); - } - emit_instr(ctx, jr, dest_reg); - - if (stack_adjust) - emit_instr_long(ctx, daddiu, addiu, - MIPS_R_SP, MIPS_R_SP, stack_adjust); - else - emit_instr(ctx, nop); - - return 0; -} - -static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, - struct jit_ctx *ctx) -{ - if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { - emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm); - } else { - int lower = (s16)(insn->imm & 0xffff); - int upper = insn->imm - lower; - - emit_instr(ctx, lui, reg, upper >> 16); - emit_instr(ctx, addiu, reg, reg, lower); - } -} - -static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, - int idx) -{ - int upper_bound, lower_bound; - int dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - - if (dst < 0) - return dst; - - switch (BPF_OP(insn->code)) { - case BPF_MOV: - case BPF_ADD: - upper_bound = S16_MAX; - lower_bound = S16_MIN; - break; - case BPF_SUB: - upper_bound = -(int)S16_MIN; - lower_bound = -(int)S16_MAX; - break; - case BPF_AND: - case BPF_OR: - case BPF_XOR: - upper_bound = 0xffff; - lower_bound = 0; - break; - case BPF_RSH: - case BPF_LSH: - case BPF_ARSH: - /* Shift amounts are truncated, no need for bounds */ - upper_bound = S32_MAX; - lower_bound = S32_MIN; - break; - default: - return -EINVAL; - } - - /* - * Immediate move clobbers the register, so no sign/zero - * extension needed. 
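
[Aside, not part of the patch: the REG_32BIT/REG_64BIT checks and the dinsu/sll instructions emitted throughout this removed JIT deal with one recurring mismatch: MIPS64 keeps 32-bit ALU results sign-extended in 64-bit registers, whereas eBPF requires them zero-extended. A minimal standalone C illustration of the mismatch and of what the two canonicalization instructions compute.]

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t res32 = 0x7fffffffu + 1;	/* a 32-bit add result */

	/* what a MIPS64 register holds after addu: sign-extended */
	uint64_t mips_reg = (uint64_t)(int64_t)(int32_t)res32;
	/* what eBPF semantics require: zero-extended */
	uint64_t bpf_reg = (uint64_t)res32;

	assert(mips_reg == 0xffffffff80000000ull);
	assert(bpf_reg == 0x0000000080000000ull);

	/* "dinsu dst, $zero, 32, 32" clears the upper 32 bits... */
	assert((mips_reg & 0xffffffffull) == bpf_reg);
	/* ...and "sll dst, dst, 0" restores the sign-extended form */
	assert((uint64_t)(int64_t)(int32_t)(uint32_t)bpf_reg == mips_reg);
	return 0;
}
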
- */ - if (BPF_CLASS(insn->code) == BPF_ALU64 && - BPF_OP(insn->code) != BPF_MOV && - get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - /* BPF_ALU | BPF_LSH doesn't need separate sign extension */ - if (BPF_CLASS(insn->code) == BPF_ALU && - BPF_OP(insn->code) != BPF_LSH && - BPF_OP(insn->code) != BPF_MOV && - get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT) - emit_instr(ctx, sll, dst, dst, 0); - - if (insn->imm >= lower_bound && insn->imm <= upper_bound) { - /* single insn immediate case */ - switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { - case BPF_ALU64 | BPF_MOV: - emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm); - break; - case BPF_ALU64 | BPF_AND: - case BPF_ALU | BPF_AND: - emit_instr(ctx, andi, dst, dst, insn->imm); - break; - case BPF_ALU64 | BPF_OR: - case BPF_ALU | BPF_OR: - emit_instr(ctx, ori, dst, dst, insn->imm); - break; - case BPF_ALU64 | BPF_XOR: - case BPF_ALU | BPF_XOR: - emit_instr(ctx, xori, dst, dst, insn->imm); - break; - case BPF_ALU64 | BPF_ADD: - emit_instr(ctx, daddiu, dst, dst, insn->imm); - break; - case BPF_ALU64 | BPF_SUB: - emit_instr(ctx, daddiu, dst, dst, -insn->imm); - break; - case BPF_ALU64 | BPF_RSH: - emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f); - break; - case BPF_ALU | BPF_RSH: - emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f); - break; - case BPF_ALU64 | BPF_LSH: - emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f); - break; - case BPF_ALU | BPF_LSH: - emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f); - break; - case BPF_ALU64 | BPF_ARSH: - emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f); - break; - case BPF_ALU | BPF_ARSH: - emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f); - break; - case BPF_ALU | BPF_MOV: - emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm); - break; - case BPF_ALU | BPF_ADD: - emit_instr(ctx, addiu, dst, dst, insn->imm); - break; - case BPF_ALU | BPF_SUB: - emit_instr(ctx, addiu, dst, dst, -insn->imm); - break; - default: - return -EINVAL; - } - } else { - /* multi insn immediate case */ - if (BPF_OP(insn->code) == BPF_MOV) { - gen_imm_to_reg(insn, dst, ctx); - } else { - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { - case BPF_ALU64 | BPF_AND: - case BPF_ALU | BPF_AND: - emit_instr(ctx, and, dst, dst, MIPS_R_AT); - break; - case BPF_ALU64 | BPF_OR: - case BPF_ALU | BPF_OR: - emit_instr(ctx, or, dst, dst, MIPS_R_AT); - break; - case BPF_ALU64 | BPF_XOR: - case BPF_ALU | BPF_XOR: - emit_instr(ctx, xor, dst, dst, MIPS_R_AT); - break; - case BPF_ALU64 | BPF_ADD: - emit_instr(ctx, daddu, dst, dst, MIPS_R_AT); - break; - case BPF_ALU64 | BPF_SUB: - emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT); - break; - case BPF_ALU | BPF_ADD: - emit_instr(ctx, addu, dst, dst, MIPS_R_AT); - break; - case BPF_ALU | BPF_SUB: - emit_instr(ctx, subu, dst, dst, MIPS_R_AT); - break; - default: - return -EINVAL; - } - } - } - - return 0; -} - -static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) -{ - if (value >= 0xffffffffffff8000ull || value < 0x8000ull) { - emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value); - } else if (value >= 0xffffffff80000000ull || - (value < 0x80000000 && value > 0xffff)) { - emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16)); - emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff)); - } else { - int i; - bool seen_part = false; - int needed_shift = 0; - - for (i = 0; i < 4; i++) { - u64 part = (value >> (16 * (3 - i))) & 0xffff; - - if (seen_part && 
needed_shift > 0 && (part || i == 3)) { - emit_instr(ctx, dsll_safe, dst, dst, needed_shift); - needed_shift = 0; - } - if (part) { - if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) { - emit_instr(ctx, lui, dst, (s32)(s16)part); - needed_shift = -16; - } else { - emit_instr(ctx, ori, dst, - seen_part ? dst : MIPS_R_ZERO, - (unsigned int)part); - } - seen_part = true; - } - if (seen_part) - needed_shift += 16; - } - } -} - -static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) -{ - int off, b_off; - int tcc_reg; - - ctx->flags |= EBPF_SEEN_TC; - /* - * if (index >= array->map.max_entries) - * goto out; - */ - off = offsetof(struct bpf_array, map.max_entries); - emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1); - emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2); - b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); - /* - * if (TCC-- < 0) - * goto out; - */ - /* Delay slot */ - tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4; - emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1); - b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bltz, tcc_reg, b_off); - /* - * prog = array->ptrs[index]; - * if (prog == NULL) - * goto out; - */ - /* Delay slot */ - emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3); - emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1); - off = offsetof(struct bpf_array, ptrs); - emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8); - b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off); - /* Delay slot */ - emit_instr(ctx, nop); - - /* goto *(prog->bpf_func + 4); */ - off = offsetof(struct bpf_prog, bpf_func); - emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT); - /* All systems are go... propagate TCC */ - emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO); - /* Skip first instruction (TCC initialization) */ - emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4); - return build_int_epilogue(ctx, MIPS_R_T9); -} - -static bool is_bad_offset(int b_off) -{ - return b_off > 0x1ffff || b_off < -0x20000; -} - -/* Returns the number of insn slots consumed. 
*/ -static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, - int this_idx, int exit_idx) -{ - int src, dst, r, td, ts, mem_off, b_off; - bool need_swap, did_move, cmp_eq; - unsigned int target = 0; - u64 t64; - s64 t64s; - int bpf_op = BPF_OP(insn->code); - - if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64) - || (bpf_op == BPF_DW))) - return -EINVAL; - - switch (insn->code) { - case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */ - case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */ - case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */ - case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */ - case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */ - case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */ - r = gen_imm_insn(insn, ctx, this_idx); - if (r < 0) - return r; - break; - case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */ - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm == 1) /* Mult by 1 is a nop */ - break; - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - if (MIPS_ISA_REV >= 6) { - emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT); - } else { - emit_instr(ctx, dmultu, MIPS_R_AT, dst); - emit_instr(ctx, mflo, dst); - } - break; - case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst); - break; - case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */ - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT) { - /* sign extend */ - emit_instr(ctx, sll, dst, dst, 0); - } - if (insn->imm == 1) /* Mult by 1 is a nop */ - break; - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - if (MIPS_ISA_REV >= 6) { - emit_instr(ctx, mulu, dst, dst, MIPS_R_AT); - } else { - emit_instr(ctx, multu, dst, MIPS_R_AT); - emit_instr(ctx, mflo, dst); - } - break; - case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT) { - /* sign extend */ - emit_instr(ctx, sll, dst, dst, 0); - } - emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst); - break; - case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */ - case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */ - if (insn->imm == 0) - return -EINVAL; - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT) - /* sign extend */ - emit_instr(ctx, sll, dst, dst, 0); - if (insn->imm == 1) { - /* div by 1 is a nop, mod by 1 is zero */ - if (bpf_op == BPF_MOD) - emit_instr(ctx, addu, dst, 
MIPS_R_ZERO, MIPS_R_ZERO); - break; - } - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - if (MIPS_ISA_REV >= 6) { - if (bpf_op == BPF_DIV) - emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT); - else - emit_instr(ctx, modu, dst, dst, MIPS_R_AT); - break; - } - emit_instr(ctx, divu, dst, MIPS_R_AT); - if (bpf_op == BPF_DIV) - emit_instr(ctx, mflo, dst); - else - emit_instr(ctx, mfhi, dst); - break; - case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */ - case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */ - if (insn->imm == 0) - return -EINVAL; - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm == 1) { - /* div by 1 is a nop, mod by 1 is zero */ - if (bpf_op == BPF_MOD) - emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); - break; - } - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - if (MIPS_ISA_REV >= 6) { - if (bpf_op == BPF_DIV) - emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT); - else - emit_instr(ctx, modu, dst, dst, MIPS_R_AT); - break; - } - emit_instr(ctx, ddivu, dst, MIPS_R_AT); - if (bpf_op == BPF_DIV) - emit_instr(ctx, mflo, dst); - else - emit_instr(ctx, mfhi, dst); - break; - case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */ - case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */ - src = ebpf_to_mips_reg(ctx, insn, src_reg); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (src < 0 || dst < 0) - return -EINVAL; - if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - did_move = false; - if (insn->src_reg == BPF_REG_10) { - if (bpf_op == BPF_MOV) { - emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); - did_move = true; - } else { - emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK); - src = MIPS_R_AT; - } - } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { - int tmp_reg = MIPS_R_AT; - - if (bpf_op == BPF_MOV) { - tmp_reg = dst; - did_move = true; - } - emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO); - emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); - src = MIPS_R_AT; - } - switch (bpf_op) { - case BPF_MOV: - if (!did_move) - emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); - break; - case BPF_ADD: - emit_instr(ctx, daddu, dst, dst, src); - break; - case BPF_SUB: - emit_instr(ctx, dsubu, dst, dst, src); - break; - case BPF_XOR: - emit_instr(ctx, xor, dst, dst, src); - break; - case BPF_OR: - emit_instr(ctx, or, dst, dst, src); - break; - case BPF_AND: - emit_instr(ctx, and, dst, dst, src); - break; - case BPF_MUL: - if (MIPS_ISA_REV >= 6) { - emit_instr(ctx, dmulu, dst, dst, src); - } else { - emit_instr(ctx, dmultu, dst, src); - emit_instr(ctx, mflo, dst); - } - break; - case BPF_DIV: - case BPF_MOD: - if (MIPS_ISA_REV >= 6) { - if (bpf_op == BPF_DIV) - emit_instr(ctx, ddivu_r6, - dst, dst, src); - else - emit_instr(ctx, modu, dst, dst, src); - break; - } - emit_instr(ctx, ddivu, dst, src); - if (bpf_op == BPF_DIV) - emit_instr(ctx, 
mflo, dst); - else - emit_instr(ctx, mfhi, dst); - break; - case BPF_LSH: - emit_instr(ctx, dsllv, dst, dst, src); - break; - case BPF_RSH: - emit_instr(ctx, dsrlv, dst, dst, src); - break; - case BPF_ARSH: - emit_instr(ctx, dsrav, dst, dst, src); - break; - default: - pr_err("ALU64_REG NOT HANDLED\n"); - return -EINVAL; - } - break; - case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ - case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */ - src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (src < 0 || dst < 0) - return -EINVAL; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT) { - /* sign extend */ - emit_instr(ctx, sll, dst, dst, 0); - } - did_move = false; - ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_64BIT) { - int tmp_reg = MIPS_R_AT; - - if (bpf_op == BPF_MOV) { - tmp_reg = dst; - did_move = true; - } - /* sign extend */ - emit_instr(ctx, sll, tmp_reg, src, 0); - src = MIPS_R_AT; - } - switch (bpf_op) { - case BPF_MOV: - if (!did_move) - emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); - break; - case BPF_ADD: - emit_instr(ctx, addu, dst, dst, src); - break; - case BPF_SUB: - emit_instr(ctx, subu, dst, dst, src); - break; - case BPF_XOR: - emit_instr(ctx, xor, dst, dst, src); - break; - case BPF_OR: - emit_instr(ctx, or, dst, dst, src); - break; - case BPF_AND: - emit_instr(ctx, and, dst, dst, src); - break; - case BPF_MUL: - emit_instr(ctx, mul, dst, dst, src); - break; - case BPF_DIV: - case BPF_MOD: - if (MIPS_ISA_REV >= 6) { - if (bpf_op == BPF_DIV) - emit_instr(ctx, divu_r6, dst, dst, src); - else - emit_instr(ctx, modu, dst, dst, src); - break; - } - emit_instr(ctx, divu, dst, src); - if (bpf_op == BPF_DIV) - emit_instr(ctx, mflo, dst); - else - emit_instr(ctx, mfhi, dst); - break; - case BPF_LSH: - emit_instr(ctx, sllv, dst, dst, src); - break; - case BPF_RSH: - emit_instr(ctx, srlv, dst, dst, src); - break; - case BPF_ARSH: - emit_instr(ctx, srav, dst, dst, src); - break; - default: - pr_err("ALU_REG NOT HANDLED\n"); - return -EINVAL; - } - break; - case BPF_JMP | BPF_EXIT: - if (this_idx + 1 < exit_idx) { - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); - emit_instr(ctx, nop); - } - break; - case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ - case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ - cmp_eq = (bpf_op == BPF_JEQ); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); - if (dst < 0) - return dst; - if (insn->imm == 0) { - src = MIPS_R_ZERO; - } else { - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - src = MIPS_R_AT; - } - goto jeq_common; - case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ - case BPF_JMP | BPF_JNE | BPF_X: - case BPF_JMP | BPF_JSLT | BPF_X: - case BPF_JMP | BPF_JSLE | BPF_X: - case BPF_JMP | BPF_JSGT | BPF_X: - case BPF_JMP | BPF_JSGE | BPF_X: - case BPF_JMP | BPF_JLT | BPF_X: - case BPF_JMP | BPF_JLE | BPF_X: - case BPF_JMP | BPF_JGT | BPF_X: - case BPF_JMP | BPF_JGE | BPF_X: - case BPF_JMP | BPF_JSET | BPF_X: - src = 
ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (src < 0 || dst < 0) - return -EINVAL; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (td == REG_32BIT && ts != REG_32BIT) { - emit_instr(ctx, sll, MIPS_R_AT, src, 0); - src = MIPS_R_AT; - } else if (ts == REG_32BIT && td != REG_32BIT) { - emit_instr(ctx, sll, MIPS_R_AT, dst, 0); - dst = MIPS_R_AT; - } - if (bpf_op == BPF_JSET) { - emit_instr(ctx, and, MIPS_R_AT, dst, src); - cmp_eq = false; - dst = MIPS_R_AT; - src = MIPS_R_ZERO; - } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) { - emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); - if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - if (bpf_op == BPF_JSGT) - emit_instr(ctx, blez, MIPS_R_AT, b_off); - else - emit_instr(ctx, bgtz, MIPS_R_AT, b_off); - emit_instr(ctx, nop); - return 2; /* We consumed the exit. */ - } - b_off = b_imm(this_idx + insn->off + 1, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - if (bpf_op == BPF_JSGT) - emit_instr(ctx, bgtz, MIPS_R_AT, b_off); - else - emit_instr(ctx, blez, MIPS_R_AT, b_off); - emit_instr(ctx, nop); - break; - } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) { - emit_instr(ctx, slt, MIPS_R_AT, dst, src); - cmp_eq = bpf_op == BPF_JSGE; - dst = MIPS_R_AT; - src = MIPS_R_ZERO; - } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) { - /* dst or src could be AT */ - emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); - emit_instr(ctx, sltu, MIPS_R_AT, dst, src); - /* SP known to be non-zero, movz becomes boolean not */ - if (MIPS_ISA_REV >= 6) { - emit_instr(ctx, seleqz, MIPS_R_T9, - MIPS_R_SP, MIPS_R_T8); - } else { - emit_instr(ctx, movz, MIPS_R_T9, - MIPS_R_SP, MIPS_R_T8); - emit_instr(ctx, movn, MIPS_R_T9, - MIPS_R_ZERO, MIPS_R_T8); - } - emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); - cmp_eq = bpf_op == BPF_JGT; - dst = MIPS_R_AT; - src = MIPS_R_ZERO; - } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) { - emit_instr(ctx, sltu, MIPS_R_AT, dst, src); - cmp_eq = bpf_op == BPF_JGE; - dst = MIPS_R_AT; - src = MIPS_R_ZERO; - } else { /* JNE/JEQ case */ - cmp_eq = (bpf_op == BPF_JEQ); - } -jeq_common: - /* - * If the next insn is EXIT and we are jumping arround - * only it, invert the sense of the compare and - * conditionally jump to the exit. Poor man's branch - * chaining. - */ - if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) { - target = j_target(ctx, exit_idx); - if (target == (unsigned int)-1) - return -E2BIG; - cmp_eq = !cmp_eq; - b_off = 4 * 3; - if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { - ctx->offsets[this_idx] |= OFFSETS_B_CONV; - ctx->long_b_conversion = 1; - } - } - - if (cmp_eq) - emit_instr(ctx, bne, dst, src, b_off); - else - emit_instr(ctx, beq, dst, src, b_off); - emit_instr(ctx, nop); - if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { - emit_instr(ctx, j, target); - emit_instr(ctx, nop); - } - return 2; /* We consumed the exit. 
*/ - } - b_off = b_imm(this_idx + insn->off + 1, ctx); - if (is_bad_offset(b_off)) { - target = j_target(ctx, this_idx + insn->off + 1); - if (target == (unsigned int)-1) - return -E2BIG; - cmp_eq = !cmp_eq; - b_off = 4 * 3; - if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { - ctx->offsets[this_idx] |= OFFSETS_B_CONV; - ctx->long_b_conversion = 1; - } - } - - if (cmp_eq) - emit_instr(ctx, beq, dst, src, b_off); - else - emit_instr(ctx, bne, dst, src, b_off); - emit_instr(ctx, nop); - if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { - emit_instr(ctx, j, target); - emit_instr(ctx, nop); - } - break; - case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ - case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ - case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */ - case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */ - cmp_eq = (bpf_op == BPF_JSGE); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); - if (dst < 0) - return dst; - - if (insn->imm == 0) { - if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - switch (bpf_op) { - case BPF_JSGT: - emit_instr(ctx, blez, dst, b_off); - break; - case BPF_JSGE: - emit_instr(ctx, bltz, dst, b_off); - break; - case BPF_JSLT: - emit_instr(ctx, bgez, dst, b_off); - break; - case BPF_JSLE: - emit_instr(ctx, bgtz, dst, b_off); - break; - } - emit_instr(ctx, nop); - return 2; /* We consumed the exit. */ - } - b_off = b_imm(this_idx + insn->off + 1, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - switch (bpf_op) { - case BPF_JSGT: - emit_instr(ctx, bgtz, dst, b_off); - break; - case BPF_JSGE: - emit_instr(ctx, bgez, dst, b_off); - break; - case BPF_JSLT: - emit_instr(ctx, bltz, dst, b_off); - break; - case BPF_JSLE: - emit_instr(ctx, blez, dst, b_off); - break; - } - emit_instr(ctx, nop); - break; - } - /* - * only "LT" compare available, so we must use imm + 1 - * to generate "GT" and imm -1 to generate LE - */ - if (bpf_op == BPF_JSGT) - t64s = insn->imm + 1; - else if (bpf_op == BPF_JSLE) - t64s = insn->imm + 1; - else - t64s = insn->imm; - - cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE; - if (t64s >= S16_MIN && t64s <= S16_MAX) { - emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - goto jeq_common; - } - emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); - emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - goto jeq_common; - - case BPF_JMP | BPF_JGT | BPF_K: - case BPF_JMP | BPF_JGE | BPF_K: - case BPF_JMP | BPF_JLT | BPF_K: - case BPF_JMP | BPF_JLE | BPF_K: - cmp_eq = (bpf_op == BPF_JGE); - dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); - if (dst < 0) - return dst; - /* - * only "LT" compare available, so we must use imm + 1 - * to generate "GT" and imm -1 to generate LE - */ - if (bpf_op == BPF_JGT) - t64s = (u64)(u32)(insn->imm) + 1; - else if (bpf_op == BPF_JLE) - t64s = (u64)(u32)(insn->imm) + 1; - else - t64s = (u64)(u32)(insn->imm); - - cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE; - - emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); - emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - goto jeq_common; - - case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ - dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); - if (dst < 0) - return dst; - - if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) { - if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { - b_off = b_imm(exit_idx, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - 
emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off); - emit_instr(ctx, nop); - return 2; /* We consumed the exit. */ - } - b_off = b_imm(this_idx + insn->off + 1, ctx); - if (is_bad_offset(b_off)) - return -E2BIG; - emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off); - emit_instr(ctx, nop); - break; - } - t64 = (u32)insn->imm; - emit_const_to_reg(ctx, MIPS_R_AT, t64); - emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - cmp_eq = false; - goto jeq_common; - - case BPF_JMP | BPF_JA: - /* - * Prefer relative branch for easier debugging, but - * fall back if needed. - */ - b_off = b_imm(this_idx + insn->off + 1, ctx); - if (is_bad_offset(b_off)) { - target = j_target(ctx, this_idx + insn->off + 1); - if (target == (unsigned int)-1) - return -E2BIG; - emit_instr(ctx, j, target); - } else { - emit_instr(ctx, b, b_off); - } - emit_instr(ctx, nop); - break; - case BPF_LD | BPF_DW | BPF_IMM: - if (insn->src_reg != 0) - return -EINVAL; - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32); - emit_const_to_reg(ctx, dst, t64); - return 2; /* Double slot insn */ - - case BPF_JMP | BPF_CALL: - ctx->flags |= EBPF_SAVE_RA; - t64s = (s64)insn->imm + (long)__bpf_call_base; - emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); - emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); - /* delay slot */ - emit_instr(ctx, nop); - break; - - case BPF_JMP | BPF_TAIL_CALL: - if (emit_bpf_tail_call(ctx, this_idx)) - return -EINVAL; - break; - - case BPF_ALU | BPF_END | BPF_FROM_BE: - case BPF_ALU | BPF_END | BPF_FROM_LE: - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (insn->imm == 64 && td == REG_32BIT) - emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - - if (insn->imm != 64 && td == REG_64BIT) { - /* sign extend */ - emit_instr(ctx, sll, dst, dst, 0); - } - -#ifdef __BIG_ENDIAN - need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE); -#else - need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE); -#endif - if (insn->imm == 16) { - if (need_swap) - emit_instr(ctx, wsbh, dst, dst); - emit_instr(ctx, andi, dst, dst, 0xffff); - } else if (insn->imm == 32) { - if (need_swap) { - emit_instr(ctx, wsbh, dst, dst); - emit_instr(ctx, rotr, dst, dst, 16); - } - } else { /* 64-bit*/ - if (need_swap) { - emit_instr(ctx, dsbh, dst, dst); - emit_instr(ctx, dshd, dst, dst); - } - } - break; - - case BPF_ST | BPF_NOSPEC: /* speculation barrier */ - break; - - case BPF_ST | BPF_B | BPF_MEM: - case BPF_ST | BPF_H | BPF_MEM: - case BPF_ST | BPF_W | BPF_MEM: - case BPF_ST | BPF_DW | BPF_MEM: - if (insn->dst_reg == BPF_REG_10) { - ctx->flags |= EBPF_SEEN_FP; - dst = MIPS_R_SP; - mem_off = insn->off + MAX_BPF_STACK; - } else { - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - mem_off = insn->off; - } - gen_imm_to_reg(insn, MIPS_R_AT, ctx); - switch (BPF_SIZE(insn->code)) { - case BPF_B: - emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst); - break; - case BPF_H: - emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); - break; - case BPF_W: - emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst); - break; - case BPF_DW: - emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst); - break; - } - break; - - case BPF_LDX | BPF_B | BPF_MEM: - case BPF_LDX | BPF_H | BPF_MEM: - case BPF_LDX | BPF_W | BPF_MEM: - case BPF_LDX | BPF_DW | BPF_MEM: - if (insn->src_reg == BPF_REG_10) { - ctx->flags |= EBPF_SEEN_FP; - src = MIPS_R_SP; - mem_off = insn->off + 
MAX_BPF_STACK; - } else { - src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); - if (src < 0) - return src; - mem_off = insn->off; - } - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - switch (BPF_SIZE(insn->code)) { - case BPF_B: - emit_instr(ctx, lbu, dst, mem_off, src); - break; - case BPF_H: - emit_instr(ctx, lhu, dst, mem_off, src); - break; - case BPF_W: - emit_instr(ctx, lw, dst, mem_off, src); - break; - case BPF_DW: - emit_instr(ctx, ld, dst, mem_off, src); - break; - } - break; - - case BPF_STX | BPF_B | BPF_MEM: - case BPF_STX | BPF_H | BPF_MEM: - case BPF_STX | BPF_W | BPF_MEM: - case BPF_STX | BPF_DW | BPF_MEM: - case BPF_STX | BPF_W | BPF_ATOMIC: - case BPF_STX | BPF_DW | BPF_ATOMIC: - if (insn->dst_reg == BPF_REG_10) { - ctx->flags |= EBPF_SEEN_FP; - dst = MIPS_R_SP; - mem_off = insn->off + MAX_BPF_STACK; - } else { - dst = ebpf_to_mips_reg(ctx, insn, dst_reg); - if (dst < 0) - return dst; - mem_off = insn->off; - } - src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); - if (src < 0) - return src; - if (BPF_MODE(insn->code) == BPF_ATOMIC) { - if (insn->imm != BPF_ADD) { - pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm); - return -EINVAL; - } - - /* - * If mem_off does not fit within the 9 bit ll/sc - * instruction immediate field, use a temp reg. - */ - if (MIPS_ISA_REV >= 6 && - (mem_off >= BIT(8) || mem_off < -BIT(8))) { - emit_instr(ctx, daddiu, MIPS_R_T6, - dst, mem_off); - mem_off = 0; - dst = MIPS_R_T6; - } - switch (BPF_SIZE(insn->code)) { - case BPF_W: - if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { - emit_instr(ctx, sll, MIPS_R_AT, src, 0); - src = MIPS_R_AT; - } - emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst); - emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src); - emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst); - /* - * On failure back up to LL (-4 - * instructions of 4 bytes each - */ - emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); - emit_instr(ctx, nop); - break; - case BPF_DW: - if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { - emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); - emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); - src = MIPS_R_AT; - } - emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst); - emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src); - emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst); - emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); - emit_instr(ctx, nop); - break; - } - } else { /* BPF_MEM */ - switch (BPF_SIZE(insn->code)) { - case BPF_B: - emit_instr(ctx, sb, src, mem_off, dst); - break; - case BPF_H: - emit_instr(ctx, sh, src, mem_off, dst); - break; - case BPF_W: - emit_instr(ctx, sw, src, mem_off, dst); - break; - case BPF_DW: - if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { - emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); - emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); - src = MIPS_R_AT; - } - emit_instr(ctx, sd, src, mem_off, dst); - break; - } - } - break; - - default: - pr_err("NOT HANDLED %d - (%02x)\n", - this_idx, (unsigned int)insn->code); - return -EINVAL; - } - return 1; -} - -#define RVT_VISITED_MASK 0xc000000000000000ull -#define RVT_FALL_THROUGH 0x4000000000000000ull -#define RVT_BRANCH_TAKEN 0x8000000000000000ull -#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN) - -static int build_int_body(struct jit_ctx *ctx) -{ - const struct bpf_prog *prog = ctx->skf; - const struct bpf_insn *insn; - int i, r; - - for (i = 0; i < prog->len; ) { - insn = prog->insnsi + i; - if ((ctx->reg_val_types[i] & 
RVT_VISITED_MASK) == 0) { - /* dead instruction, don't emit it. */ - i++; - continue; - } - - if (ctx->target == NULL) - ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4); - - r = build_one_insn(insn, ctx, i, prog->len); - if (r < 0) - return r; - i += r; - } - /* epilogue offset */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; - - /* - * All exits have an offset of the epilogue, some offsets may - * not have been set due to banch-around threading, so set - * them now. - */ - if (ctx->target == NULL) - for (i = 0; i < prog->len; i++) { - insn = prog->insnsi + i; - if (insn->code == (BPF_JMP | BPF_EXIT)) - ctx->offsets[i] = ctx->idx * 4; - } - return 0; -} - -/* return the last idx processed, or negative for error */ -static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, - int start_idx, bool follow_taken) -{ - const struct bpf_prog *prog = ctx->skf; - const struct bpf_insn *insn; - u64 exit_rvt = initial_rvt; - u64 *rvt = ctx->reg_val_types; - int idx; - int reg; - - for (idx = start_idx; idx < prog->len; idx++) { - rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt; - insn = prog->insnsi + idx; - switch (BPF_CLASS(insn->code)) { - case BPF_ALU: - switch (BPF_OP(insn->code)) { - case BPF_ADD: - case BPF_SUB: - case BPF_MUL: - case BPF_DIV: - case BPF_OR: - case BPF_AND: - case BPF_LSH: - case BPF_RSH: - case BPF_NEG: - case BPF_MOD: - case BPF_XOR: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - break; - case BPF_MOV: - if (BPF_SRC(insn->code)) { - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - } else { - /* IMM to REG move*/ - if (insn->imm >= 0) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - else - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - } - break; - case BPF_END: - if (insn->imm == 64) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - else if (insn->imm == 32) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - else /* insn->imm == 16 */ - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - break; - } - rvt[idx] |= RVT_DONE; - break; - case BPF_ALU64: - switch (BPF_OP(insn->code)) { - case BPF_MOV: - if (BPF_SRC(insn->code)) { - /* REG to REG move*/ - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - } else { - /* IMM to REG move*/ - if (insn->imm >= 0) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - else - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); - } - break; - default: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - } - rvt[idx] |= RVT_DONE; - break; - case BPF_LD: - switch (BPF_SIZE(insn->code)) { - case BPF_DW: - if (BPF_MODE(insn->code) == BPF_IMM) { - s64 val; - - val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32)); - if (val > 0 && val <= S32_MAX) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - else if (val >= S32_MIN && val <= S32_MAX) - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); - else - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - rvt[idx] |= RVT_DONE; - idx++; - } else { - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - } - break; - case BPF_B: - case BPF_H: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - break; - case BPF_W: - if (BPF_MODE(insn->code) == BPF_IMM) - set_reg_val_type(&exit_rvt, insn->dst_reg, - insn->imm >= 0 ? 
REG_32BIT_POS : REG_32BIT); - else - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - break; - } - rvt[idx] |= RVT_DONE; - break; - case BPF_LDX: - switch (BPF_SIZE(insn->code)) { - case BPF_DW: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); - break; - case BPF_B: - case BPF_H: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); - break; - case BPF_W: - set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); - break; - } - rvt[idx] |= RVT_DONE; - break; - case BPF_JMP: - switch (BPF_OP(insn->code)) { - case BPF_EXIT: - rvt[idx] = RVT_DONE | exit_rvt; - rvt[prog->len] = exit_rvt; - return idx; - case BPF_JA: - rvt[idx] |= RVT_DONE; - idx += insn->off; - break; - case BPF_JEQ: - case BPF_JGT: - case BPF_JGE: - case BPF_JLT: - case BPF_JLE: - case BPF_JSET: - case BPF_JNE: - case BPF_JSGT: - case BPF_JSGE: - case BPF_JSLT: - case BPF_JSLE: - if (follow_taken) { - rvt[idx] |= RVT_BRANCH_TAKEN; - idx += insn->off; - follow_taken = false; - } else { - rvt[idx] |= RVT_FALL_THROUGH; - } - break; - case BPF_CALL: - set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT); - /* Upon call return, argument registers are clobbered. */ - for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++) - set_reg_val_type(&exit_rvt, reg, REG_64BIT); - - rvt[idx] |= RVT_DONE; - break; - default: - WARN(1, "Unhandled BPF_JMP case.\n"); - rvt[idx] |= RVT_DONE; - break; - } - break; - default: - rvt[idx] |= RVT_DONE; - break; - } - } - return idx; -} - -/* - * Track the value range (i.e. 32-bit vs. 64-bit) of each register at - * each eBPF insn. This allows unneeded sign and zero extension - * operations to be omitted. - * - * Doesn't handle yet confluence of control paths with conflicting - * ranges, but it is good enough for most sane code. - */ -static int reg_val_propagate(struct jit_ctx *ctx) -{ - const struct bpf_prog *prog = ctx->skf; - u64 exit_rvt; - int reg; - int i; - - /* - * 11 registers * 3 bits/reg leaves top bits free for other - * uses. Bit-62..63 used to see if we have visited an insn. - */ - exit_rvt = 0; - - /* Upon entry, argument registers are 64-bit. */ - for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++) - set_reg_val_type(&exit_rvt, reg, REG_64BIT); - - /* - * First follow all conditional branches on the fall-through - * edge of control flow.. - */ - reg_val_propagate_range(ctx, exit_rvt, 0, false); -restart_search: - /* - * Then repeatedly find the first conditional branch where - * both edges of control flow have not been taken, and follow - * the branch taken edge. We will end up restarting the - * search once per conditional branch insn. - */ - for (i = 0; i < prog->len; i++) { - u64 rvt = ctx->reg_val_types[i]; - - if ((rvt & RVT_VISITED_MASK) == RVT_DONE || - (rvt & RVT_VISITED_MASK) == 0) - continue; - if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) { - reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true); - } else { /* RVT_BRANCH_TAKEN */ - WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n"); - reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false); - } - goto restart_search; - } - /* - * Eventually all conditional branches have been followed on - * both branches and we are done. Any insn that has not been - * visited at this point is dead. - */ - - return 0; -} - -static void jit_fill_hole(void *area, unsigned int size) -{ - u32 *p; - - /* We are guaranteed to have aligned memory. 
*/ - for (p = area; size >= sizeof(u32); size -= sizeof(u32)) - uasm_i_break(&p, BRK_BUG); /* Increments p */ -} - -struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) -{ - struct bpf_prog *orig_prog = prog; - bool tmp_blinded = false; - struct bpf_prog *tmp; - struct bpf_binary_header *header = NULL; - struct jit_ctx ctx; - unsigned int image_size; - u8 *image_ptr; - - if (!prog->jit_requested) - return prog; - - tmp = bpf_jit_blind_constants(prog); - /* If blinding was requested and we failed during blinding, - * we must fall back to the interpreter. - */ - if (IS_ERR(tmp)) - return orig_prog; - if (tmp != prog) { - tmp_blinded = true; - prog = tmp; - } - - memset(&ctx, 0, sizeof(ctx)); - - preempt_disable(); - switch (current_cpu_type()) { - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_CAVIUM_OCTEON3: - ctx.use_bbit_insns = 1; - break; - default: - ctx.use_bbit_insns = 0; - } - preempt_enable(); - - ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); - if (ctx.offsets == NULL) - goto out_err; - - ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL); - if (ctx.reg_val_types == NULL) - goto out_err; - - ctx.skf = prog; - - if (reg_val_propagate(&ctx)) - goto out_err; - - /* - * First pass discovers used resources and instruction offsets - * assuming short branches are used. - */ - if (build_int_body(&ctx)) - goto out_err; - - /* - * If no calls are made (EBPF_SAVE_RA), then tail call count - * in $v1, else we must save in n$s4. - */ - if (ctx.flags & EBPF_SEEN_TC) { - if (ctx.flags & EBPF_SAVE_RA) - ctx.flags |= EBPF_SAVE_S4; - else - ctx.flags |= EBPF_TCC_IN_V1; - } - - /* - * Second pass generates offsets, if any branches are out of - * range a jump-around long sequence is generated, and we have - * to try again from the beginning to generate the new - * offsets. This is done until no additional conversions are - * necessary. - */ - do { - ctx.idx = 0; - ctx.gen_b_offsets = 1; - ctx.long_b_conversion = 0; - if (gen_int_prologue(&ctx)) - goto out_err; - if (build_int_body(&ctx)) - goto out_err; - if (build_int_epilogue(&ctx, MIPS_R_RA)) - goto out_err; - } while (ctx.long_b_conversion); - - image_size = 4 * ctx.idx; - - header = bpf_jit_binary_alloc(image_size, &image_ptr, - sizeof(u32), jit_fill_hole); - if (header == NULL) - goto out_err; - - ctx.target = (u32 *)image_ptr; - - /* Third pass generates the code */ - ctx.idx = 0; - if (gen_int_prologue(&ctx)) - goto out_err; - if (build_int_body(&ctx)) - goto out_err; - if (build_int_epilogue(&ctx, MIPS_R_RA)) - goto out_err; - - /* Update the icache */ - flush_icache_range((unsigned long)ctx.target, - (unsigned long)&ctx.target[ctx.idx]); - - if (bpf_jit_enable > 1) - /* Dump JIT code */ - bpf_jit_dump(prog->len, image_size, 2, ctx.target); - - bpf_jit_binary_lock_ro(header); - prog->bpf_func = (void *)ctx.target; - prog->jited = 1; - prog->jited_len = image_size; -out_normal: - if (tmp_blinded) - bpf_jit_prog_release_other(prog, prog == orig_prog ? 
- tmp : orig_prog); - kfree(ctx.offsets); - kfree(ctx.reg_val_types); - - return prog; - -out_err: - prog = orig_prog; - if (header) - bpf_jit_binary_free(header); - goto out_normal; -} diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 8baaad52d79962..845ddc63c88227 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -123,6 +123,8 @@ #define SO_BUF_LOCK 0x4046 +#define SO_RESERVE_MEM 0x4047 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c index 2fc72942215155..18bf338303b677 100644 --- a/arch/riscv/mm/extable.c +++ b/arch/riscv/mm/extable.c @@ -11,14 +11,23 @@ #include #include +#ifdef CONFIG_BPF_JIT +int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs); +#endif + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; fixup = search_exception_tables(regs->epc); - if (fixup) { - regs->epc = fixup->fixup; - return 1; - } - return 0; + if (!fixup) + return 0; + +#ifdef CONFIG_BPF_JIT + if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END) + return rv_bpf_fixup_exception(fixup, regs); +#endif + + regs->epc = fixup->fixup; + return 1; } diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index 75c1e999686758..f42d9cd3b64df4 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -71,6 +71,7 @@ struct rv_jit_context { int ninsns; int epilogue_offset; int *offset; /* BPF to RV */ + int nexentries; unsigned long flags; int stack_size; }; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 3af4131c22c7a9..2ca345c7b0bf32 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -5,6 +5,7 @@ * */ +#include #include #include #include "bpf_jit.h" @@ -27,6 +28,21 @@ static const int regmap[] = { [BPF_REG_AX] = RV_REG_T0, }; +static const int pt_regmap[] = { + [RV_REG_A0] = offsetof(struct pt_regs, a0), + [RV_REG_A1] = offsetof(struct pt_regs, a1), + [RV_REG_A2] = offsetof(struct pt_regs, a2), + [RV_REG_A3] = offsetof(struct pt_regs, a3), + [RV_REG_A4] = offsetof(struct pt_regs, a4), + [RV_REG_A5] = offsetof(struct pt_regs, a5), + [RV_REG_S1] = offsetof(struct pt_regs, s1), + [RV_REG_S2] = offsetof(struct pt_regs, s2), + [RV_REG_S3] = offsetof(struct pt_regs, s3), + [RV_REG_S4] = offsetof(struct pt_regs, s4), + [RV_REG_S5] = offsetof(struct pt_regs, s5), + [RV_REG_T0] = offsetof(struct pt_regs, t0), +}; + enum { RV_CTX_F_SEEN_TAIL_CALL = 0, RV_CTX_F_SEEN_CALL = RV_REG_RA, @@ -440,6 +456,69 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) return 0; } +#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) +#define BPF_FIXUP_REG_MASK GENMASK(31, 27) + +int rv_bpf_fixup_exception(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); + int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup); + + *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0; + regs->epc = (unsigned long)&ex->fixup - offset; + + return 1; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct rv_jit_context *ctx, + int dst_reg, int insn_len) +{ + struct exception_table_entry *ex; + unsigned long pc; + off_t offset; + + if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 
0; + + if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + if (WARN_ON_ONCE(insn_len > ctx->ninsns)) + return -EINVAL; + + if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->nexentries]; + pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len]; + + offset = pc - (long)&ex->insn; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; + ex->insn = pc; + + /* + * Since the extable follows the program, the fixup offset is always + * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value + * to keep things simple, and put the destination register in the upper + * bits. We don't need to worry about buildtime or runtime sort + * modifying the upper bits because the table is already sorted, and + * isn't part of the main exception table. + */ + offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16)); + if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset)) + return -ERANGE; + + ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | + FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg); + + ctx->nexentries++; + return 0; +} + int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, bool extra_pass) { @@ -893,52 +972,86 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* LDX: dst = *(size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_B: - if (is_12b_int(off)) { - emit(rv_lbu(rd, off, rs), ctx); + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + { + int insn_len, insns_start; + + switch (BPF_SIZE(code)) { + case BPF_B: + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + emit(rv_lbu(rd, off, rs), ctx); + insn_len = ctx->ninsns - insns_start; + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + insn_len = ctx->ninsns - insns_start; + if (insn_is_zext(&insn[1])) + return 1; break; - } + case BPF_H: + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + emit(rv_lhu(rd, off, rs), ctx); + insn_len = ctx->ninsns - insns_start; + break; + } - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - emit(rv_lbu(rd, 0, RV_REG_T1), ctx); - if (insn_is_zext(&insn[1])) - return 1; - break; - case BPF_LDX | BPF_MEM | BPF_H: - if (is_12b_int(off)) { - emit(rv_lhu(rd, off, rs), ctx); + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + insn_len = ctx->ninsns - insns_start; + if (insn_is_zext(&insn[1])) + return 1; break; - } + case BPF_W: + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + emit(rv_lwu(rd, off, rs), ctx); + insn_len = ctx->ninsns - insns_start; + break; + } - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - emit(rv_lhu(rd, 0, RV_REG_T1), ctx); - if (insn_is_zext(&insn[1])) - return 1; - break; - case BPF_LDX | BPF_MEM | BPF_W: - if (is_12b_int(off)) { - emit(rv_lwu(rd, off, rs), ctx); + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + insn_len = ctx->ninsns - insns_start; + if (insn_is_zext(&insn[1])) + return 1; break; - } + case BPF_DW: + if (is_12b_int(off)) { + insns_start = 
ctx->ninsns; + emit_ld(rd, off, rs, ctx); + insn_len = ctx->ninsns - insns_start; + break; + } - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - emit(rv_lwu(rd, 0, RV_REG_T1), ctx); - if (insn_is_zext(&insn[1])) - return 1; - break; - case BPF_LDX | BPF_MEM | BPF_DW: - if (is_12b_int(off)) { - emit_ld(rd, off, rs, ctx); + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + emit_ld(rd, 0, RV_REG_T1, ctx); + insn_len = ctx->ninsns - insns_start; break; } - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - emit_ld(rd, 0, RV_REG_T1, ctx); + ret = add_exception_handler(insn, ctx, rd, insn_len); + if (ret) + return ret; break; - + } /* speculation barrier */ case BPF_ST | BPF_NOSPEC: break; diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 753d85bdfad074..be743d700aa703 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -11,7 +11,7 @@ #include "bpf_jit.h" /* Number of iterations to try until offsets converge. */ -#define NR_JIT_ITERATIONS 16 +#define NR_JIT_ITERATIONS 32 static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset) { @@ -41,12 +41,12 @@ bool bpf_jit_needs_zext(void) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { + unsigned int prog_size = 0, extable_size = 0; bool tmp_blinded = false, extra_pass = false; struct bpf_prog *tmp, *orig_prog = prog; int pass = 0, prev_ninsns = 0, i; struct rv_jit_data *jit_data; struct rv_jit_context *ctx; - unsigned int image_size = 0; if (!prog->jit_requested) return orig_prog; @@ -73,7 +73,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (ctx->offset) { extra_pass = true; - image_size = sizeof(*ctx->insns) * ctx->ninsns; + prog_size = sizeof(*ctx->insns) * ctx->ninsns; goto skip_init_ctx; } @@ -102,10 +102,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (ctx->ninsns == prev_ninsns) { if (jit_data->header) break; + /* obtain the actual image size */ + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + prog_size = sizeof(*ctx->insns) * ctx->ninsns; - image_size = sizeof(*ctx->insns) * ctx->ninsns; jit_data->header = - bpf_jit_binary_alloc(image_size, + bpf_jit_binary_alloc(prog_size + extable_size, &jit_data->image, sizeof(u32), bpf_fill_ill_insns); @@ -131,9 +134,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_offset; } + if (extable_size) + prog->aux->extable = (void *)ctx->insns + prog_size; + skip_init_ctx: pass++; ctx->ninsns = 0; + ctx->nexentries = 0; bpf_jit_build_prologue(ctx); if (build_body(ctx, extra_pass, NULL)) { @@ -144,11 +151,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_jit_build_epilogue(ctx); if (bpf_jit_enable > 1) - bpf_jit_dump(prog->len, image_size, pass, ctx->insns); + bpf_jit_dump(prog->len, prog_size, pass, ctx->insns); prog->bpf_func = (void *)ctx->insns; prog->jited = 1; - prog->jited_len = image_size; + prog->jited_len = prog_size; bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 25b5dc34db75f5..4b9b14b20984ea 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -349,8 +349,6 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, extern int qdio_establish(struct ccw_device *cdev, struct qdio_initialize *init_data); extern int qdio_activate(struct ccw_device 
*); -extern struct qaob *qdio_allocate_aob(void); -extern void qdio_release_aob(struct qaob *); extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob); extern int qdio_start_irq(struct ccw_device *cdev); diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index e80ee8641ac3ea..2672dd03faf311 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -124,6 +124,9 @@ #define SO_BUF_LOCK 0x0051 +#define SO_RESERVE_MEM 0x0052 + + #if !defined(__KERNEL__) diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 2fc0b038ff8a2b..59331384c2d389 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -276,7 +276,7 @@ static const struct ethtool_ops uml_net_ethtool_ops = { void uml_net_setup_etheraddr(struct net_device *dev, char *str) { - unsigned char *addr = dev->dev_addr; + u8 addr[ETH_ALEN]; char *end; int i; @@ -316,6 +316,7 @@ void uml_net_setup_etheraddr(struct net_device *dev, char *str) addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], addr[5]); } + eth_hw_addr_set(dev, addr); return; random: diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index aa3f5b527dc05d..603964408d2d7e 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2145,19 +2145,19 @@ static __initconst const u64 knl_hw_cache_extra_regs * However, there are some cases which may change PEBS status, e.g. PMI * throttle. The PEBS_ENABLE should be updated where the status changes. */ -static void __intel_pmu_disable_all(void) +static __always_inline void __intel_pmu_disable_all(bool bts) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); - if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) + if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) intel_pmu_disable_bts(); } -static void intel_pmu_disable_all(void) +static __always_inline void intel_pmu_disable_all(void) { - __intel_pmu_disable_all(); + __intel_pmu_disable_all(true); intel_pmu_pebs_disable_all(); intel_pmu_lbr_disable_all(); } @@ -2188,6 +2188,49 @@ static void intel_pmu_enable_all(int added) __intel_pmu_enable_all(added, false); } +static noinline int +__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, + unsigned int cnt, unsigned long flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + intel_pmu_lbr_read(); + cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); + + memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); + intel_pmu_enable_all(0); + local_irq_restore(flags); + return cnt; +} + +static int +intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) +{ + unsigned long flags; + + /* must not have branches... */ + local_irq_save(flags); + __intel_pmu_disable_all(false); /* we don't care about BTS */ + __intel_pmu_pebs_disable_all(); + __intel_pmu_lbr_disable(); + /* ... until here */ + return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); +} + +static int +intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) +{ + unsigned long flags; + + /* must not have branches... */ + local_irq_save(flags); + __intel_pmu_disable_all(false); /* we don't care about BTS */ + __intel_pmu_pebs_disable_all(); + __intel_pmu_arch_lbr_disable(); + /* ... 
until here */ + return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); +} + /* * Workaround for: * Intel Errata AAK100 (model 26) @@ -2937,7 +2980,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) apic_write(APIC_LVTPC, APIC_DM_NMI); intel_bts_disable_local(); cpuc->enabled = 0; - __intel_pmu_disable_all(); + __intel_pmu_disable_all(true); handled = intel_pmu_drain_bts_buffer(); handled += intel_bts_interrupt(); status = intel_pmu_get_status(); @@ -6299,9 +6342,21 @@ __init int intel_pmu_init(void) x86_pmu.lbr_nr = 0; } - if (x86_pmu.lbr_nr) + if (x86_pmu.lbr_nr) { pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); + /* only support branch_stack snapshot for perfmon >= v2 */ + if (x86_pmu.disable_all == intel_pmu_disable_all) { + if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { + static_call_update(perf_snapshot_branch_stack, + intel_pmu_snapshot_arch_branch_stack); + } else { + static_call_update(perf_snapshot_branch_stack, + intel_pmu_snapshot_branch_stack); + } + } + } + intel_pmu_check_extra_regs(x86_pmu.extra_regs); /* Support full width counters using alternative MSR range */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 4dbb55a43dad2b..2e215369df4a83 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1302,7 +1302,7 @@ void intel_pmu_pebs_disable_all(void) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->pebs_enabled) - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + __intel_pmu_pebs_disable_all(); } static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 9e6d6eaeb4cb60..6b72e9b55c69cf 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -228,20 +228,6 @@ static void __intel_pmu_lbr_enable(bool pmi) wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); } -static void __intel_pmu_lbr_disable(void) -{ - u64 debugctl; - - if (static_cpu_has(X86_FEATURE_ARCH_LBR)) { - wrmsrl(MSR_ARCH_LBR_CTL, 0); - return; - } - - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); - debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); -} - void intel_pmu_lbr_reset_32(void) { int i; @@ -779,8 +765,12 @@ void intel_pmu_lbr_disable_all(void) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (cpuc->lbr_users && !vlbr_exclude_host()) + if (cpuc->lbr_users && !vlbr_exclude_host()) { + if (static_cpu_has(X86_FEATURE_ARCH_LBR)) + return __intel_pmu_arch_lbr_disable(); + __intel_pmu_lbr_disable(); + } } void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 7b5167db3e78ac..5480db242083a1 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1242,6 +1242,25 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) return intel_pmu_has_bts_period(event, hwc->sample_period); } +static __always_inline void __intel_pmu_pebs_disable_all(void) +{ + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); +} + +static __always_inline void __intel_pmu_arch_lbr_disable(void) +{ + wrmsrl(MSR_ARCH_LBR_CTL, 0); +} + +static __always_inline void __intel_pmu_lbr_disable(void) +{ + u64 debugctl; + + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); +} + int intel_pmu_save_and_restart(struct perf_event *event); struct event_constraint * diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 
8796559f62a41d..726700fabca6d4 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -697,6 +697,20 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) *pprog = prog; } +/* + * Similar version of maybe_emit_mod() for a single register + */ +static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) +{ + u8 *prog = *pprog; + + if (is64) + EMIT1(add_1mod(0x48, reg)); + else if (is_ereg(reg)) + EMIT1(add_1mod(0x40, reg)); + *pprog = prog; +} + /* LDX: dst_reg = *(u8*)(src_reg + off) */ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { @@ -925,10 +939,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, /* neg dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: - if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); EMIT2(0xF7, add_1reg(0xD8, dst_reg)); break; @@ -942,10 +954,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_AND | BPF_K: case BPF_ALU64 | BPF_OR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: - if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); /* * b3 holds 'normal' opcode, b2 short form only valid @@ -1002,19 +1012,30 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_K: - case BPF_ALU64 | BPF_DIV | BPF_K: - EMIT1(0x50); /* push rax */ - EMIT1(0x52); /* push rdx */ + case BPF_ALU64 | BPF_DIV | BPF_K: { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; - if (BPF_SRC(insn->code) == BPF_X) - /* mov r11, src_reg */ - EMIT_mov(AUX_REG, src_reg); - else + if (dst_reg != BPF_REG_0) + EMIT1(0x50); /* push rax */ + if (dst_reg != BPF_REG_3) + EMIT1(0x52); /* push rdx */ + + if (BPF_SRC(insn->code) == BPF_X) { + if (src_reg == BPF_REG_0 || + src_reg == BPF_REG_3) { + /* mov r11, src_reg */ + EMIT_mov(AUX_REG, src_reg); + src_reg = AUX_REG; + } + } else { /* mov r11, imm32 */ EMIT3_off32(0x49, 0xC7, 0xC3, imm32); + src_reg = AUX_REG; + } - /* mov rax, dst_reg */ - EMIT_mov(BPF_REG_0, dst_reg); + if (dst_reg != BPF_REG_0) + /* mov rax, dst_reg */ + emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); /* * xor edx, edx @@ -1022,63 +1043,51 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, */ EMIT2(0x31, 0xd2); - if (BPF_CLASS(insn->code) == BPF_ALU64) - /* div r11 */ - EMIT3(0x49, 0xF7, 0xF3); - else - /* div r11d */ - EMIT3(0x41, 0xF7, 0xF3); - - if (BPF_OP(insn->code) == BPF_MOD) - /* mov r11, rdx */ - EMIT3(0x49, 0x89, 0xD3); - else - /* mov r11, rax */ - EMIT3(0x49, 0x89, 0xC3); + /* div src_reg */ + maybe_emit_1mod(&prog, src_reg, is64); + EMIT2(0xF7, add_1reg(0xF0, src_reg)); - EMIT1(0x5A); /* pop rdx */ - EMIT1(0x58); /* pop rax */ + if (BPF_OP(insn->code) == BPF_MOD && + dst_reg != BPF_REG_3) + /* mov dst_reg, rdx */ + emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); + else if (BPF_OP(insn->code) == BPF_DIV && + dst_reg != BPF_REG_0) + /* mov dst_reg, rax */ + emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); - /* mov dst_reg, r11 */ - EMIT_mov(dst_reg, AUX_REG); + if (dst_reg != BPF_REG_3) + EMIT1(0x5A); /* pop rdx */ + if (dst_reg != BPF_REG_0) + EMIT1(0x58); /* pop rax */ break; + } case BPF_ALU | 
BPF_MUL | BPF_K: - case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_K: - case BPF_ALU64 | BPF_MUL | BPF_X: - { - bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; - - if (dst_reg != BPF_REG_0) - EMIT1(0x50); /* push rax */ - if (dst_reg != BPF_REG_3) - EMIT1(0x52); /* push rdx */ - - /* mov r11, dst_reg */ - EMIT_mov(AUX_REG, dst_reg); + maybe_emit_mod(&prog, dst_reg, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); - if (BPF_SRC(insn->code) == BPF_X) - emit_mov_reg(&prog, is64, BPF_REG_0, src_reg); + if (is_imm8(imm32)) + /* imul dst_reg, dst_reg, imm8 */ + EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), + imm32); else - emit_mov_imm32(&prog, is64, BPF_REG_0, imm32); + /* imul dst_reg, dst_reg, imm32 */ + EMIT2_off32(0x69, + add_2reg(0xC0, dst_reg, dst_reg), + imm32); + break; - if (is64) - EMIT1(add_1mod(0x48, AUX_REG)); - else if (is_ereg(AUX_REG)) - EMIT1(add_1mod(0x40, AUX_REG)); - /* mul(q) r11 */ - EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + maybe_emit_mod(&prog, src_reg, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); - if (dst_reg != BPF_REG_3) - EMIT1(0x5A); /* pop rdx */ - if (dst_reg != BPF_REG_0) { - /* mov dst_reg, rax */ - EMIT_mov(dst_reg, BPF_REG_0); - EMIT1(0x58); /* pop rax */ - } + /* imul dst_reg, src_reg */ + EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); break; - } + /* Shifts */ case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_K: @@ -1086,10 +1095,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_LSH | BPF_K: case BPF_ALU64 | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: - if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); b3 = simple_alu_opcodes[BPF_OP(insn->code)]; if (imm32 == 1) @@ -1120,10 +1127,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ - if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_ALU64); b3 = simple_alu_opcodes[BPF_OP(insn->code)]; EMIT2(0xD3, add_1reg(b3, dst_reg)); @@ -1430,10 +1435,8 @@ st: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_K: /* test dst_reg, imm32 */ - if (BPF_CLASS(insn->code) == BPF_JMP) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_JMP); EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; @@ -1466,10 +1469,8 @@ st: if (is_imm8(insn->off)) } /* cmp dst_reg, imm8/32 */ - if (BPF_CLASS(insn->code) == BPF_JMP) - EMIT1(add_1mod(0x48, dst_reg)); - else if (is_ereg(dst_reg)) - EMIT1(add_1mod(0x40, dst_reg)); + maybe_emit_1mod(&prog, dst_reg, + BPF_CLASS(insn->code) == BPF_JMP); if (is_imm8(imm32)) EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 4986226a5ab267..962e5e14520975 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -124,7 +124,7 @@ static char *split_if_spec(char *str, ...) 
static void setup_etheraddr(struct net_device *dev, char *str) { - unsigned char *addr = dev->dev_addr; + u8 addr[ETH_ALEN]; if (str == NULL) goto random; @@ -147,6 +147,7 @@ static void setup_etheraddr(struct net_device *dev, char *str) if (!is_local_ether_addr(addr)) pr_warn("%s: assigning a globally valid ethernet address\n", dev->name); + eth_hw_addr_set(dev, addr); return; random: @@ -467,7 +468,7 @@ static int iss_net_set_mac(struct net_device *dev, void *addr) if (!is_valid_ether_addr(hwaddr->sa_data)) return -EADDRNOTAVAIL; spin_lock_bh(&lp->lock); - memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, hwaddr->sa_data); spin_unlock_bh(&lp->lock); return 0; } diff --git a/drivers/base/property.c b/drivers/base/property.c index 453918eb7390cb..f1f35b48ab8b96 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -15,7 +15,6 @@ #include #include #include -#include #include struct fwnode_handle *dev_fwnode(struct device *dev) @@ -935,68 +934,6 @@ int device_get_phy_mode(struct device *dev) } EXPORT_SYMBOL_GPL(device_get_phy_mode); -static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode, - const char *name, char *addr, - int alen) -{ - int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen); - - if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr)) - return addr; - return NULL; -} - -/** - * fwnode_get_mac_address - Get the MAC from the firmware node - * @fwnode: Pointer to the firmware node - * @addr: Address of buffer to store the MAC in - * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN - * - * Search the firmware node for the best MAC address to use. 'mac-address' is - * checked first, because that is supposed to contain to "most recent" MAC - * address. If that isn't set, then 'local-mac-address' is checked next, - * because that is the default address. If that isn't set, then the obsolete - * 'address' is checked, just in case we're using an old device tree. - * - * Note that the 'address' property is supposed to contain a virtual address of - * the register set, but some DTS files have redefined that property to be the - * MAC address. - * - * All-zero MAC addresses are rejected, because those could be properties that - * exist in the firmware tables, but were not updated by the firmware. For - * example, the DTS could define 'mac-address' and 'local-mac-address', with - * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. - * In this case, the real MAC is in 'local-mac-address', and 'mac-address' - * exists but is all zeros. 
-*/ -void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen) -{ - char *res; - - res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen); - if (res) - return res; - - res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen); - if (res) - return res; - - return fwnode_get_mac_addr(fwnode, "address", addr, alen); -} -EXPORT_SYMBOL(fwnode_get_mac_address); - -/** - * device_get_mac_address - Get the MAC for a given device - * @dev: Pointer to the device - * @addr: Address of buffer to store the MAC in - * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN - */ -void *device_get_mac_address(struct device *dev, char *addr, int alen) -{ - return fwnode_get_mac_address(dev_fwnode(dev), addr, alen); -} -EXPORT_SYMBOL(device_get_mac_address); - /** * fwnode_irq_get - Get IRQ directly from a fwnode * @fwnode: Pointer to the firmware node diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c index 6a20201299f564..f7293040a2b18a 100644 --- a/drivers/base/regmap/regmap-mdio.c +++ b/drivers/base/regmap/regmap-mdio.c @@ -14,7 +14,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int { int ret; - ret = mdiobus_read(mdio_dev->bus, mdio_dev->addr, reg); + ret = mdiodev_read(mdio_dev, reg); if (ret < 0) return ret; @@ -24,7 +24,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val) { - return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val); + return mdiodev_write(mdio_dev, reg, val); } static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val) @@ -44,7 +44,7 @@ static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int v if (unlikely(reg & ~REGNUM_C22_MASK)) return -ENXIO; - return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val); + return mdiodev_write(mdio_dev, reg, val); } static const struct regmap_bus regmap_mdio_c22_bus = { diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index c6d6ba0d00b1ed..8e7ca3e4c8c49d 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -20,7 +20,7 @@ MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); /* contains the number the next bus should get. 
*/ -static unsigned int bcma_bus_next_num = 0; +static unsigned int bcma_bus_next_num; /* bcma_buses_mutex locks the bcma_bus_next_num */ static DEFINE_MUTEX(bcma_buses_mutex); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index f1705b46fc8898..9359bff4729659 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -1037,8 +1037,9 @@ static bool btintel_firmware_version(struct hci_dev *hdev, params = (void *)(fw_ptr + sizeof(*cmd)); - bt_dev_info(hdev, "Boot Address: 0x%x", - le32_to_cpu(params->boot_addr)); + *boot_addr = le32_to_cpu(params->boot_addr); + + bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr); bt_dev_info(hdev, "Firmware Version: %u-%u.%u", params->fw_build_num, params->fw_build_ww, @@ -1071,9 +1072,6 @@ int btintel_download_firmware(struct hci_dev *hdev, /* Skip version checking */ break; default: - /* Skip reading firmware file version in bootloader mode */ - if (ver->fw_variant == 0x06) - break; /* Skip download if firmware has the same version */ if (btintel_firmware_version(hdev, ver->fw_build_num, @@ -1114,19 +1112,16 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev, int err; u32 css_header_ver; - /* Skip reading firmware file version in bootloader mode */ - if (ver->img_type != 0x01) { - /* Skip download if firmware has the same version */ - if (btintel_firmware_version(hdev, ver->min_fw_build_nn, - ver->min_fw_build_cw, - ver->min_fw_build_yy, - fw, boot_param)) { - bt_dev_info(hdev, "Firmware already loaded"); - /* Return -EALREADY to indicate that firmware has - * already been loaded. - */ - return -EALREADY; - } + /* Skip download if firmware has the same version */ + if (btintel_firmware_version(hdev, ver->min_fw_build_nn, + ver->min_fw_build_cw, + ver->min_fw_build_yy, + fw, boot_param)) { + bt_dev_info(hdev, "Firmware already loaded"); + /* Return -EALREADY to indicate that firmware has + * already been loaded. 
+ */ + return -EALREADY; } /* The firmware variant determines if the device is in bootloader @@ -1285,12 +1280,16 @@ static int btintel_read_debug_features(struct hci_dev *hdev, static int btintel_set_debug_features(struct hci_dev *hdev, const struct intel_debug_features *features) { - u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00, + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 }; + u8 trace_enable = 0x02; struct sk_buff *skb; - if (!features) + if (!features) { + bt_dev_warn(hdev, "Debug features not read"); return -EINVAL; + } if (!(features->page1[0] & 0x3f)) { bt_dev_info(hdev, "Telemetry exception format not supported"); @@ -1303,11 +1302,95 @@ static int btintel_set_debug_features(struct hci_dev *hdev, PTR_ERR(skb)); return PTR_ERR(skb); } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x", + trace_enable, mask[3]); + + return 0; +} + +static int btintel_reset_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features) +{ + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + u8 trace_enable = 0x00; + struct sk_buff *skb; + + if (!features) { + bt_dev_warn(hdev, "Debug features not read"); + return -EINVAL; + } + + if (!(features->page1[0] & 0x3f)) { + bt_dev_info(hdev, "Telemetry exception format not supported"); + return 0; + } + + /* Should stop the trace before writing ddc event mask. */ + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } kfree_skb(skb); + + bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x", + trace_enable, mask[3]); + return 0; } +int btintel_set_quality_report(struct hci_dev *hdev, bool enable) +{ + struct intel_debug_features features; + int err; + + bt_dev_dbg(hdev, "enable %d", enable); + + /* Read the Intel supported features and if new exception formats + * supported, need to load the additional DDC config to enable. + */ + err = btintel_read_debug_features(hdev, &features); + if (err) + return err; + + /* Set or reset the debug features. 
*/ + if (enable) + err = btintel_set_debug_features(hdev, &features); + else + err = btintel_reset_debug_features(hdev, &features); + + return err; +} +EXPORT_SYMBOL_GPL(btintel_set_quality_report); + static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -1893,7 +1976,6 @@ static int btintel_bootloader_setup(struct hci_dev *hdev, u32 boot_param; char ddcname[64]; int err; - struct intel_debug_features features; BT_DBG("%s", hdev->name); @@ -1934,14 +2016,7 @@ static int btintel_bootloader_setup(struct hci_dev *hdev, btintel_load_ddc_config(hdev, ddcname); } - /* Read the Intel supported features and if new exception formats - * supported, need to load the additional DDC config to enable. - */ - err = btintel_read_debug_features(hdev, &features); - if (!err) { - /* Set DDC mask for available debug features */ - btintel_set_debug_features(hdev, &features); - } + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); /* Read the Intel version information after loading the FW */ err = btintel_read_version(hdev, &new_ver); @@ -2083,13 +2158,102 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, return err; } +static int btintel_get_codec_config_data(struct hci_dev *hdev, + __u8 link, struct bt_codec *codec, + __u8 *ven_len, __u8 **ven_data) +{ + int err = 0; + + if (!ven_data || !ven_len) + return -EINVAL; + + *ven_len = 0; + *ven_data = NULL; + + if (link != ESCO_LINK) { + bt_dev_err(hdev, "Invalid link type(%u)", link); + return -EINVAL; + } + + *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); + if (!*ven_data) { + err = -ENOMEM; + goto error; + } + + /* supports only CVSD and mSBC offload codecs */ + switch (codec->id) { + case 0x02: + **ven_data = 0x00; + break; + case 0x05: + **ven_data = 0x01; + break; + default: + err = -EINVAL; + bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); + goto error; + } + /* codec and its capabilities are pre-defined to ids + * preset id = 0x00 represents CVSD codec with sampling rate 8K + * preset id = 0x01 represents mSBC codec with sampling rate 16K + */ + *ven_len = sizeof(__u8); + return err; + +error: + kfree(*ven_data); + *ven_data = NULL; + return err; +} + +static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) +{ + /* Intel uses 1 as data path id for all the usecases */ + *data_path_id = 1; + return 0; +} + +static int btintel_configure_offload(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err = 0; + struct intel_offload_use_cases *use_cases; + + skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading offload use cases failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len < sizeof(*use_cases)) { + err = -EIO; + goto error; + } + + use_cases = (void *)skb->data; + + if (use_cases->status) { + err = -bt_to_errno(skb->data[0]); + goto error; + } + + if (use_cases->preset[0] & 0x03) { + hdev->get_data_path_id = btintel_get_data_path_id; + hdev->get_codec_config_data = btintel_get_codec_config_data; + } +error: + kfree_skb(skb); + return err; +} + static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver) { u32 boot_param; char ddcname[64]; int err; - struct intel_debug_features features; struct intel_version_tlv new_ver; bt_dev_dbg(hdev, ""); @@ -2125,14 +2289,10 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, */ btintel_load_ddc_config(hdev, ddcname); - /* Read the Intel supported features and if new exception formats - * supported, 
need to load the additional DDC config to enable. - */ - err = btintel_read_debug_features(hdev, &features); - if (!err) { - /* Set DDC mask for available debug features */ - btintel_set_debug_features(hdev, &features); - } + /* Read supported use cases and set callbacks to fetch datapath id */ + btintel_configure_offload(hdev); + + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); /* Read the Intel version information after loading the FW */ err = btintel_read_version_tlv(hdev, &new_ver); @@ -2232,6 +2392,9 @@ static int btintel_setup_combined(struct hci_dev *hdev) set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); + /* Set up the quality report callback for Intel devices */ + hdev->set_quality_report = btintel_set_quality_report; + /* For Legacy device, check the HW platform value and size */ if (skb->len == sizeof(ver) && skb->data[1] == 0x37) { bt_dev_dbg(hdev, "Read the legacy Intel version information"); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index aa64072bbe68d7..e500c0d7a72984 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -132,6 +132,11 @@ struct intel_debug_features { __u8 page1[16]; } __packed; +struct intel_offload_use_cases { + __u8 status; + __u8 preset[8]; +} __packed; + #define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) #define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) #define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) @@ -204,6 +209,7 @@ int btintel_configure_setup(struct hci_dev *hdev); void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len); void btintel_secure_send_result(struct hci_dev *hdev, const void *ptr, unsigned int len); +int btintel_set_quality_report(struct hci_dev *hdev, bool enable); #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -294,4 +300,9 @@ static inline void btintel_secure_send_result(struct hci_dev *hdev, const void *ptr, unsigned int len) { } + +static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable) +{ + return -ENODEV; +} #endif diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 8b9d78ce6bb29e..5ccbe4d459d0dd 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -587,12 +587,12 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) return 0; } -static bool btmrvl_prevent_wake(struct hci_dev *hdev) +static bool btmrvl_wakeup(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; - return !device_may_wakeup(&card->func->dev); + return device_may_wakeup(&card->func->dev); } /* @@ -696,7 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv) hdev->send = btmrvl_send_frame; hdev->setup = btmrvl_setup; hdev->set_bdaddr = btmrvl_set_bdaddr; - hdev->prevent_wake = btmrvl_prevent_wake; + hdev->wakeup = btmrvl_wakeup; SET_HCIDEV_DEV(hdev, &card->func->dev); hdev->dev_type = priv->btmrvl_dev.dev_type; diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index e9d91d7c0db48d..9ba22b13b4fa0e 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, int err; hlen = sizeof(*hdr) + wmt_params->dlen; - if (hlen > 255) - return -EINVAL; + if (hlen > 255) { + err = -EINVAL; + goto err_free_skb; + } hdr = (struct mtk_wmt_hdr *)&wc; hdr->dir 
= 1; @@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); if (err < 0) { clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); - return err; + goto err_free_skb; } /* The vendor specific WMT commands are all answered by a vendor @@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); - return err; + goto err_free_skb; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); - return -ETIMEDOUT; + err = -ETIMEDOUT; + goto err_free_skb; } /* Parse and handle the return WMT event */ diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 8646b6dd11e94d..634cf8f5ed2dbd 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -19,7 +19,6 @@ #include #include #include -#include #define RSI_DMA_ALIGN 8 #define RSI_FRAME_DESC_SIZE 16 diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 1f8afa0244d851..c2bdd1e6060e83 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -59,6 +59,7 @@ struct id_table { __u8 hci_bus; bool config_needed; bool has_rom_version; + bool has_msft_ext; char *fw_name; char *cfg_name; }; @@ -121,6 +122,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB), .config_needed = false, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8821c_fw.bin", .cfg_name = "rtl_bt/rtl8821c_config" }, @@ -135,6 +137,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART), .config_needed = false, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8761b_fw.bin", .cfg_name = "rtl_bt/rtl8761b_config" }, @@ -149,6 +152,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART), .config_needed = true, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822cs_fw.bin", .cfg_name = "rtl_bt/rtl8822cs_config" }, @@ -156,6 +160,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB), .config_needed = false, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822cu_fw.bin", .cfg_name = "rtl_bt/rtl8822cu_config" }, @@ -163,6 +168,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB), .config_needed = true, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822b_fw.bin", .cfg_name = "rtl_bt/rtl8822b_config" }, @@ -170,6 +176,7 @@ static const struct id_table ic_id_table[] = { { IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB), .config_needed = false, .has_rom_version = true, + .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852au_fw.bin", .cfg_name = "rtl_bt/rtl8852au_config" }, }; @@ -594,8 +601,10 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hci_rev = le16_to_cpu(resp->hci_rev); lmp_subver = le16_to_cpu(resp->lmp_subver); - if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c && - resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e) + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, + hdev->bus); + + if (!btrtl_dev->ic_info) btrtl_dev->drop_fw = true; if (btrtl_dev->drop_fw) { @@ -634,13 +643,13 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hci_ver = 
resp->hci_ver; hci_rev = le16_to_cpu(resp->hci_rev); lmp_subver = le16_to_cpu(resp->lmp_subver); + + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, + hdev->bus); } out_free: kfree_skb(skb); - btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, - hdev->bus); - if (!btrtl_dev->ic_info) { rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", lmp_subver, hci_rev, hci_ver); @@ -684,12 +693,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, /* The following chips supports the Microsoft vendor extension, * therefore set the corresponding VsMsftOpCode. */ - switch (lmp_subver) { - case RTL_ROM_LMP_8822B: - case RTL_ROM_LMP_8852A: + if (btrtl_dev->ic_info->has_msft_ext) hci_set_msft_opcode(hdev, 0xFCF0); - break; - } return btrtl_dev; @@ -746,6 +751,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) case CHIP_ID_8852A: set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + hci_set_aosp_capable(hdev); break; default: rtl_dev_dbg(hdev, "Central-peripheral role not enabled."); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 60d2fce59a71d2..75c83768c25734 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -384,6 +384,12 @@ static const struct usb_device_id blacklist_table[] = { /* Realtek 8852AE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), @@ -410,6 +416,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -433,6 +442,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8761B Bluetooth devices */ + { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Additional Realtek 8761BU Bluetooth devices */ { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, @@ -451,10 +464,6 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, - /* Bluetooth component of Realtek 8852AE device */ - { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, - { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | @@ -652,11 +661,33 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev) static void btusb_qca_cmd_timeout(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; int err; if 
(++data->cmd_timeout_cnt < 5) return; + if (reset_gpio) { + bt_dev_err(hdev, "Reset qca device via bt_en gpio"); + + /* Toggle the hard reset line. The qca bt device is going to + * yank itself off the USB and then replug. The cleanup is handled + * correctly on the way out (standard USB disconnect), and the new + * device is detected cleanly and bound to the driver again like + * it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return; + } + + gpiod_set_value_cansleep(reset_gpio, 0); + msleep(200); + gpiod_set_value_cansleep(reset_gpio, 1); + + return; + } + bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device."); /* This is not an unbalanced PM reference since the device will reset */ err = usb_autopm_get_interface(data->intf); @@ -2200,6 +2231,23 @@ struct btmtk_section_map { }; } __packed; +static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + long ret; + + skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "changing Mediatek device address failed (%ld)", + ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + static void btusb_mtk_wmt_recv(struct urb *urb) { struct hci_dev *hdev = urb->context; @@ -2804,6 +2852,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev) case 0x7668: fwname = FIRMWARE_MT7668; break; + case 0x7922: case 0x7961: snprintf(fw_bin_name, sizeof(fw_bin_name), "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", @@ -3591,11 +3640,11 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf) interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } -static bool btusb_prevent_wake(struct hci_dev *hdev) +static bool btusb_wakeup(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); - return !device_may_wakeup(&data->udev->dev); + return device_may_wakeup(&data->udev->dev); } static int btusb_shutdown_qca(struct hci_dev *hdev) @@ -3752,7 +3801,7 @@ static int btusb_probe(struct usb_interface *intf, hdev->flush = btusb_flush; hdev->send = btusb_send_frame; hdev->notify = btusb_notify; - hdev->prevent_wake = btusb_prevent_wake; + hdev->wakeup = btusb_wakeup; #ifdef CONFIG_PM err = btusb_config_oob_wake(hdev); @@ -3819,6 +3868,7 @@ static int btusb_probe(struct usb_interface *intf, hdev->shutdown = btusb_mtk_shutdown; hdev->manufacturer = 70; hdev->cmd_timeout = btusb_mtk_cmd_timeout; + hdev->set_bdaddr = btusb_set_bdaddr_mtk; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); data->recv_acl = btusb_recv_acl_mtk; } diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 0c0dedece59c55..34286ffe0568fd 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count) count -= processed; } - pm_runtime_get(&hu->serdev->dev); - pm_runtime_mark_last_busy(&hu->serdev->dev); - pm_runtime_put_autosuspend(&hu->serdev->dev); + if (hu->serdev) { + pm_runtime_get(&hu->serdev->dev); + pm_runtime_mark_last_busy(&hu->serdev->dev); + pm_runtime_put_autosuspend(&hu->serdev->dev); + } return 0; } @@ -814,7 +816,6 @@ static int h5_serdev_probe(struct serdev_device *serdev) struct device *dev = &serdev->dev; struct h5 *h5; const struct h5_device_data *data; - int err; h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); if (!h5) @@ -846,6 +847,8 @@ static int h5_serdev_probe(struct 
serdev_device *serdev) h5->vnd = data->vnd; } + if (data->driver_info & H5_INFO_WAKEUP_DISABLE) + set_bit(H5_WAKEUP_DISABLE, &h5->flags); h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(h5->enable_gpio)) @@ -856,14 +859,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (IS_ERR(h5->device_wake_gpio)) return PTR_ERR(h5->device_wake_gpio); - err = hci_uart_register_device(&h5->serdev_hu, &h5p); - if (err) - return err; - - if (data->driver_info & H5_INFO_WAKEUP_DISABLE) - set_bit(H5_WAKEUP_DISABLE, &h5->flags); - - return 0; + return hci_uart_register_device(&h5->serdev_hu, &h5p); } static void h5_serdev_remove(struct serdev_device *serdev) @@ -962,11 +958,13 @@ static void h5_btrtl_open(struct h5 *h5) serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); serdev_device_set_baudrate(h5->hu->serdev, 115200); - pm_runtime_set_active(&h5->hu->serdev->dev); - pm_runtime_use_autosuspend(&h5->hu->serdev->dev); - pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, - SUSPEND_TIMEOUT_MS); - pm_runtime_enable(&h5->hu->serdev->dev); + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { + pm_runtime_set_active(&h5->hu->serdev->dev); + pm_runtime_use_autosuspend(&h5->hu->serdev->dev); + pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, + SUSPEND_TIMEOUT_MS); + pm_runtime_enable(&h5->hu->serdev->dev); + } /* The controller needs up to 500ms to wakeup */ gpiod_set_value_cansleep(h5->enable_gpio, 1); @@ -976,7 +974,8 @@ static void h5_btrtl_open(struct h5 *h5) static void h5_btrtl_close(struct h5 *h5) { - pm_runtime_disable(&h5->hu->serdev->dev); + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) + pm_runtime_disable(&h5->hu->serdev->dev); gpiod_set_value_cansleep(h5->device_wake_gpio, 0); gpiod_set_value_cansleep(h5->enable_gpio, 0); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 5ed2cfa7da1d9e..5e32e4d5367af5 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -479,6 +479,9 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + /* Error if the tty has no write op instead of leaving an exploitable * hole */ diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 53deea2eb7b4df..dd768a8ed7cbb0 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1577,7 +1577,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev) mutex_unlock(&qca->hci_memdump_lock); } -static bool qca_prevent_wake(struct hci_dev *hdev) +static bool qca_wakeup(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); bool wakeup; @@ -1730,6 +1730,7 @@ static int qca_setup(struct hci_uart *hu) if (qca_is_wcn399x(soc_type) || qca_is_wcn6750(soc_type)) { set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + hci_set_aosp_capable(hdev); ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) @@ -1764,7 +1765,7 @@ static int qca_setup(struct hci_uart *hu) qca_debugfs_init(hdev); hu->hdev->hw_error = qca_hw_error; hu->hdev->cmd_timeout = qca_cmd_timeout; - hu->hdev->prevent_wake = qca_prevent_wake; + hu->hdev->wakeup = qca_wakeup; } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ set_bit(QCA_ROM_FW, &qca->flags); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 8ab26dec5f6e8c..b45db0db347c67 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -21,6 +21,7 @@ #include #include +#include #include 
#include @@ -37,6 +38,9 @@ struct vhci_data { struct mutex open_mutex; struct delayed_work open_timeout; + + bool suspended; + bool wakeup; }; static int vhci_open_dev(struct hci_dev *hdev) @@ -73,6 +77,115 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) +{ + *data_path_id = 0; + return 0; +} + +static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, + struct bt_codec *codec, __u8 *vnd_len, + __u8 **vnd_data) +{ + if (type != ESCO_LINK) + return -EINVAL; + + *vnd_len = 0; + *vnd_data = NULL; + return 0; +} + +static bool vhci_wakeup(struct hci_dev *hdev) +{ + struct vhci_data *data = hci_get_drvdata(hdev); + + return data->wakeup; +} + +static ssize_t force_suspend_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + char buf[3]; + + buf[0] = data->suspended ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_suspend_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (data->suspended == enable) + return -EALREADY; + + if (enable) + err = hci_suspend_dev(data->hdev); + else + err = hci_resume_dev(data->hdev); + + if (err) + return err; + + data->suspended = enable; + + return count; +} + +static const struct file_operations force_suspend_fops = { + .open = simple_open, + .read = force_suspend_read, + .write = force_suspend_write, + .llseek = default_llseek, +}; + +static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + char buf[3]; + + buf[0] = data->wakeup ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_wakeup_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (data->wakeup == enable) + return -EALREADY; + + return count; +} + +static const struct file_operations force_wakeup_fops = { + .open = simple_open, + .read = force_wakeup_read, + .write = force_wakeup_write, + .llseek = default_llseek, +}; + static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; @@ -112,6 +225,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; + hdev->get_data_path_id = vhci_get_data_path_id; + hdev->get_codec_config_data = vhci_get_codec_config_data; + hdev->wakeup = vhci_wakeup; /* bit 6 is for external configuration */ if (opcode & 0x40) @@ -129,6 +245,12 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) return -EBUSY; } + debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, + &force_suspend_fops); + + debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, + &force_wakeup_fops); + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 4c3fd2eed1da47..dcc141068128c5 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -1443,8 +1443,8 @@ static int fwnet_probe(struct fw_unit *unit, struct net_device *net; bool allocated_netdev = false; struct fwnet_device *dev; + union fwnet_hwaddr ha; int ret; - union fwnet_hwaddr *ha; mutex_lock(&fwnet_device_mutex); @@ -1491,12 +1491,12 @@ static int fwnet_probe(struct fw_unit *unit, net->max_mtu = 4096U; /* Set our hardware address while we're at it */ - ha = (union fwnet_hwaddr *)net->dev_addr; - put_unaligned_be64(card->guid, &ha->uc.uniq_id); - ha->uc.max_rec = dev->card->max_receive; - ha->uc.sspd = dev->card->link_speed; - put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); - put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); + ha.uc.uniq_id = cpu_to_be64(card->guid); + ha.uc.max_rec = dev->card->max_receive; + ha.uc.sspd = dev->card->link_speed; + ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32); + ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff); + dev_addr_set(net, ha.u); memset(net->broadcast, -1, net->addr_len); diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 96d0eccca3aa74..21f11a5b965b11 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -1055,14 +1055,16 @@ static const struct net_device_ops ssip_pn_ops = { static void ssip_pn_setup(struct net_device *dev) { + static const u8 addr = PN_MEDIA_SOS; + dev->features = 0; dev->netdev_ops = &ssip_pn_ops; dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = SSIP_DEFAULT_MTU; dev->hard_header_len = 1; - dev->dev_addr[0] = PN_MEDIA_SOS; dev->addr_len = 1; + dev_addr_set(dev, &addr); dev->tx_queue_len = SSIP_TXQUEUE_LEN; dev->needs_free_netdev = true; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index f367f4a4abffc1..f3fa2fe6a88a00 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2275,7 +2275,7 @@ 
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, u64 release_mac = MLX4_IB_INVALID_MAC; struct mlx4_ib_qp *qp; - new_smac = mlx4_mac_to_u64(dev->dev_addr); + new_smac = ether_addr_to_u64(dev->dev_addr); atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); /* no need for update QP1 and mac registration in non-SRIOV */ diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8662f462e2a5fa..aea4182f33a48c 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1853,7 +1853,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, - mlx4_mac_to_u64(smac), + ether_addr_to_u64(smac), vlan_id, path, &mqp->pri, port); } diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index a8db8a051170e1..ff3742b0460ac0 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -206,3 +206,29 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, kfree(in); return err; } + +int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid) +{ + u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {}; + int err; + + MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); + MLX5_SET(alloc_uar_in, in, uid, uid); + err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out); + if (err) + return err; + + *uarn = MLX5_GET(alloc_uar_out, out, uar); + return 0; +} + +int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {}; + + MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR); + MLX5_SET(dealloc_uar_in, in, uar, uarn); + MLX5_SET(dealloc_uar_in, in, uid, uid); + return mlx5_cmd_exec_in(dev, dealloc_uar, in); +} diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 66c96292ed432e..ee46638db5de31 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid); int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid); int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); +int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid); +int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index e95967aefe788c..08b7f6bc56c37a 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj, struct mlx5_ib_dev *dev, void *in, void *out) { - struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr; - struct mlx5_core_mkey *mkey; + struct mlx5_ib_mkey *mkey = &obj->mkey; void *mkc; u8 key; - mkey = &devx_mr->mmkey; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); key = MLX5_GET(mkc, mkc, mkey_7_0); mkey->key = mlx5_idx_to_mkey( MLX5_GET(create_mkey_out, out, mkey_index)) | key; mkey->type = MLX5_MKEY_INDIRECT_DEVX; - mkey->iova = MLX5_GET64(mkc, mkc, start_addr); - mkey->size = MLX5_GET64(mkc, mkc, len); - mkey->pd = MLX5_GET(mkc, mkc, pd); - devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); + mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); init_waitqueue_head(&mkey->wait); return mlx5r_store_odp_mkey(dev, mkey); @@ 
-1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, dev = mlx5_udata_to_mdev(&attrs->driver_udata); if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY && xa_erase(&obj->ib_dev->odp_mkeys, - mlx5_base_mkey(obj->devx_mr.mmkey.key))) + mlx5_base_mkey(obj->mkey.key))) /* * The pagefault_single_data_segment() does commands against * the mmkey, we must wait for that to stop before freeing the * mkey, as another allocation could get the same mkey #. */ - mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey); + mlx5r_deref_wait_odp_mkey(&obj->mkey); if (obj->flags & DEVX_OBJ_FLAGS_DCT) ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h index 1f69866aed16ed..ee2213275fd6fd 100644 --- a/drivers/infiniband/hw/mlx5/devx.h +++ b/drivers/infiniband/hw/mlx5/devx.h @@ -16,7 +16,7 @@ struct devx_obj { u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; u32 flags; union { - struct mlx5_ib_devx_mr devx_mr; + struct mlx5_ib_mkey mkey; struct mlx5_core_dct core_dct; struct mlx5_core_cq core_cq; u32 flow_counter_bulk_size; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8664bcf6d3f590..5ec8bd2f0b2ff3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte bfregi = &context->bfregi; for (i = 0; i < bfregi->num_static_sys_pages; i++) { - err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); + err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i], + context->devx_uid); if (err) goto error; @@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte error: for (--i; i >= 0; i--) - if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) + if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], + context->devx_uid)) mlx5_ib_warn(dev, "failed to free uar %d\n", i); return err; @@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, for (i = 0; i < bfregi->num_sys_pages; i++) if (i < bfregi->num_static_sys_pages || bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) - mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); + mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], + context->devx_uid); } int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) @@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return -EINVAL; + if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { + err = mlx5_ib_devx_create(dev, true); + if (err < 0) + goto out_ctx; + context->devx_uid = err; + } + lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; bfregi = &context->bfregi; @@ -1903,7 +1913,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, /* updates req->total_num_bfregs */ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); if (err) - goto out_ctx; + goto out_devx; mutex_init(&bfregi->lock); bfregi->lib_uar_4k = lib_uar_4k; @@ -1911,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, GFP_KERNEL); if (!bfregi->count) { err = -ENOMEM; - goto out_ctx; + goto out_devx; } bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, @@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, goto out_sys_pages; uar_done: - if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { - err = mlx5_ib_devx_create(dev, true); - if (err < 0) - 
goto out_uars; - context->devx_uid = err; - } - err = mlx5_ib_alloc_transport_domain(dev, &context->tdn, context->devx_uid); if (err) - goto out_devx; + goto out_uars; INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1972,9 +1975,6 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, out_mdev: mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); -out_devx: - if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) - mlx5_ib_devx_destroy(dev, context->devx_uid); out_uars: deallocate_uars(dev, context); @@ -1985,6 +1985,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, out_count: kfree(bfregi->count); +out_devx: + if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) + mlx5_ib_devx_destroy(dev, context->devx_uid); + out_ctx: return err; } @@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) bfregi = &context->bfregi; mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); - if (context->devx_uid) - mlx5_ib_devx_destroy(dev, context->devx_uid); - deallocate_uars(dev, context); kfree(bfregi->sys_pages); kfree(bfregi->count); + + if (context->devx_uid) + mlx5_ib_devx_destroy(dev, context->devx_uid); } static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, @@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); struct mlx5_var_table *var_table = &dev->var_table; + struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext); switch (mentry->mmap_flag) { case MLX5_IB_MMAP_TYPE_MEMIC: @@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) break; case MLX5_IB_MMAP_TYPE_UAR_WC: case MLX5_IB_MMAP_TYPE_UAR_NC: - mlx5_cmd_free_uar(dev->mdev, mentry->page_idx); + mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx, + context->devx_uid); kfree(mentry); break; default: @@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, bfregi->count[bfreg_dyn_idx]++; mutex_unlock(&bfregi->lock); - err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); + err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, + context->devx_uid); if (err) { mlx5_ib_warn(dev, "UAR alloc failed\n"); goto free_bfreg; @@ -2240,7 +2247,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, if (!dyn_uar) return err; - mlx5_cmd_free_uar(dev->mdev, idx); + mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid); free_bfreg: mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); @@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c, return ERR_PTR(-ENOMEM); dev = to_mdev(c->ibucontext.device); - err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); + err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid); if (err) goto end; @@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c, return entry; err_insert: - mlx5_cmd_free_uar(dev->mdev, uar_index); + mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid); end: kfree(entry); return ERR_PTR(err); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bf20a388eabe13..e462e368c3539e 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -619,6 +619,20 @@ struct mlx5_user_mmap_entry { u32 page_idx; }; +enum mlx5_mkey_type { + MLX5_MKEY_MR = 1, + MLX5_MKEY_MW, + MLX5_MKEY_INDIRECT_DEVX, +}; + +struct mlx5_ib_mkey { + u32 key; + enum mlx5_mkey_type type; + unsigned int ndescs; + 
struct wait_queue_head wait; + refcount_t usecount; +}; + #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE) #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\ @@ -637,7 +651,7 @@ struct mlx5_user_mmap_entry { struct mlx5_ib_mr { struct ib_mr ibmr; - struct mlx5_core_mkey mmkey; + struct mlx5_ib_mkey mmkey; /* User MR data */ struct mlx5_cache_ent *cache_ent; @@ -659,7 +673,6 @@ struct mlx5_ib_mr { void *descs_alloc; dma_addr_t desc_map; int max_descs; - int ndescs; int desc_size; int access_mode; @@ -713,13 +726,7 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr) struct mlx5_ib_mw { struct ib_mw ibmw; - struct mlx5_core_mkey mmkey; - int ndescs; -}; - -struct mlx5_ib_devx_mr { - struct mlx5_core_mkey mmkey; - int ndescs; + struct mlx5_ib_mkey mmkey; }; struct mlx5_ib_umr_context { @@ -1579,7 +1586,7 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev, } static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, - struct mlx5_core_mkey *mmkey) + struct mlx5_ib_mkey *mmkey) { refcount_set(&mmkey->usecount, 1); @@ -1588,14 +1595,14 @@ static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, } /* deref an mkey that can participate in ODP flow */ -static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey) +static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey) { if (refcount_dec_and_test(&mmkey->usecount)) wake_up(&mmkey->wait); } /* deref an mkey that can participate in ODP flow and wait for relese */ -static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey) +static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey) { mlx5r_deref_odp_mkey(mmkey); wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 22e2f4d79743d1..d2044df3039465 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -88,9 +88,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, MLX5_SET64(mkc, mkc, start_addr, start_addr); } -static void -assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in) +static void assign_mkey_variant(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in) { u8 key = atomic_inc_return(&dev->mkey_var); void *mkc; @@ -100,17 +99,22 @@ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, mkey->key = key; } -static int -mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen) +static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in, int inlen) { + int ret; + assign_mkey_variant(dev, mkey, in); - return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen); + ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); + if (!ret) + init_waitqueue_head(&mkey->wait); + + return ret; } static int mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev, - struct mlx5_core_mkey *mkey, + struct mlx5_ib_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in, int inlen, u32 *out, int outlen, struct mlx5_async_work *context) @@ -133,7 +137,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); - return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } static void create_mkey_callback(int status, struct mlx5_async_work *context) @@ -260,10 +264,11 @@ static struct 
mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) goto free_in; } - err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); + err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); if (err) goto free_mr; + init_waitqueue_head(&mr->mmkey.wait); mr->mmkey.type = MLX5_MKEY_MR; WRITE_ONCE(ent->dev->cache.last_add, jiffies); spin_lock_irq(&ent->lock); @@ -290,7 +295,7 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); kfree(mr); spin_lock_irq(&ent->lock); } @@ -658,7 +663,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { @@ -911,12 +916,13 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev, } static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, - u64 length, int access_flags) + u64 length, int access_flags, u64 iova) { mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->ibmr.length = length; mr->ibmr.device = &dev->ib_dev; + mr->ibmr.iova = iova; mr->access_flags = access_flags; } @@ -974,11 +980,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, mr->ibmr.pd = pd; mr->umem = umem; - mr->mmkey.iova = iova; - mr->mmkey.size = umem->length; - mr->mmkey.pd = to_mpd(pd)->pdn; mr->page_shift = order_base_2(page_size); - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova); return mr; } @@ -1087,8 +1090,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr, wr->wr.opcode = MLX5_IB_WR_UMR; wr->pd = mr->ibmr.pd; wr->mkey = mr->mmkey.key; - wr->length = mr->mmkey.size; - wr->virt_addr = mr->mmkey.iova; + wr->length = mr->ibmr.length; + wr->virt_addr = mr->ibmr.iova; wr->access_flags = mr->access_flags; wr->page_shift = mr->page_shift; wr->xlt_size = sg->length; @@ -1340,7 +1343,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, } mr->mmkey.type = MLX5_MKEY_MR; mr->umem = umem; - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova); kvfree(in); mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); @@ -1387,7 +1390,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, kfree(in); - set_mr_fields(dev, mr, length, acc); + set_mr_fields(dev, mr, length, acc, start_addr); return &mr->ibmr; @@ -1709,7 +1712,6 @@ static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, return err; mr->access_flags = access_flags; - mr->mmkey.pd = to_mpd(pd)->pdn; return 0; } @@ -1754,7 +1756,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, if (flags & IB_MR_REREG_PD) { mr->ibmr.pd = pd; - mr->mmkey.pd = to_mpd(pd)->pdn; upd_flags |= MLX5_IB_UPD_XLT_PD; } if (flags & IB_MR_REREG_ACCESS) { @@ -1763,8 +1764,8 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, } mr->ibmr.length = new_umem->length; - mr->mmkey.iova = iova; - mr->mmkey.size = new_umem->length; + mr->ibmr.iova = iova; + mr->ibmr.length = new_umem->length; mr->page_shift = order_base_2(page_size); mr->umem = new_umem; err = mlx5_ib_update_mr_pas(mr, upd_flags); @@ -1834,7 +1835,7 @@ struct ib_mr 
*mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, mr->umem = NULL; atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); - return create_real_mr(new_pd, umem, mr->mmkey.iova, + return create_real_mr(new_pd, umem, mr->ibmr.iova, new_access_flags); } @@ -2263,9 +2264,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) struct mlx5_ib_dev *dev = to_mdev(ibmw->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mw *mw = to_mmw(ibmw); + unsigned int ndescs; u32 *in = NULL; void *mkc; - int ndescs; int err; struct mlx5_ib_alloc_mw req = {}; struct { @@ -2310,7 +2311,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) mw->mmkey.type = MLX5_MKEY_MW; ibmw->rkey = mw->mmkey.key; - mw->ndescs = ndescs; + mw->mmkey.ndescs = ndescs; resp.response_length = min(offsetofend(typeof(resp), response_length), udata->outlen); @@ -2330,7 +2331,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) return 0; free_mkey: - mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); free: kfree(in); return err; @@ -2349,7 +2350,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw) */ mlx5r_deref_wait_odp_mkey(&mmw->mmkey); - return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); } int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, @@ -2406,7 +2407,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, mr->meta_length = 0; if (data_sg_nents == 1) { n++; - mr->ndescs = 1; + mr->mmkey.ndescs = 1; if (data_sg_offset) sg_offset = *data_sg_offset; mr->data_length = sg_dma_len(data_sg) - sg_offset; @@ -2459,7 +2460,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, if (sg_offset_p) *sg_offset_p = sg_offset; - mr->ndescs = i; + mr->mmkey.ndescs = i; mr->data_length = mr->ibmr.length; if (meta_sg_nents) { @@ -2492,11 +2493,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); + descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; } @@ -2506,11 +2507,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs + mr->meta_ndescs++] = + descs[mr->mmkey.ndescs + mr->meta_ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; @@ -2526,7 +2527,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->mtt_mr; int n; - pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2560,7 +2561,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, * metadata offset at the first metadata page */ pi_mr->pi_iova = (iova & page_mask) + - pi_mr->ndescs * ibmr->page_size + + pi_mr->mmkey.ndescs * ibmr->page_size + (pi_mr->ibmr.iova & ~page_mask); /* * In order to use one MTT MR for data and metadata, we register @@ -2591,7 +2592,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->klm_mr; int n; - pi_mr->ndescs = 0; + 
pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2626,7 +2627,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); - mr->ndescs = 0; + mr->mmkey.ndescs = 0; mr->data_length = 0; mr->data_iova = 0; mr->meta_ndescs = 0; @@ -2682,7 +2683,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct mlx5_ib_mr *mr = to_mmr(ibmr); int n; - mr->ndescs = 0; + mr->mmkey.ndescs = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, mr->desc_size * mr->max_descs, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index d0d98e584ebcc3..b1e2725f4586fc 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, mr->umem = &odp->umem; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; - mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE; + mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; mr->parent = imr; odp->private = mr; @@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, } imr->ibmr.pd = &pd->ibpd; - imr->mmkey.iova = 0; + imr->ibmr.iova = 0; imr->umem = &umem_odp->umem; imr->ibmr.lkey = imr->mmkey.key; imr->ibmr.rkey = imr->mmkey.key; @@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); - if (unlikely(io_virt < mr->mmkey.iova)) + if (unlikely(io_virt < mr->ibmr.iova)) return -EFAULT; if (mr->umem->is_dmabuf) @@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, if (!odp->is_implicit_odp) { u64 user_va; - if (check_add_overflow(io_virt - mr->mmkey.iova, + if (check_add_overflow(io_virt - mr->ibmr.iova, (u64)odp->umem.address, &user_va)) return -EFAULT; if (unlikely(user_va >= ib_umem_end(odp) || @@ -788,7 +788,7 @@ struct pf_frame { int depth; }; -static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) +static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) { if (!mmkey) return false; @@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) return mmkey->key == key; } -static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey) -{ - struct mlx5_ib_mw *mw; - struct mlx5_ib_devx_mr *devx_mr; - - if (mmkey->type == MLX5_MKEY_MW) { - mw = container_of(mmkey, struct mlx5_ib_mw, mmkey); - return mw->ndescs; - } - - devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr, - mmkey); - return devx_mr->ndescs; -} - /* * Handle a single data segment in a page-fault WQE or RDMA region. 
* @@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, { int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; struct pf_frame *head = NULL, *frame; - struct mlx5_core_mkey *mmkey; + struct mlx5_ib_mkey *mmkey; struct mlx5_ib_mr *mr; struct mlx5_klm *pklm; u32 *out = NULL; size_t offset; - int ndescs; io_virt += *bytes_committed; bcnt -= *bytes_committed; @@ -885,8 +869,6 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, case MLX5_MKEY_MW: case MLX5_MKEY_INDIRECT_DEVX: - ndescs = get_indirect_num_descs(mmkey); - if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { mlx5_ib_dbg(dev, "indirection level exceeded\n"); ret = -EFAULT; @@ -894,7 +876,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, } outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + - sizeof(*pklm) * (ndescs - 2); + sizeof(*pklm) * (mmkey->ndescs - 2); if (outlen > cur_outlen) { kfree(out); @@ -909,14 +891,14 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, bsf0_klm0_pas_mtt0_1); - ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen); + ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); if (ret) goto end; offset = io_virt - MLX5_GET64(query_mkey_out, out, memory_key_mkey_entry.start_addr); - for (i = 0; bcnt && i < ndescs; i++, pklm++) { + for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { if (offset >= be32_to_cpu(pklm->bcount)) { offset -= be32_to_cpu(pklm->bcount); continue; @@ -1559,6 +1541,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_IB_NUM_PF_EQE, }; param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; @@ -1703,8 +1686,8 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 lkey) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr = NULL; + struct mlx5_ib_mkey *mmkey; xa_lock(&dev->odp_mkeys); mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey)); diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index 8841620af82f1d..51e48ca9016e80 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void) static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, struct mlx5_ib_mr *mr, u8 flags, bool atomic) { - int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; + int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; memset(umr, 0, sizeof(*umr)); @@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, struct mlx5_ib_mr *mr, u32 key, int access) { - int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1; + int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1; memset(seg, 0, sizeof(*seg)); @@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg, struct mlx5_ib_mr *mr, struct mlx5_ib_pd *pd) { - int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs); + int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs); dseg->addr = cpu_to_be64(mr->desc_map); dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); @@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); - int mr_list_size = (mr->ndescs + 
mr->meta_ndescs) * mr->desc_size; + int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; u8 flags = 0; @@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev, memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr)); /* No UMR, use local_dma_lkey */ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey; - pa_pi_mr.ndescs = mr->ndescs; + pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs; pa_pi_mr.data_length = mr->data_length; pa_pi_mr.data_iova = mr->data_iova; if (mr->meta_ndescs) { diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 755930be01b8e6..dc203f3d0f2517 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -272,7 +272,7 @@ static int qedr_register_device(struct qedr_dev *dev) static int qedr_alloc_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; int rc; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index dddebea644bb84..8a2febf33ce28f 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -1008,7 +1008,7 @@ static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; static int dvb_net_filter_sec_set(struct net_device *dev, struct dmx_section_filter **secfilter, - u8 *mac, u8 *mac_mask) + const u8 *mac, u8 *mac_mask) { struct dvb_net_priv *priv = netdev_priv(dev); int ret; @@ -1052,7 +1052,7 @@ static int dvb_net_feed_start(struct net_device *dev) int ret = 0, i; struct dvb_net_priv *priv = netdev_priv(dev); struct dmx_demux *demux = priv->demux; - unsigned char *mac = (unsigned char *) dev->dev_addr; + const unsigned char *mac = (const unsigned char *) dev->dev_addr; netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode); mutex_lock(&priv->mutex); @@ -1272,7 +1272,7 @@ static int dvb_net_set_mac (struct net_device *dev, void *p) struct dvb_net_priv *priv = netdev_priv(dev); struct sockaddr *addr=p; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) schedule_work(&priv->restart_net_feed_wq); @@ -1367,7 +1367,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) dvbnet->dvbdev->adapter->num, if_num); net->addr_len = 6; - memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6); + eth_hw_addr_set(net, dvbnet->dvbdev->adapter->proposed_mac); dvbnet->device[if_num] = net; diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 3261cac762def9..acdc257a900eb8 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -1350,7 +1350,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) HWaddr[5] = a[0]; dev->addr_len = FC_ALEN; - memcpy(dev->dev_addr, HWaddr, FC_ALEN); + dev_addr_set(dev, HWaddr); memset(dev->broadcast, 0xff, FC_ALEN); /* The Tx queue is 127 deep on the 909. 
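The dvb_net, mptlan and xpnet hunks around this point all make the same conversion: drivers stop writing into net_device->dev_addr directly and instead build the address in a local buffer and install it with eth_hw_addr_set() (ETH_ALEN addresses) or dev_addr_set() (other lengths), in preparation for dev_addr becoming const. A minimal sketch of the pattern follows; foo_set_hw_addr() and the id layout are illustrative only and not part of any hunk here.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical driver helper: derive a locally administered MAC from a
 * 16-bit id and install it via the const-safe helper.
 */
static void foo_set_hw_addr(struct net_device *dev, u16 id)
{
	u8 addr[ETH_ALEN] = {};

	addr[0] = 0x02;		/* locally administered, no OUI */
	addr[4] = id >> 8;
	addr[5] = id & 0xff;

	/* Was: memcpy(dev->dev_addr, addr, ETH_ALEN);
	 * dev->dev_addr is being made const, so go through the helper.
	 */
	eth_hw_addr_set(dev, addr);
}

dev_addr_set(dev, addr) is the equivalent call when dev->addr_len is not ETH_ALEN, as in the mptlan FC_ALEN hunk above.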
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 2508f83bdc3fa7..dab7b92db790ac 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -514,6 +514,7 @@ static const struct net_device_ops xpnet_netdev_ops = { static int __init xpnet_init(void) { + u8 addr[ETH_ALEN]; int result; if (!is_uv_system()) @@ -545,15 +546,17 @@ xpnet_init(void) xpnet_device->min_mtu = XPNET_MIN_MTU; xpnet_device->max_mtu = XPNET_MAX_MTU; + memset(addr, 0, sizeof(addr)); /* * Multicast assumes the LSB of the first octet is set for multicast * MAC addresses. We chose the first octet of the MAC to be unlikely * to collide with any vendor's officially issued MAC. */ - xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */ - xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id; - xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8); + addr[0] = 0x02; /* locally administered, no OUI */ + addr[XPNET_PARTID_OCTET + 1] = xp_partition_id; + addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8); + eth_hw_addr_set(xpnet_device, addr); /* * ether_setup() sets this to a multicast device. We are diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f37b1c56f7c430..034dbd487c3347 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -150,7 +150,7 @@ config NET_FC config IFB tristate "Intermediate Functional Block support" - depends on NET_CLS_ACT + depends on NET_ACT_MIRRED || NFT_FWD_NETDEV select NET_REDIRECT help This is an intermediate driver that allows sharing of @@ -291,6 +291,22 @@ config GTP To compile this drivers as a module, choose M here: the module will be called gtp. +config AMT + tristate "Automatic Multicast Tunneling (AMT)" + depends on INET && IP_MULTICAST + select NET_UDP_TUNNEL + help + This allows one to create AMT (Automatic Multicast Tunneling) + virtual interfaces that provide multicast tunneling. + There are two roles: Gateway and Relay. + The Gateway encapsulates IGMP/MLD traffic from listeners to the Relay. + The Gateway decapsulates multicast traffic from the Relay to listeners. + The Relay encapsulates multicast traffic from sources to the Gateway. + The Relay decapsulates IGMP/MLD traffic from the Gateway. + + To compile this driver as a module, choose M here: the module + will be called amt. 
+ config MACSEC tristate "IEEE 802.1AE MAC-level encryption (MACsec)" select CRYPTO diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 739838623cf656..50b23e71065f6a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_WIREGUARD) += wireguard/ obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o obj-$(CONFIG_MACSEC) += macsec.o +obj-$(CONFIG_AMT) += amt.o obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_MACVTAP) += macvtap.o obj-$(CONFIG_MII) += mii.o diff --git a/drivers/net/amt.c b/drivers/net/amt.c new file mode 100644 index 00000000000000..60a7053a9cf7e2 --- /dev/null +++ b/drivers/net/amt.c @@ -0,0 +1,3296 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2021 Taehee Yoo */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct workqueue_struct *amt_wq; + +static HLIST_HEAD(source_gc_list); +/* Lock for source_gc_list */ +static spinlock_t source_gc_lock; +static struct delayed_work source_gc_wq; +static char *status_str[] = { + "AMT_STATUS_INIT", + "AMT_STATUS_SENT_DISCOVERY", + "AMT_STATUS_RECEIVED_DISCOVERY", + "AMT_STATUS_SENT_ADVERTISEMENT", + "AMT_STATUS_RECEIVED_ADVERTISEMENT", + "AMT_STATUS_SENT_REQUEST", + "AMT_STATUS_RECEIVED_REQUEST", + "AMT_STATUS_SENT_QUERY", + "AMT_STATUS_RECEIVED_QUERY", + "AMT_STATUS_SENT_UPDATE", + "AMT_STATUS_RECEIVED_UPDATE", +}; + +static char *type_str[] = { + "AMT_MSG_DISCOVERY", + "AMT_MSG_ADVERTISEMENT", + "AMT_MSG_REQUEST", + "AMT_MSG_MEMBERSHIP_QUERY", + "AMT_MSG_MEMBERSHIP_UPDATE", + "AMT_MSG_MULTICAST_DATA", + "AMT_MSG_TEARDOWM", +}; + +static char *action_str[] = { + "AMT_ACT_GMI", + "AMT_ACT_GMI_ZERO", + "AMT_ACT_GT", + "AMT_ACT_STATUS_FWD_NEW", + "AMT_ACT_STATUS_D_FWD_NEW", + "AMT_ACT_STATUS_NONE_NEW", +}; + +static struct igmpv3_grec igmpv3_zero_grec; + +#if IS_ENABLED(CONFIG_IPV6) +#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } } +static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT; +static struct mld2_grec mldv2_zero_grec; +#endif + +static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) > + sizeof_field(struct sk_buff, cb)); + + return (struct amt_skb_cb *)((void *)skb->cb + + sizeof(struct qdisc_skb_cb)); +} + +static void __amt_source_gc_work(void) +{ + struct amt_source_node *snode; + struct hlist_head gc_list; + struct hlist_node *t; + + spin_lock_bh(&source_gc_lock); + hlist_move_list(&source_gc_list, &gc_list); + spin_unlock_bh(&source_gc_lock); + + hlist_for_each_entry_safe(snode, t, &gc_list, node) { + hlist_del_rcu(&snode->node); + kfree_rcu(snode, rcu); + } +} + +static void amt_source_gc_work(struct work_struct *work) +{ + __amt_source_gc_work(); + + spin_lock_bh(&source_gc_lock); + mod_delayed_work(amt_wq, &source_gc_wq, + msecs_to_jiffies(AMT_GC_INTERVAL)); + spin_unlock_bh(&source_gc_lock); +} + +static bool amt_addr_equal(union amt_addr *a, union amt_addr *b) +{ + return !memcmp(a, b, sizeof(union amt_addr)); +} + +static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src) +{ + u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed); + + return reciprocal_scale(hash, tunnel->amt->hash_buckets); +} + +static bool 
amt_status_filter(struct amt_source_node *snode, + enum amt_filter filter) +{ + bool rc = false; + + switch (filter) { + case AMT_FILTER_FWD: + if (snode->status == AMT_SOURCE_STATUS_FWD && + snode->flags == AMT_SOURCE_OLD) + rc = true; + break; + case AMT_FILTER_D_FWD: + if (snode->status == AMT_SOURCE_STATUS_D_FWD && + snode->flags == AMT_SOURCE_OLD) + rc = true; + break; + case AMT_FILTER_FWD_NEW: + if (snode->status == AMT_SOURCE_STATUS_FWD && + snode->flags == AMT_SOURCE_NEW) + rc = true; + break; + case AMT_FILTER_D_FWD_NEW: + if (snode->status == AMT_SOURCE_STATUS_D_FWD && + snode->flags == AMT_SOURCE_NEW) + rc = true; + break; + case AMT_FILTER_ALL: + rc = true; + break; + case AMT_FILTER_NONE_NEW: + if (snode->status == AMT_SOURCE_STATUS_NONE && + snode->flags == AMT_SOURCE_NEW) + rc = true; + break; + case AMT_FILTER_BOTH: + if ((snode->status == AMT_SOURCE_STATUS_D_FWD || + snode->status == AMT_SOURCE_STATUS_FWD) && + snode->flags == AMT_SOURCE_OLD) + rc = true; + break; + case AMT_FILTER_BOTH_NEW: + if ((snode->status == AMT_SOURCE_STATUS_D_FWD || + snode->status == AMT_SOURCE_STATUS_FWD) && + snode->flags == AMT_SOURCE_NEW) + rc = true; + break; + default: + WARN_ON_ONCE(1); + break; + } + + return rc; +} + +static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + enum amt_filter filter, + union amt_addr *src) +{ + u32 hash = amt_source_hash(tunnel, src); + struct amt_source_node *snode; + + hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node) + if (amt_status_filter(snode, filter) && + amt_addr_equal(&snode->source_addr, src)) + return snode; + + return NULL; +} + +static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group) +{ + u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed); + + return reciprocal_scale(hash, tunnel->amt->hash_buckets); +} + +static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel, + union amt_addr *group, + union amt_addr *host, + bool v6) +{ + u32 hash = amt_group_hash(tunnel, group); + struct amt_group_node *gnode; + + hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) { + if (amt_addr_equal(&gnode->group_addr, group) && + amt_addr_equal(&gnode->host_addr, host) && + gnode->v6 == v6) + return gnode; + } + + return NULL; +} + +static void amt_destroy_source(struct amt_source_node *snode) +{ + struct amt_group_node *gnode = snode->gnode; + struct amt_tunnel_list *tunnel; + + tunnel = gnode->tunnel_list; + + if (!gnode->v6) { + netdev_dbg(snode->gnode->amt->dev, + "Delete source %pI4 from %pI4\n", + &snode->source_addr.ip4, + &gnode->group_addr.ip4); +#if IS_ENABLED(CONFIG_IPV6) + } else { + netdev_dbg(snode->gnode->amt->dev, + "Delete source %pI6 from %pI6\n", + &snode->source_addr.ip6, + &gnode->group_addr.ip6); +#endif + } + + cancel_delayed_work(&snode->source_timer); + hlist_del_init_rcu(&snode->node); + tunnel->nr_sources--; + gnode->nr_sources--; + spin_lock_bh(&source_gc_lock); + hlist_add_head_rcu(&snode->node, &source_gc_list); + spin_unlock_bh(&source_gc_lock); +} + +static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode) +{ + struct amt_source_node *snode; + struct hlist_node *t; + int i; + + if (cancel_delayed_work(&gnode->group_timer)) + dev_put(amt->dev); + hlist_del_rcu(&gnode->node); + gnode->tunnel_list->nr_groups--; + + if (!gnode->v6) + netdev_dbg(amt->dev, "Leave group %pI4\n", + &gnode->group_addr.ip4); +#if IS_ENABLED(CONFIG_IPV6) + else + netdev_dbg(amt->dev, "Leave group %pI6\n", + 
&gnode->group_addr.ip6); +#endif + for (i = 0; i < amt->hash_buckets; i++) + hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) + amt_destroy_source(snode); + + /* tunnel->lock was acquired outside of amt_del_group() + * But rcu_read_lock() was acquired too so It's safe. + */ + kfree_rcu(gnode, rcu); +} + +/* If a source timer expires with a router filter-mode for the group of + * INCLUDE, the router concludes that traffic from this particular + * source is no longer desired on the attached network, and deletes the + * associated source record. + */ +static void amt_source_work(struct work_struct *work) +{ + struct amt_source_node *snode = container_of(to_delayed_work(work), + struct amt_source_node, + source_timer); + struct amt_group_node *gnode = snode->gnode; + struct amt_dev *amt = gnode->amt; + struct amt_tunnel_list *tunnel; + + tunnel = gnode->tunnel_list; + spin_lock_bh(&tunnel->lock); + rcu_read_lock(); + if (gnode->filter_mode == MCAST_INCLUDE) { + amt_destroy_source(snode); + if (!gnode->nr_sources) + amt_del_group(amt, gnode); + } else { + /* When a router filter-mode for a group is EXCLUDE, + * source records are only deleted when the group timer expires + */ + snode->status = AMT_SOURCE_STATUS_D_FWD; + } + rcu_read_unlock(); + spin_unlock_bh(&tunnel->lock); +} + +static void amt_act_src(struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + struct amt_source_node *snode, + enum amt_act act) +{ + struct amt_dev *amt = tunnel->amt; + + switch (act) { + case AMT_ACT_GMI: + mod_delayed_work(amt_wq, &snode->source_timer, + msecs_to_jiffies(amt_gmi(amt))); + break; + case AMT_ACT_GMI_ZERO: + cancel_delayed_work(&snode->source_timer); + break; + case AMT_ACT_GT: + mod_delayed_work(amt_wq, &snode->source_timer, + gnode->group_timer.timer.expires); + break; + case AMT_ACT_STATUS_FWD_NEW: + snode->status = AMT_SOURCE_STATUS_FWD; + snode->flags = AMT_SOURCE_NEW; + break; + case AMT_ACT_STATUS_D_FWD_NEW: + snode->status = AMT_SOURCE_STATUS_D_FWD; + snode->flags = AMT_SOURCE_NEW; + break; + case AMT_ACT_STATUS_NONE_NEW: + cancel_delayed_work(&snode->source_timer); + snode->status = AMT_SOURCE_STATUS_NONE; + snode->flags = AMT_SOURCE_NEW; + break; + default: + WARN_ON_ONCE(1); + return; + } + + if (!gnode->v6) + netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n", + &snode->source_addr.ip4, + &gnode->group_addr.ip4, + action_str[act]); +#if IS_ENABLED(CONFIG_IPV6) + else + netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n", + &snode->source_addr.ip6, + &gnode->group_addr.ip6, + action_str[act]); +#endif +} + +static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode, + union amt_addr *src) +{ + struct amt_source_node *snode; + + snode = kzalloc(sizeof(*snode), GFP_ATOMIC); + if (!snode) + return NULL; + + memcpy(&snode->source_addr, src, sizeof(union amt_addr)); + snode->gnode = gnode; + snode->status = AMT_SOURCE_STATUS_NONE; + snode->flags = AMT_SOURCE_NEW; + INIT_HLIST_NODE(&snode->node); + INIT_DELAYED_WORK(&snode->source_timer, amt_source_work); + + return snode; +} + +/* RFC 3810 - 7.2.2. Definition of Filter Timers + * + * Router Mode Filter Timer Actions/Comments + * ----------- ----------------- ---------------- + * + * INCLUDE Not Used All listeners in + * INCLUDE mode. + * + * EXCLUDE Timer > 0 At least one listener + * in EXCLUDE mode. + * + * EXCLUDE Timer == 0 No more listeners in + * EXCLUDE mode for the + * multicast address. + * If the Requested List + * is empty, delete + * Multicast Address + * Record. 
If not, switch + * to INCLUDE filter mode; + * the sources in the + * Requested List are + * moved to the Include + * List, and the Exclude + * List is deleted. + */ +static void amt_group_work(struct work_struct *work) +{ + struct amt_group_node *gnode = container_of(to_delayed_work(work), + struct amt_group_node, + group_timer); + struct amt_tunnel_list *tunnel = gnode->tunnel_list; + struct amt_dev *amt = gnode->amt; + struct amt_source_node *snode; + bool delete_group = true; + struct hlist_node *t; + int i, buckets; + + buckets = amt->hash_buckets; + + spin_lock_bh(&tunnel->lock); + if (gnode->filter_mode == MCAST_INCLUDE) { + /* Not Used */ + spin_unlock_bh(&tunnel->lock); + goto out; + } + + rcu_read_lock(); + for (i = 0; i < buckets; i++) { + hlist_for_each_entry_safe(snode, t, + &gnode->sources[i], node) { + if (!delayed_work_pending(&snode->source_timer) || + snode->status == AMT_SOURCE_STATUS_D_FWD) { + amt_destroy_source(snode); + } else { + delete_group = false; + snode->status = AMT_SOURCE_STATUS_FWD; + } + } + } + if (delete_group) + amt_del_group(amt, gnode); + else + gnode->filter_mode = MCAST_INCLUDE; + rcu_read_unlock(); + spin_unlock_bh(&tunnel->lock); +out: + dev_put(amt->dev); +} + +/* Non-existant group is created as INCLUDE {empty}: + * + * RFC 3376 - 5.1. Action on Change of Interface State + * + * If no interface state existed for that multicast address before + * the change (i.e., the change consisted of creating a new + * per-interface record), or if no state exists after the change + * (i.e., the change consisted of deleting a per-interface record), + * then the "non-existent" state is considered to have a filter mode + * of INCLUDE and an empty source list. + */ +static struct amt_group_node *amt_add_group(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + union amt_addr *group, + union amt_addr *host, + bool v6) +{ + struct amt_group_node *gnode; + u32 hash; + int i; + + if (tunnel->nr_groups >= amt->max_groups) + return ERR_PTR(-ENOSPC); + + gnode = kzalloc(sizeof(*gnode) + + (sizeof(struct hlist_head) * amt->hash_buckets), + GFP_ATOMIC); + if (unlikely(!gnode)) + return ERR_PTR(-ENOMEM); + + gnode->amt = amt; + gnode->group_addr = *group; + gnode->host_addr = *host; + gnode->v6 = v6; + gnode->tunnel_list = tunnel; + gnode->filter_mode = MCAST_INCLUDE; + INIT_HLIST_NODE(&gnode->node); + INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work); + for (i = 0; i < amt->hash_buckets; i++) + INIT_HLIST_HEAD(&gnode->sources[i]); + + hash = amt_group_hash(tunnel, group); + hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]); + tunnel->nr_groups++; + + if (!gnode->v6) + netdev_dbg(amt->dev, "Join group %pI4\n", + &gnode->group_addr.ip4); +#if IS_ENABLED(CONFIG_IPV6) + else + netdev_dbg(amt->dev, "Join group %pI6\n", + &gnode->group_addr.ip6); +#endif + + return gnode; +} + +static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) +{ + u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 }; + int hlen = LL_RESERVED_SPACE(amt->dev); + int tlen = amt->dev->needed_tailroom; + struct igmpv3_query *ihv3; + void *csum_start = NULL; + __sum16 *csum = NULL; + struct sk_buff *skb; + struct ethhdr *eth; + struct iphdr *iph; + unsigned int len; + int offset; + + len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3); + skb = netdev_alloc_skb_ip_align(amt->dev, len); + if (!skb) + return NULL; + + skb_reserve(skb, hlen); + skb_push(skb, sizeof(*eth)); + skb->protocol = htons(ETH_P_IP); + skb_reset_mac_header(skb); + skb->priority = TC_PRIO_CONTROL; + skb_put(skb, 
sizeof(*iph)); + skb_put_data(skb, ra, sizeof(ra)); + skb_put(skb, sizeof(*ihv3)); + skb_pull(skb, sizeof(*eth)); + skb_reset_network_header(skb); + + iph = ip_hdr(skb); + iph->version = 4; + iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2; + iph->tos = AMT_TOS; + iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3)); + iph->frag_off = htons(IP_DF); + iph->ttl = 1; + iph->id = 0; + iph->protocol = IPPROTO_IGMP; + iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); + iph->saddr = htonl(INADDR_ANY); + ip_send_check(iph); + + eth = eth_hdr(skb); + ether_addr_copy(eth->h_source, amt->dev->dev_addr); + ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest); + eth->h_proto = htons(ETH_P_IP); + + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); + skb_reset_transport_header(skb); + ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY; + ihv3->code = 1; + ihv3->group = 0; + ihv3->qqic = amt->qi; + ihv3->nsrcs = 0; + ihv3->resv = 0; + ihv3->suppress = false; + ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv; + ihv3->csum = 0; + csum = &ihv3->csum; + csum_start = (void *)ihv3; + *csum = ip_compute_csum(csum_start, sizeof(*ihv3)); + offset = skb_transport_offset(skb); + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + skb->ip_summed = CHECKSUM_NONE; + + skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS); + + return skb; +} + +static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status, + bool validate) +{ + if (validate && amt->status >= status) + return; + netdev_dbg(amt->dev, "Update GW status %s -> %s", + status_str[amt->status], status_str[status]); + amt->status = status; +} + +static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, + enum amt_status status, + bool validate) +{ + if (validate && tunnel->status >= status) + return; + netdev_dbg(tunnel->amt->dev, + "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s", + &tunnel->ip4, ntohs(tunnel->source_port), + status_str[tunnel->status], status_str[status]); + tunnel->status = status; +} + +static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, + bool validate) +{ + spin_lock_bh(&amt->lock); + __amt_update_gw_status(amt, status, validate); + spin_unlock_bh(&amt->lock); +} + +static void amt_update_relay_status(struct amt_tunnel_list *tunnel, + enum amt_status status, bool validate) +{ + spin_lock_bh(&tunnel->lock); + __amt_update_relay_status(tunnel, status, validate); + spin_unlock_bh(&tunnel->lock); +} + +static void amt_send_discovery(struct amt_dev *amt) +{ + struct amt_header_discovery *amtd; + int hlen, tlen, offset; + struct socket *sock; + struct udphdr *udph; + struct sk_buff *skb; + struct iphdr *iph; + struct rtable *rt; + struct flowi4 fl4; + u32 len; + int err; + + rcu_read_lock(); + sock = rcu_dereference(amt->sock); + if (!sock) + goto out; + + if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) + goto out; + + rt = ip_route_output_ports(amt->net, &fl4, sock->sk, + amt->discovery_ip, amt->local_ip, + amt->gw_port, amt->relay_port, + IPPROTO_UDP, 0, + amt->stream_dev->ifindex); + if (IS_ERR(rt)) { + amt->dev->stats.tx_errors++; + goto out; + } + + hlen = LL_RESERVED_SPACE(amt->dev); + tlen = amt->dev->needed_tailroom; + len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd); + skb = netdev_alloc_skb_ip_align(amt->dev, len); + if (!skb) { + ip_rt_put(rt); + amt->dev->stats.tx_errors++; + goto out; + } + + skb->priority = TC_PRIO_CONTROL; + skb_dst_set(skb, &rt->dst); + + len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd); + 
skb_reset_network_header(skb); + skb_put(skb, len); + amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); + amtd->version = 0; + amtd->type = AMT_MSG_DISCOVERY; + amtd->reserved = 0; + amtd->nonce = amt->nonce; + skb_push(skb, sizeof(*udph)); + skb_reset_transport_header(skb); + udph = udp_hdr(skb); + udph->source = amt->gw_port; + udph->dest = amt->relay_port; + udph->len = htons(sizeof(*udph) + sizeof(*amtd)); + udph->check = 0; + offset = skb_transport_offset(skb); + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip, + sizeof(*udph) + sizeof(*amtd), + IPPROTO_UDP, skb->csum); + + skb_push(skb, sizeof(*iph)); + iph = ip_hdr(skb); + iph->version = 4; + iph->ihl = (sizeof(struct iphdr)) >> 2; + iph->tos = AMT_TOS; + iph->frag_off = 0; + iph->ttl = ip4_dst_hoplimit(&rt->dst); + iph->daddr = amt->discovery_ip; + iph->saddr = amt->local_ip; + iph->protocol = IPPROTO_UDP; + iph->tot_len = htons(len); + + skb->ip_summed = CHECKSUM_NONE; + ip_select_ident(amt->net, skb, NULL); + ip_send_check(iph); + err = ip_local_out(amt->net, sock->sk, skb); + if (unlikely(net_xmit_eval(err))) + amt->dev->stats.tx_errors++; + + spin_lock_bh(&amt->lock); + __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); + spin_unlock_bh(&amt->lock); +out: + rcu_read_unlock(); +} + +static void amt_send_request(struct amt_dev *amt, bool v6) +{ + struct amt_header_request *amtrh; + int hlen, tlen, offset; + struct socket *sock; + struct udphdr *udph; + struct sk_buff *skb; + struct iphdr *iph; + struct rtable *rt; + struct flowi4 fl4; + u32 len; + int err; + + rcu_read_lock(); + sock = rcu_dereference(amt->sock); + if (!sock) + goto out; + + if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) + goto out; + + rt = ip_route_output_ports(amt->net, &fl4, sock->sk, + amt->remote_ip, amt->local_ip, + amt->gw_port, amt->relay_port, + IPPROTO_UDP, 0, + amt->stream_dev->ifindex); + if (IS_ERR(rt)) { + amt->dev->stats.tx_errors++; + goto out; + } + + hlen = LL_RESERVED_SPACE(amt->dev); + tlen = amt->dev->needed_tailroom; + len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh); + skb = netdev_alloc_skb_ip_align(amt->dev, len); + if (!skb) { + ip_rt_put(rt); + amt->dev->stats.tx_errors++; + goto out; + } + + skb->priority = TC_PRIO_CONTROL; + skb_dst_set(skb, &rt->dst); + + len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh); + skb_reset_network_header(skb); + skb_put(skb, len); + amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); + amtrh->version = 0; + amtrh->type = AMT_MSG_REQUEST; + amtrh->reserved1 = 0; + amtrh->p = v6; + amtrh->reserved2 = 0; + amtrh->nonce = amt->nonce; + skb_push(skb, sizeof(*udph)); + skb_reset_transport_header(skb); + udph = udp_hdr(skb); + udph->source = amt->gw_port; + udph->dest = amt->relay_port; + udph->len = htons(sizeof(*amtrh) + sizeof(*udph)); + udph->check = 0; + offset = skb_transport_offset(skb); + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip, + sizeof(*udph) + sizeof(*amtrh), + IPPROTO_UDP, skb->csum); + + skb_push(skb, sizeof(*iph)); + iph = ip_hdr(skb); + iph->version = 4; + iph->ihl = (sizeof(struct iphdr)) >> 2; + iph->tos = AMT_TOS; + iph->frag_off = 0; + iph->ttl = ip4_dst_hoplimit(&rt->dst); + iph->daddr = amt->remote_ip; + iph->saddr = amt->local_ip; + iph->protocol = IPPROTO_UDP; + iph->tot_len = htons(len); + + skb->ip_summed = CHECKSUM_NONE; + ip_select_ident(amt->net, skb, NULL); + 
ip_send_check(iph); + err = ip_local_out(amt->net, sock->sk, skb); + if (unlikely(net_xmit_eval(err))) + amt->dev->stats.tx_errors++; + +out: + rcu_read_unlock(); +} + +static void amt_send_igmp_gq(struct amt_dev *amt, + struct amt_tunnel_list *tunnel) +{ + struct sk_buff *skb; + + skb = amt_build_igmp_gq(amt); + if (!skb) + return; + + amt_skb_cb(skb)->tunnel = tunnel; + dev_queue_xmit(skb); +} + +#if IS_ENABLED(CONFIG_IPV6) +static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt) +{ + u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, + 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 }; + int hlen = LL_RESERVED_SPACE(amt->dev); + int tlen = amt->dev->needed_tailroom; + struct mld2_query *mld2q; + void *csum_start = NULL; + struct ipv6hdr *ip6h; + struct sk_buff *skb; + struct ethhdr *eth; + u32 len; + + len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q); + skb = netdev_alloc_skb_ip_align(amt->dev, len); + if (!skb) + return NULL; + + skb_reserve(skb, hlen); + skb_push(skb, sizeof(*eth)); + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + skb->priority = TC_PRIO_CONTROL; + skb->protocol = htons(ETH_P_IPV6); + skb_put_zero(skb, sizeof(*ip6h)); + skb_put_data(skb, ra, sizeof(ra)); + skb_put_zero(skb, sizeof(*mld2q)); + skb_pull(skb, sizeof(*eth)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q)); + ip6h->nexthdr = NEXTHDR_HOP; + ip6h->hop_limit = 1; + ip6h->daddr = mld2_all_node; + ip6_flow_hdr(ip6h, 0, 0); + + if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0, + &ip6h->saddr)) { + amt->dev->stats.tx_errors++; + kfree_skb(skb); + return NULL; + } + + eth->h_proto = htons(ETH_P_IPV6); + ether_addr_copy(eth->h_source, amt->dev->dev_addr); + ipv6_eth_mc_map(&mld2_all_node, eth->h_dest); + + skb_pull(skb, sizeof(*ip6h) + sizeof(ra)); + skb_reset_transport_header(skb); + mld2q = (struct mld2_query *)icmp6_hdr(skb); + mld2q->mld2q_mrc = htons(1); + mld2q->mld2q_type = ICMPV6_MGM_QUERY; + mld2q->mld2q_code = 0; + mld2q->mld2q_cksum = 0; + mld2q->mld2q_resv1 = 0; + mld2q->mld2q_resv2 = 0; + mld2q->mld2q_suppress = 0; + mld2q->mld2q_qrv = amt->qrv; + mld2q->mld2q_nsrcs = 0; + mld2q->mld2q_qqic = amt->qi; + csum_start = (void *)mld2q; + mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + sizeof(*mld2q), + IPPROTO_ICMPV6, + csum_partial(csum_start, + sizeof(*mld2q), 0)); + + skb->ip_summed = CHECKSUM_NONE; + skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra)); + return skb; +} + +static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) +{ + struct sk_buff *skb; + + skb = amt_build_mld_gq(amt); + if (!skb) + return; + + amt_skb_cb(skb)->tunnel = tunnel; + dev_queue_xmit(skb); +} +#else +static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) +{ +} +#endif + +static void amt_secret_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(to_delayed_work(work), + struct amt_dev, + secret_wq); + + spin_lock_bh(&amt->lock); + get_random_bytes(&amt->key, sizeof(siphash_key_t)); + spin_unlock_bh(&amt->lock); + mod_delayed_work(amt_wq, &amt->secret_wq, + msecs_to_jiffies(AMT_SECRET_TIMEOUT)); +} + +static void amt_discovery_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(to_delayed_work(work), + struct amt_dev, + discovery_wq); + + spin_lock_bh(&amt->lock); + if (amt->status > AMT_STATUS_SENT_DISCOVERY) + goto out; + get_random_bytes(&amt->nonce, sizeof(__be32)); + spin_unlock_bh(&amt->lock); + + 
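+ /* Send the Discovery message outside amt->lock; the work re-arms itself with AMT_DISCOVERY_TIMEOUT below. */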
amt_send_discovery(amt); + spin_lock_bh(&amt->lock); +out: + mod_delayed_work(amt_wq, &amt->discovery_wq, + msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); + spin_unlock_bh(&amt->lock); +} + +static void amt_req_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(to_delayed_work(work), + struct amt_dev, + req_wq); + u32 exp; + + spin_lock_bh(&amt->lock); + if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) + goto out; + + if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) { + netdev_dbg(amt->dev, "Gateway is not ready"); + amt->qi = AMT_INIT_REQ_TIMEOUT; + amt->ready4 = false; + amt->ready6 = false; + amt->remote_ip = 0; + __amt_update_gw_status(amt, AMT_STATUS_INIT, false); + amt->req_cnt = 0; + } + spin_unlock_bh(&amt->lock); + + amt_send_request(amt, false); + amt_send_request(amt, true); + amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); + spin_lock_bh(&amt->lock); +out: + exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); + mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); + spin_unlock_bh(&amt->lock); +} + +static bool amt_send_membership_update(struct amt_dev *amt, + struct sk_buff *skb, + bool v6) +{ + struct amt_header_membership_update *amtmu; + struct socket *sock; + struct iphdr *iph; + struct flowi4 fl4; + struct rtable *rt; + int err; + + sock = rcu_dereference_bh(amt->sock); + if (!sock) + return true; + + err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) + + sizeof(*iph) + sizeof(struct udphdr)); + if (err) + return true; + + skb_reset_inner_headers(skb); + memset(&fl4, 0, sizeof(struct flowi4)); + fl4.flowi4_oif = amt->stream_dev->ifindex; + fl4.daddr = amt->remote_ip; + fl4.saddr = amt->local_ip; + fl4.flowi4_tos = AMT_TOS; + fl4.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(amt->net, &fl4); + if (IS_ERR(rt)) { + netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip); + return true; + } + + amtmu = skb_push(skb, sizeof(*amtmu)); + amtmu->version = 0; + amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE; + amtmu->reserved = 0; + amtmu->nonce = amt->nonce; + amtmu->response_mac = amt->mac; + + if (!v6) + skb_set_inner_protocol(skb, htons(ETH_P_IP)); + else + skb_set_inner_protocol(skb, htons(ETH_P_IPV6)); + udp_tunnel_xmit_skb(rt, sock->sk, skb, + fl4.saddr, + fl4.daddr, + AMT_TOS, + ip4_dst_hoplimit(&rt->dst), + 0, + amt->gw_port, + amt->relay_port, + false, + false); + amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true); + return false; +} + +static void amt_send_multicast_data(struct amt_dev *amt, + const struct sk_buff *oskb, + struct amt_tunnel_list *tunnel, + bool v6) +{ + struct amt_header_mcast_data *amtmd; + struct socket *sock; + struct sk_buff *skb; + struct iphdr *iph; + struct flowi4 fl4; + struct rtable *rt; + + sock = rcu_dereference_bh(amt->sock); + if (!sock) + return; + + skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) + + sizeof(struct udphdr), 0, GFP_ATOMIC); + if (!skb) + return; + + skb_reset_inner_headers(skb); + memset(&fl4, 0, sizeof(struct flowi4)); + fl4.flowi4_oif = amt->stream_dev->ifindex; + fl4.daddr = tunnel->ip4; + fl4.saddr = amt->local_ip; + fl4.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(amt->net, &fl4); + if (IS_ERR(rt)) { + netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4); + kfree_skb(skb); + return; + } + + amtmd = skb_push(skb, sizeof(*amtmd)); + amtmd->version = 0; + amtmd->reserved = 0; + amtmd->type = AMT_MSG_MULTICAST_DATA; + + if (!v6) + skb_set_inner_protocol(skb, htons(ETH_P_IP)); + else + skb_set_inner_protocol(skb, 
htons(ETH_P_IPV6)); + udp_tunnel_xmit_skb(rt, sock->sk, skb, + fl4.saddr, + fl4.daddr, + AMT_TOS, + ip4_dst_hoplimit(&rt->dst), + 0, + amt->relay_port, + tunnel->source_port, + false, + false); +} + +static bool amt_send_membership_query(struct amt_dev *amt, + struct sk_buff *skb, + struct amt_tunnel_list *tunnel, + bool v6) +{ + struct amt_header_membership_query *amtmq; + struct socket *sock; + struct rtable *rt; + struct flowi4 fl4; + int err; + + sock = rcu_dereference_bh(amt->sock); + if (!sock) + return true; + + err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) + + sizeof(struct iphdr) + sizeof(struct udphdr)); + if (err) + return true; + + skb_reset_inner_headers(skb); + memset(&fl4, 0, sizeof(struct flowi4)); + fl4.flowi4_oif = amt->stream_dev->ifindex; + fl4.daddr = tunnel->ip4; + fl4.saddr = amt->local_ip; + fl4.flowi4_tos = AMT_TOS; + fl4.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(amt->net, &fl4); + if (IS_ERR(rt)) { + netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4); + return -1; + } + + amtmq = skb_push(skb, sizeof(*amtmq)); + amtmq->version = 0; + amtmq->type = AMT_MSG_MEMBERSHIP_QUERY; + amtmq->reserved = 0; + amtmq->l = 0; + amtmq->g = 0; + amtmq->nonce = tunnel->nonce; + amtmq->response_mac = tunnel->mac; + + if (!v6) + skb_set_inner_protocol(skb, htons(ETH_P_IP)); + else + skb_set_inner_protocol(skb, htons(ETH_P_IPV6)); + udp_tunnel_xmit_skb(rt, sock->sk, skb, + fl4.saddr, + fl4.daddr, + AMT_TOS, + ip4_dst_hoplimit(&rt->dst), + 0, + amt->relay_port, + tunnel->source_port, + false, + false); + amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true); + return false; +} + +static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct amt_dev *amt = netdev_priv(dev); + struct amt_tunnel_list *tunnel; + struct amt_group_node *gnode; + union amt_addr group = {0,}; +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6hdr *ip6h; + struct mld_msg *mld; +#endif + bool report = false; + struct igmphdr *ih; + bool query = false; + struct iphdr *iph; + bool data = false; + bool v6 = false; + u32 hash; + + iph = ip_hdr(skb); + if (iph->version == 4) { + if (!ipv4_is_multicast(iph->daddr)) + goto free; + + if (!ip_mc_check_igmp(skb)) { + ih = igmp_hdr(skb); + switch (ih->type) { + case IGMPV3_HOST_MEMBERSHIP_REPORT: + case IGMP_HOST_MEMBERSHIP_REPORT: + report = true; + break; + case IGMP_HOST_MEMBERSHIP_QUERY: + query = true; + break; + default: + goto free; + } + } else { + data = true; + } + v6 = false; + group.ip4 = iph->daddr; +#if IS_ENABLED(CONFIG_IPV6) + } else if (iph->version == 6) { + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + goto free; + + if (!ipv6_mc_check_mld(skb)) { + mld = (struct mld_msg *)skb_transport_header(skb); + switch (mld->mld_type) { + case ICMPV6_MGM_REPORT: + case ICMPV6_MLD2_REPORT: + report = true; + break; + case ICMPV6_MGM_QUERY: + query = true; + break; + default: + goto free; + } + } else { + data = true; + } + v6 = true; + group.ip6 = ip6h->daddr; +#endif + } else { + dev->stats.tx_errors++; + goto free; + } + + if (!pskb_may_pull(skb, sizeof(struct ethhdr))) + goto free; + + skb_pull(skb, sizeof(struct ethhdr)); + + if (amt->mode == AMT_MODE_GATEWAY) { + /* Gateway only passes IGMP/MLD packets */ + if (!report) + goto free; + if ((!v6 && !amt->ready4) || (v6 && !amt->ready6)) + goto free; + if (amt_send_membership_update(amt, skb, v6)) + goto free; + goto unlock; + } else if (amt->mode == AMT_MODE_RELAY) { + if (query) { + tunnel = amt_skb_cb(skb)->tunnel; + if 
(!tunnel) { + WARN_ON(1); + goto free; + } + + /* Do not forward unexpected query */ + if (amt_send_membership_query(amt, skb, tunnel, v6)) + goto free; + goto unlock; + } + + if (!data) + goto free; + list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { + hash = amt_group_hash(tunnel, &group); + hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], + node) { + if (!v6) { + if (gnode->group_addr.ip4 == iph->daddr) + goto found; +#if IS_ENABLED(CONFIG_IPV6) + } else { + if (ipv6_addr_equal(&gnode->group_addr.ip6, + &ip6h->daddr)) + goto found; +#endif + } + } + continue; +found: + amt_send_multicast_data(amt, skb, tunnel, v6); + } + } + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +free: + dev_kfree_skb(skb); +unlock: + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static int amt_parse_type(struct sk_buff *skb) +{ + struct amt_header *amth; + + if (!pskb_may_pull(skb, sizeof(struct udphdr) + + sizeof(struct amt_header))) + return -1; + + amth = (struct amt_header *)(udp_hdr(skb) + 1); + + if (amth->version != 0) + return -1; + + if (amth->type >= __AMT_MSG_MAX || !amth->type) + return -1; + return amth->type; +} + +static void amt_clear_groups(struct amt_tunnel_list *tunnel) +{ + struct amt_dev *amt = tunnel->amt; + struct amt_group_node *gnode; + struct hlist_node *t; + int i; + + spin_lock_bh(&tunnel->lock); + rcu_read_lock(); + for (i = 0; i < amt->hash_buckets; i++) + hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node) + amt_del_group(amt, gnode); + rcu_read_unlock(); + spin_unlock_bh(&tunnel->lock); +} + +static void amt_tunnel_expire(struct work_struct *work) +{ + struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work), + struct amt_tunnel_list, + gc_wq); + struct amt_dev *amt = tunnel->amt; + + spin_lock_bh(&amt->lock); + rcu_read_lock(); + list_del_rcu(&tunnel->list); + amt->nr_tunnels--; + amt_clear_groups(tunnel); + rcu_read_unlock(); + spin_unlock_bh(&amt->lock); + kfree_rcu(tunnel, rcu); +} + +static void amt_cleanup_srcs(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode) +{ + struct amt_source_node *snode; + struct hlist_node *t; + int i; + + /* Delete old sources */ + for (i = 0; i < amt->hash_buckets; i++) { + hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) { + if (snode->flags == AMT_SOURCE_OLD) + amt_destroy_source(snode); + } + } + + /* switch from new to old */ + for (i = 0; i < amt->hash_buckets; i++) { + hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) { + snode->flags = AMT_SOURCE_OLD; + if (!gnode->v6) + netdev_dbg(snode->gnode->amt->dev, + "Add source as OLD %pI4 from %pI4\n", + &snode->source_addr.ip4, + &gnode->group_addr.ip4); +#if IS_ENABLED(CONFIG_IPV6) + else + netdev_dbg(snode->gnode->amt->dev, + "Add source as OLD %pI6 from %pI6\n", + &snode->source_addr.ip6, + &gnode->group_addr.ip6); +#endif + } + } +} + +static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, void *grec, + bool v6) +{ + struct igmpv3_grec *igmp_grec; + struct amt_source_node *snode; +#if IS_ENABLED(CONFIG_IPV6) + struct mld2_grec *mld_grec; +#endif + union amt_addr src = {0,}; + u16 nsrcs; + u32 hash; + int i; + + if (!v6) { + igmp_grec = (struct igmpv3_grec *)grec; + nsrcs = ntohs(igmp_grec->grec_nsrcs); + } else { +#if IS_ENABLED(CONFIG_IPV6) + mld_grec = (struct mld2_grec *)grec; + nsrcs = ntohs(mld_grec->grec_nsrcs); +#else + return; +#endif + } + for (i = 0; i < nsrcs; i++) { + if (tunnel->nr_sources >= amt->max_sources) + return; + if 
(!v6) + src.ip4 = igmp_grec->grec_src[i]; +#if IS_ENABLED(CONFIG_IPV6) + else + memcpy(&src.ip6, &mld_grec->grec_src[i], + sizeof(struct in6_addr)); +#endif + if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src)) + continue; + + snode = amt_alloc_snode(gnode, &src); + if (snode) { + hash = amt_source_hash(tunnel, &snode->source_addr); + hlist_add_head_rcu(&snode->node, &gnode->sources[hash]); + tunnel->nr_sources++; + gnode->nr_sources++; + + if (!gnode->v6) + netdev_dbg(snode->gnode->amt->dev, + "Add source as NEW %pI4 from %pI4\n", + &snode->source_addr.ip4, + &gnode->group_addr.ip4); +#if IS_ENABLED(CONFIG_IPV6) + else + netdev_dbg(snode->gnode->amt->dev, + "Add source as NEW %pI6 from %pI6\n", + &snode->source_addr.ip6, + &gnode->group_addr.ip6); +#endif + } + } +} + +/* Router State Report Rec'd New Router State + * ------------ ------------ ---------------- + * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) + * + * -----------+-----------+-----------+ + * | OLD | NEW | + * -----------+-----------+-----------+ + * FWD | X | X+A | + * -----------+-----------+-----------+ + * D_FWD | Y | Y-A | + * -----------+-----------+-----------+ + * NONE | | A | + * -----------+-----------+-----------+ + * + * a) Received sources are NONE/NEW + * b) All NONE will be deleted by amt_cleanup_srcs(). + * c) All OLD will be deleted by amt_cleanup_srcs(). + * d) After delete, NEW source will be switched to OLD. + */ +static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, + enum amt_ops ops, + enum amt_filter filter, + enum amt_act act, + bool v6) +{ + struct amt_dev *amt = tunnel->amt; + struct amt_source_node *snode; + struct igmpv3_grec *igmp_grec; +#if IS_ENABLED(CONFIG_IPV6) + struct mld2_grec *mld_grec; +#endif + union amt_addr src = {0,}; + struct hlist_node *t; + u16 nsrcs; + int i, j; + + if (!v6) { + igmp_grec = (struct igmpv3_grec *)grec; + nsrcs = ntohs(igmp_grec->grec_nsrcs); + } else { +#if IS_ENABLED(CONFIG_IPV6) + mld_grec = (struct mld2_grec *)grec; + nsrcs = ntohs(mld_grec->grec_nsrcs); +#else + return; +#endif + } + + memset(&src, 0, sizeof(union amt_addr)); + switch (ops) { + case AMT_OPS_INT: + /* A*B */ + for (i = 0; i < nsrcs; i++) { + if (!v6) + src.ip4 = igmp_grec->grec_src[i]; +#if IS_ENABLED(CONFIG_IPV6) + else + memcpy(&src.ip6, &mld_grec->grec_src[i], + sizeof(struct in6_addr)); +#endif + snode = amt_lookup_src(tunnel, gnode, filter, &src); + if (!snode) + continue; + amt_act_src(tunnel, gnode, snode, act); + } + break; + case AMT_OPS_UNI: + /* A+B */ + for (i = 0; i < amt->hash_buckets; i++) { + hlist_for_each_entry_safe(snode, t, &gnode->sources[i], + node) { + if (amt_status_filter(snode, filter)) + amt_act_src(tunnel, gnode, snode, act); + } + } + for (i = 0; i < nsrcs; i++) { + if (!v6) + src.ip4 = igmp_grec->grec_src[i]; +#if IS_ENABLED(CONFIG_IPV6) + else + memcpy(&src.ip6, &mld_grec->grec_src[i], + sizeof(struct in6_addr)); +#endif + snode = amt_lookup_src(tunnel, gnode, filter, &src); + if (!snode) + continue; + amt_act_src(tunnel, gnode, snode, act); + } + break; + case AMT_OPS_SUB: + /* A-B */ + for (i = 0; i < amt->hash_buckets; i++) { + hlist_for_each_entry_safe(snode, t, &gnode->sources[i], + node) { + if (!amt_status_filter(snode, filter)) + continue; + for (j = 0; j < nsrcs; j++) { + if (!v6) + src.ip4 = igmp_grec->grec_src[j]; +#if IS_ENABLED(CONFIG_IPV6) + else + memcpy(&src.ip6, + &mld_grec->grec_src[j], + sizeof(struct in6_addr)); +#endif + if (amt_addr_equal(&snode->source_addr, + &src)) + goto out_sub; 
+ } + amt_act_src(tunnel, gnode, snode, act); + continue; +out_sub:; + } + } + break; + case AMT_OPS_SUB_REV: + /* B-A */ + for (i = 0; i < nsrcs; i++) { + if (!v6) + src.ip4 = igmp_grec->grec_src[i]; +#if IS_ENABLED(CONFIG_IPV6) + else + memcpy(&src.ip6, &mld_grec->grec_src[i], + sizeof(struct in6_addr)); +#endif + snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, + &src); + if (!snode) { + snode = amt_lookup_src(tunnel, gnode, + filter, &src); + if (snode) + amt_act_src(tunnel, gnode, snode, act); + } + } + break; + default: + netdev_dbg(amt->dev, "Invalid type\n"); + return; + } +} + +static void amt_mcast_is_in_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI + */ + /* Update IS_IN (B) as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_NONE_NEW, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* Update INCLUDE (A) as NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* (B)=GMI */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD_NEW, + AMT_ACT_GMI, + v6); + } else { +/* State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI + */ + /* Update (A) in (X, Y) as NONE/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_BOTH, + AMT_ACT_STATUS_NONE_NEW, + v6); + /* Update FWD/OLD as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* Update IS_IN (A) as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_NONE_NEW, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* Update EXCLUDE (, Y-A) as D_FWD_NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + } +} + +static void amt_mcast_is_ex_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 + * Delete (A-B) + * Group Timer=GMI + */ + /* EXCLUDE(A*B, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE(, B-A) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* (B-A)=0 */ + amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, + AMT_FILTER_D_FWD_NEW, + AMT_ACT_GMI_ZERO, + v6); + /* Group Timer=GMI */ + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + gnode->filter_mode = MCAST_EXCLUDE; + /* Delete (A-B) will be worked by amt_cleanup_srcs(). 
*/ + } else { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI + * Delete (X-A) + * Delete (Y-A) + * Group Timer=GMI + */ + /* EXCLUDE (A-Y, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, Y*A ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* (A-X-Y)=GMI */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_BOTH_NEW, + AMT_ACT_GMI, + v6); + /* Group Timer=GMI */ + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */ + } +} + +static void amt_mcast_to_in_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI + * Send Q(G,A-B) + */ + /* Update TO_IN (B) sources as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_NONE_NEW, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* Update INCLUDE (A) sources as NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* (B)=GMI */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD_NEW, + AMT_ACT_GMI, + v6); + } else { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI + * Send Q(G,X-A) + * Send Q(G) + */ + /* Update TO_IN (A) sources as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_NONE_NEW, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* Update EXCLUDE(X,) sources as FWD/NEW */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, Y-A) + * (A) are already switched to FWD_NEW. + * So, D_FWD/OLD -> D_FWD/NEW is okay. + */ + amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* (A)=GMI + * Only FWD_NEW will have (A) sources. 
+ */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD_NEW, + AMT_ACT_GMI, + v6); + } +} + +static void amt_mcast_to_ex_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0 + * Delete (A-B) + * Send Q(G,A*B) + * Group Timer=GMI + */ + /* EXCLUDE (A*B, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, B-A) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* (B-A)=0 */ + amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, + AMT_FILTER_D_FWD_NEW, + AMT_ACT_GMI_ZERO, + v6); + /* Group Timer=GMI */ + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + gnode->filter_mode = MCAST_EXCLUDE; + /* Delete (A-B) will be worked by amt_cleanup_srcs(). */ + } else { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer + * Delete (X-A) + * Delete (Y-A) + * Send Q(G,A-Y) + * Group Timer=GMI + */ + /* Update (A-X-Y) as NONE/OLD */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_BOTH, + AMT_ACT_GT, + v6); + /* EXCLUDE (A-Y, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, Y*A) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* Group Timer=GMI */ + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */ + } +} + +static void amt_mcast_allow_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI + */ + /* INCLUDE (A+B) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* (B)=GMI */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD_NEW, + AMT_ACT_GMI, + v6); + } else { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI + */ + /* EXCLUDE (X+A, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, Y-A) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + /* (A)=GMI + * All (A) source are now FWD/NEW status. 
+ */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT, + AMT_FILTER_FWD_NEW, + AMT_ACT_GMI, + v6); + } +} + +static void amt_mcast_block_handler(struct amt_dev *amt, + struct amt_tunnel_list *tunnel, + struct amt_group_node *gnode, + void *grec, void *zero_grec, bool v6) +{ + if (gnode->filter_mode == MCAST_INCLUDE) { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B) + */ + /* INCLUDE (A) */ + amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + } else { +/* Router State Report Rec'd New Router State Actions + * ------------ ------------ ---------------- ------- + * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer + * Send Q(G,A-Y) + */ + /* (A-X-Y)=Group Timer */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_BOTH, + AMT_ACT_GT, + v6); + /* EXCLUDE (X, ) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (X+(A-Y) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_FWD_NEW, + v6); + /* EXCLUDE (, Y) */ + amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI, + AMT_FILTER_D_FWD, + AMT_ACT_STATUS_D_FWD_NEW, + v6); + } +} + +/* RFC 3376 + * 7.3.2. In the Presence of Older Version Group Members + * + * When Group Compatibility Mode is IGMPv2, a router internally + * translates the following IGMPv2 messages for that group to their + * IGMPv3 equivalents: + * + * IGMPv2 Message IGMPv3 Equivalent + * -------------- ----------------- + * Report IS_EX( {} ) + * Leave TO_IN( {} ) + */ +static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct igmphdr *ih = igmp_hdr(skb); + struct iphdr *iph = ip_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + + memset(&group, 0, sizeof(union amt_addr)); + group.ip4 = ih->group; + memset(&host, 0, sizeof(union amt_addr)); + host.ip4 = iph->saddr; + + gnode = amt_lookup_group(tunnel, &group, &host, false); + if (!gnode) { + gnode = amt_add_group(amt, tunnel, &group, &host, false); + if (!IS_ERR(gnode)) { + gnode->filter_mode = MCAST_EXCLUDE; + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + } + } +} + +/* RFC 3376 + * 7.3.2. 
In the Presence of Older Version Group Members + * + * When Group Compatibility Mode is IGMPv2, a router internally + * translates the following IGMPv2 messages for that group to their + * IGMPv3 equivalents: + * + * IGMPv2 Message IGMPv3 Equivalent + * -------------- ----------------- + * Report IS_EX( {} ) + * Leave TO_IN( {} ) + */ +static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct igmphdr *ih = igmp_hdr(skb); + struct iphdr *iph = ip_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + + memset(&group, 0, sizeof(union amt_addr)); + group.ip4 = ih->group; + memset(&host, 0, sizeof(union amt_addr)); + host.ip4 = iph->saddr; + + gnode = amt_lookup_group(tunnel, &group, &host, false); + if (gnode) + amt_del_group(amt, gnode); +} + +static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb); + int len = skb_transport_offset(skb) + sizeof(*ihrv3); + void *zero_grec = (void *)&igmpv3_zero_grec; + struct iphdr *iph = ip_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + struct igmpv3_grec *grec; + u16 nsrcs; + int i; + + for (i = 0; i < ntohs(ihrv3->ngrec); i++) { + len += sizeof(*grec); + if (!ip_mc_may_pull(skb, len)) + break; + + grec = (void *)(skb->data + len - sizeof(*grec)); + nsrcs = ntohs(grec->grec_nsrcs); + + len += nsrcs * sizeof(__be32); + if (!ip_mc_may_pull(skb, len)) + break; + + memset(&group, 0, sizeof(union amt_addr)); + group.ip4 = grec->grec_mca; + memset(&host, 0, sizeof(union amt_addr)); + host.ip4 = iph->saddr; + gnode = amt_lookup_group(tunnel, &group, &host, false); + if (!gnode) { + gnode = amt_add_group(amt, tunnel, &group, &host, + false); + if (IS_ERR(gnode)) + continue; + } + + amt_add_srcs(amt, tunnel, gnode, grec, false); + switch (grec->grec_type) { + case IGMPV3_MODE_IS_INCLUDE: + amt_mcast_is_in_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + case IGMPV3_MODE_IS_EXCLUDE: + amt_mcast_is_ex_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + case IGMPV3_CHANGE_TO_INCLUDE: + amt_mcast_to_in_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + case IGMPV3_CHANGE_TO_EXCLUDE: + amt_mcast_to_ex_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + case IGMPV3_ALLOW_NEW_SOURCES: + amt_mcast_allow_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + case IGMPV3_BLOCK_OLD_SOURCES: + amt_mcast_block_handler(amt, tunnel, gnode, grec, + zero_grec, false); + break; + default: + break; + } + amt_cleanup_srcs(amt, tunnel, gnode); + } +} + +/* caller held tunnel->lock */ +static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct igmphdr *ih = igmp_hdr(skb); + + switch (ih->type) { + case IGMPV3_HOST_MEMBERSHIP_REPORT: + amt_igmpv3_report_handler(amt, skb, tunnel); + break; + case IGMPV2_HOST_MEMBERSHIP_REPORT: + amt_igmpv2_report_handler(amt, skb, tunnel); + break; + case IGMP_HOST_LEAVE_MESSAGE: + amt_igmpv2_leave_handler(amt, skb, tunnel); + break; + default: + break; + } +} + +#if IS_ENABLED(CONFIG_IPV6) +/* RFC 3810 + * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners + * + * When Multicast Address Compatibility Mode is MLDv2, a router acts + * using the MLDv2 protocol for that multicast address. 
When Multicast + * Address Compatibility Mode is MLDv1, a router internally translates + * the following MLDv1 messages for that multicast address to their + * MLDv2 equivalents: + * + * MLDv1 Message MLDv2 Equivalent + * -------------- ----------------- + * Report IS_EX( {} ) + * Done TO_IN( {} ) + */ +static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + + memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr)); + memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr)); + + gnode = amt_lookup_group(tunnel, &group, &host, true); + if (!gnode) { + gnode = amt_add_group(amt, tunnel, &group, &host, true); + if (!IS_ERR(gnode)) { + gnode->filter_mode = MCAST_EXCLUDE; + if (!mod_delayed_work(amt_wq, &gnode->group_timer, + msecs_to_jiffies(amt_gmi(amt)))) + dev_hold(amt->dev); + } + } +} + +/* RFC 3810 + * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners + * + * When Multicast Address Compatibility Mode is MLDv2, a router acts + * using the MLDv2 protocol for that multicast address. When Multicast + * Address Compatibility Mode is MLDv1, a router internally translates + * the following MLDv1 messages for that multicast address to their + * MLDv2 equivalents: + * + * MLDv1 Message MLDv2 Equivalent + * -------------- ----------------- + * Report IS_EX( {} ) + * Done TO_IN( {} ) + */ +static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); + struct iphdr *iph = ip_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + + memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr)); + memset(&host, 0, sizeof(union amt_addr)); + host.ip4 = iph->saddr; + + gnode = amt_lookup_group(tunnel, &group, &host, true); + if (gnode) { + amt_del_group(amt, gnode); + return; + } +} + +static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb); + int len = skb_transport_offset(skb) + sizeof(*mld2r); + void *zero_grec = (void *)&mldv2_zero_grec; + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct amt_group_node *gnode; + union amt_addr group, host; + struct mld2_grec *grec; + u16 nsrcs; + int i; + + for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) { + len += sizeof(*grec); + if (!ipv6_mc_may_pull(skb, len)) + break; + + grec = (void *)(skb->data + len - sizeof(*grec)); + nsrcs = ntohs(grec->grec_nsrcs); + + len += nsrcs * sizeof(struct in6_addr); + if (!ipv6_mc_may_pull(skb, len)) + break; + + memset(&group, 0, sizeof(union amt_addr)); + group.ip6 = grec->grec_mca; + memset(&host, 0, sizeof(union amt_addr)); + host.ip6 = ip6h->saddr; + gnode = amt_lookup_group(tunnel, &group, &host, true); + if (!gnode) { + gnode = amt_add_group(amt, tunnel, &group, &host, + ETH_P_IPV6); + if (IS_ERR(gnode)) + continue; + } + + amt_add_srcs(amt, tunnel, gnode, grec, true); + switch (grec->grec_type) { + case MLD2_MODE_IS_INCLUDE: + amt_mcast_is_in_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + case MLD2_MODE_IS_EXCLUDE: + amt_mcast_is_ex_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + case MLD2_CHANGE_TO_INCLUDE: + amt_mcast_to_in_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + case 
MLD2_CHANGE_TO_EXCLUDE: + amt_mcast_to_ex_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + case MLD2_ALLOW_NEW_SOURCES: + amt_mcast_allow_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + case MLD2_BLOCK_OLD_SOURCES: + amt_mcast_block_handler(amt, tunnel, gnode, grec, + zero_grec, true); + break; + default: + break; + } + amt_cleanup_srcs(amt, tunnel, gnode); + } +} + +/* caller held tunnel->lock */ +static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb, + struct amt_tunnel_list *tunnel) +{ + struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb); + + switch (mld->mld_type) { + case ICMPV6_MGM_REPORT: + amt_mldv1_report_handler(amt, skb, tunnel); + break; + case ICMPV6_MLD2_REPORT: + amt_mldv2_report_handler(amt, skb, tunnel); + break; + case ICMPV6_MGM_REDUCTION: + amt_mldv1_leave_handler(amt, skb, tunnel); + break; + default: + break; + } +} +#endif + +static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) +{ + struct amt_header_advertisement *amta; + int hdr_size; + + hdr_size = sizeof(*amta) - sizeof(struct amt_header); + + if (!pskb_may_pull(skb, hdr_size)) + return true; + + amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1); + if (!amta->ip4) + return true; + + if (amta->reserved || amta->version) + return true; + + if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) || + ipv4_is_zeronet(amta->ip4)) + return true; + + amt->remote_ip = amta->ip4; + netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip); + mod_delayed_work(amt_wq, &amt->req_wq, 0); + + amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true); + return false; +} + +static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) +{ + struct amt_header_mcast_data *amtmd; + int hdr_size, len, err; + struct ethhdr *eth; + struct iphdr *iph; + + amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); + if (amtmd->reserved || amtmd->version) + return true; + + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); + if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) + return true; + skb_reset_network_header(skb); + skb_push(skb, sizeof(*eth)); + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(*eth)); + eth = eth_hdr(skb); + iph = ip_hdr(skb); + if (iph->version == 4) { + if (!ipv4_is_multicast(iph->daddr)) + return true; + skb->protocol = htons(ETH_P_IP); + eth->h_proto = htons(ETH_P_IP); + ip_eth_mc_map(iph->daddr, eth->h_dest); +#if IS_ENABLED(CONFIG_IPV6) + } else if (iph->version == 6) { + struct ipv6hdr *ip6h; + + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + skb->protocol = htons(ETH_P_IPV6); + eth->h_proto = htons(ETH_P_IPV6); + ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); +#endif + } else { + return true; + } + + skb->pkt_type = PACKET_MULTICAST; + skb->ip_summed = CHECKSUM_NONE; + len = skb->len; + err = gro_cells_receive(&amt->gro_cells, skb); + if (likely(err == NET_RX_SUCCESS)) + dev_sw_netstats_rx_add(amt->dev, len); + else + amt->dev->stats.rx_dropped++; + + return false; +} + +static bool amt_membership_query_handler(struct amt_dev *amt, + struct sk_buff *skb) +{ + struct amt_header_membership_query *amtmq; + struct igmpv3_query *ihv3; + struct ethhdr *eth, *oeth; + struct iphdr *iph; + int hdr_size, len; + + hdr_size = sizeof(*amtmq) - sizeof(struct amt_header); + + if (!pskb_may_pull(skb, hdr_size)) + return true; + + amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1); + if (amtmq->reserved || 
amtmq->version) + return true; + + hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth); + if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) + return true; + oeth = eth_hdr(skb); + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(*eth)); + skb_reset_network_header(skb); + eth = eth_hdr(skb); + iph = ip_hdr(skb); + if (iph->version == 4) { + if (!ipv4_is_multicast(iph->daddr)) + return true; + if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + + sizeof(*ihv3))) + return true; + + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); + skb_reset_transport_header(skb); + skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); + spin_lock_bh(&amt->lock); + amt->ready4 = true; + amt->mac = amtmq->response_mac; + amt->req_cnt = 0; + amt->qi = ihv3->qqic; + spin_unlock_bh(&amt->lock); + skb->protocol = htons(ETH_P_IP); + eth->h_proto = htons(ETH_P_IP); + ip_eth_mc_map(iph->daddr, eth->h_dest); +#if IS_ENABLED(CONFIG_IPV6) + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct mld2_query *mld2q; + + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + + sizeof(*mld2q))) + return true; + + mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); + skb_reset_transport_header(skb); + skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); + spin_lock_bh(&amt->lock); + amt->ready6 = true; + amt->mac = amtmq->response_mac; + amt->req_cnt = 0; + amt->qi = mld2q->mld2q_qqic; + spin_unlock_bh(&amt->lock); + skb->protocol = htons(ETH_P_IPV6); + eth->h_proto = htons(ETH_P_IPV6); + ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); +#endif + } else { + return true; + } + + ether_addr_copy(eth->h_source, oeth->h_source); + skb->pkt_type = PACKET_MULTICAST; + skb->ip_summed = CHECKSUM_NONE; + len = skb->len; + if (netif_rx(skb) == NET_RX_SUCCESS) { + amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); + dev_sw_netstats_rx_add(amt->dev, len); + } else { + amt->dev->stats.rx_dropped++; + } + + return false; +} + +static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) +{ + struct amt_header_membership_update *amtmu; + struct amt_tunnel_list *tunnel; + struct udphdr *udph; + struct ethhdr *eth; + struct iphdr *iph; + int len; + + iph = ip_hdr(skb); + udph = udp_hdr(skb); + + if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol, + false, false)) + return true; + + amtmu = (struct amt_header_membership_update *)skb->data; + if (amtmu->reserved || amtmu->version) + return true; + + skb_pull(skb, sizeof(*amtmu)); + skb_reset_network_header(skb); + + list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { + if (tunnel->ip4 == iph->saddr) { + if ((amtmu->nonce == tunnel->nonce && + amtmu->response_mac == tunnel->mac)) { + mod_delayed_work(amt_wq, &tunnel->gc_wq, + msecs_to_jiffies(amt_gmi(amt)) + * 3); + goto report; + } else { + netdev_dbg(amt->dev, "Invalid MAC\n"); + return true; + } + } + } + + return false; + +report: + iph = ip_hdr(skb); + if (iph->version == 4) { + if (ip_mc_check_igmp(skb)) { + netdev_dbg(amt->dev, "Invalid IGMP\n"); + return true; + } + + spin_lock_bh(&tunnel->lock); + amt_igmp_report_handler(amt, skb, tunnel); + spin_unlock_bh(&tunnel->lock); + + skb_push(skb, sizeof(struct ethhdr)); + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + skb->protocol = htons(ETH_P_IP); + eth->h_proto = htons(ETH_P_IP); + ip_eth_mc_map(iph->daddr, eth->h_dest); +#if IS_ENABLED(CONFIG_IPV6) + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + if 
(ipv6_mc_check_mld(skb)) { + netdev_dbg(amt->dev, "Invalid MLD\n"); + return true; + } + + spin_lock_bh(&tunnel->lock); + amt_mld_report_handler(amt, skb, tunnel); + spin_unlock_bh(&tunnel->lock); + + skb_push(skb, sizeof(struct ethhdr)); + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + skb->protocol = htons(ETH_P_IPV6); + eth->h_proto = htons(ETH_P_IPV6); + ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); +#endif + } else { + netdev_dbg(amt->dev, "Unsupported Protocol\n"); + return true; + } + + skb_pull(skb, sizeof(struct ethhdr)); + skb->pkt_type = PACKET_MULTICAST; + skb->ip_summed = CHECKSUM_NONE; + len = skb->len; + if (netif_rx(skb) == NET_RX_SUCCESS) { + amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE, + true); + dev_sw_netstats_rx_add(amt->dev, len); + } else { + amt->dev->stats.rx_dropped++; + } + + return false; +} + +static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce, + __be32 daddr, __be16 dport) +{ + struct amt_header_advertisement *amta; + int hlen, tlen, offset; + struct socket *sock; + struct udphdr *udph; + struct sk_buff *skb; + struct iphdr *iph; + struct rtable *rt; + struct flowi4 fl4; + u32 len; + int err; + + rcu_read_lock(); + sock = rcu_dereference(amt->sock); + if (!sock) + goto out; + + if (!netif_running(amt->stream_dev) || !netif_running(amt->dev)) + goto out; + + rt = ip_route_output_ports(amt->net, &fl4, sock->sk, + daddr, amt->local_ip, + dport, amt->relay_port, + IPPROTO_UDP, 0, + amt->stream_dev->ifindex); + if (IS_ERR(rt)) { + amt->dev->stats.tx_errors++; + goto out; + } + + hlen = LL_RESERVED_SPACE(amt->dev); + tlen = amt->dev->needed_tailroom; + len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta); + skb = netdev_alloc_skb_ip_align(amt->dev, len); + if (!skb) { + ip_rt_put(rt); + amt->dev->stats.tx_errors++; + goto out; + } + + skb->priority = TC_PRIO_CONTROL; + skb_dst_set(skb, &rt->dst); + + len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta); + skb_reset_network_header(skb); + skb_put(skb, len); + amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph)); + amta->version = 0; + amta->type = AMT_MSG_ADVERTISEMENT; + amta->reserved = 0; + amta->nonce = nonce; + amta->ip4 = amt->local_ip; + skb_push(skb, sizeof(*udph)); + skb_reset_transport_header(skb); + udph = udp_hdr(skb); + udph->source = amt->relay_port; + udph->dest = dport; + udph->len = htons(sizeof(*amta) + sizeof(*udph)); + udph->check = 0; + offset = skb_transport_offset(skb); + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + udph->check = csum_tcpudp_magic(amt->local_ip, daddr, + sizeof(*udph) + sizeof(*amta), + IPPROTO_UDP, skb->csum); + + skb_push(skb, sizeof(*iph)); + iph = ip_hdr(skb); + iph->version = 4; + iph->ihl = (sizeof(struct iphdr)) >> 2; + iph->tos = AMT_TOS; + iph->frag_off = 0; + iph->ttl = ip4_dst_hoplimit(&rt->dst); + iph->daddr = daddr; + iph->saddr = amt->local_ip; + iph->protocol = IPPROTO_UDP; + iph->tot_len = htons(len); + + skb->ip_summed = CHECKSUM_NONE; + ip_select_ident(amt->net, skb, NULL); + ip_send_check(iph); + err = ip_local_out(amt->net, sock->sk, skb); + if (unlikely(net_xmit_eval(err))) + amt->dev->stats.tx_errors++; + +out: + rcu_read_unlock(); +} + +static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb) +{ + struct amt_header_discovery *amtd; + struct udphdr *udph; + struct iphdr *iph; + + if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd))) + return true; + + iph = ip_hdr(skb); + udph = udp_hdr(skb); + amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1); + + if (amtd->reserved 
|| amtd->version) + return true; + + amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source); + + return false; +} + +static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) +{ + struct amt_header_request *amtrh; + struct amt_tunnel_list *tunnel; + unsigned long long key; + struct udphdr *udph; + struct iphdr *iph; + u64 mac; + int i; + + if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh))) + return true; + + iph = ip_hdr(skb); + udph = udp_hdr(skb); + amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1); + + if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version) + return true; + + list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) + if (tunnel->ip4 == iph->saddr) + goto send; + + if (amt->nr_tunnels >= amt->max_tunnels) { + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); + return true; + } + + tunnel = kzalloc(sizeof(*tunnel) + + (sizeof(struct hlist_head) * amt->hash_buckets), + GFP_ATOMIC); + if (!tunnel) + return true; + + tunnel->source_port = udph->source; + tunnel->ip4 = iph->saddr; + + memcpy(&key, &tunnel->key, sizeof(unsigned long long)); + tunnel->amt = amt; + spin_lock_init(&tunnel->lock); + for (i = 0; i < amt->hash_buckets; i++) + INIT_HLIST_HEAD(&tunnel->groups[i]); + + INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire); + + spin_lock_bh(&amt->lock); + list_add_tail_rcu(&tunnel->list, &amt->tunnel_list); + tunnel->key = amt->key; + amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); + amt->nr_tunnels++; + mod_delayed_work(amt_wq, &tunnel->gc_wq, + msecs_to_jiffies(amt_gmi(amt))); + spin_unlock_bh(&amt->lock); + +send: + tunnel->nonce = amtrh->nonce; + mac = siphash_3u32((__force u32)tunnel->ip4, + (__force u32)tunnel->source_port, + (__force u32)tunnel->nonce, + &tunnel->key); + tunnel->mac = mac >> 16; + + if (!netif_running(amt->dev) || !netif_running(amt->stream_dev)) + return true; + + if (!amtrh->p) + amt_send_igmp_gq(amt, tunnel); + else + amt_send_mld_gq(amt, tunnel); + + return false; +} + +static int amt_rcv(struct sock *sk, struct sk_buff *skb) +{ + struct amt_dev *amt; + struct iphdr *iph; + int type; + bool err; + + rcu_read_lock_bh(); + amt = rcu_dereference_sk_user_data(sk); + if (!amt) { + err = true; + goto out; + } + + skb->dev = amt->dev; + iph = ip_hdr(skb); + type = amt_parse_type(skb); + if (type == -1) { + err = true; + goto drop; + } + + if (amt->mode == AMT_MODE_GATEWAY) { + switch (type) { + case AMT_MSG_ADVERTISEMENT: + if (iph->saddr != amt->discovery_ip) { + netdev_dbg(amt->dev, "Invalid Relay IP\n"); + err = true; + goto drop; + } + if (amt_advertisement_handler(amt, skb)) + amt->dev->stats.rx_dropped++; + goto out; + case AMT_MSG_MULTICAST_DATA: + if (iph->saddr != amt->remote_ip) { + netdev_dbg(amt->dev, "Invalid Relay IP\n"); + err = true; + goto drop; + } + err = amt_multicast_data_handler(amt, skb); + if (err) + goto drop; + else + goto out; + case AMT_MSG_MEMBERSHIP_QUERY: + if (iph->saddr != amt->remote_ip) { + netdev_dbg(amt->dev, "Invalid Relay IP\n"); + err = true; + goto drop; + } + err = amt_membership_query_handler(amt, skb); + if (err) + goto drop; + else + goto out; + default: + err = true; + netdev_dbg(amt->dev, "Invalid type of Gateway\n"); + break; + } + } else { + switch (type) { + case AMT_MSG_DISCOVERY: + err = amt_discovery_handler(amt, skb); + break; + case AMT_MSG_REQUEST: + err = amt_request_handler(amt, skb); + break; + case AMT_MSG_MEMBERSHIP_UPDATE: + err = amt_update_handler(amt, skb); + if (err) + goto drop; + else + goto out; + default: + 
err = true; + netdev_dbg(amt->dev, "Invalid type of relay\n"); + break; + } + } +drop: + if (err) { + amt->dev->stats.rx_dropped++; + kfree_skb(skb); + } else { + consume_skb(skb); + } +out: + rcu_read_unlock_bh(); + return 0; +} + +static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) +{ + struct amt_dev *amt; + int type; + + rcu_read_lock_bh(); + amt = rcu_dereference_sk_user_data(sk); + if (!amt) + goto drop; + + if (amt->mode != AMT_MODE_GATEWAY) + goto drop; + + type = amt_parse_type(skb); + if (type == -1) + goto drop; + + netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n", + type_str[type]); + switch (type) { + case AMT_MSG_DISCOVERY: + break; + case AMT_MSG_REQUEST: + case AMT_MSG_MEMBERSHIP_UPDATE: + if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT) + mod_delayed_work(amt_wq, &amt->req_wq, 0); + break; + default: + goto drop; + } + rcu_read_unlock_bh(); + return 0; +drop: + rcu_read_unlock_bh(); + amt->dev->stats.rx_dropped++; + return 0; +} + +static struct socket *amt_create_sock(struct net *net, __be16 port) +{ + struct udp_port_cfg udp_conf; + struct socket *sock; + int err; + + memset(&udp_conf, 0, sizeof(udp_conf)); + udp_conf.family = AF_INET; + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + + udp_conf.local_udp_port = port; + + err = udp_sock_create(net, &udp_conf, &sock); + if (err < 0) + return ERR_PTR(err); + + return sock; +} + +static int amt_socket_create(struct amt_dev *amt) +{ + struct udp_tunnel_sock_cfg tunnel_cfg; + struct socket *sock; + + sock = amt_create_sock(amt->net, amt->relay_port); + if (IS_ERR(sock)) + return PTR_ERR(sock); + + /* Mark socket as an encapsulation socket */ + memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); + tunnel_cfg.sk_user_data = amt; + tunnel_cfg.encap_type = 1; + tunnel_cfg.encap_rcv = amt_rcv; + tunnel_cfg.encap_err_lookup = amt_err_lookup; + tunnel_cfg.encap_destroy = NULL; + setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg); + + rcu_assign_pointer(amt->sock, sock); + return 0; +} + +static int amt_dev_open(struct net_device *dev) +{ + struct amt_dev *amt = netdev_priv(dev); + int err; + + amt->ready4 = false; + amt->ready6 = false; + + err = amt_socket_create(amt); + if (err) + return err; + + amt->req_cnt = 0; + amt->remote_ip = 0; + get_random_bytes(&amt->key, sizeof(siphash_key_t)); + + amt->status = AMT_STATUS_INIT; + if (amt->mode == AMT_MODE_GATEWAY) { + mod_delayed_work(amt_wq, &amt->discovery_wq, 0); + mod_delayed_work(amt_wq, &amt->req_wq, 0); + } else if (amt->mode == AMT_MODE_RELAY) { + mod_delayed_work(amt_wq, &amt->secret_wq, + msecs_to_jiffies(AMT_SECRET_TIMEOUT)); + } + return err; +} + +static int amt_dev_stop(struct net_device *dev) +{ + struct amt_dev *amt = netdev_priv(dev); + struct amt_tunnel_list *tunnel, *tmp; + struct socket *sock; + + cancel_delayed_work_sync(&amt->req_wq); + cancel_delayed_work_sync(&amt->discovery_wq); + cancel_delayed_work_sync(&amt->secret_wq); + + /* shutdown */ + sock = rtnl_dereference(amt->sock); + RCU_INIT_POINTER(amt->sock, NULL); + synchronize_net(); + if (sock) + udp_tunnel_sock_release(sock); + + amt->ready4 = false; + amt->ready6 = false; + amt->req_cnt = 0; + amt->remote_ip = 0; + + list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) { + list_del_rcu(&tunnel->list); + amt->nr_tunnels--; + cancel_delayed_work_sync(&tunnel->gc_wq); + amt_clear_groups(tunnel); + kfree_rcu(tunnel, rcu); + } + + return 0; +} + +static const struct device_type amt_type = { + .name = "amt", +}; + +static int amt_dev_init(struct net_device *dev) +{ + struct amt_dev 
*amt = netdev_priv(dev); + int err; + + amt->dev = dev; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + err = gro_cells_init(&amt->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + + return 0; +} + +static void amt_dev_uninit(struct net_device *dev) +{ + struct amt_dev *amt = netdev_priv(dev); + + gro_cells_destroy(&amt->gro_cells); + free_percpu(dev->tstats); +} + +static const struct net_device_ops amt_netdev_ops = { + .ndo_init = amt_dev_init, + .ndo_uninit = amt_dev_uninit, + .ndo_open = amt_dev_open, + .ndo_stop = amt_dev_stop, + .ndo_start_xmit = amt_dev_xmit, + .ndo_get_stats64 = dev_get_tstats64, +}; + +static void amt_link_setup(struct net_device *dev) +{ + dev->netdev_ops = &amt_netdev_ops; + dev->needs_free_netdev = true; + SET_NETDEV_DEVTYPE(dev, &amt_type); + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; + dev->type = ARPHRD_NONE; + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; + dev->features |= NETIF_F_LLTX; + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->features |= NETIF_F_NETNS_LOCAL; + dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; + dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + eth_hw_addr_random(dev); + eth_zero_addr(dev->broadcast); + ether_setup(dev); +} + +static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = { + [IFLA_AMT_MODE] = { .type = NLA_U32 }, + [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 }, + [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 }, + [IFLA_AMT_LINK] = { .type = NLA_U32 }, + [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) }, + [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) }, + [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) }, + [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 }, +}; + +static int amt_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + if (!data) + return -EINVAL; + + if (!data[IFLA_AMT_LINK]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK], + "Link attribute is required"); + return -EINVAL; + } + + if (!data[IFLA_AMT_MODE]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE], + "Mode attribute is required"); + return -EINVAL; + } + + if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE], + "Mode attribute is not valid"); + return -EINVAL; + } + + if (!data[IFLA_AMT_LOCAL_IP]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP], + "Local attribute is required"); + return -EINVAL; + } + + if (!data[IFLA_AMT_DISCOVERY_IP] && + nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP], + "Discovery attribute is required"); + return -EINVAL; + } + + return 0; +} + +static int amt_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct amt_dev *amt = netdev_priv(dev); + int err = -EINVAL; + + amt->net = net; + amt->mode = nla_get_u32(data[IFLA_AMT_MODE]); + + if (data[IFLA_AMT_MAX_TUNNELS] && + nla_get_u32(data[IFLA_AMT_MAX_TUNNELS])) + amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]); + else + amt->max_tunnels = AMT_MAX_TUNNELS; + + spin_lock_init(&amt->lock); + amt->max_groups = AMT_MAX_GROUP; + amt->max_sources = AMT_MAX_SOURCE; + amt->hash_buckets = AMT_HSIZE; + amt->nr_tunnels = 0; + 
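+ /* Randomize the hash seed, then resolve the lower (stream) device from IFLA_AMT_LINK; only Ethernet devices are accepted. */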
get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed)); + amt->stream_dev = dev_get_by_index(net, + nla_get_u32(data[IFLA_AMT_LINK])); + if (!amt->stream_dev) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK], + "Can't find stream device"); + return -ENODEV; + } + + if (amt->stream_dev->type != ARPHRD_ETHER) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK], + "Invalid stream device type"); + goto err; + } + + amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]); + if (ipv4_is_loopback(amt->local_ip) || + ipv4_is_zeronet(amt->local_ip) || + ipv4_is_multicast(amt->local_ip)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP], + "Invalid Local address"); + goto err; + } + + if (data[IFLA_AMT_RELAY_PORT]) + amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]); + else + amt->relay_port = htons(IANA_AMT_UDP_PORT); + + if (data[IFLA_AMT_GATEWAY_PORT]) + amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]); + else + amt->gw_port = htons(IANA_AMT_UDP_PORT); + + if (!amt->relay_port) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], + "relay port must not be 0"); + goto err; + } + if (amt->mode == AMT_MODE_RELAY) { + amt->qrv = amt->net->ipv4.sysctl_igmp_qrv; + amt->qri = 10; + dev->needed_headroom = amt->stream_dev->needed_headroom + + AMT_RELAY_HLEN; + dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN; + dev->max_mtu = dev->mtu; + dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN; + } else { + if (!data[IFLA_AMT_DISCOVERY_IP]) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], + "discovery must be set in gateway mode"); + goto err; + } + if (!amt->gw_port) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], + "gateway port must not be 0"); + goto err; + } + amt->remote_ip = 0; + amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]); + if (ipv4_is_loopback(amt->discovery_ip) || + ipv4_is_zeronet(amt->discovery_ip) || + ipv4_is_multicast(amt->discovery_ip)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP], + "discovery must be unicast"); + goto err; + } + + dev->needed_headroom = amt->stream_dev->needed_headroom + + AMT_GW_HLEN; + dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN; + dev->max_mtu = dev->mtu; + dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN; + } + amt->qi = AMT_INIT_QUERY_INTERVAL; + + err = register_netdevice(dev); + if (err < 0) { + netdev_dbg(dev, "failed to register new netdev %d\n", err); + goto err; + } + + err = netdev_upper_dev_link(amt->stream_dev, dev, extack); + if (err < 0) { + unregister_netdevice(dev); + goto err; + } + + INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work); + INIT_DELAYED_WORK(&amt->req_wq, amt_req_work); + INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work); + INIT_LIST_HEAD(&amt->tunnel_list); + + return 0; +err: + dev_put(amt->stream_dev); + return err; +} + +static void amt_dellink(struct net_device *dev, struct list_head *head) +{ + struct amt_dev *amt = netdev_priv(dev); + + unregister_netdevice_queue(dev, head); + netdev_upper_dev_unlink(amt->stream_dev, dev); + dev_put(amt->stream_dev); +} + +static size_t amt_get_size(const struct net_device *dev) +{ + return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */ + nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */ + nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */ + nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */ + nla_total_size(sizeof(__u32)) + /* IFLA_MAX_TUNNELS */ + nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */ + nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */ + 
               nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
+}
+
+static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+        struct amt_dev *amt = netdev_priv(dev);
+
+        if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
+                goto nla_put_failure;
+        if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
+                goto nla_put_failure;
+        if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
+                goto nla_put_failure;
+        if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
+                goto nla_put_failure;
+        if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
+                goto nla_put_failure;
+        if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
+                goto nla_put_failure;
+        if (amt->remote_ip)
+                if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
+                        goto nla_put_failure;
+        if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
+                goto nla_put_failure;
+
+        return 0;
+
+nla_put_failure:
+        return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops amt_link_ops __read_mostly = {
+        .kind = "amt",
+        .maxtype = IFLA_AMT_MAX,
+        .policy = amt_policy,
+        .priv_size = sizeof(struct amt_dev),
+        .setup = amt_link_setup,
+        .validate = amt_validate,
+        .newlink = amt_newlink,
+        .dellink = amt_dellink,
+        .get_size = amt_get_size,
+        .fill_info = amt_fill_info,
+};
+
+static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
+{
+        struct net_device *upper_dev;
+        struct amt_dev *amt;
+
+        for_each_netdev(dev_net(dev), upper_dev) {
+                if (netif_is_amt(upper_dev)) {
+                        amt = netdev_priv(upper_dev);
+                        if (amt->stream_dev == dev)
+                                return upper_dev;
+                }
+        }
+
+        return NULL;
+}
+
+static int amt_device_event(struct notifier_block *unused,
+                            unsigned long event, void *ptr)
+{
+        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+        struct net_device *upper_dev;
+        struct amt_dev *amt;
+        LIST_HEAD(list);
+        int new_mtu;
+
+        upper_dev = amt_lookup_upper_dev(dev);
+        if (!upper_dev)
+                return NOTIFY_DONE;
+        amt = netdev_priv(upper_dev);
+
+        switch (event) {
+        case NETDEV_UNREGISTER:
+                amt_dellink(amt->dev, &list);
+                unregister_netdevice_many(&list);
+                break;
+        case NETDEV_CHANGEMTU:
+                if (amt->mode == AMT_MODE_RELAY)
+                        new_mtu = dev->mtu - AMT_RELAY_HLEN;
+                else
+                        new_mtu = dev->mtu - AMT_GW_HLEN;
+
+                dev_set_mtu(amt->dev, new_mtu);
+                break;
+        }
+
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block amt_notifier_block __read_mostly = {
+        .notifier_call = amt_device_event,
+};
+
+static int __init amt_init(void)
+{
+        int err;
+
+        err = register_netdevice_notifier(&amt_notifier_block);
+        if (err < 0)
+                goto err;
+
+        err = rtnl_link_register(&amt_link_ops);
+        if (err < 0)
+                goto unregister_notifier;
+
+        amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+        if (!amt_wq)
+                goto rtnl_unregister;
+
+        spin_lock_init(&source_gc_lock);
+        spin_lock_bh(&source_gc_lock);
+        INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
+        mod_delayed_work(amt_wq, &source_gc_wq,
+                         msecs_to_jiffies(AMT_GC_INTERVAL));
+        spin_unlock_bh(&source_gc_lock);
+
+        return 0;
+
+rtnl_unregister:
+        rtnl_link_unregister(&amt_link_ops);
+unregister_notifier:
+        unregister_netdevice_notifier(&amt_notifier_block);
+err:
+        pr_err("error loading AMT module\n");
+        return err;
+}
+late_initcall(amt_init);
+
+static void __exit amt_fini(void)
+{
+        rtnl_link_unregister(&amt_link_ops);
+        unregister_netdevice_notifier(&amt_notifier_block);
+        flush_delayed_work(&source_gc_wq);
+        __amt_source_gc_work();
+        destroy_workqueue(amt_wq);
+}
+module_exit(amt_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Taehee Yoo ");
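/* Illustrative sketch only, not part of the patch: the attribute contract
 * that amt_validate()/amt_newlink() above enforce, restated as a single
 * predicate.  The IFLA_AMT_* and AMT_MODE_* names come from this patch;
 * the helper and its parameters are purely for illustration.
 */
static bool amt_cfg_is_valid_sketch(u32 mode, bool has_link, bool has_local,
                                    bool has_discovery)
{
        /* IFLA_AMT_LINK, IFLA_AMT_MODE and IFLA_AMT_LOCAL_IP are mandatory */
        if (!has_link || !has_local || mode > AMT_MODE_MAX)
                return false;
        /* gateway mode must also carry a unicast IFLA_AMT_DISCOVERY_IP */
        return mode != AMT_MODE_GATEWAY || has_discovery;
}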
+MODULE_ALIAS_RTNL_LINK("amt"); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index f0695d68c47e66..97f254bdbb1609 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -945,8 +945,8 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) dev->broadcast[0] = 0xFF; /* Set hardware address. */ - dev->dev_addr[0] = aa->s_node; dev->addr_len = 1; + dev_addr_set(dev, &aa->s_node); return 0; case SIOCGIFADDR: diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 1f8925e75b3fe3..388d7b3bd4c234 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -846,9 +846,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) set_30 (dev,ltflags); dev->broadcast[0] = 0xFF; - dev->dev_addr[0] = aa->s_node; - dev->addr_len=1; + dev_addr_set(dev, &aa->s_node); return 0; diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 12d085405bd052..8c3ccc7c83cd3c 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -207,7 +207,8 @@ static int __init arcrimi_found(struct net_device *dev) } /* get and check the station ID from offset 1 in shmem */ - dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION); + arcnet_set_addr(dev, arcnet_readb(lp->mem_start, + COM9026_REG_R_STATION)); arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n", dev->dev_addr[0], @@ -324,7 +325,7 @@ static int __init arc_rimi_init(void) return -ENOMEM; if (node && node != 0xff) - dev->dev_addr[0] = node; + arcnet_set_addr(dev, node); dev->mem_start = io; dev->irq = irq; diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index 5d4a4c7efbbff3..19e996a829c9db 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -364,6 +364,11 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); void arcnet_timeout(struct net_device *dev, unsigned int txqueue); +static inline void arcnet_set_addr(struct net_device *dev, u8 addr) +{ + dev_addr_set(dev, &addr); +} + /* I/O equivalents */ #ifdef CONFIG_SA1100_CT6001 diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index be618e4b9ed5e0..293a621e654ca9 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -151,7 +151,7 @@ static int __init com20020_init(void) return -ENOMEM; if (node && node != 0xff) - dev->dev_addr[0] = node; + arcnet_set_addr(dev, node); dev->netdev_ops = &com20020_netdev_ops; diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 3c8f665c15580b..6382e1937cca2e 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -194,7 +194,7 @@ static int com20020pci_probe(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); dev->base_addr = ioaddr; - dev->dev_addr[0] = node; + arcnet_set_addr(dev, node); dev->sysfs_groups[0] = &com20020_state_group; dev->irq = pdev->irq; lp->card_name = "PCI COM20020"; diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 78043a9c5981e5..06e1651b594ba8 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -157,7 +157,7 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr) struct arcnet_local *lp = netdev_priv(dev); struct sockaddr *hwaddr = addr; - memcpy(dev->dev_addr, hwaddr->sa_data, 1); + dev_addr_set(dev, hwaddr->sa_data); 
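/* Note on the conversions above (annotation, not part of the patch):
 * dev_addr_set() copies dev->addr_len bytes (one byte for these
 * LocalTalk/ARCnet devices) from the buffer it is given, so the hunks
 * above are behaviourally identical to the direct writes they replace.
 * The point of the churn is that dev->dev_addr is intended to become
 * read-only outside the core, so drivers update it only through helpers
 * such as dev_addr_set(), __dev_addr_set() or the arcnet_set_addr()
 * wrapper added earlier in this patch.
 */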
com20020_set_subaddress(lp, ioaddr, SUB_NODE); arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG); @@ -220,7 +220,7 @@ int com20020_found(struct net_device *dev, int shared) /* FIXME: do this some other way! */ if (!dev->dev_addr[0]) - dev->dev_addr[0] = arcnet_inb(ioaddr, 8); + arcnet_set_addr(dev, arcnet_inb(ioaddr, 8)); com20020_set_subaddress(lp, ioaddr, SUB_SETUP1); arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG); diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index b88a109b3b150c..24150c933fcbe9 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -133,7 +133,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) lp->hw.owner = THIS_MODULE; /* fill in our module parameters as defaults */ - dev->dev_addr[0] = node; + arcnet_set_addr(dev, node); p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[0]->end = 16; diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 3856b447d38ed2..37b47749fc8b4a 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -252,7 +252,7 @@ static int __init com90io_found(struct net_device *dev) /* get and check the station ID from offset 1 in shmem */ - dev->dev_addr[0] = get_buffer_byte(dev, 1); + arcnet_set_addr(dev, get_buffer_byte(dev, 1)); err = register_netdev(dev); if (err) { diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index d8dfb9ea0de89b..f49dae1942846d 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -531,7 +531,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, } /* get and check the station ID from offset 1 in shmem */ - dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION); + arcnet_set_addr(dev, arcnet_readb(lp->mem_start, + COM9026_REG_R_STATION)); dev->base_addr = ioaddr; diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 54e321a695ce92..edffc3489a1205 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -577,11 +577,8 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, return -EINVAL; } - if (data[IFLA_BAREUDP_PORT]) - conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]); - - if (data[IFLA_BAREUDP_ETHERTYPE]) - conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]); + conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]); + conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]); if (data[IFLA_BAREUDP_SRCPORT_MIN]) conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7d3752cbf761d7..2ec8e015c7b336 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -50,7 +50,7 @@ struct arp_pkt { #pragma pack() /* Forward declaration */ -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], +static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[], bool strict_match); static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); static void rlb_src_unlink(struct bonding *bond, u32 index); @@ -353,7 +353,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond) * * Caller must hold RTNL */ -static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) +static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, + const u8 addr[]) { struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); @@ -904,7 +905,7 @@ static void rlb_clear_vlan(struct 
bonding *bond, unsigned short vlan_id) /*********************** tlb/rlb shared functions *********************/ -static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], +static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[], __be16 vlan_proto, u16 vid) { struct learning_pkt pkt; @@ -940,7 +941,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], struct alb_walk_data { struct bonding *bond; struct slave *slave; - u8 *mac_addr; + const u8 *mac_addr; bool strict_match; }; @@ -949,9 +950,9 @@ static int alb_upper_dev_walk(struct net_device *upper, { struct alb_walk_data *data = (struct alb_walk_data *)priv->data; bool strict_match = data->strict_match; + const u8 *mac_addr = data->mac_addr; struct bonding *bond = data->bond; struct slave *slave = data->slave; - u8 *mac_addr = data->mac_addr; struct bond_vlan_tag *tags; if (is_vlan_dev(upper) && @@ -982,7 +983,7 @@ static int alb_upper_dev_walk(struct net_device *upper, return 0; } -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], +static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[], bool strict_match) { struct bonding *bond = bond_get_bond_by_slave(slave); @@ -1006,14 +1007,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], rcu_read_unlock(); } -static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], +static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[], unsigned int len) { struct net_device *dev = slave->dev; struct sockaddr_storage ss; if (BOND_MODE(slave->bond) == BOND_MODE_TLB) { - memcpy(dev->dev_addr, addr, len); + __dev_addr_set(dev, addr, len); return 0; } @@ -1242,8 +1243,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) res = dev_set_mac_address(slave->dev, addr, NULL); /* restore net_device's hw address */ - bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr, - slave->dev->addr_len); + dev_addr_set(slave->dev, tmp_addr); if (res) goto unwind; @@ -1263,8 +1263,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) rollback_slave->dev->addr_len); dev_set_mac_address(rollback_slave->dev, (struct sockaddr *)&ss, NULL); - bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr, - rollback_slave->dev->addr_len); + dev_addr_set(rollback_slave->dev, tmp_addr); } return res; @@ -1727,8 +1726,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss, NULL); - bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr, - new_slave->dev->addr_len); + dev_addr_set(new_slave->dev, tmp_addr); } /* curr_active_slave must be set before calling alb_swap_mac_addr */ @@ -1761,7 +1759,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) if (res) return res; - bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len); + dev_addr_set(bond_dev, ss->__data); /* If there is no curr_active_slave there is nothing else to do. 
* Otherwise we'll need to pass the new address to it and handle diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 77dc79a7f5748b..ff8da720a33a79 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -923,7 +923,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev, if (err) return err; - memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); + __dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len); bond_dev->addr_assign_type = NET_ADDR_STOLEN; call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); return 0; @@ -4414,7 +4414,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) } /* success */ - memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len); + dev_addr_set(bond_dev, ss->__data); return 0; unwind: diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index b9e9842fed94ef..c48b77167fab6d 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -811,8 +811,8 @@ int bond_create_sysfs(struct bond_net *bn) */ if (ret == -EEXIST) { /* Is someone being kinky and naming a device bonding_master? */ - if (__dev_get_by_name(bn->net, - class_attr_bonding_masters.attr.name)) + if (netdev_name_in_use(bn->net, + class_attr_bonding_masters.attr.name)) pr_err("network device named %s already exists in sysfs\n", class_attr_bonding_masters.attr.name); ret = 0; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index b06af90a996408..3aea32c9b108f9 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1170,9 +1170,9 @@ static ssize_t mb0_id_show(struct device *dev, struct at91_priv *priv = netdev_priv(to_net_dev(dev)); if (priv->mb0_id & CAN_EFF_FLAG) - return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); + return sysfs_emit(buf, "0x%08x\n", priv->mb0_id); else - return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); + return sysfs_emit(buf, "0x%03x\n", priv->mb0_id); } static ssize_t mb0_id_store(struct device *dev, diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index f49170eadd547c..0509625c30827d 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -175,27 +175,29 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, return 0; } -void can_calc_tdco(struct net_device *dev) -{ - struct can_priv *priv = netdev_priv(dev); - const struct can_bittiming *dbt = &priv->data_bittiming; - struct can_tdc *tdc = &priv->tdc; - const struct can_tdc_const *tdc_const = priv->tdc_const; +void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, + const struct can_bittiming *dbt, + u32 *ctrlmode, u32 ctrlmode_supported) - if (!tdc_const) +{ + if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO)) return; + *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; + /* As specified in ISO 11898-1 section 11.3.3 "Transmitter * delay compensation" (TDC) is only applicable if data BRP is * one or two. 
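	 *
	 * Worked example (illustration only, hypothetical numbers): with a
	 * data phase of brp = 1, prop_seg = 7 and phase_seg1 = 8, the sample
	 * point computed below is (CAN_SYNC_SEG + 7 + 8) * 1 = 16 clock
	 * periods, so tdco becomes min(16, tdco_max) and
	 * CAN_CTRLMODE_TDC_AUTO is set; a result smaller than tdco_min
	 * simply leaves TDC disabled.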
*/ if (dbt->brp == 1 || dbt->brp == 2) { - /* Reuse "normal" sample point and convert it to time quanta */ - u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000; - - tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max); - } else { - tdc->tdco = 0; + /* Sample point in clock periods */ + u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg + + dbt->phase_seg1) * dbt->brp; + + if (sample_point_in_tc < tdc_const->tdco_min) + return; + tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max); + *ctrlmode |= CAN_CTRLMODE_TDC_AUTO; } } #endif /* CONFIG_CAN_CALC_BITTIMING */ @@ -209,7 +211,7 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - int tseg1, alltseg; + unsigned int tseg1, alltseg; u64 brp64; tseg1 = bt->prop_seg + bt->phase_seg1; diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 80425636049d2e..95cca4e5251f0a 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -2,6 +2,7 @@ /* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix * Copyright (C) 2006 Andrey Volkov, Varma Electronics * Copyright (C) 2008-2009 Wolfgang Grandegger + * Copyright (C) 2021 Vincent Mailhol */ #include @@ -19,6 +20,19 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, + [IFLA_CAN_TDC] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = { + [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 }, + [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 }, }; static int can_validate(struct nlattr *tb[], struct nlattr *data[], @@ -30,6 +44,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], * - nominal/arbitration bittiming * - data bittiming * - control mode with CAN_CTRLMODE_FD set + * - TDC parameters are coherent (details below) */ if (!data) @@ -37,8 +52,43 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK; is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; + + /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */ + if (tdc_flags == CAN_CTRLMODE_TDC_MASK) + return -EOPNOTSUPP; + /* If one of the CAN_CTRLMODE_TDC_* flag is set then + * TDC must be set and vice-versa + */ + if (!!tdc_flags != !!data[IFLA_CAN_TDC]) + return -EOPNOTSUPP; + /* If providing TDC parameters, at least TDCO is + * needed. 
TDCV is needed if and only if + * CAN_CTRLMODE_TDC_MANUAL is set + */ + if (data[IFLA_CAN_TDC]) { + struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; + int err; + + err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, + data[IFLA_CAN_TDC], + can_tdc_policy, extack); + if (err) + return err; + + if (tb_tdc[IFLA_CAN_TDC_TDCV]) { + if (tdc_flags & CAN_CTRLMODE_TDC_AUTO) + return -EOPNOTSUPP; + } else { + if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL) + return -EOPNOTSUPP; + } + + if (!tb_tdc[IFLA_CAN_TDC_TDCO]) + return -EOPNOTSUPP; + } } if (is_can_fd) { @@ -46,7 +96,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], return -EOPNOTSUPP; } - if (data[IFLA_CAN_DATA_BITTIMING]) { + if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) { if (!is_can_fd) return -EOPNOTSUPP; } @@ -54,11 +104,60 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; + struct can_tdc tdc = { 0 }; + const struct can_tdc_const *tdc_const = priv->tdc_const; + int err; + + if (!tdc_const || !can_tdc_is_enabled(priv)) + return -EOPNOTSUPP; + + err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla, + can_tdc_policy, extack); + if (err) + return err; + + if (tb_tdc[IFLA_CAN_TDC_TDCV]) { + u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]); + + if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max) + return -EINVAL; + + tdc.tdcv = tdcv; + } + + if (tb_tdc[IFLA_CAN_TDC_TDCO]) { + u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]); + + if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max) + return -EINVAL; + + tdc.tdco = tdco; + } + + if (tb_tdc[IFLA_CAN_TDC_TDCF]) { + u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]); + + if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max) + return -EINVAL; + + tdc.tdcf = tdcf; + } + + priv->tdc = tdc; + + return 0; +} + static int can_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct can_priv *priv = netdev_priv(dev); + u32 tdc_mask = 0; int err; /* We need synchronization with dev->stop() */ @@ -138,7 +237,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], dev->mtu = CAN_MTU; memset(&priv->data_bittiming, 0, sizeof(priv->data_bittiming)); + priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; + memset(&priv->tdc, 0, sizeof(priv->tdc)); } + + tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK; + /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually + * exclusive: make sure to turn the other one off + */ + if (tdc_mask) + priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK; } if (data[IFLA_CAN_RESTART_MS]) { @@ -187,9 +295,26 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], return -EINVAL; } - memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); + memset(&priv->tdc, 0, sizeof(priv->tdc)); + if (data[IFLA_CAN_TDC]) { + /* TDC parameters are provided: use them */ + err = can_tdc_changelink(priv, data[IFLA_CAN_TDC], + extack); + if (err) { + priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; + return err; + } + } else if (!tdc_mask) { + /* Neither of TDC parameters nor TDC flags are + * provided: do calculation + */ + can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, + &priv->ctrlmode, priv->ctrlmode_supported); + } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly + * turned off. 
TDC is disabled: do nothing + */ - can_calc_tdco(dev); + memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); if (priv->do_set_data_bittiming) { /* Finally, set the bit-timing registers */ @@ -226,6 +351,38 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], return 0; } +static size_t can_tdc_get_size(const struct net_device *dev) +{ + struct can_priv *priv = netdev_priv(dev); + size_t size; + + if (!priv->tdc_const) + return 0; + + size = nla_total_size(0); /* nest IFLA_CAN_TDC */ + if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) { + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */ + } + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */ + if (priv->tdc_const->tdcf_max) { + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */ + } + + if (can_tdc_is_enabled(priv)) { + if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL || + priv->do_get_auto_tdcv) + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */ + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */ + if (priv->tdc_const->tdcf_max) + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */ + } + + return size; +} + static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -257,10 +414,64 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(*priv->data_bitrate_const) * priv->data_bitrate_const_cnt); size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ + size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */ return size; } +static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct nlattr *nest; + struct can_priv *priv = netdev_priv(dev); + struct can_tdc *tdc = &priv->tdc; + const struct can_tdc_const *tdc_const = priv->tdc_const; + + if (!tdc_const) + return 0; + + nest = nla_nest_start(skb, IFLA_CAN_TDC); + if (!nest) + return -EMSGSIZE; + + if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL && + (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max))) + goto err_cancel; + if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max)) + goto err_cancel; + if (tdc_const->tdcf_max && + (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) || + nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max))) + goto err_cancel; + + if (can_tdc_is_enabled(priv)) { + u32 tdcv; + int err = -EINVAL; + + if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) { + tdcv = tdc->tdcv; + err = 0; + } else if (priv->do_get_auto_tdcv) { + err = priv->do_get_auto_tdcv(dev, &tdcv); + } + if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv)) + goto err_cancel; + if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco)) + goto err_cancel; + if (tdc_const->tdcf_max && + nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf)) + goto err_cancel; + } + + nla_nest_end(skb, nest); + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -318,7 +529,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) (nla_put(skb, IFLA_CAN_BITRATE_MAX, sizeof(priv->bitrate_max), - &priv->bitrate_max)) + 
&priv->bitrate_max)) || + + (can_tdc_fill_info(skb, dev)) ) return -EMSGSIZE; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index c68ad56628bd44..32006dbf5abd56 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1831,7 +1831,7 @@ static ssize_t termination_show(struct device *dev, return -ETIMEDOUT; } - return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); + return sysfs_emit(buf, "%u\n", mod->termination_enabled); } static ssize_t termination_store(struct device *dev, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 35892c1efef02f..de4ddf79ba9be7 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -293,10 +293,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return -EINVAL; base = of_iomap(np, 0); - if (!base) { - dev_err(&ofdev->dev, "couldn't ioremap\n"); - return err; - } + if (!base) + return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n"); irq = irq_of_parse_and_map(np, 0); if (!irq) { diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig index 56320a7f828b66..c66762ef631b08 100644 --- a/drivers/net/can/rcar/Kconfig +++ b/drivers/net/can/rcar/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config CAN_RCAR tristate "Renesas R-Car and RZ/G CAN controller" - depends on ARCH_RENESAS || ARM || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST help Say Y here if you want to use CAN controller found on Renesas R-Car or RZ/G SoCs. @@ -11,7 +11,7 @@ config CAN_RCAR config CAN_RCAR_CANFD tristate "Renesas R-Car CAN FD controller" - depends on ARCH_RENESAS || ARM || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST help Say Y here if you want to use CAN FD controller found on Renesas R-Car SoCs. The driver puts the controller in CAN FD only diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index af042aa55f59e2..4f0cae29f4d8b2 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -428,7 +428,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv) es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, &priv->can.data_bittiming); - if (priv->can.tdc.tdco) { + if (can_tdc_is_enabled(&priv->can)) { tx_conf_msg.tdc_enabled = 1; tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); @@ -505,8 +505,11 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = { * Register" from Microchip. */ static const struct can_tdc_const es58x_tdc_const = { + .tdcv_min = 0, .tdcv_max = 0, /* Manual mode not supported. 
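	 * (Annotation, not part of the patch: tdcv_max == 0 means the
	 * controller can only measure TDCV by itself, which is why the
	 * ctrlmode_supported update further down adds CAN_CTRLMODE_TDC_AUTO
	 * but not CAN_CTRLMODE_TDC_MANUAL.)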
*/ + .tdco_min = 0, .tdco_max = 127, + .tdcf_min = 0, .tdcf_max = 127 }; @@ -523,7 +526,7 @@ const struct es58x_parameters es58x_fd_param = { .clock = {.freq = 80 * CAN_MHZ}, .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | - CAN_CTRLMODE_CC_LEN8_DLC, + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO, .tx_start_of_frame = 0xCEFA, /* FACE in little endian */ .rx_start_of_frame = 0xFECA, /* CAFE in little endian */ .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN, diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 5e892bef46b006..1b400de00f5175 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -352,7 +352,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } else { /* echo_id == hf->echo_id */ if (hf->echo_id >= GS_MAX_TX_URBS) { netdev_err(netdev, - "Unexpected out of range echo id %d\n", + "Unexpected out of range echo id %u\n", hf->echo_id); goto resubmit_urb; } @@ -365,7 +365,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* bad devices send bad echo_ids. */ if (!txc) { netdev_err(netdev, - "Unexpected unused echo id %d\n", + "Unexpected unused echo id %u\n", hf->echo_id); goto resubmit_urb; } @@ -458,7 +458,7 @@ static void gs_usb_xmit_callback(struct urb *urb) struct net_device *netdev = dev->netdev; if (urb->status) - netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id); + netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id); usb_free_coherent(urb->dev, urb->transfer_buffer_length, @@ -501,7 +501,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, idx = txc->echo_id; if (idx >= GS_MAX_TX_URBS) { - netdev_err(netdev, "Invalid tx context %d\n", idx); + netdev_err(netdev, "Invalid tx context %u\n", idx); goto badidx; } @@ -964,11 +964,11 @@ static int gs_usb_probe(struct usb_interface *intf, } icount = dconf->icount + 1; - dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); + dev_info(&intf->dev, "Configuring for %u interfaces\n", icount); if (icount > GS_MAX_INTF) { dev_err(&intf->dev, - "Driver cannot handle more that %d CAN interfaces\n", + "Driver cannot handle more that %u CAN interfaces\n", GS_MAX_INTF); kfree(dconf); return -EINVAL; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index e8f43ed90b7225..6107fef9f4a035 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -205,6 +205,19 @@ int peak_usb_netif_rx(struct sk_buff *skb, return netif_rx(skb); } +/* post received skb with native 64-bit hw timestamp */ +int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high) +{ + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + u64 ns_ts; + + ns_ts = (u64)ts_high << 32 | ts_low; + ns_ts *= NSEC_PER_USEC; + hwts->hwtstamp = ns_to_ktime(ns_ts); + + return netif_rx(skb); +} + /* * callback for bulk Rx urb */ diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index b00a4811bf61fd..daa19f57e7422a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -143,6 +143,7 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv); int peak_usb_netif_rx(struct sk_buff *skb, struct peak_time_ref *time_ref, u32 ts_low); +int peak_usb_netif_rx_64(struct sk_buff *skb, 
u32 ts_low, u32 ts_high); void peak_usb_async_complete(struct urb *urb); void peak_usb_restart_complete(struct peak_usb_device *dev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 09029a3bad1ac3..6bd12549f10142 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -515,7 +515,8 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, netdev->stats.rx_packets++; netdev->stats.rx_bytes += cfd->len; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); + peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low), + le32_to_cpu(rm->ts_high)); return 0; } @@ -579,7 +580,8 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, netdev->stats.rx_packets++; netdev->stats.rx_bytes += cf->len; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); + peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low), + le32_to_cpu(sm->ts_high)); return 0; } @@ -629,7 +631,8 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low)); + peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low), + le32_to_cpu(ov->ts_high)); netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 3b883e607d8baf..e2b15d29d15eb5 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -516,8 +516,7 @@ static int xcan_chip_start(struct net_device *ndev) * @ndev: Pointer to net_device structure * @mode: Tells the mode of the driver * - * This check the drivers state and calls the - * the corresponding modes to set. + * This check the drivers state and calls the corresponding modes to set. * * Return: 0 on success and failure value on error */ @@ -982,7 +981,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) * @isr: interrupt status register value * * This is the CAN error interrupt and it will - * check the the type of error and forward the error + * check the type of error and forward the error * frame to upper layers. 
*/ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) @@ -1844,11 +1843,9 @@ static int xcan_probe(struct platform_device *pdev) static int xcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct xcan_priv *priv = netdev_priv(ndev); unregister_candev(ndev); pm_runtime_disable(&pdev->dev); - netif_napi_del(&priv->napi); free_candev(ndev); return 0; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index a5f1aa911fe2a3..7b1457a6e327b0 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -70,6 +70,7 @@ config NET_DSA_QCA8K config NET_DSA_REALTEK_SMI tristate "Realtek SMI Ethernet switch family support" select NET_DSA_TAG_RTL4_A + select NET_DSA_TAG_RTL8_4 select FIXED_PHY select IRQ_DOMAIN select REALTEK_PHY diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index f3598c0409945f..8da1569a34e6ed 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o -realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o +realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 604f541126654f..af4761968733cf 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, return; /* Enable flow control on BCM5301x's CPU port */ - if (is5301x(dev) && port == dev->cpu_port) + if (is5301x(dev) && dsa_is_cpu_port(ds, port)) tx_pause = rx_pause = true; if (phydev->pause) { @@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, return; } } - } else if (is5301x(dev)) { - if (port != dev->cpu_port) { - b53_force_port_config(dev, dev->cpu_port, 2000, - DUPLEX_FULL, true, true); - b53_force_link(dev, dev->cpu_port, 1); - } } /* Re-negotiate EEE if it was enabled already */ @@ -1349,10 +1343,8 @@ void b53_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 100baseT_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); phylink_helper_basex_speed(state); } @@ -1550,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_vlan_del); -/* Address Resolution Logic routines */ +/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. 
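 * (Annotation, not part of the patch: the new mutex covers the whole
 * multi-step sequence each ARL operation needs -- program the entry, kick
 * the control register, poll for completion, read the result back.
 * b53_fdb_add/del, b53_mdb_add/del and b53_fdb_dump below now hold it
 * around their complete sequences so that concurrent switchdev operations
 * cannot interleave their register accesses.)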
*/ static int b53_arl_op_wait(struct b53_device *dev) { unsigned int timeout = 10; @@ -1715,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; + int ret; /* 5325 and 5365 require some more massaging, but could * be supported eventually @@ -1722,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port, if (is5325(priv) || is5365(priv)) return -EOPNOTSUPP; - return b53_arl_op(priv, 0, port, addr, vid, true); + mutex_lock(&priv->arl_mutex); + ret = b53_arl_op(priv, 0, port, addr, vid, true); + mutex_unlock(&priv->arl_mutex); + + return ret; } EXPORT_SYMBOL(b53_fdb_add); @@ -1730,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; + int ret; + + mutex_lock(&priv->arl_mutex); + ret = b53_arl_op(priv, 0, port, addr, vid, false); + mutex_unlock(&priv->arl_mutex); - return b53_arl_op(priv, 0, port, addr, vid, false); + return ret; } EXPORT_SYMBOL(b53_fdb_del); @@ -1788,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, int ret; u8 reg; + mutex_lock(&priv->arl_mutex); + /* Start search operation */ reg = ARL_SRCH_STDN; b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); @@ -1795,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, do { ret = b53_arl_search_wait(priv); if (ret) - return ret; + break; b53_arl_search_rd(priv, 0, &results[0]); ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) - return ret; + break; if (priv->num_arl_bins > 2) { b53_arl_search_rd(priv, 1, &results[1]); ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) - return ret; + break; if (!results[0].is_valid && !results[1].is_valid) break; @@ -1814,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, } while (count++ < b53_max_arl_entries(priv) / 2); + mutex_unlock(&priv->arl_mutex); + return 0; } EXPORT_SYMBOL(b53_fdb_dump); @@ -1822,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb) { struct b53_device *priv = ds->priv; + int ret; /* 5325 and 5365 require some more massaging, but could * be supported eventually @@ -1829,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port, if (is5325(priv) || is5365(priv)) return -EOPNOTSUPP; - return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); + mutex_lock(&priv->arl_mutex); + ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); + mutex_unlock(&priv->arl_mutex); + + return ret; } EXPORT_SYMBOL(b53_mdb_add); @@ -1839,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port, struct b53_device *priv = ds->priv; int ret; + mutex_lock(&priv->arl_mutex); ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); + mutex_unlock(&priv->arl_mutex); if (ret) dev_err(ds->dev, "failed to delete MDB entry\n"); @@ -2302,33 +2315,30 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5325_DEVICE_ID, .dev_name = "BCM5325", .vlans = 16, - .enabled_ports = 0x1f, + .enabled_ports = 0x3f, .arl_bins = 2, .arl_buckets = 1024, .imp_port = 5, - .cpu_port = B53_CPU_PORT_25, .duplex_reg = B53_DUPLEX_STAT_FE, }, { .chip_id = BCM5365_DEVICE_ID, .dev_name = "BCM5365", .vlans = 256, - .enabled_ports = 0x1f, + .enabled_ports = 0x3f, .arl_bins = 2, .arl_buckets = 1024, .imp_port = 5, - .cpu_port = B53_CPU_PORT_25, .duplex_reg = B53_DUPLEX_STAT_FE, }, { .chip_id = BCM5389_DEVICE_ID, .dev_name = "BCM5389", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, 
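/* Annotation, not part of the patch: the enabled_ports masks in this table
 * are widened to include the IMP/CPU port itself, and for some chips other
 * previously missing ports (for example 0x1f -> 0x11f adds BIT(8), and
 * 0x1f -> 0x3f adds BIT(5) on BCM5325/5365), because the per-chip cpu_port
 * field is removed and the old "dev->enabled_ports |= BIT(dev->cpu_port)"
 * fixup in b53_switch_init() goes away with it.
 */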
.arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2338,11 +2348,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5395_DEVICE_ID, .dev_name = "BCM5395", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2352,11 +2361,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5397_DEVICE_ID, .dev_name = "BCM5397", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_9798, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2366,11 +2374,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5398_DEVICE_ID, .dev_name = "BCM5398", .vlans = 4096, - .enabled_ports = 0x7f, + .enabled_ports = 0x17f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_9798, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2380,12 +2387,11 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53115_DEVICE_ID, .dev_name = "BCM53115", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .vta_regs = B53_VTA_REGS, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, @@ -2394,11 +2400,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53125_DEVICE_ID, .dev_name = "BCM53125", .vlans = 4096, - .enabled_ports = 0xff, + .enabled_ports = 0x1ff, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2412,7 +2417,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2426,7 +2430,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_63XX, .duplex_reg = B53_DUPLEX_STAT_63XX, .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, @@ -2436,11 +2439,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53010_DEVICE_ID, .dev_name = "BCM53010", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2454,7 +2456,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2468,7 +2469,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, 
.jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2478,11 +2478,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53018_DEVICE_ID, .dev_name = "BCM53018", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2492,11 +2491,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53019_DEVICE_ID, .dev_name = "BCM53019", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2510,7 +2508,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2524,7 +2521,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2539,7 +2535,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 256, .imp_port = 8, - .cpu_port = 8, /* TODO: ports 4, 5, 8 */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2553,7 +2548,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2567,7 +2561,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 256, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2593,7 +2586,6 @@ static int b53_switch_init(struct b53_device *dev) dev->vta_regs[2] = chip->vta_regs[2]; dev->jumbo_pm_reg = chip->jumbo_pm_reg; dev->imp_port = chip->imp_port; - dev->cpu_port = chip->cpu_port; dev->num_vlans = chip->vlans; dev->num_arl_bins = chip->arl_bins; dev->num_arl_buckets = chip->arl_buckets; @@ -2625,16 +2617,8 @@ static int b53_switch_init(struct b53_device *dev) break; #endif } - } else if (dev->chip_id == BCM53115_DEVICE_ID) { - u64 strap_value; - - b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); - /* use second IMP port if GMII is enabled */ - if (strap_value & SV_GMII_CTRL_115) - dev->cpu_port = 5; } - dev->enabled_ports |= BIT(dev->cpu_port); dev->num_ports = fls(dev->enabled_ports); dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); @@ -2705,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base, mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); + mutex_init(&dev->arl_mutex); return dev; } diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 959a52d41f0a17..579da74ada645d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -107,6 +107,7 @@ struct b53_device { struct mutex reg_mutex; struct mutex stats_mutex; + struct mutex arl_mutex; const struct b53_io_ops *ops; /* chip specific data */ @@ -124,7 +125,6 @@ struct b53_device { /* used 
ports mask */ u16 enabled_ports; unsigned int imp_port; - unsigned int cpu_port; /* connect specific data */ u8 current_page; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 7578a5c38df597..13aa43b5cffd90 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) if (priv->int_phy_mask & BIT(port)) return priv->hw_params.gphy_rev; else - return 0; + return PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, @@ -683,7 +685,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, state->interface != PHY_INTERFACE_MODE_GMII && state->interface != PHY_INTERFACE_MODE_INTERNAL && state->interface != PHY_INTERFACE_MODE_MOCA) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); if (port != core_readl(priv, CORE_IMP0_PRT_ID)) dev_err(ds->dev, "Unsupported interface: %d for port %d\n", @@ -711,10 +713,8 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 354655f9ed0036..4e0b53d94b5250 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1403,10 +1403,8 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, else phylink_set(mask, 1000baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static int diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index dbd4486a173ff2..7056d98d8177b3 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -276,6 +276,7 @@ struct gswip_priv { int num_gphy_fw; struct gswip_gphy_fw *gphy_fw; u32 port_vlan_filter; + struct mutex pce_table_lock; }; struct gswip_pce_table_entry { @@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; + mutex_lock(&priv->pce_table_lock); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, GSWIP_PCE_TBL_CTRL_BAS); - if (err) + if (err) { + mutex_unlock(&priv->pce_table_lock); return err; + } gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | @@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, GSWIP_PCE_TBL_CTRL_BAS); - if (err) + if (err) { + mutex_unlock(&priv->pce_table_lock); return err; + } for (i = 0; i < ARRAY_SIZE(tbl->key); i++) tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); @@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; + mutex_unlock(&priv->pce_table_lock); + return 0; } @@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv, u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; + mutex_lock(&priv->pce_table_lock); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, GSWIP_PCE_TBL_CTRL_BAS); - if (err) + if (err) { + mutex_unlock(&priv->pce_table_lock); return err; + } gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | @@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv, crtl |= GSWIP_PCE_TBL_CTRL_BAS; gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); - return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + + mutex_unlock(&priv->pce_table_lock); + + return err; } /* Add the LAN port into a bridge with the CPU port by @@ -1447,10 +1464,8 @@ static void gswip_phylink_set_capab(unsigned long *supported, phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, @@ -1478,7 +1493,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, goto unsupported; break; default: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported port: %i\n", port); return; } @@ -1488,7 +1503,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, return; unsupported: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", phy_modes(state->interface), port); } @@ -1518,7 +1533,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, goto unsupported; break; default: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported port: %i\n", port); return; } @@ -1528,7 +1543,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, return; unsupported: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", phy_modes(state->interface), port); 
} @@ -2106,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev) priv->ds->priv = priv; priv->ds->ops = priv->hw_info->ops; priv->dev = dev; + mutex_init(&priv->pce_table_lock); version = gswip_switch_r(priv, GSWIP_VERSION); np = dev->of_node; diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index c5142f86a3c755..43fc3087aeb3ea 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1542,15 +1542,13 @@ static void ksz8_validate(struct dsa_switch *ds, int port, phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); return; unsupported: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported interface: %s, port: %d\n", phy_modes(state->interface), port); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dadcae93c9b53..14c678a9e41b35 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -674,9 +674,8 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port, if (chip->info->ops->phylink_validate) chip->info->ops->phylink_validate(chip, port, mask, state); - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); /* We can only operate at 2500BaseX or 1000BaseX. If requested * to advertise both, only report advertising at 2500BaseX. 
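For reference, the linkmode_and()/linkmode_zero() calls introduced in the hunks above (and in the ocelot, ar9331 and rtl8365mb hunks that follow) are thin inline wrappers around the bitmap helpers they replace, so the conversion is purely mechanical. A minimal sketch of the two helpers, following their definitions in include/linux/linkmode.h:

	static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
					const unsigned long *b)
	{
		bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static inline void linkmode_zero(unsigned long *addr)
	{
		bitmap_zero(addr, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

Both operate on the same __ETHTOOL_LINK_MODE_MASK_NBITS-sized bitmaps, so the replaced bitmap_and()/bitmap_zero() calls and the new helpers behave identically.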
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 341236dcbdb472..83808e7dbddaf5 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -958,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) switch_node = dev->of_node; ports_node = of_get_child_by_name(switch_node, "ports"); + if (!ports_node) + ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); if (!ports_node) { - dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); + dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); return -ENODEV; } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 11b42fd812e4a9..45c5ec7a83eafc 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -943,7 +943,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != ocelot_port->phy_mode) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -965,10 +965,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 2500baseX_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index de1d34a1f1e474..92eae63150eae3 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -999,7 +999,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != ocelot_port->phy_mode) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -1018,10 +1018,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 2500baseX_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index a6bfb6abc51a7d..da0d7e68643a9d 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port, goto unsupported; break; default: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported port: %i\n", port); return; } @@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); return; unsupported: - 
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported interface: %d, port: %d\n", state->interface, port); } diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a984f06f6f04f3..ea7f1277892219 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv) } static int -qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv) +qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) { - struct device_node *port_dn; - phy_interface_t mode; - struct dsa_port *dp; - u32 val; + u32 mask = 0; + int ret = 0; - /* CPU port is already checked */ - dp = dsa_to_port(priv->ds, 0); + /* SoC specific settings for ipq8064. + * If more device require this consider adding + * a dedicated binding. + */ + if (of_machine_is_compatible("qcom,ipq8064")) + mask |= QCA8K_MAC_PWR_RGMII0_1_8V; + + /* SoC specific settings for ipq8065 */ + if (of_machine_is_compatible("qcom,ipq8065")) + mask |= QCA8K_MAC_PWR_RGMII1_1_8V; + + if (mask) { + ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, + QCA8K_MAC_PWR_RGMII0_1_8V | + QCA8K_MAC_PWR_RGMII1_1_8V, + mask); + } + + return ret; +} - port_dn = dp->dn; +static int qca8k_find_cpu_port(struct dsa_switch *ds) +{ + struct qca8k_priv *priv = ds->priv; - /* Check if port 0 is set to the correct type */ - of_get_phy_mode(port_dn, &mode); - if (mode != PHY_INTERFACE_MODE_RGMII_ID && - mode != PHY_INTERFACE_MODE_RGMII_RXID && - mode != PHY_INTERFACE_MODE_RGMII_TXID) { + /* Find the connected cpu port. Valid port are 0 or 6 */ + if (dsa_is_cpu_port(ds, 0)) return 0; + + dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); + + if (dsa_is_cpu_port(ds, 6)) + return 6; + + return -EINVAL; +} + +static int +qca8k_setup_of_pws_reg(struct qca8k_priv *priv) +{ + struct device_node *node = priv->dev->of_node; + const struct qca8k_match_data *data; + u32 val = 0; + int ret; + + /* QCA8327 require to set to the correct mode. + * His bigger brother QCA8328 have the 172 pin layout. + * Should be applied by default but we set this just to make sure. 
+ */ + if (priv->switch_id == QCA8K_ID_QCA8327) { + data = of_device_get_match_data(priv->dev); + + /* Set the correct package of 148 pin for QCA8327 */ + if (data->reduced_package) + val |= QCA8327_PWS_PACKAGE148_EN; + + ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, + val); + if (ret) + return ret; } - switch (mode) { - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val)) - val = 2; - else - /* Switch regs accept value in ns, convert ps to ns */ - val = val / 1000; + if (of_property_read_bool(node, "qca,ignore-power-on-sel")) + val |= QCA8K_PWS_POWER_ON_SEL; - if (val > QCA8K_MAX_DELAY) { - dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); - val = 3; + if (of_property_read_bool(node, "qca,led-open-drain")) { + if (!(val & QCA8K_PWS_POWER_ON_SEL)) { + dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); + return -EINVAL; } - priv->rgmii_rx_delay = val; - /* Stop here if we need to check only for rx delay */ - if (mode != PHY_INTERFACE_MODE_RGMII_ID) - break; + val |= QCA8K_PWS_LED_OPEN_EN_CSR; + } - fallthrough; - case PHY_INTERFACE_MODE_RGMII_TXID: - if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val)) - val = 1; - else - /* Switch regs accept value in ns, convert ps to ns */ - val = val / 1000; + return qca8k_rmw(priv, QCA8K_REG_PWS, + QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, + val); +} - if (val > QCA8K_MAX_DELAY) { - dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); - val = 3; - } +static int +qca8k_parse_port_config(struct qca8k_priv *priv) +{ + int port, cpu_port_index = -1, ret; + struct device_node *port_dn; + phy_interface_t mode; + struct dsa_port *dp; + u32 delay; - priv->rgmii_tx_delay = val; - break; - default: - return 0; + /* We have 2 CPU port. 
Check them */ + for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { + /* Skip every other port */ + if (port != 0 && port != 6) + continue; + + dp = dsa_to_port(priv->ds, port); + port_dn = dp->dn; + cpu_port_index++; + + if (!of_device_is_available(port_dn)) + continue; + + ret = of_get_phy_mode(port_dn, &mode); + if (ret) + continue; + + switch (mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_SGMII: + delay = 0; + + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) + /* Switch regs accept value in ns, convert ps to ns */ + delay = delay / 1000; + else if (mode == PHY_INTERFACE_MODE_RGMII_ID || + mode == PHY_INTERFACE_MODE_RGMII_TXID) + delay = 1; + + if (delay > QCA8K_MAX_DELAY) { + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } + + priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; + + delay = 0; + + if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) + /* Switch regs accept value in ns, convert ps to ns */ + delay = delay / 1000; + else if (mode == PHY_INTERFACE_MODE_RGMII_ID || + mode == PHY_INTERFACE_MODE_RGMII_RXID) + delay = 2; + + if (delay > QCA8K_MAX_DELAY) { + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } + + priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; + + /* Skip sgmii parsing for rgmii* mode */ + if (mode == PHY_INTERFACE_MODE_RGMII || + mode == PHY_INTERFACE_MODE_RGMII_ID || + mode == PHY_INTERFACE_MODE_RGMII_TXID || + mode == PHY_INTERFACE_MODE_RGMII_RXID) + break; + + if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) + priv->ports_config.sgmii_tx_clk_falling_edge = true; + + if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) + priv->ports_config.sgmii_rx_clk_falling_edge = true; + + if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { + priv->ports_config.sgmii_enable_pll = true; + + if (priv->switch_id == QCA8K_ID_QCA8327) { + dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); + priv->ports_config.sgmii_enable_pll = false; + } + + if (priv->switch_revision < 2) + dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); + } + + break; + default: + continue; + } } return 0; @@ -954,15 +1075,20 @@ static int qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - int ret, i; + int cpu_port, ret, i; u32 mask; - /* Make sure that port 0 is the cpu port */ - if (!dsa_is_cpu_port(ds, 0)) { - dev_err(priv->dev, "port 0 is not the CPU port"); - return -EINVAL; + cpu_port = qca8k_find_cpu_port(ds); + if (cpu_port < 0) { + dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); + return cpu_port; } + /* Parse CPU port config to be later used in phy_link mac_config */ + ret = qca8k_parse_port_config(priv); + if (ret) + return ret; + mutex_init(&priv->reg_mutex); /* Start by setting up the register mapping */ @@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds) if (ret) return ret; - ret = qca8k_setup_of_rgmii_delay(priv); + ret = qca8k_setup_of_pws_reg(priv); + if (ret) + return ret; + + ret = qca8k_setup_mac_pwr_sel(priv); if (ret) return ret; @@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds) if (ret) dev_warn(priv->dev, "mib init failed"); - /* Enable QCA header mode on the cpu port */ - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); - if (ret) { - dev_err(priv->dev, "failed enabling QCA header mode"); - return ret; - } - - /* Disable forwarding by default on all ports */ + /* Initial setup of all ports */ for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* Disable forwarding by default on all ports */ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), QCA8K_PORT_LOOKUP_MEMBER, 0); if (ret) return ret; - } - /* Disable MAC by default on all ports */ - for (i = 1; i < QCA8K_NUM_PORTS; i++) - qca8k_port_set_status(priv, i, 0); + /* Enable QCA header mode on all cpu ports */ + if (dsa_is_cpu_port(ds, i)) { + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), + QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | + QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); + if (ret) { + dev_err(priv->dev, "failed enabling QCA header mode"); + return ret; + } + } + + /* Disable MAC by default on all user ports */ + if (dsa_is_user_port(ds, i)) + qca8k_port_set_status(priv, i, 0); + } - /* Forward all unknown frames to CPU port for Linux processing */ + /* Forward all unknown frames to CPU port for Linux processing + * Notice that in multi-cpu config only one port should be set + * for igmp, unknown, multicast and broadcast packet + */ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, - BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | - BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | - BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | - BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | + BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); if (ret) return ret; - /* Setup connection between CPU port & user ports */ + /* Setup connection between CPU port & user ports + * Configure specific switch configuration for ports + */ for (i = 0; i < QCA8K_NUM_PORTS; i++) { /* CPU port gets connected to all user ports of the switch */ if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_rmw(priv, 
QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); if (ret) return ret; @@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds) ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), QCA8K_PORT_LOOKUP_MEMBER, - BIT(QCA8K_CPU_PORT)); + BIT(cpu_port)); if (ret) return ret; @@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds) if (ret) return ret; } - } - /* The port 5 of the qca8337 have some problem in flood condition. The - * original legacy driver had some specific buffer and priority settings - * for the different port suggested by the QCA switch team. Add this - * missing settings to improve switch stability under load condition. - * This problem is limited to qca8337 and other qca8k switch are not affected. - */ - if (priv->switch_id == QCA8K_ID_QCA8337) { - for (i = 0; i < QCA8K_NUM_PORTS; i++) { + /* The port 5 of the qca8337 have some problem in flood condition. The + * original legacy driver had some specific buffer and priority settings + * for the different port suggested by the QCA switch team. Add this + * missing settings to improve switch stability under load condition. + * This problem is limited to qca8337 and other qca8k switch are not affected. + */ + if (priv->switch_id == QCA8K_ID_QCA8337) { switch (i) { /* The 2 CPU port and port 5 requires some different * priority than any other ports. @@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds) QCA8K_PORT_HOL_CTRL1_WRED_EN, mask); } + + /* Set initial MTU for every port. + * We have only have a general MTU setting. So track + * every port and set the max across all port. + */ + priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; } /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ @@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds) } /* Setup our port MTUs to match power on defaults */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) - priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); if (ret) dev_warn(priv->dev, "failed setting MTU settings"); @@ -1136,13 +1276,54 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static void +qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, + u32 reg) +{ + u32 delay, val = 0; + int ret; + + /* Delay can be declared in 3 different way. + * Mode to rgmii and internal-delay standard binding defined + * rgmii-id or rgmii-tx/rx phy mode set. + * The parse logic set a delay different than 0 only when one + * of the 3 different way is used. In all other case delay is + * not enabled. With ID or TX/RXID delay is enabled and set + * to the default and recommended value. + */ + if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { + delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; + + val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; + } + + if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { + delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; + + val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; + } + + /* Set RGMII delay based on the selected values */ + ret = qca8k_rmw(priv, reg, + QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | + QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | + QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, + val); + if (ret) + dev_err(priv->dev, "Failed to set internal delay for CPU port%d", + cpu_port_index == QCA8K_CPU_PORT0 ? 
0 : 6); +} + static void qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { struct qca8k_priv *priv = ds->priv; + int cpu_port_index, ret; u32 reg, val; - int ret; switch (port) { case 0: /* 1st CPU port */ @@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, return; reg = QCA8K_REG_PORT0_PAD_CTRL; + cpu_port_index = QCA8K_CPU_PORT0; break; case 1: case 2: @@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, return; reg = QCA8K_REG_PORT6_PAD_CTRL; + cpu_port_index = QCA8K_CPU_PORT6; break; default: dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); @@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: - /* RGMII mode means no delay so don't enable the delay */ - qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); - break; case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_RGMII_RXID: - /* RGMII_ID needs internal delay. This is enabled through - * PORT5_PAD_CTRL for all ports, rather than individual port - * registers + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); + + /* Configure rgmii delay */ + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + + /* QCA8337 requires to set rgmii rx delay for all ports. + * This is enabled through PORT5_PAD_CTRL for all ports, + * rather than individual port registers. */ - qca8k_write(priv, reg, - QCA8K_PORT_PAD_RGMII_EN | - QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) | - QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) | - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); - /* QCA8337 requires to set rgmii rx delay */ if (priv->switch_id == QCA8K_ID_QCA8337) qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); @@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, if (ret) return; - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | - QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; + val |= QCA8K_SGMII_EN_SD; + + if (priv->ports_config.sgmii_enable_pll) + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX; if (dsa_is_cpu_port(ds, port)) { /* CPU port, we're talking to the CPU MAC, be a PHY */ @@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, } qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and + * falling edge is set writing in the PORT0 PAD reg + */ + if (priv->switch_id == QCA8K_ID_QCA8327 || + priv->switch_id == QCA8K_ID_QCA8337) + reg = QCA8K_REG_PORT0_PAD_CTRL; + + val = 0; + + /* SGMII Clock phase configuration */ + if (priv->ports_config.sgmii_rx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; + + if (priv->ports_config.sgmii_tx_clk_falling_edge) + val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; + + if (val) + ret = qca8k_rmw(priv, reg, + QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, + val); + + /* From original code is reported port instability as SGMII also + * require delay set. Apply advised values here or take them from DT. 
+ */ + if (state->interface == PHY_INTERFACE_MODE_SGMII) + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", @@ -1522,10 +1732,15 @@ static int qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - int port_mask = BIT(QCA8K_CPU_PORT); + int port_mask, cpu_port; int i, ret; - for (i = 1; i < QCA8K_NUM_PORTS; i++) { + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + port_mask = BIT(cpu_port); + + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + if (dsa_is_cpu_port(ds, i)) + continue; if (dsa_to_port(ds, i)->bridge_dev != br) continue; /* Add this port to the portvlan mask of the other ports @@ -1551,9 +1766,13 @@ static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - int i; + int cpu_port, i; - for (i = 1; i < QCA8K_NUM_PORTS; i++) { + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + if (dsa_is_cpu_port(ds, i)) + continue; if (dsa_to_port(ds, i)->bridge_dev != br) continue; /* Remove this port to the portvlan mask of the other ports @@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) * this port */ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT)); + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); } static int @@ -1939,7 +2158,12 @@ static int qca8k_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, qca8k_suspend, qca8k_resume); -static const struct qca8k_match_data qca832x = { +static const struct qca8k_match_data qca8327 = { + .id = QCA8K_ID_QCA8327, + .reduced_package = true, +}; + +static const struct qca8k_match_data qca8328 = { .id = QCA8K_ID_QCA8327, }; @@ -1948,7 +2172,8 @@ static const struct qca8k_match_data qca833x = { }; static const struct of_device_id qca8k_of_match[] = { - { .compatible = "qca,qca8327", .data = &qca832x }, + { .compatible = "qca,qca8327", .data = &qca8327 }, + { .compatible = "qca,qca8328", .data = &qca8328 }, { .compatible = "qca,qca8334", .data = &qca833x }, { .compatible = "qca,qca8337", .data = &qca833x }, { /* sentinel */ }, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index ed3b05ad67454a..e10571a398c98a 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -13,6 +13,7 @@ #include #define QCA8K_NUM_PORTS 7 +#define QCA8K_NUM_CPU_PORTS 2 #define QCA8K_MAX_MTU 9000 #define PHY_ID_QCA8327 0x004dd034 @@ -24,8 +25,6 @@ #define QCA8K_NUM_FDB_RECORDS 2048 -#define QCA8K_CPU_PORT 0 - #define QCA8K_PORT_VID_DEF 1 /* Global control registers */ @@ -35,16 +34,26 @@ #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) #define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) #define QCA8K_REG_PORT0_PAD_CTRL 0x004 +#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) +#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) #define QCA8K_REG_PORT5_PAD_CTRL 0x008 #define QCA8K_REG_PORT6_PAD_CTRL 0x00c #define QCA8K_PORT_PAD_RGMII_EN BIT(26) +#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) #define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) +#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) #define QCA8K_MAX_DELAY 3 #define QCA8K_PORT_PAD_SGMII_EN BIT(7) 
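/* Illustration with assumed example values: how a devicetree delay property
 * ends up in the RGMII pad-control fields above. A CPU port node carrying
 * rx-internal-delay-ps = <2000> is parsed by qca8k_parse_port_config() into
 * 2000 / 1000 = 2 (the pad registers take nanoseconds), and
 * qca8k_mac_config_setup_internal_delay() then encodes it as
 *
 *	val = QCA8K_PORT_PAD_RGMII_RX_DELAY(2)	// 2 << 20, inside GENMASK(21, 20)
 *	    | QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;	// BIT(24)
 *
 * Delays above QCA8K_MAX_DELAY are clamped to 3 ns with an error message, and
 * a delay of 0 leaves the corresponding *_DELAY_EN bit cleared.
 */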
#define QCA8K_REG_PWS 0x010 +#define QCA8K_PWS_POWER_ON_SEL BIT(31) +/* This reg is only valid for QCA832x and toggle the package + * type from 176 pin (by default) to 148 pin used on QCA8327 + */ +#define QCA8327_PWS_PACKAGE148_EN BIT(30) +#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24) #define QCA8K_PWS_SERDES_AEN_DIS BIT(7) #define QCA8K_REG_MODULE_EN 0x030 #define QCA8K_MODULE_EN_MIB BIT(0) @@ -100,6 +109,11 @@ #define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) #define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) +/* MAC_PWR_SEL registers */ +#define QCA8K_REG_MAC_PWR_SEL 0x0e4 +#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18) +#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19) + /* EEE control registers */ #define QCA8K_REG_EEE_CTRL 0x100 #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) @@ -248,14 +262,27 @@ struct ar8xxx_port_status { struct qca8k_match_data { u8 id; + bool reduced_package; +}; + +enum { + QCA8K_CPU_PORT0, + QCA8K_CPU_PORT6, +}; + +struct qca8k_ports_config { + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; + bool sgmii_enable_pll; + u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ }; struct qca8k_priv { u8 switch_id; u8 switch_revision; - u8 rgmii_tx_delay; - u8 rgmii_rx_delay; bool legacy_phy_port_mapping; + struct qca8k_ports_config ports_config; struct regmap *regmap; struct mii_bus *bus; struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c index 2fcfd917b876b5..c66ebd0ee21711 100644 --- a/drivers/net/dsa/realtek-smi-core.c +++ b/drivers/net/dsa/realtek-smi-core.c @@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = { .compatible = "realtek,rtl8366s", .data = NULL, }, + { + .compatible = "realtek,rtl8365mb", + .data = &rtl8365mb_variant, + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, realtek_smi_of_match); diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h index fcf465f7f92266..5bfa53e2480ae8 100644 --- a/drivers/net/dsa/realtek-smi-core.h +++ b/drivers/net/dsa/realtek-smi-core.h @@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); int rtl8366_reset_vlan(struct realtek_smi *smi); -int rtl8366_init_vlan(struct realtek_smi *smi); -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack); int rtl8366_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack); @@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset); void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); extern const struct realtek_smi_variant rtl8366rb_variant; +extern const struct realtek_smi_variant rtl8365mb_variant; #endif /* _REALTEK_SMI_H */ diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c new file mode 100644 index 00000000000000..baaae97283c5e2 --- /dev/null +++ b/drivers/net/dsa/rtl8365mb.c @@ -0,0 +1,1982 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch. + * + * Copyright (C) 2021 Alvin Ć ipraga + * Copyright (C) 2021 Michael Rasmussen + * + * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. 
It includes 4 + * integrated PHYs for the user facing ports, and an extension interface which + * can be connected to the CPU - or another PHY - via either MII, RMII, or + * RGMII. The switch is configured via the Realtek Simple Management Interface + * (SMI), which uses the MDIO/MDC lines. + * + * Below is a simplified block diagram of the chip and its relevant interfaces. + * + * .-----------------------------------. + * | | + * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC | + * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC | + * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC | + * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC | + * | | + * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension | + * | interface 1 GMAC 1 | + * | | + * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ | + * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ | + * | ~RTL8365MB ~~~ | + * | ~GXXXC TAIWAN~ | + * GPIO <--------------> Reset ~~~~~~~~~~~~~~ | + * | | + * Interrupt <----------> Link UP/DOWN events | + * controller | | + * '-----------------------------------' + * + * The driver uses DSA to integrate the 4 user and 1 extension ports into the + * kernel. Netdevices are created for the user ports, as are PHY devices for + * their integrated PHYs. The device tree firmware should also specify the link + * partner of the extension port - either via a fixed-link or other phy-handle. + * See the device tree bindings for more detailed information. Note that the + * driver has only been tested with a fixed-link, but in principle it should not + * matter. + * + * NOTE: Currently, only the RGMII interface is implemented in this driver. + * + * The interrupt line is asserted on link UP/DOWN events. The driver creates a + * custom irqchip to handle this interrupt and demultiplex the events by reading + * the status registers via SMI. Interrupts are then propagated to the relevant + * PHY device. + * + * The EEPROM contains initial register values which the chip will read over I2C + * upon hardware reset. It is also possible to omit the EEPROM. In both cases, + * the driver will manually reprogram some registers using jam tables to reach + * an initial state defined by the vendor driver. + * + * This Linux driver is written based on an OS-agnostic vendor driver from + * Realtek. The reference GPL-licensed sources can be found in the OpenWrt + * source tree under the name rtl8367c. The vendor driver claims to support a + * number of similar switch controllers from Realtek, but the only hardware we + * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under + * the name RTL8367C. Although one wishes that the 'C' stood for some kind of + * common hardware revision, there exist examples of chips with the suffix -VC + * which are explicitly not supported by the rtl8367c driver and which instead + * require the rtl8367d vendor driver. With all this uncertainty, the driver has + * been modestly named rtl8365mb. Future implementors may wish to rename things + * accordingly. + * + * In the same family of chips, some carry up to 8 user ports and up to 2 + * extension ports. Where possible this driver tries to make things generic, but + * more work must be done to support these configurations. 
According to + * documentation from Realtek, the family should include the following chips: + * + * - RTL8363NB + * - RTL8363NB-VB + * - RTL8363SC + * - RTL8363SC-VB + * - RTL8364NB + * - RTL8364NB-VB + * - RTL8365MB-VC + * - RTL8366SC + * - RTL8367RB-VB + * - RTL8367SB + * - RTL8367S + * - RTL8370MB + * - RTL8310SR + * + * Some of the register logic for these additional chips has been skipped over + * while implementing this driver. It is therefore not possible to assume that + * things will work out-of-the-box for other chips, and a careful review of the + * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be + * one of the simpler chips. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "realtek-smi-core.h" + +/* Chip-specific data and limits */ +#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367 +#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6 +#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112 + +/* Family-specific data and limits */ +#define RTL8365MB_NUM_PHYREGS 32 +#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1) +#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1) + +/* Chip identification registers */ +#define RTL8365MB_CHIP_ID_REG 0x1300 + +#define RTL8365MB_CHIP_VER_REG 0x1301 + +#define RTL8365MB_MAGIC_REG 0x13C2 +#define RTL8365MB_MAGIC_VALUE 0x0249 + +/* Chip reset register */ +#define RTL8365MB_CHIP_RESET_REG 0x1322 +#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002 +#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001 + +/* Interrupt polarity register */ +#define RTL8365MB_INTR_POLARITY_REG 0x1100 +#define RTL8365MB_INTR_POLARITY_MASK 0x0001 +#define RTL8365MB_INTR_POLARITY_HIGH 0 +#define RTL8365MB_INTR_POLARITY_LOW 1 + +/* Interrupt control/status register - enable/check specific interrupt types */ +#define RTL8365MB_INTR_CTRL_REG 0x1101 +#define RTL8365MB_INTR_STATUS_REG 0x1102 +#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000 +#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800 +#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200 +#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100 +#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080 +#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040 +#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020 +#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010 +#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008 +#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004 +#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002 +#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001 +#define RTL8365MB_INTR_ALL_MASK \ + (RTL8365MB_INTR_SLIENT_START_2_MASK | \ + RTL8365MB_INTR_SLIENT_START_MASK | \ + RTL8365MB_INTR_ACL_ACTION_MASK | \ + RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \ + RTL8365MB_INTR_INTERRUPT_8051_MASK | \ + RTL8365MB_INTR_LOOP_DETECTION_MASK | \ + RTL8365MB_INTR_GREEN_TIMER_MASK | \ + RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \ + RTL8365MB_INTR_SPEED_CHANGE_MASK | \ + RTL8365MB_INTR_LEARN_OVER_MASK | \ + RTL8365MB_INTR_METER_EXCEEDED_MASK | \ + RTL8365MB_INTR_LINK_CHANGE_MASK) + +/* Per-port interrupt type status registers */ +#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106 +#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF + +#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107 +#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF + +/* PHY indirect access registers */ +#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00 +#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002 +#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0 +#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1 +#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001 +#define 
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1 +#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01 +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02 +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0) +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5) +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8) +#define RTL8365MB_PHY_BASE 0x2000 +#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03 +#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04 + +/* PHY OCP address prefix register */ +#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15 +#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0 +#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00 + +/* The PHY OCP addresses of PHY registers 0~31 start here */ +#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400 + +/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */ +#define RTL8365MB_EXT_PORT_MODE_DISABLE 0 +#define RTL8365MB_EXT_PORT_MODE_RGMII 1 +#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2 +#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3 +#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4 +#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5 +#define RTL8365MB_EXT_PORT_MODE_GMII 6 +#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7 +#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8 +#define RTL8365MB_EXT_PORT_MODE_SGMII 9 +#define RTL8365MB_EXT_PORT_MODE_HSGMII 10 +#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11 +#define RTL8365MB_EXT_PORT_MODE_1000X 12 +#define RTL8365MB_EXT_PORT_MODE_100FX 13 + +/* EXT port interface mode configuration registers 0~1 */ +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \ + (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \ + ((_extport) >> 1) * (0x13C3 - 0x1305)) +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \ + (0xF << (((_extport) % 2))) +#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \ + (((_extport) % 2) * 4) + +/* EXT port RGMII TX/RX delay configuration registers 1~2 */ +#define RTL8365MB_EXT_RGMXF_REG1 0x1307 +#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 +#define RTL8365MB_EXT_RGMXF_REG(_extport) \ + (RTL8365MB_EXT_RGMXF_REG1 + \ + (((_extport) >> 1) * (0x13C5 - 0x1307))) +#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007 +#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008 + +/* External port speed values - used in DIGITAL_INTERFACE_FORCE */ +#define RTL8365MB_PORT_SPEED_10M 0 +#define RTL8365MB_PORT_SPEED_100M 1 +#define RTL8365MB_PORT_SPEED_1000M 2 + +/* EXT port force configuration registers 0~2 */ +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \ + (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \ + ((_extport) & 0x1) + \ + ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310))) +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004 +#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003 + +/* CPU port mask register - controls which ports are treated as CPU ports */ +#define RTL8365MB_CPU_PORT_MASK_REG 0x1219 +#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF + 
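/* Worked example of the address arithmetic above (illustration only): the
 * RTL8365MB-VC uses extension port 1, for which
 *
 *	RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(1) = 0x1305 + (1 >> 1) * 0xBE = 0x1305
 *	RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(1)  = 0x1310 + (1 & 0x1)       = 0x1311
 *
 * while a hypothetical second extension port on a larger family member would
 * land in the second register bank:
 *
 *	RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(2) = 0x1305 + 1 * (0x13C3 - 0x1305)     = 0x13C3
 *	RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(2)  = 0x1310 + 0 + 1 * (0x13C4 - 0x1310) = 0x13C4
 *
 * The 4-bit interface mode itself is placed at bit offset
 * RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport), i.e. bit 0 or
 * bit 4 depending on whether the extension port index is even or odd.
 */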
+/* CPU control register */ +#define RTL8365MB_CPU_CTRL_REG 0x121A +#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400 +#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200 +#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080 +#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040 +#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038 +#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006 +#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001 + +/* Maximum packet length register */ +#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C +#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF + +/* Port learning limit registers */ +#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20 +#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \ + (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport)) + +/* Port isolation (forwarding mask) registers */ +#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2 +#define RTL8365MB_PORT_ISOLATION_REG(_physport) \ + (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport)) +#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF + +/* MSTP port state registers - indexed by tree instancrSTI (tree ine */ +#define RTL8365MB_MSTI_CTRL_BASE 0x0A00 +#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \ + (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3)) +#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1) +#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \ + (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport))) + +/* MIB counter value registers */ +#define RTL8365MB_MIB_COUNTER_BASE 0x1000 +#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x)) + +/* MIB counter address register */ +#define RTL8365MB_MIB_ADDRESS_REG 0x1004 +#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C +#define RTL8365MB_MIB_ADDRESS(_p, _x) \ + (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2) + +#define RTL8365MB_MIB_CTRL0_REG 0x1005 +#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002 +#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001 + +/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed + * to block. On the other hand, accessing MIB counters absolutely requires us to + * block. The solution is thus to schedule work which polls the MIB counters + * asynchronously and updates some private data, which the callback can then + * fetch atomically. Three seconds should be a good enough polling interval. 
+ */ +#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ) + +enum rtl8365mb_mib_counter_index { + RTL8365MB_MIB_ifInOctets, + RTL8365MB_MIB_dot3StatsFCSErrors, + RTL8365MB_MIB_dot3StatsSymbolErrors, + RTL8365MB_MIB_dot3InPauseFrames, + RTL8365MB_MIB_dot3ControlInUnknownOpcodes, + RTL8365MB_MIB_etherStatsFragments, + RTL8365MB_MIB_etherStatsJabbers, + RTL8365MB_MIB_ifInUcastPkts, + RTL8365MB_MIB_etherStatsDropEvents, + RTL8365MB_MIB_ifInMulticastPkts, + RTL8365MB_MIB_ifInBroadcastPkts, + RTL8365MB_MIB_inMldChecksumError, + RTL8365MB_MIB_inIgmpChecksumError, + RTL8365MB_MIB_inMldSpecificQuery, + RTL8365MB_MIB_inMldGeneralQuery, + RTL8365MB_MIB_inIgmpSpecificQuery, + RTL8365MB_MIB_inIgmpGeneralQuery, + RTL8365MB_MIB_inMldLeaves, + RTL8365MB_MIB_inIgmpLeaves, + RTL8365MB_MIB_etherStatsOctets, + RTL8365MB_MIB_etherStatsUnderSizePkts, + RTL8365MB_MIB_etherOversizeStats, + RTL8365MB_MIB_etherStatsPkts64Octets, + RTL8365MB_MIB_etherStatsPkts65to127Octets, + RTL8365MB_MIB_etherStatsPkts128to255Octets, + RTL8365MB_MIB_etherStatsPkts256to511Octets, + RTL8365MB_MIB_etherStatsPkts512to1023Octets, + RTL8365MB_MIB_etherStatsPkts1024to1518Octets, + RTL8365MB_MIB_ifOutOctets, + RTL8365MB_MIB_dot3StatsSingleCollisionFrames, + RTL8365MB_MIB_dot3StatsMultipleCollisionFrames, + RTL8365MB_MIB_dot3StatsDeferredTransmissions, + RTL8365MB_MIB_dot3StatsLateCollisions, + RTL8365MB_MIB_etherStatsCollisions, + RTL8365MB_MIB_dot3StatsExcessiveCollisions, + RTL8365MB_MIB_dot3OutPauseFrames, + RTL8365MB_MIB_ifOutDiscards, + RTL8365MB_MIB_dot1dTpPortInDiscards, + RTL8365MB_MIB_ifOutUcastPkts, + RTL8365MB_MIB_ifOutMulticastPkts, + RTL8365MB_MIB_ifOutBroadcastPkts, + RTL8365MB_MIB_outOampduPkts, + RTL8365MB_MIB_inOampduPkts, + RTL8365MB_MIB_inIgmpJoinsSuccess, + RTL8365MB_MIB_inIgmpJoinsFail, + RTL8365MB_MIB_inMldJoinsSuccess, + RTL8365MB_MIB_inMldJoinsFail, + RTL8365MB_MIB_inReportSuppressionDrop, + RTL8365MB_MIB_inLeaveSuppressionDrop, + RTL8365MB_MIB_outIgmpReports, + RTL8365MB_MIB_outIgmpLeaves, + RTL8365MB_MIB_outIgmpGeneralQuery, + RTL8365MB_MIB_outIgmpSpecificQuery, + RTL8365MB_MIB_outMldReports, + RTL8365MB_MIB_outMldLeaves, + RTL8365MB_MIB_outMldGeneralQuery, + RTL8365MB_MIB_outMldSpecificQuery, + RTL8365MB_MIB_inKnownMulticastPkts, + RTL8365MB_MIB_END, +}; + +struct rtl8365mb_mib_counter { + u32 offset; + u32 length; + const char *name; +}; + +#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \ + [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name } + +static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = { + RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets), + RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors), + RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors), + RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames), + RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes), + RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments), + RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers), + RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts), + RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents), + RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts), + RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts), + RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError), + RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError), + RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery), + RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery), + RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery), + RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery), + RTL8365MB_MAKE_MIB_COUNTER(36, 2, 
inMldLeaves), + RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves), + RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets), + RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts), + RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats), + RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets), + RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets), + RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets), + RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets), + RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets), + RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets), + RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets), + RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames), + RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames), + RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions), + RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions), + RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions), + RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions), + RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames), + RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards), + RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards), + RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts), + RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts), + RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts), + RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts), + RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts), + RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess), + RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail), + RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess), + RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail), + RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop), + RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop), + RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports), + RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves), + RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery), + RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery), + RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports), + RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves), + RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery), + RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery), + RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts), +}; + +static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END); + +struct rtl8365mb_jam_tbl_entry { + u16 reg; + u16 val; +}; + +/* Lifted from the vendor driver sources */ +static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = { + { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 }, + { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA }, + { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 }, + { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F }, + { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 }, + { 0x13F0, 0x0000 }, +}; + +static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = { + { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 }, + { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E }, + { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 }, + { 0x1D32, 0x0002 }, +}; + +enum rtl8365mb_stp_state { + RTL8365MB_STP_STATE_DISABLED = 0, + RTL8365MB_STP_STATE_BLOCKING = 1, + RTL8365MB_STP_STATE_LEARNING = 2, + RTL8365MB_STP_STATE_FORWARDING = 3, +}; + +enum rtl8365mb_cpu_insert { + RTL8365MB_CPU_INSERT_TO_ALL = 0, + RTL8365MB_CPU_INSERT_TO_TRAPPING = 
1, + RTL8365MB_CPU_INSERT_TO_NONE = 2, +}; + +enum rtl8365mb_cpu_position { + RTL8365MB_CPU_POS_AFTER_SA = 0, + RTL8365MB_CPU_POS_BEFORE_CRC = 1, +}; + +enum rtl8365mb_cpu_format { + RTL8365MB_CPU_FORMAT_8BYTES = 0, + RTL8365MB_CPU_FORMAT_4BYTES = 1, +}; + +enum rtl8365mb_cpu_rxlen { + RTL8365MB_CPU_RXLEN_72BYTES = 0, + RTL8365MB_CPU_RXLEN_64BYTES = 1, +}; + +/** + * struct rtl8365mb_cpu - CPU port configuration + * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames + * @mask: port mask of ports that parse should parse CPU tags + * @trap_port: forward trapped frames to this port + * @insert: CPU tag insertion mode in switch->CPU frames + * @position: position of CPU tag in frame + * @rx_length: minimum CPU RX length + * @format: CPU tag format + * + * Represents the CPU tagging and CPU port configuration of the switch. These + * settings are configurable at runtime. + */ +struct rtl8365mb_cpu { + bool enable; + u32 mask; + u32 trap_port; + enum rtl8365mb_cpu_insert insert; + enum rtl8365mb_cpu_position position; + enum rtl8365mb_cpu_rxlen rx_length; + enum rtl8365mb_cpu_format format; +}; + +/** + * struct rtl8365mb_port - private per-port data + * @smi: pointer to parent realtek_smi data + * @index: DSA port index, same as dsa_port::index + * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic + * access via rtl8365mb_get_stats64 + * @stats_lock: protect the stats structure during read/update + * @mib_work: delayed work for polling MIB counters + */ +struct rtl8365mb_port { + struct realtek_smi *smi; + unsigned int index; + struct rtnl_link_stats64 stats; + spinlock_t stats_lock; + struct delayed_work mib_work; +}; + +/** + * struct rtl8365mb - private chip-specific driver data + * @smi: pointer to parent realtek_smi data + * @irq: registered IRQ or zero + * @chip_id: chip identifier + * @chip_ver: chip silicon revision + * @port_mask: mask of all ports + * @learn_limit_max: maximum number of L2 addresses the chip can learn + * @cpu: CPU tagging and CPU port configuration for this chip + * @mib_lock: prevent concurrent reads of MIB counters + * @ports: per-port data + * @jam_table: chip-specific initialization jam table + * @jam_size: size of the chip's jam table + * + * Private data for this driver. 
+ */ +struct rtl8365mb { + struct realtek_smi *smi; + int irq; + u32 chip_id; + u32 chip_ver; + u32 port_mask; + u32 learn_limit_max; + struct rtl8365mb_cpu cpu; + struct mutex mib_lock; + struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS]; + const struct rtl8365mb_jam_tbl_entry *jam_table; + size_t jam_size; +}; + +static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi) +{ + u32 val; + + return regmap_read_poll_timeout(smi->map, + RTL8365MB_INDIRECT_ACCESS_STATUS_REG, + val, !val, 10, 100); +} + +static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy, + u32 ocp_addr) +{ + u32 val; + int ret; + + /* Set OCP prefix */ + val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr); + ret = regmap_update_bits( + smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG, + RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, + FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val)); + if (ret) + return ret; + + /* Set PHY register address */ + val = RTL8365MB_PHY_BASE; + val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy); + val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK, + ocp_addr >> 1); + val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK, + ocp_addr >> 6); + ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, + val); + if (ret) + return ret; + + return 0; +} + +static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy, + u32 ocp_addr, u16 *data) +{ + u32 val; + int ret; + + ret = rtl8365mb_phy_poll_busy(smi); + if (ret) + return ret; + + ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr); + if (ret) + return ret; + + /* Execute read operation */ + val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, + RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | + FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, + RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ); + ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); + if (ret) + return ret; + + ret = rtl8365mb_phy_poll_busy(smi); + if (ret) + return ret; + + /* Get PHY register data */ + ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, + &val); + if (ret) + return ret; + + *data = val & 0xFFFF; + + return 0; +} + +static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy, + u32 ocp_addr, u16 data) +{ + u32 val; + int ret; + + ret = rtl8365mb_phy_poll_busy(smi); + if (ret) + return ret; + + ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr); + if (ret) + return ret; + + /* Set PHY register data */ + ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, + data); + if (ret) + return ret; + + /* Execute write operation */ + val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK, + RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) | + FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK, + RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE); + ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val); + if (ret) + return ret; + + ret = rtl8365mb_phy_poll_busy(smi); + if (ret) + return ret; + + return 0; +} + +static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum) +{ + u32 ocp_addr; + u16 val; + int ret; + + if (regnum > RTL8365MB_PHYREGMAX) + return -EINVAL; + + ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2; + + ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val); + if (ret) { + dev_err(smi->dev, + "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy, + regnum, ocp_addr, ret); + return ret; + } + + dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n", + phy, regnum, 
ocp_addr, val); + + return val; +} + +static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum, + u16 val) +{ + u32 ocp_addr; + int ret; + + if (regnum > RTL8365MB_PHYREGMAX) + return -EINVAL; + + ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2; + + ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val); + if (ret) { + dev_err(smi->dev, + "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy, + regnum, ocp_addr, ret); + return ret; + } + + dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n", + phy, regnum, ocp_addr, val); + + return 0; +} + +static enum dsa_tag_protocol +rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol mp) +{ + return DSA_TAG_PROTO_RTL8_4; +} + +static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port, + phy_interface_t interface) +{ + struct device_node *dn; + struct dsa_port *dp; + int tx_delay = 0; + int rx_delay = 0; + int ext_port; + u32 val; + int ret; + + if (port == smi->cpu_port) { + ext_port = 1; + } else { + dev_err(smi->dev, "only one EXT port is currently supported\n"); + return -EINVAL; + } + + dp = dsa_to_port(smi->ds, port); + dn = dp->dn; + + /* Set the RGMII TX/RX delay + * + * The Realtek vendor driver indicates the following possible + * configuration settings: + * + * TX delay: + * 0 = no delay, 1 = 2 ns delay + * RX delay: + * 0 = no delay, 7 = maximum delay + * No units are specified, but there are a total of 8 steps. + * + * The vendor driver also states that this must be configured *before* + * forcing the external interface into a particular mode, which is done + * in the rtl8365mb_phylink_mac_link_{up,down} functions. + * + * Only configure an RGMII TX (resp. RX) delay if the + * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is + * specified. We ignore the detail of the RGMII interface mode + * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only + * property. + * + * For the RX delay, we assume that a register value of 4 corresponds to + * 2 ns. But this is just an educated guess, so ignore all other values + * to avoid too much confusion. 
+ */ + if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) { + val = val / 1000; /* convert to ns */ + + if (val == 0 || val == 2) + tx_delay = val / 2; + else + dev_warn(smi->dev, + "EXT port TX delay must be 0 or 2 ns\n"); + } + + if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) { + val = val / 1000; /* convert to ns */ + + if (val == 0 || val == 2) + rx_delay = val * 2; + else + dev_warn(smi->dev, + "EXT port RX delay must be 0 to 2 ns\n"); + } + + ret = regmap_update_bits( + smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port), + RTL8365MB_EXT_RGMXF_TXDELAY_MASK | + RTL8365MB_EXT_RGMXF_RXDELAY_MASK, + FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) | + FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay)); + if (ret) + return ret; + + ret = regmap_update_bits( + smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port), + RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port), + RTL8365MB_EXT_PORT_MODE_RGMII + << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET( + ext_port)); + if (ret) + return ret; + + return 0; +} + +static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port, + bool link, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + u32 r_tx_pause; + u32 r_rx_pause; + u32 r_duplex; + u32 r_speed; + u32 r_link; + int ext_port; + int val; + int ret; + + if (port == smi->cpu_port) { + ext_port = 1; + } else { + dev_err(smi->dev, "only one EXT port is currently supported\n"); + return -EINVAL; + } + + if (link) { + /* Force the link up with the desired configuration */ + r_link = 1; + r_rx_pause = rx_pause ? 1 : 0; + r_tx_pause = tx_pause ? 1 : 0; + + if (speed == SPEED_1000) { + r_speed = RTL8365MB_PORT_SPEED_1000M; + } else if (speed == SPEED_100) { + r_speed = RTL8365MB_PORT_SPEED_100M; + } else if (speed == SPEED_10) { + r_speed = RTL8365MB_PORT_SPEED_10M; + } else { + dev_err(smi->dev, "unsupported port speed %s\n", + phy_speed_to_str(speed)); + return -EINVAL; + } + + if (duplex == DUPLEX_FULL) { + r_duplex = 1; + } else if (duplex == DUPLEX_HALF) { + r_duplex = 0; + } else { + dev_err(smi->dev, "unsupported duplex %s\n", + phy_duplex_to_str(duplex)); + return -EINVAL; + } + } else { + /* Force the link down and reset any programmed configuration */ + r_link = 0; + r_tx_pause = 0; + r_rx_pause = 0; + r_speed = 0; + r_duplex = 0; + } + + val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) | + FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK, + r_tx_pause) | + FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK, + r_rx_pause) | + FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) | + FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK, + r_duplex) | + FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed); + ret = regmap_write(smi->map, + RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port), + val); + if (ret) + return ret; + + return 0; +} + +static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, + phy_interface_t interface) +{ + if (dsa_is_user_port(ds, port) && + (interface == PHY_INTERFACE_MODE_NA || + interface == PHY_INTERFACE_MODE_INTERNAL)) + /* Internal PHY */ + return true; + else if (dsa_is_cpu_port(ds, port) && + phy_interface_mode_is_rgmii(interface)) + /* Extension MAC */ + return true; + + return false; +} + +static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct realtek_smi *smi = ds->priv; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 }; + + /* 
include/linux/phylink.h says: + * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink + * expects the MAC driver to return all supported link modes. + */ + if (state->interface != PHY_INTERFACE_MODE_NA && + !rtl8365mb_phy_mode_supported(ds, port, state->interface)) { + dev_err(smi->dev, "phy mode %s is unsupported on port %d\n", + phy_modes(state->interface), port); + linkmode_zero(supported); + return; + } + + phylink_set_port_modes(mask); + + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Full); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { + dev_err(smi->dev, "phy mode %s is unsupported on port %d\n", + phy_modes(state->interface), port); + return; + } + + if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { + dev_err(smi->dev, + "port %d supports only conventional PHY or fixed-link\n", + port); + return; + } + + if (phy_interface_mode_is_rgmii(state->interface)) { + ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface); + if (ret) + dev_err(smi->dev, + "failed to configure RGMII mode on port %d: %d\n", + port, ret); + return; + } + + /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also + * supports + */ +} + +static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb_port *p; + struct rtl8365mb *mb; + int ret; + + mb = smi->chip_data; + p = &mb->ports[port]; + cancel_delayed_work_sync(&p->mib_work); + + if (phy_interface_mode_is_rgmii(interface)) { + ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0, + false, false); + if (ret) + dev_err(smi->dev, + "failed to reset forced mode on port %d: %d\n", + port, ret); + + return; + } +} + +static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, + bool rx_pause) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb_port *p; + struct rtl8365mb *mb; + int ret; + + mb = smi->chip_data; + p = &mb->ports[port]; + schedule_delayed_work(&p->mib_work, 0); + + if (phy_interface_mode_is_rgmii(interface)) { + ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed, + duplex, tx_pause, + rx_pause); + if (ret) + dev_err(smi->dev, + "failed to force mode on port %d: %d\n", port, + ret); + + return; + } +} + +static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct realtek_smi *smi = ds->priv; + enum rtl8365mb_stp_state val; + int msti = 0; + + switch (state) { + case BR_STATE_DISABLED: + val = RTL8365MB_STP_STATE_DISABLED; + break; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + val = RTL8365MB_STP_STATE_BLOCKING; + break; + case BR_STATE_LEARNING: + val = RTL8365MB_STP_STATE_LEARNING; + break; + case BR_STATE_FORWARDING: + val = RTL8365MB_STP_STATE_FORWARDING; + break; + default: + dev_err(smi->dev, "invalid STP state: %u\n", state); + return; + } + + 
regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port), + RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port), + val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port)); +} + +static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port, + bool enable) +{ + struct rtl8365mb *mb = smi->chip_data; + + /* Enable/disable learning by limiting the number of L2 addresses the + * port can learn. Realtek documentation states that a limit of zero + * disables learning. When enabling learning, set it to the chip's + * maximum. + */ + return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port), + enable ? mb->learn_limit_max : 0); +} + +static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port, + u32 mask) +{ + return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask); +} + +static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port, + u32 offset, u32 length, u64 *mibvalue) +{ + u64 tmpvalue = 0; + u32 val; + int ret; + int i; + + /* The MIB address is an SRAM address. We request a particular address + * and then poll the control register before reading the value from some + * counter registers. + */ + ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG, + RTL8365MB_MIB_ADDRESS(port, offset)); + if (ret) + return ret; + + /* Poll for completion */ + ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val, + !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK), + 10, 100); + if (ret) + return ret; + + /* Presumably this indicates a MIB counter read failure */ + if (val & RTL8365MB_MIB_CTRL0_RESET_MASK) + return -EIO; + + /* There are four MIB counter registers each holding a 16 bit word of a + * MIB counter. Depending on the offset, we should read from the upper + * two or lower two registers. In case the MIB counter is 4 words, we + * read from all four registers. 
+ */ + if (length == 4) + offset = 3; + else + offset = (offset + 1) % 4; + + /* Read the MIB counter 16 bits at a time */ + for (i = 0; i < length; i++) { + ret = regmap_read(smi->map, + RTL8365MB_MIB_COUNTER_REG(offset - i), &val); + if (ret) + return ret; + + tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF); + } + + /* Only commit the result if no error occurred */ + *mibvalue = tmpvalue; + + return 0; +} + +static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb *mb; + int ret; + int i; + + mb = smi->chip_data; + + mutex_lock(&mb->mib_lock); + for (i = 0; i < RTL8365MB_MIB_END; i++) { + struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + + ret = rtl8365mb_mib_counter_read(smi, port, mib->offset, + mib->length, &data[i]); + if (ret) { + dev_err(smi->dev, + "failed to read port %d counters: %d\n", port, + ret); + break; + } + } + mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < RTL8365MB_MIB_END; i++) { + struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + + strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN); + } +} + +static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return RTL8365MB_MIB_END; +} + +static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb_mib_counter *mib; + struct rtl8365mb *mb; + + mb = smi->chip_data; + mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors]; + + mutex_lock(&mb->mib_lock); + rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length, + &phy_stats->SymbolErrorDuringCarrier); + mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + u64 cnt[RTL8365MB_MIB_END] = { + [RTL8365MB_MIB_ifOutOctets] = 1, + [RTL8365MB_MIB_ifOutUcastPkts] = 1, + [RTL8365MB_MIB_ifOutMulticastPkts] = 1, + [RTL8365MB_MIB_ifOutBroadcastPkts] = 1, + [RTL8365MB_MIB_dot3OutPauseFrames] = 1, + [RTL8365MB_MIB_ifOutDiscards] = 1, + [RTL8365MB_MIB_ifInOctets] = 1, + [RTL8365MB_MIB_ifInUcastPkts] = 1, + [RTL8365MB_MIB_ifInMulticastPkts] = 1, + [RTL8365MB_MIB_ifInBroadcastPkts] = 1, + [RTL8365MB_MIB_dot3InPauseFrames] = 1, + [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1, + [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1, + [RTL8365MB_MIB_dot3StatsFCSErrors] = 1, + [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1, + [RTL8365MB_MIB_dot3StatsLateCollisions] = 1, + [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1, + + }; + struct realtek_smi *smi = ds->priv; + struct rtl8365mb *mb; + int ret; + int i; + + mb = smi->chip_data; + + mutex_lock(&mb->mib_lock); + for (i = 0; i < RTL8365MB_MIB_END; i++) { + struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; + + /* Only fetch required MIB counters (marked = 1 above) */ + if (!cnt[i]) + continue; + + ret = rtl8365mb_mib_counter_read(smi, port, mib->offset, + mib->length, &cnt[i]); + if (ret) + break; + } + mutex_unlock(&mb->mib_lock); + + /* The RTL8365MB-VC exposes MIB objects, which we have to translate into + * IEEE 802.3 Managed Objects. This is not always completely faithful, + * but we try our best. 
See RFC 3635 for a detailed treatment of the + * subject. + */ + + mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] + + cnt[RTL8365MB_MIB_ifOutMulticastPkts] + + cnt[RTL8365MB_MIB_ifOutBroadcastPkts] + + cnt[RTL8365MB_MIB_dot3OutPauseFrames] - + cnt[RTL8365MB_MIB_ifOutDiscards]; + mac_stats->SingleCollisionFrames = + cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames]; + mac_stats->MultipleCollisionFrames = + cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames]; + mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] + + cnt[RTL8365MB_MIB_ifInMulticastPkts] + + cnt[RTL8365MB_MIB_ifInBroadcastPkts] + + cnt[RTL8365MB_MIB_dot3InPauseFrames]; + mac_stats->FrameCheckSequenceErrors = + cnt[RTL8365MB_MIB_dot3StatsFCSErrors]; + mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] - + 18 * mac_stats->FramesTransmittedOK; + mac_stats->FramesWithDeferredXmissions = + cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions]; + mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions]; + mac_stats->FramesAbortedDueToXSColls = + cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions]; + mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] - + 18 * mac_stats->FramesReceivedOK; + mac_stats->MulticastFramesXmittedOK = + cnt[RTL8365MB_MIB_ifOutMulticastPkts]; + mac_stats->BroadcastFramesXmittedOK = + cnt[RTL8365MB_MIB_ifOutBroadcastPkts]; + mac_stats->MulticastFramesReceivedOK = + cnt[RTL8365MB_MIB_ifInMulticastPkts]; + mac_stats->BroadcastFramesReceivedOK = + cnt[RTL8365MB_MIB_ifInBroadcastPkts]; +} + +static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb_mib_counter *mib; + struct rtl8365mb *mb; + + mb = smi->chip_data; + mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes]; + + mutex_lock(&mb->mib_lock); + rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length, + &ctrl_stats->UnsupportedOpcodesReceived); + mutex_unlock(&mb->mib_lock); +} + +static void rtl8365mb_stats_update(struct realtek_smi *smi, int port) +{ + u64 cnt[RTL8365MB_MIB_END] = { + [RTL8365MB_MIB_ifOutOctets] = 1, + [RTL8365MB_MIB_ifOutUcastPkts] = 1, + [RTL8365MB_MIB_ifOutMulticastPkts] = 1, + [RTL8365MB_MIB_ifOutBroadcastPkts] = 1, + [RTL8365MB_MIB_ifOutDiscards] = 1, + [RTL8365MB_MIB_ifInOctets] = 1, + [RTL8365MB_MIB_ifInUcastPkts] = 1, + [RTL8365MB_MIB_ifInMulticastPkts] = 1, + [RTL8365MB_MIB_ifInBroadcastPkts] = 1, + [RTL8365MB_MIB_etherStatsDropEvents] = 1, + [RTL8365MB_MIB_etherStatsCollisions] = 1, + [RTL8365MB_MIB_etherStatsFragments] = 1, + [RTL8365MB_MIB_etherStatsJabbers] = 1, + [RTL8365MB_MIB_dot3StatsFCSErrors] = 1, + [RTL8365MB_MIB_dot3StatsLateCollisions] = 1, + }; + struct rtl8365mb *mb = smi->chip_data; + struct rtnl_link_stats64 *stats; + int ret; + int i; + + stats = &mb->ports[port].stats; + + mutex_lock(&mb->mib_lock); + for (i = 0; i < RTL8365MB_MIB_END; i++) { + struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i]; + + /* Only fetch required MIB counters (marked = 1 above) */ + if (!cnt[i]) + continue; + + ret = rtl8365mb_mib_counter_read(smi, port, c->offset, + c->length, &cnt[i]); + if (ret) + break; + } + mutex_unlock(&mb->mib_lock); + + /* Don't update statistics if there was an error reading the counters */ + if (ret) + return; + + spin_lock(&mb->ports[port].stats_lock); + + stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] + + cnt[RTL8365MB_MIB_ifInMulticastPkts] + + cnt[RTL8365MB_MIB_ifInBroadcastPkts] - + 
cnt[RTL8365MB_MIB_ifOutDiscards]; + + stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] + + cnt[RTL8365MB_MIB_ifOutMulticastPkts] + + cnt[RTL8365MB_MIB_ifOutBroadcastPkts]; + + /* if{In,Out}Octets includes FCS - remove it */ + stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets; + stats->tx_bytes = + cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets; + + stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents]; + stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards]; + + stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts]; + stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions]; + + stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] + + cnt[RTL8365MB_MIB_etherStatsJabbers]; + stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors]; + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors; + + stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards]; + stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions]; + stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors; + + spin_unlock(&mb->ports[port].stats_lock); +} + +static void rtl8365mb_stats_poll(struct work_struct *work) +{ + struct rtl8365mb_port *p = container_of(to_delayed_work(work), + struct rtl8365mb_port, + mib_work); + struct realtek_smi *smi = p->smi; + + rtl8365mb_stats_update(smi, p->index); + + schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES); +} + +static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb_port *p; + struct rtl8365mb *mb; + + mb = smi->chip_data; + p = &mb->ports[port]; + + spin_lock(&p->stats_lock); + memcpy(s, &p->stats, sizeof(*s)); + spin_unlock(&p->stats_lock); +} + +static void rtl8365mb_stats_setup(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + int i; + + /* Per-chip global mutex to protect MIB counter access, since doing + * so requires accessing a series of registers in a particular order. + */ + mutex_init(&mb->mib_lock); + + for (i = 0; i < smi->num_ports; i++) { + struct rtl8365mb_port *p = &mb->ports[i]; + + if (dsa_is_unused_port(smi->ds, i)) + continue; + + /* Per-port spinlock to protect the stats64 data */ + spin_lock_init(&p->stats_lock); + + /* This work polls the MIB counters and keeps the stats64 data + * up-to-date. 
+ */ + INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll); + } +} + +static void rtl8365mb_stats_teardown(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + int i; + + for (i = 0; i < smi->num_ports; i++) { + struct rtl8365mb_port *p = &mb->ports[i]; + + if (dsa_is_unused_port(smi->ds, i)) + continue; + + cancel_delayed_work_sync(&p->mib_work); + } +} + +static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg, + u32 *val) +{ + int ret; + + ret = regmap_read(smi->map, reg, val); + if (ret) + return ret; + + return regmap_write(smi->map, reg, *val); +} + +static irqreturn_t rtl8365mb_irq(int irq, void *data) +{ + struct realtek_smi *smi = data; + unsigned long line_changes = 0; + struct rtl8365mb *mb; + u32 stat; + int line; + int ret; + + mb = smi->chip_data; + + ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG, + &stat); + if (ret) + goto out_error; + + if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) { + u32 linkdown_ind; + u32 linkup_ind; + u32 val; + + ret = rtl8365mb_get_and_clear_status_reg( + smi, RTL8365MB_PORT_LINKUP_IND_REG, &val); + if (ret) + goto out_error; + + linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val); + + ret = rtl8365mb_get_and_clear_status_reg( + smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val); + if (ret) + goto out_error; + + linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val); + + line_changes = (linkup_ind | linkdown_ind) & mb->port_mask; + } + + if (!line_changes) + goto out_none; + + for_each_set_bit(line, &line_changes, smi->num_ports) { + int child_irq = irq_find_mapping(smi->irqdomain, line); + + handle_nested_irq(child_irq); + } + + return IRQ_HANDLED; + +out_error: + dev_err(smi->dev, "failed to read interrupt status: %d\n", ret); + +out_none: + return IRQ_NONE; +} + +static struct irq_chip rtl8365mb_irq_chip = { + .name = "rtl8365mb", + /* The hardware doesn't support masking IRQs on a per-port basis */ +}; + +static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, domain->host_data); + irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq); + irq_set_nested_thread(irq, 1); + irq_set_noprobe(irq); + + return 0; +} + +static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_set_nested_thread(irq, 0); + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops rtl8365mb_irqdomain_ops = { + .map = rtl8365mb_irq_map, + .unmap = rtl8365mb_irq_unmap, + .xlate = irq_domain_xlate_onecell, +}; + +static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable) +{ + return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG, + RTL8365MB_INTR_LINK_CHANGE_MASK, + FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK, + enable ? 
1 : 0)); +} + +static int rtl8365mb_irq_enable(struct realtek_smi *smi) +{ + return rtl8365mb_set_irq_enable(smi, true); +} + +static int rtl8365mb_irq_disable(struct realtek_smi *smi) +{ + return rtl8365mb_set_irq_enable(smi, false); +} + +static int rtl8365mb_irq_setup(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + struct device_node *intc; + u32 irq_trig; + int virq; + int irq; + u32 val; + int ret; + int i; + + intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller"); + if (!intc) { + dev_err(smi->dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + /* rtl8365mb IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (irq <= 0) { + if (irq != -EPROBE_DEFER) + dev_err(smi->dev, "failed to get parent irq: %d\n", + irq); + ret = irq ? irq : -EINVAL; + goto out_put_node; + } + + smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports, + &rtl8365mb_irqdomain_ops, smi); + if (!smi->irqdomain) { + dev_err(smi->dev, "failed to add irq domain\n"); + ret = -ENOMEM; + goto out_put_node; + } + + for (i = 0; i < smi->num_ports; i++) { + virq = irq_create_mapping(smi->irqdomain, i); + if (!virq) { + dev_err(smi->dev, + "failed to create irq domain mapping\n"); + ret = -EINVAL; + goto out_remove_irqdomain; + } + + irq_set_parent(virq, irq); + } + + /* Configure chip interrupt signal polarity */ + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + switch (irq_trig) { + case IRQF_TRIGGER_RISING: + case IRQF_TRIGGER_HIGH: + val = RTL8365MB_INTR_POLARITY_HIGH; + break; + case IRQF_TRIGGER_FALLING: + case IRQF_TRIGGER_LOW: + val = RTL8365MB_INTR_POLARITY_LOW; + break; + default: + dev_err(smi->dev, "unsupported irq trigger type %u\n", + irq_trig); + ret = -EINVAL; + goto out_remove_irqdomain; + } + + ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG, + RTL8365MB_INTR_POLARITY_MASK, + FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val)); + if (ret) + goto out_remove_irqdomain; + + /* Disable the interrupt in case the chip has it enabled on reset */ + ret = rtl8365mb_irq_disable(smi); + if (ret) + goto out_remove_irqdomain; + + /* Clear the interrupt status register */ + ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG, + RTL8365MB_INTR_ALL_MASK); + if (ret) + goto out_remove_irqdomain; + + ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT, + "rtl8365mb", smi); + if (ret) { + dev_err(smi->dev, "failed to request irq: %d\n", ret); + goto out_remove_irqdomain; + } + + /* Store the irq so that we know to free it during teardown */ + mb->irq = irq; + + ret = rtl8365mb_irq_enable(smi); + if (ret) + goto out_free_irq; + + of_node_put(intc); + + return 0; + +out_free_irq: + free_irq(mb->irq, smi); + mb->irq = 0; + +out_remove_irqdomain: + for (i = 0; i < smi->num_ports; i++) { + virq = irq_find_mapping(smi->irqdomain, i); + irq_dispose_mapping(virq); + } + + irq_domain_remove(smi->irqdomain); + smi->irqdomain = NULL; + +out_put_node: + of_node_put(intc); + + return ret; +} + +static void rtl8365mb_irq_teardown(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + int virq; + int i; + + if (mb->irq) { + free_irq(mb->irq, smi); + mb->irq = 0; + } + + if (smi->irqdomain) { + for (i = 0; i < smi->num_ports; i++) { + virq = irq_find_mapping(smi->irqdomain, i); + irq_dispose_mapping(virq); + } + + irq_domain_remove(smi->irqdomain); + smi->irqdomain = NULL; + } +} + +static int rtl8365mb_cpu_config(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + struct rtl8365mb_cpu *cpu = 
&mb->cpu; + u32 val; + int ret; + + ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG, + RTL8365MB_CPU_PORT_MASK_MASK, + FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK, + cpu->mask)); + if (ret) + return ret; + + val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) | + FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) | + FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) | + FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) | + FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) | + FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) | + FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK, + cpu->trap_port >> 3); + ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val); + if (ret) + return ret; + + return 0; +} + +static int rtl8365mb_switch_init(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + int ret; + int i; + + /* Do any chip-specific init jam before getting to the common stuff */ + if (mb->jam_table) { + for (i = 0; i < mb->jam_size; i++) { + ret = regmap_write(smi->map, mb->jam_table[i].reg, + mb->jam_table[i].val); + if (ret) + return ret; + } + } + + /* Common init jam */ + for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) { + ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg, + rtl8365mb_init_jam_common[i].val); + if (ret) + return ret; + } + + return 0; +} + +static int rtl8365mb_reset_chip(struct realtek_smi *smi) +{ + u32 val; + + realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG, + FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, + 1)); + + /* Realtek documentation says the chip needs 1 second to reset. Sleep + * for 100 ms before accessing any registers to prevent ACK timeouts. + */ + msleep(100); + return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val, + !(val & RTL8365MB_CHIP_RESET_HW_MASK), + 20000, 1e6); +} + +static int rtl8365mb_setup(struct dsa_switch *ds) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8365mb *mb; + int ret; + int i; + + mb = smi->chip_data; + + ret = rtl8365mb_reset_chip(smi); + if (ret) { + dev_err(smi->dev, "failed to reset chip: %d\n", ret); + goto out_error; + } + + /* Configure switch to vendor-defined initial state */ + ret = rtl8365mb_switch_init(smi); + if (ret) { + dev_err(smi->dev, "failed to initialize switch: %d\n", ret); + goto out_error; + } + + /* Set up cascading IRQs */ + ret = rtl8365mb_irq_setup(smi); + if (ret == -EPROBE_DEFER) + return ret; + else if (ret) + dev_info(smi->dev, "no interrupt support\n"); + + /* Configure CPU tagging */ + ret = rtl8365mb_cpu_config(smi); + if (ret) + goto out_teardown_irq; + + /* Configure ports */ + for (i = 0; i < smi->num_ports; i++) { + struct rtl8365mb_port *p = &mb->ports[i]; + + if (dsa_is_unused_port(smi->ds, i)) + continue; + + /* Set up per-port private data */ + p->smi = smi; + p->index = i; + + /* Forward only to the CPU */ + ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port)); + if (ret) + goto out_teardown_irq; + + /* Disable learning */ + ret = rtl8365mb_port_set_learning(smi, i, false); + if (ret) + goto out_teardown_irq; + + /* Set the initial STP state of all ports to DISABLED, otherwise + * ports will still forward frames to the CPU despite being + * administratively down by default. 
+ */ + rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED); + } + + /* Set maximum packet length to 1536 bytes */ + ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG, + RTL8365MB_CFG0_MAX_LEN_MASK, + FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536)); + if (ret) + goto out_teardown_irq; + + ret = realtek_smi_setup_mdio(smi); + if (ret) { + dev_err(smi->dev, "could not set up MDIO bus\n"); + goto out_teardown_irq; + } + + /* Start statistics counter polling */ + rtl8365mb_stats_setup(smi); + + return 0; + +out_teardown_irq: + rtl8365mb_irq_teardown(smi); + +out_error: + return ret; +} + +static void rtl8365mb_teardown(struct dsa_switch *ds) +{ + struct realtek_smi *smi = ds->priv; + + rtl8365mb_stats_teardown(smi); + rtl8365mb_irq_teardown(smi); +} + +static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver) +{ + int ret; + + /* For some reason we have to write a magic value to an arbitrary + * register whenever accessing the chip ID/version registers. + */ + ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE); + if (ret) + return ret; + + ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id); + if (ret) + return ret; + + ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver); + if (ret) + return ret; + + /* Reset magic register */ + ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0); + if (ret) + return ret; + + return 0; +} + +static int rtl8365mb_detect(struct realtek_smi *smi) +{ + struct rtl8365mb *mb = smi->chip_data; + u32 chip_id; + u32 chip_ver; + int ret; + + ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver); + if (ret) { + dev_err(smi->dev, "failed to read chip id and version: %d\n", + ret); + return ret; + } + + switch (chip_id) { + case RTL8365MB_CHIP_ID_8365MB_VC: + dev_info(smi->dev, + "found an RTL8365MB-VC switch (ver=0x%04x)\n", + chip_ver); + + smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC; + smi->num_ports = smi->cpu_port + 1; + + mb->smi = smi; + mb->chip_id = chip_id; + mb->chip_ver = chip_ver; + mb->port_mask = BIT(smi->num_ports) - 1; + mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC; + mb->jam_table = rtl8365mb_init_jam_8365mb_vc; + mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc); + + mb->cpu.enable = 1; + mb->cpu.mask = BIT(smi->cpu_port); + mb->cpu.trap_port = smi->cpu_port; + mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL; + mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA; + mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES; + mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES; + + break; + default: + dev_err(smi->dev, + "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n", + chip_id, chip_ver); + return -ENODEV; + } + + return 0; +} + +static const struct dsa_switch_ops rtl8365mb_switch_ops = { + .get_tag_protocol = rtl8365mb_get_tag_protocol, + .setup = rtl8365mb_setup, + .teardown = rtl8365mb_teardown, + .phylink_validate = rtl8365mb_phylink_validate, + .phylink_mac_config = rtl8365mb_phylink_mac_config, + .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down, + .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up, + .port_stp_state_set = rtl8365mb_port_stp_state_set, + .get_strings = rtl8365mb_get_strings, + .get_ethtool_stats = rtl8365mb_get_ethtool_stats, + .get_sset_count = rtl8365mb_get_sset_count, + .get_eth_phy_stats = rtl8365mb_get_phy_stats, + .get_eth_mac_stats = rtl8365mb_get_mac_stats, + .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats, + .get_stats64 = rtl8365mb_get_stats64, +}; + +static const struct realtek_smi_ops rtl8365mb_smi_ops = { + .detect = rtl8365mb_detect, + 
.phy_read = rtl8365mb_phy_read, + .phy_write = rtl8365mb_phy_write, +}; + +const struct realtek_smi_variant rtl8365mb_variant = { + .ds_ops = &rtl8365mb_switch_ops, + .ops = &rtl8365mb_smi_ops, + .clk_delay = 10, + .cmd_read = 0xb9, + .cmd_write = 0xb8, + .chip_data_sz = sizeof(struct rtl8365mb), +}; +EXPORT_SYMBOL_GPL(rtl8365mb_variant); diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 75897a3690969f..bdb8d8d348807f 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi) } EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); -int rtl8366_init_vlan(struct realtek_smi *smi) -{ - int port; - int ret; - - ret = rtl8366_reset_vlan(smi); - if (ret) - return ret; - - /* Loop over the available ports, for each port, associate - * it with the VLAN (port+1) - */ - for (port = 0; port < smi->num_ports; port++) { - u32 mask; - - if (port == smi->cpu_port) - /* For the CPU port, make all ports members of this - * VLAN. - */ - mask = GENMASK((int)smi->num_ports - 1, 0); - else - /* For all other ports, enable itself plus the - * CPU port. - */ - mask = BIT(port) | BIT(smi->cpu_port); - - /* For each port, set the port as member of VLAN (port+1) - * and untagged, except for the CPU port: the CPU port (5) is - * member of VLAN 6 and so are ALL the other ports as well. - * Use filter 0 (no filter). - */ - dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", - (port + 1), port, mask); - ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); - if (ret) - return ret; - - dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", - (port + 1), port, (port + 1)); - ret = rtl8366_set_pvid(smi, port, (port + 1)); - if (ret) - return ret; - } - - return rtl8366_enable_vlan(smi, true); -} -EXPORT_SYMBOL_GPL(rtl8366_init_vlan); - -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) -{ - struct realtek_smi *smi = ds->priv; - struct rtl8366_vlan_4k vlan4k; - int ret; - - /* Use VLAN nr port + 1 since VLAN0 is not valid */ - if (!smi->ops->is_vlan_valid(smi, port + 1)) - return -EINVAL; - - dev_info(smi->dev, "%s filtering on port %d\n", - vlan_filtering ? "enable" : "disable", - port); - - /* TODO: - * The hardware support filter ID (FID) 0..7, I have no clue how to - * support this in the driver when the callback only says on/off. - */ - ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); - if (ret) - return ret; - - /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port + 1, - vlan4k.member, - vlan4k.untag, - 1); - if (ret) - return ret; - - return 0; -} -EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); - int rtl8366_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) @@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port, return ret; } - dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", - vlan->vid, port, untagged ? "untagged" : "tagged", - pvid ? " PVID" : "no PVID"); - - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) - dev_err(smi->dev, "port is DSA or CPU port\n"); + dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n", + vlan->vid, port, untagged ? "untagged" : "tagged", + pvid ? 
"PVID" : "no PVID"); member |= BIT(port); @@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, struct realtek_smi *smi = ds->priv; int ret, i; - dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port); + dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port); for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, * anymore then clear the whole member * config so it can be reused. */ - if (!vlanmc.member && vlanmc.untag) { + if (!vlanmc.member) { vlanmc.vid = 0; vlanmc.priority = 0; vlanmc.fid = 0; diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index a89093bc6c6add..03deacd83e6130 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -42,9 +43,12 @@ /* Port Enable Control register */ #define RTL8366RB_PECR 0x0001 -/* Switch Security Control registers */ -#define RTL8366RB_SSCR0 0x0002 -#define RTL8366RB_SSCR1 0x0003 +/* Switch per-port learning disablement register */ +#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002 + +/* Security control, actually aging register */ +#define RTL8366RB_SECURITY_CTRL 0x0003 + #define RTL8366RB_SSCR2 0x0004 #define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) @@ -106,6 +110,18 @@ #define RTL8366RB_POWER_SAVING_REG 0x0021 +/* Spanning tree status (STP) control, two bits per port per FID */ +#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */ +#define RTL8366RB_STP_STATE_DISABLED 0x0 +#define RTL8366RB_STP_STATE_BLOCKING 0x1 +#define RTL8366RB_STP_STATE_LEARNING 0x2 +#define RTL8366RB_STP_STATE_FORWARDING 0x3 +#define RTL8366RB_STP_MASK GENMASK(1, 0) +#define RTL8366RB_STP_STATE(port, state) \ + ((state) << ((port) * 2)) +#define RTL8366RB_STP_STATE_MASK(port) \ + RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK) + /* CPU port control reg */ #define RTL8368RB_CPU_CTRL_REG 0x0061 #define RTL8368RB_CPU_PORTS_MSK 0x00FF @@ -143,6 +159,21 @@ #define RTL8366RB_PHY_NO_OFFSET 9 #define RTL8366RB_PHY_NO_MASK (0x1f << 9) +/* VLAN Ingress Control Register 1, one bit per port. + * bit 0 .. 5 will make the switch drop ingress frames without + * VID such as untagged or priority-tagged frames for respective + * port. + * bit 6 .. 11 will make the switch drop ingress frames carrying + * a C-tag with VID != 0 for respective port. + */ +#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E +#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6)) + +/* VLAN Ingress Control Register 2, one bit per port. + * bit0 .. bit5 will make the switch drop all ingress frames with + * a VLAN classification that does not include the port is in its + * member set. 
+ */ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f /* LED control registers */ @@ -215,6 +246,7 @@ #define RTL8366RB_NUM_LEDGROUPS 4 #define RTL8366RB_NUM_VIDS 4096 #define RTL8366RB_PRIORITYMAX 7 +#define RTL8366RB_NUM_FIDS 8 #define RTL8366RB_FIDMAX 7 #define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */ @@ -300,6 +332,13 @@ #define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 #define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ +/* Port isolation registers */ +#define RTL8366RB_PORT_ISO_BASE 0x0F08 +#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum)) +#define RTL8366RB_PORT_ISO_EN BIT(0) +#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1) +#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1) + /* bits 0..5 enable force when cleared */ #define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 @@ -314,9 +353,11 @@ /** * struct rtl8366rb - RTL8366RB-specific data * @max_mtu: per-port max MTU setting + * @pvid_enabled: if PVID is set for respective port */ struct rtl8366rb { unsigned int max_mtu[RTL8366RB_NUM_PORTS]; + bool pvid_enabled[RTL8366RB_NUM_PORTS]; }; static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { @@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; + /* Isolate all user ports so they can only send packets to itself and the CPU port */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) | + RTL8366RB_PORT_ISO_EN); + if (ret) + return ret; + } + /* CPU port can send packets to all ports */ + ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU), + RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) | + RTL8366RB_PORT_ISO_EN); + if (ret) + return ret; + /* Set up the "green ethernet" feature */ ret = rtl8366rb_jam_table(rtl8366rb_green_jam, ARRAY_SIZE(rtl8366rb_green_jam), smi, false); @@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds) /* layer 2 size, see rtl8366rb_change_mtu() */ rb->max_mtu[i] = 1532; - /* Enable learning for all ports */ - ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); + /* Disable learning for all ports */ + ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, + RTL8366RB_PORT_ALL); if (ret) return ret; /* Enable auto ageing for all ports */ - ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); + ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0); if (ret) return ret; @@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; - /* Discard VLAN tagged packets if the port is not a member of - * the VLAN with which the packets is associated. 
- */ + /* Accept all packets by default, we enable filtering on-demand */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, + 0); + if (ret) + return ret; ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, - RTL8366RB_PORT_ALL); + 0); if (ret) return ret; @@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds) return ret; } - ret = rtl8366_init_vlan(smi); + ret = rtl8366_reset_vlan(smi); if (ret) return ret; @@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds) return -ENODEV; } - ds->configure_vlan_while_not_filtering = false; - return 0; } @@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) rb8366rb_set_port_led(smi, port, false); } +static int +rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct realtek_smi *smi = ds->priv; + unsigned int port_bitmap = 0; + int ret, i; + + /* Loop over all other ports than the current one */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + /* Current port handled last */ + if (i == port) + continue; + /* Not on this bridge */ + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + /* Join this port to each other port on the bridge */ + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(port)), + RTL8366RB_PORT_ISO_PORTS(BIT(port))); + if (ret) + dev_err(smi->dev, "failed to join port %d\n", port); + + port_bitmap |= BIT(i); + } + + /* Set the bits for the ports we can access */ + return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), + RTL8366RB_PORT_ISO_PORTS(port_bitmap), + RTL8366RB_PORT_ISO_PORTS(port_bitmap)); +} + +static void +rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct realtek_smi *smi = ds->priv; + unsigned int port_bitmap = 0; + int ret, i; + + /* Loop over all other ports than this one */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + /* Current port handled last */ + if (i == port) + continue; + /* Not on this bridge */ + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + /* Remove this port from any other port on the bridge */ + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0); + if (ret) + dev_err(smi->dev, "failed to leave port %d\n", port); + + port_bitmap |= BIT(i); + } + + /* Clear the bits for the ports we can not access, leave ourselves */ + regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), + RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0); +} + +/** + * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames + * @smi: SMI state container + * @port: the port to drop untagged and C-tagged frames on + * @drop: whether to drop or pass untagged and C-tagged frames + */ +static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop) +{ + return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, + RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port), + drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0); +} + +static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366rb *rb; + int ret; + + rb = smi->chip_data; + + dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port, + vlan_filtering ? 
"enable" : "disable"); + + /* If the port is not in the member set, the frame will be dropped */ + ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, + BIT(port), vlan_filtering ? BIT(port) : 0); + if (ret) + return ret; + + /* If VLAN filtering is enabled and PVID is also enabled, we must + * not drop any untagged or C-tagged frames. If we turn off VLAN + * filtering on a port, we need to accept any frames. + */ + if (vlan_filtering) + ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]); + else + ret = rtl8366rb_drop_untagged(smi, port, false); + + return ret; +} + +static int +rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + /* We support enabling/disabling learning */ + if (flags.mask & ~(BR_LEARNING)) + return -EINVAL; + + return 0; +} + +static int +rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (flags.mask & BR_LEARNING) { + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, + BIT(port), + (flags.val & BR_LEARNING) ? 0 : BIT(port)); + if (ret) + return ret; + } + + return 0; +} + +static void +rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct realtek_smi *smi = ds->priv; + u32 val; + int i; + + switch (state) { + case BR_STATE_DISABLED: + val = RTL8366RB_STP_STATE_DISABLED; + break; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + val = RTL8366RB_STP_STATE_BLOCKING; + break; + case BR_STATE_LEARNING: + val = RTL8366RB_STP_STATE_LEARNING; + break; + case BR_STATE_FORWARDING: + val = RTL8366RB_STP_STATE_FORWARDING; + break; + default: + dev_err(smi->dev, "unknown bridge state requested\n"); + return; + } + + /* Set the same status for the port on all the FIDs */ + for (i = 0; i < RTL8366RB_NUM_FIDS; i++) { + regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i, + RTL8366RB_STP_STATE_MASK(port), + RTL8366RB_STP_STATE(port, val)); + } +} + +static void +rtl8366rb_port_fast_age(struct dsa_switch *ds, int port) +{ + struct realtek_smi *smi = ds->priv; + + /* This will age out any learned L2 entries */ + regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, + BIT(port), BIT(port)); + /* Restore the normal state of things */ + regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, + BIT(port), 0); +} + static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct realtek_smi *smi = ds->priv; @@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val) static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) { + struct rtl8366rb *rb; + bool pvid_enabled; + int ret; + + rb = smi->chip_data; + pvid_enabled = !!index; + if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) return -EINVAL; - return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), RTL8366RB_PORT_VLAN_CTRL_MASK << RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); + if (ret) + return ret; + + rb->pvid_enabled[port] = pvid_enabled; + + /* If VLAN filtering is enabled and PVID is also enabled, we must + * not drop any untagged or C-tagged frames. Make sure to update the + * filtering setting. 
+ */ + if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port))) + ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled); + + return ret; } static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) { - unsigned int max = RTL8366RB_NUM_VLANS; + unsigned int max = RTL8366RB_NUM_VLANS - 1; if (smi->vlan4k_enabled) max = RTL8366RB_NUM_VIDS - 1; - if (vlan == 0 || vlan > max) + if (vlan > max) return false; return true; @@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = { .get_strings = rtl8366_get_strings, .get_ethtool_stats = rtl8366_get_ethtool_stats, .get_sset_count = rtl8366_get_sset_count, - .port_vlan_filtering = rtl8366_vlan_filtering, + .port_bridge_join = rtl8366rb_port_bridge_join, + .port_bridge_leave = rtl8366rb_port_bridge_leave, + .port_vlan_filtering = rtl8366rb_vlan_filtering, .port_vlan_add = rtl8366_vlan_add, .port_vlan_del = rtl8366_vlan_del, .port_enable = rtl8366rb_port_enable, .port_disable = rtl8366rb_port_disable, + .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags, + .port_bridge_flags = rtl8366rb_port_bridge_flags, + .port_stp_state_set = rtl8366rb_port_stp_state_set, + .port_fast_age = rtl8366rb_port_fast_age, .port_change_mtu = rtl8366rb_change_mtu, .port_max_mtu = rtl8366rb_max_mtu, }; diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 5e5d24e7c02b2d..21dba16af09793 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -20,6 +20,27 @@ #define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10) #define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT +/* Calculated assuming 1Gbps, where the clock has 125 MHz (8 ns period) + * To avoid floating point operations, we'll multiply the degrees by 10 + * to get a "phase" and get 1 decimal point precision. + */ +#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \ + (((ps) * 360) / 800) +#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \ + ((800 * (phase)) / 360) +#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \ + (((phase) - 738) / 9) +#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \ + SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps)) + +/* Valid range in degrees is a value between 73.8 and 101.7 + * in 0.9 degree increments + */ +#define SJA1105_RGMII_DELAY_MIN_PS \ + SJA1105_RGMII_DELAY_PHASE_TO_PS(738) +#define SJA1105_RGMII_DELAY_MAX_PS \ + SJA1105_RGMII_DELAY_PHASE_TO_PS(1017) + typedef enum { SPI_READ = 0, SPI_WRITE = 1, @@ -222,16 +243,14 @@ struct sja1105_flow_block { struct sja1105_private { struct sja1105_static_config static_config; - bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS]; - bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS]; + int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS]; + int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS]; phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS]; bool fixed_link[SJA1105_MAX_NUM_PORTS]; - bool vlan_aware; unsigned long ucast_egress_floods; unsigned long bcast_egress_floods; const struct sja1105_info *info; size_t max_xfer_len; - struct gpio_desc *reset_gpio; struct spi_device *spidev; struct dsa_switch *ds; u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; @@ -242,6 +261,8 @@ struct sja1105_private { * the switch doesn't confuse them with one another. 
*/ struct mutex mgmt_lock; + /* Serializes access to the dynamic config interface */ + struct mutex dynamic_config_lock; struct devlink_region **regions; struct sja1105_cbs_entry *cbs; struct mii_bus *mdio_base_t1; diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c index 5bbf1707f2af5c..e3699f76f6d7e0 100644 --- a/drivers/net/dsa/sja1105/sja1105_clocking.c +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c @@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd, sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op); } -/* Valid range in degrees is an integer between 73.8 and 101.7 */ -static u64 sja1105_rgmii_delay(u64 phase) -{ - /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9. - * To avoid floating point operations we'll multiply by 10 - * and get 1 decimal point precision. - */ - phase *= 10; - return (phase - 738) / 9; -} - /* The RGMII delay setup procedure is 2-step and gets called upon each * .phylink_mac_config. Both are strategic. * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues @@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port) const struct sja1105_private *priv = ctx; const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; + int rx_delay = priv->rgmii_rx_delay_ps[port]; + int tx_delay = priv->rgmii_tx_delay_ps[port]; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; int rc; - if (priv->rgmii_rx_delay[port]) - pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); - if (priv->rgmii_tx_delay[port]) - pad_mii_id.txc_delay = sja1105_rgmii_delay(90); + if (rx_delay) + pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay); + if (tx_delay) + pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay); /* Stage 1: Turn the RGMII delay lines off. */ pad_mii_id.rxc_bypass = 1; @@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port) return rc; /* Stage 2: Turn the RGMII delay lines on. 
*/ - if (priv->rgmii_rx_delay[port]) { + if (rx_delay) { pad_mii_id.rxc_bypass = 0; pad_mii_id.rxc_pd = 0; } - if (priv->rgmii_tx_delay[port]) { + if (tx_delay) { pad_mii_id.txc_bypass = 0; pad_mii_id.txc_pd = 0; } @@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port) const struct sja1105_private *priv = ctx; const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; + int rx_delay = priv->rgmii_rx_delay_ps[port]; + int tx_delay = priv->rgmii_tx_delay_ps[port]; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; pad_mii_id.rxc_pd = 1; pad_mii_id.txc_pd = 1; - if (priv->rgmii_rx_delay[port]) { - pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); + if (rx_delay) { + pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay); /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */ pad_mii_id.rxc_bypass = 1; pad_mii_id.rxc_pd = 0; } - if (priv->rgmii_tx_delay[port]) { - pad_mii_id.txc_delay = sja1105_rgmii_delay(90); + if (tx_delay) { + pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay); pad_mii_id.txc_bypass = 1; pad_mii_id.txc_pd = 0; } diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index f2049f52833cb0..7729d3f8b7f50a 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = { }, }; +#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10 +#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000 + +static int +sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, + struct sja1105_dyn_cmd *cmd, + const struct sja1105_dynamic_table_ops *ops) +{ + u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {}; + int rc; + + /* We don't _need_ to read the full entry, just the command area which + * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a + * buffer that contains the full entry too. Additionally, our API + * doesn't really know how many bytes into the buffer does the command + * area really begin. So just read back the whole entry. + */ + rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, + ops->packed_size); + if (rc) + return rc; + + /* Unpack the command structure, and return it to the caller in case it + * needs to perform further checks on it (VALIDENT). + */ + memset(cmd, 0, sizeof(*cmd)); + ops->cmd_packing(packed_buf, cmd, UNPACK); + + /* Hardware hasn't cleared VALID => still working on it */ + return cmd->valid ? -EAGAIN : 0; +} + +/* Poll the dynamic config entry's control area until the hardware has + * cleared the VALID bit, which means we have confirmation that it has + * finished processing the command. + */ +static int +sja1105_dynamic_config_wait_complete(struct sja1105_private *priv, + struct sja1105_dyn_cmd *cmd, + const struct sja1105_dynamic_table_ops *ops) +{ + int rc; + + return read_poll_timeout(sja1105_dynamic_config_poll_valid, + rc, rc != -EAGAIN, + SJA1105_DYNAMIC_CONFIG_SLEEP_US, + SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, + false, priv, cmd, ops); +} + /* Provides read access to the settings through the dynamic interface * of the switch. * @blk_idx is used as key to select from the sja1105_dynamic_table_ops. 
@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, struct sja1105_dyn_cmd cmd = {0}; /* SPI payload buffer */ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0}; - int retries = 3; int rc; if (blk_idx >= BLK_IDX_MAX_DYN) @@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, ops->entry_packing(packed_buf, entry, PACK); /* Send SPI write operation: read config table entry */ + mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); - if (rc < 0) + if (rc < 0) { + mutex_unlock(&priv->dynamic_config_lock); return rc; + } - /* Loop until we have confirmation that hardware has finished - * processing the command and has cleared the VALID field - */ - do { - memset(packed_buf, 0, ops->packed_size); - - /* Retrieve the read operation's result */ - rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, - ops->packed_size); - if (rc < 0) - return rc; - - cmd = (struct sja1105_dyn_cmd) {0}; - ops->cmd_packing(packed_buf, &cmd, UNPACK); - - if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) - return -ENOENT; - cpu_relax(); - } while (cmd.valid && --retries); + rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); + mutex_unlock(&priv->dynamic_config_lock); + if (rc < 0) + return rc; - if (cmd.valid) - return -ETIMEDOUT; + if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) + return -ENOENT; /* Don't dereference possibly NULL pointer - maybe caller * only wanted to see whether the entry existed or not. @@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv, ops->entry_packing(packed_buf, entry, PACK); /* Send SPI write operation: read config table entry */ + mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); + if (rc < 0) { + mutex_unlock(&priv->dynamic_config_lock); + return rc; + } + + rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); + mutex_unlock(&priv->dynamic_config_lock); if (rc < 0) return rc; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 924c3f129992f2..c343effe2e967a 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -27,15 +27,29 @@ #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull -static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, - unsigned int startup_delay) +/* Configure the optional reset pin and bring up switch */ +static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len, + unsigned int startup_delay) { + struct gpio_desc *gpio; + + gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + if (!gpio) + return 0; + gpiod_set_value_cansleep(gpio, 1); /* Wait for minimum reset pulse length */ msleep(pulse_len); gpiod_set_value_cansleep(gpio, 0); /* Wait until chip is ready after reset */ msleep(startup_delay); + + gpiod_put(gpio); + + return 0; } static void @@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv) return sja1105_static_config_upload(priv); } -static int sja1105_parse_rgmii_delays(struct sja1105_private *priv) +/* This is the "new way" for a MAC driver to configure its RGMII delay lines, + * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps" + * properties. 
It has the advantage of working with fixed links and with PHYs + * that apply RGMII delays too, and the MAC driver needs not perform any + * special checks. + * + * Previously we were acting upon the "phy-mode" property when we were + * operating in fixed-link, basically acting as a PHY, but with a reversed + * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should + * behave as if it is connected to a PHY which has applied RGMII delays in the + * TX direction. So if anything, RX delays should have been added by the MAC, + * but we were adding TX delays. + * + * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall + * back to the legacy behavior and apply delays on fixed-link ports based on + * the reverse interpretation of the phy-mode. This is a deviation from the + * expected default behavior which is to simply apply no delays. To achieve + * that behavior with the new bindings, it is mandatory to specify + * "{rx,tx}-internal-delay-ps" with a value of 0. + */ +static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port, + struct device_node *port_dn) { - struct dsa_switch *ds = priv->ds; - int port; + phy_interface_t phy_mode = priv->phy_mode[port]; + struct device *dev = &priv->spidev->dev; + int rx_delay = -1, tx_delay = -1; - for (port = 0; port < ds->num_ports; port++) { - if (!priv->fixed_link[port]) - continue; + if (!phy_interface_mode_is_rgmii(phy_mode)) + return 0; - if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID || - priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) - priv->rgmii_rx_delay[port] = true; + of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); + of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); - if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID || - priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) - priv->rgmii_tx_delay[port] = true; + if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { + dev_warn(dev, + "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " + "please update device tree to specify \"rx-internal-delay-ps\" and " + "\"tx-internal-delay-ps\"", + port); - if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) && - !priv->info->setup_rgmii_delay) - return -EINVAL; + if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || + phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + rx_delay = 2000; + + if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || + phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + tx_delay = 2000; + } + + if (rx_delay < 0) + rx_delay = 0; + if (tx_delay < 0) + tx_delay = 0; + + if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { + dev_err(dev, "Chip cannot apply RGMII delays\n"); + return -EINVAL; } + + if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) || + (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) || + (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) || + (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) { + dev_err(dev, + "port %d RGMII delay values out of range, must be between %d and %d ps\n", + port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS); + return -ERANGE; + } + + priv->rgmii_rx_delay_ps[port] = rx_delay; + priv->rgmii_tx_delay_ps[port] = tx_delay; + return 0; } @@ -1166,6 +1231,12 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, } priv->phy_mode[index] = phy_mode; + + err = sja1105_parse_rgmii_delays(priv, index, child); + if (err) { + of_node_put(child); + return err; + } } return 0; @@ -1360,7 +1431,7 @@ static void sja1105_phylink_validate(struct 
dsa_switch *ds, int port, */ if (state->interface != PHY_INTERFACE_MODE_NA && sja1105_phy_mode_mismatch(priv, port, state->interface)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -1380,9 +1451,8 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 2500baseX_Full); } - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static int @@ -1766,6 +1836,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { + struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; int i; @@ -1802,7 +1873,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, u64_to_ether_addr(l2_lookup.macaddr, macaddr); /* We need to hide the dsa_8021q VLANs from the user. */ - if (!priv->vlan_aware) + if (!dsa_port_is_vlan_filtering(dp)) l2_lookup.vlanid = 0; rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); if (rc) @@ -2295,11 +2366,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, tpid2 = ETH_P_SJA1105; } - if (priv->vlan_aware == enabled) - return 0; - - priv->vlan_aware = enabled; - table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ @@ -2332,7 +2398,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, */ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; l2_lookup_params = table->entries; - l2_lookup_params->shared_learn = !priv->vlan_aware; + l2_lookup_params->shared_learn = !enabled; for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) @@ -2965,7 +3031,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv) continue; dp->priv = sp; - sp->dp = dp; sp->data = tagger_data; slave = dp->slave; kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); @@ -3229,17 +3294,14 @@ static int sja1105_probe(struct spi_device *spi) return -EINVAL; } + rc = sja1105_hw_reset(dev, 1, 1); + if (rc) + return rc; + priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); if (!priv) return -ENOMEM; - /* Configure the optional reset pin and bring up switch */ - priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) - dev_dbg(dev, "reset-gpios not defined, ignoring\n"); - else - sja1105_hw_reset(priv->reset_gpio, 1, 1); - /* Populate our driver private structure (priv) based on * the device tree node that was probed (spi) */ @@ -3303,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi) priv->ds = ds; mutex_init(&priv->ptp_data.lock); + mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); rc = sja1105_parse_dt(priv); @@ -3311,15 +3374,6 @@ static int sja1105_probe(struct spi_device *spi) return rc; } - /* Error out early if internal delays are required through DT - * and we can't apply them. 
- */ - rc = sja1105_parse_rgmii_delays(priv); - if (rc < 0) { - dev_err(ds->dev, "RGMII delay not supported\n"); - return rc; - } - if (IS_ENABLED(CONFIG_NET_SCH_CBS)) { priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, sizeof(struct sja1105_cbs_entry), diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index 6802f4057cc038..f5dca6a9b0f93c 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv, vl_lookup[k].vlanid = rule->key.vl.vid; vl_lookup[k].vlanprior = rule->key.vl.pcp; } else { - u16 vid = dsa_8021q_rx_vid(priv->ds, port); + struct dsa_port *dp = dsa_to_port(priv->ds, port); + u16 vid = dsa_tag_8021q_rx_vid(dp); vl_lookup[k].vlanid = vid; vl_lookup[k].vlanprior = 0; @@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port, bool append) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); + struct dsa_port *dp = dsa_to_port(priv->ds, port); + bool vlan_aware = dsa_port_is_vlan_filtering(dp); int rc; - if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on DMAC"); return -EOPNOTSUPP; - } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; @@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, u32 num_entries, struct action_gate_entry *entries) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); + struct dsa_port *dp = dsa_to_port(priv->ds, port); + bool vlan_aware = dsa_port_is_vlan_filtering(dp); int ipv = -1; int i, rc; s32 rem; @@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, return -ERANGE; } - if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on DMAC"); return -EOPNOTSUPP; - } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 469420941054e9..910fcb3b252b81 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -456,7 +456,7 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 1000baseT_Full); break; default: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); dev_err(ds->dev, "Unsupported port: %i\n", port); return; } @@ -467,10 +467,8 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Full); - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c 
b/drivers/net/dsa/xrs700x/xrs700x_mdio.c index d01cf1073d49d5..127a677d1f3918 100644 --- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c +++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c @@ -31,7 +31,7 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg, uval = (u16)FIELD_GET(GENMASK(31, 16), reg); - ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval); + ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval); if (ret < 0) { dev_err(dev, "xrs mdiobus_write returned %d\n", ret); return ret; @@ -39,13 +39,13 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg, uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ); - ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval); + ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval); if (ret < 0) { dev_err(dev, "xrs mdiobus_write returned %d\n", ret); return ret; } - ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD); + ret = mdiodev_read(mdiodev, XRS_MDIO_IBD); if (ret < 0) { dev_err(dev, "xrs mdiobus_read returned %d\n", ret); return ret; @@ -64,7 +64,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg, u16 uval; int ret; - ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val); + ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val); if (ret < 0) { dev_err(dev, "xrs mdiobus_write returned %d\n", ret); return ret; @@ -72,7 +72,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg, uval = (u16)FIELD_GET(GENMASK(31, 16), reg); - ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval); + ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval); if (ret < 0) { dev_err(dev, "xrs mdiobus_write returned %d\n", ret); return ret; @@ -80,7 +80,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg, uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE); - ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval); + ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval); if (ret < 0) { dev_err(dev, "xrs mdiobus_write returned %d\n", ret); return ret; diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 87c906e744fb72..846fa3af45040c 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr, { struct el3_private *lp = netdev_priv(dev); - memcpy(dev->dev_addr, phys_addr, ETH_ALEN); + eth_hw_addr_set(dev, (u8 *)phys_addr); dev->base_addr = ioaddr; dev->irq = irq; dev->if_port = if_port; diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 6f0ea2facea9e6..1d124b0f65e7ce 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, { struct corkscrew_private *vp = netdev_priv(dev); unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + __be16 addr[ETH_ALEN / 2]; int i; int irq; @@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, /* Read the station address from the EEPROM. */ EL3WINDOW(0); for (i = 0; i < 0x18; i++) { - __be16 *phys_addr = (__be16 *) dev->dev_addr; int timer; outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); /* Pause for at least 162 us. for the read to take place. 
*/ @@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, eeprom[i] = inw(ioaddr + Wn0EepromData); checksum ^= eeprom[i]; if (i < 3) - phys_addr[i] = htons(eeprom[i]); + addr[i] = htons(eeprom[i]); } + eth_hw_addr_set(dev, (u8 *)addr); checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index dd4d3c48b98db1..dc3b7c96061110 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link) struct net_device *dev = link->priv; struct el3_private *lp = netdev_priv(dev); int ret, i, j; + __be16 addr[ETH_ALEN / 2]; unsigned int ioaddr; - __be16 *phys_addr; char *cardname; __u32 config; u8 *buf; size_t len; - phys_addr = (__be16 *)dev->dev_addr; - dev_dbg(&link->dev, "3c574_config()\n"); link->io_lines = 16; @@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x88, &buf); if (buf && len >= 6) { for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); + addr[i] = htons(le16_to_cpu(buf[i * 2])); kfree(buf); } else { kfree(buf); /* 0 < len < 6 */ EL3WINDOW(0); for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); - if (phys_addr[0] == htons(0x6060)) { + addr[i] = htons(read_eeprom(ioaddr, i + 10)); + if (addr[0] == htons(0x6060)) { pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", dev->base_addr, dev->base_addr+15); goto failed; } } + eth_hw_addr_set(dev, (u8 *)addr); if (link->prod_id[1]) cardname = link->prod_id[1]; else diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 09816e84314d00..4673bc1604e7b2 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link) static int tc589_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; - __be16 *phys_addr; int ret, i, j, multi = 0, fifo; + __be16 addr[ETH_ALEN / 2]; unsigned int ioaddr; static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; u8 *buf; @@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link) dev_dbg(&link->dev, "3c589_config\n"); - phys_addr = (__be16 *)dev->dev_addr; /* Is this a 3c562? */ if (link->manf_id != MANFID_3COM) dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); @@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x88, &buf); if (buf && len >= 6) { for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i*2])); + addr[i] = htons(le16_to_cpu(buf[i*2])); kfree(buf); } else { kfree(buf); /* 0 < len < 6 */ for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - if (phys_addr[0] == htons(0x6060)) { + addr[i] = htons(read_eeprom(ioaddr, i)); + if (addr[0] == htons(0x6060)) { dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", dev->base_addr, dev->base_addr+15); goto failed; } } + eth_hw_addr_set(dev, (u8 *)addr); /* The address and resource configuration register aren't loaded from * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. 
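The 3Com and 8390 conversions above all follow the same recipe: read the station address into a local buffer and commit it with eth_hw_addr_set() instead of writing dev->dev_addr directly, which drivers are no longer expected to do. A hedged sketch of that recipe; the register window, readb() accessor and base pointer below are placeholders rather than any specific driver's layout:

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/netdevice.h>

static void my_set_mac_from_hw(struct net_device *dev, void __iomem *base)
{
	u8 addr[ETH_ALEN];
	int i;

	/* Read the MAC one byte at a time from a hypothetical register window. */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = readb(base + i);

	/* Commit it through the helper instead of memcpy()ing into
	 * dev->dev_addr.
	 */
	eth_hw_addr_set(dev, addr);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);	/* fall back to a random MAC */
}

For addresses read as 16-bit words (as in the 3c515/3c574/3c589 hunks) the buffer is instead declared as __be16 addr[ETH_ALEN / 2] and cast to u8 * at the eth_hw_addr_set() call.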
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 7b0ae9efc004f5..ccf07667aa5e65 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, struct vortex_private *vp; int option; unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + __be16 addr[ETH_ALEN / 2]; int i, step; struct net_device *dev; static int printed_version; @@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) - ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + addr[i] = htons(eeprom[i + 10]); + eth_hw_addr_set(dev, (u8 *)addr); if (print_info) pr_cont(" %pM", dev->dev_addr); /* Unfortunately an all zero eeprom passes the checksum and this diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index da1ae37a9d7391..991ad953aa7906 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); pr_cont(" %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 6c6bdd5913ec1f..1f8acbba5b6b08 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev) for (i = 0; i < 16; i++) SA_prom[i] = SA_prom[i+i]; - memcpy(dev->dev_addr, SA_prom, ETH_ALEN); + eth_hw_addr_set(dev, SA_prom); } #ifdef CONFIG_AX88796_93CX6 @@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev) (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, mac_addr); } #endif if (ax->plat->wordlength == 2) { @@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev) /* load the mac-address from the device */ if (ax->plat->flags & AXFLG_MAC_FROMDEV) { + u8 addr[ETH_ALEN]; + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ei_local->mem + E8390_CMD); /* 0x61 */ for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = - ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); + addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); + eth_hw_addr_set(dev, addr); } if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, ax->plat->mac_addr); if (!is_valid_ether_addr(dev->dev_addr)) { eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 3c370e686ec3ea..3aef959fc25bd0 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; int i, j; /* This is based on drivers/net/ethernet/8390/ne.c */ @@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link) for (i = 0; i < 6; i += 2) { j = inw(ioaddr + AXNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; + addr[i] = j & 0xff; + addr[i+1] = j >> 8; } + eth_hw_addr_set(dev, 
addr); + return 1; } /* get_prom */ diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 4ad8031ab66957..e320cccba61a68 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev) if (ret) return ret; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 9afc712f594847..0a9118b8be0c64 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) dev->base_addr = ioaddr; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = SA_prom[i]; - } + eth_hw_addr_set(dev, SA_prom); pr_cont("%pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index d6715008e04d7b..6a0a2039600a0a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, dev->ethtool_ops = &ne2k_pci_ethtool_ops; NS8390_init(dev, 0); - memcpy(dev->dev_addr, SA_prom, dev->addr_len); + eth_hw_addr_set(dev, SA_prom); i = register_netdev(dev); if (i) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 96ad72abd37326..0f07fe03da98c8 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link) { struct net_device *dev = link->priv; u_char __iomem *base, *virt; + u8 addr[ETH_ALEN]; int i, j; /* Allocate a small memory window */ @@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link) (readb(base+2) == hw_info[i].a1) && (readb(base+4) == hw_info[i].a2)) { for (j = 0; j < 6; j++) - dev->dev_addr[j] = readb(base + (j<<1)); + addr[j] = readb(base + (j<<1)); + eth_hw_addr_set(dev, addr); break; } } @@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; u_char prom[32]; int i, j; @@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link) } if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { for (j = 0; j < 6; j++) - dev->dev_addr[j] = prom[j<<1]; + addr[j] = prom[j<<1]; + eth_hw_addr_set(dev, addr); return (i < NR_INFO) ? hw_info+i : &default_info; } return NULL; @@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link) static struct hw_info *get_dl10019(struct pcmcia_device *link) { struct net_device *dev = link->priv; + u8 addr[ETH_ALEN]; int i; u_char sum; @@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link) if (sum != 0xff) return NULL; for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); + addr[i] = inb_p(dev->base_addr + 0x14 + i); + eth_hw_addr_set(dev, addr); i = inb(dev->base_addr + 0x1f); return ((i == 0x91)||(i == 0x99)) ? 
&dl10022_info : &dl10019_info; } @@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; int i, j; /* Not much of a test, but the alternatives are messy */ @@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) for (i = 0; i < 6; i += 2) { j = inw(ioaddr + PCNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; + addr[i] = j & 0xff; + addr[i+1] = j >> 8; } + eth_hw_addr_set(dev, addr); return NULL; } @@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) static struct hw_info *get_hwired(struct pcmcia_device *link) { struct net_device *dev = link->priv; + u8 addr[ETH_ALEN]; int i; for (i = 0; i < 6; i++) @@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link) return NULL; for (i = 0; i < 6; i++) - dev->dev_addr[i] = hw_addr[i]; + addr[i] = hw_addr[i]; + eth_hw_addr_set(dev, addr); return &default_info; } /* get_hwired */ diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c index fbbd7f22c14277..bd89ca8a92dfbc 100644 --- a/drivers/net/ethernet/8390/stnic.c +++ b/drivers/net/ethernet/8390/stnic.c @@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val) static int __init stnic_probe(void) { struct net_device *dev; - int i, err; struct ei_device *ei_local; + int err; /* If we are not running on a SolutionEngine, give up now */ if (! MACH_SE) @@ -119,8 +119,7 @@ static int __init stnic_probe(void) #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); #endif - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = stnic_eadr[i]; + eth_hw_addr_set(dev, stnic_eadr); /* Set the base address to point to the NIC, not the "real" base! 
*/ dev->base_addr = 0x1000; diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c index 35a500a215213b..e8b4fe813a0828 100644 --- a/drivers/net/ethernet/8390/zorro8390.c +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board, if (i) return i; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); pr_debug("Found ethernet address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 412ae3e43ffb71..4601b38f532a89 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -33,6 +33,7 @@ source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" +source "drivers/net/ethernet/asix/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index aaa5078cd7d133..fdd8c6c174519b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ +obj-$(CONFIG_NET_VENDOR_ASIX) += asix/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index c4ecf4fcadf841..1cfdd01b4c2e33 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv) static void owl_emac_set_hw_mac_addr(struct net_device *netdev) { struct owl_emac_priv *priv = netdev_priv(netdev); - u8 *mac_addr = netdev->dev_addr; + const u8 *mac_addr = netdev->dev_addr; u32 addr_high, addr_low; addr_high = mac_addr[0] << 8 | mac_addr[1]; @@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, skaddr->sa_data); owl_emac_set_hw_mac_addr(netdev); return owl_emac_setup_frame_xmit(netdev_priv(netdev)); @@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev) struct device *dev = netdev->dev.parent; int ret; - ret = eth_platform_get_mac_address(dev, netdev->dev_addr); + ret = platform_get_ethdev_address(dev, netdev); if (!ret && is_valid_ether_addr(netdev->dev_addr)) return; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index e0f6cc910bd243..c6982f7caf9b47 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -641,6 +641,7 @@ static int starfire_init_one(struct pci_dev *pdev, struct netdev_private *np; int i, irq, chip_idx = ent->driver_data; struct net_device *dev; + u8 addr[ETH_ALEN]; long ioaddr; void __iomem *base; int drv_flags, io_size; @@ -696,7 +697,8 @@ static int starfire_init_one(struct pci_dev *pdev, /* Serial EEPROM reads are hidden by the hardware. 
*/ for (i = 0; i < 6; i++) - dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i); + addr[i] = readb(base + EEPROMCtrl + 20 - i); + eth_hw_addr_set(dev, addr); #if ! defined(final_version) /* Dump the EEPROM contents during development. */ if (debug > 4) @@ -955,7 +957,7 @@ static int netdev_open(struct net_device *dev) writew(0, ioaddr + PerfFilterTable + 4); writew(0, ioaddr + PerfFilterTable + 8); for (i = 1; i < 16; i++) { - __be16 *eaddrs = (__be16 *)dev->dev_addr; + const __be16 *eaddrs = (const __be16 *)dev->dev_addr; void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; @@ -1787,14 +1789,14 @@ static void set_rx_mode(struct net_device *dev) } else if (netdev_mc_count(dev) <= 14) { /* Use the 16 element perfect filter, skip first two entries. */ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; - __be16 *eaddrs; + const __be16 *eaddrs; netdev_for_each_mc_addr(ha, dev) { eaddrs = (__be16 *) ha->addr; writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; } - eaddrs = (__be16 *)dev->dev_addr; + eaddrs = (const __be16 *)dev->dev_addr; i = netdev_mc_count(dev) + 2; while (i++ < 16) { writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; @@ -1805,7 +1807,7 @@ static void set_rx_mode(struct net_device *dev) } else { /* Must use a multicast hash table. */ void __iomem *filter_addr; - __be16 *eaddrs; + const __be16 *eaddrs; __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); @@ -1819,7 +1821,7 @@ static void set_rx_mode(struct net_device *dev) } /* Clear the perfect filter list, skip first two entries. 
*/ filter_addr = ioaddr + PerfFilterTable + 2 * 16; - eaddrs = (__be16 *)dev->dev_addr; + eaddrs = (const __be16 *)dev->dev_addr; for (i = 2; i < 16; i++) { writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index c560ad06f0be3e..447dc64a17e5d1 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]); GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev->dev_addr[5]); @@ -1346,6 +1346,7 @@ static int greth_of_probe(struct platform_device *ofdev) int i; int err; int tmp; + u8 addr[ETH_ALEN]; unsigned long timeout; dev = alloc_etherdev(sizeof(struct greth_private)); @@ -1449,8 +1450,6 @@ static int greth_of_probe(struct platform_device *ofdev) break; } if (i == 6) { - u8 addr[ETH_ALEN]; - err = of_get_mac_address(ofdev->dev.of_node, addr); if (!err) { for (i = 0; i < 6; i++) @@ -1464,7 +1463,8 @@ static int greth_of_probe(struct platform_device *ofdev) } for (i = 0; i < 6; i++) - dev->dev_addr[i] = macaddr[i]; + addr[i] = macaddr[i]; + eth_hw_addr_set(dev, addr); macaddr[5]++; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 920633161174d5..f4edc616388c04 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) et131x_init_send(adapter); et131x_hwaddr_init(adapter); - ether_addr_copy(netdev->dev_addr, adapter->addr); + eth_hw_addr_set(netdev, adapter->addr); /* Init the device with the new settings */ et131x_adapter_setup(adapter); @@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev, netif_napi_add(netdev, &adapter->napi, et131x_poll, 64); - ether_addr_copy(netdev->dev_addr, adapter->addr); + eth_hw_addr_set(netdev, adapter->addr); rc = -ENOMEM; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 696517eae77f0a..1fc9a1cd3ef8f7 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev) static void slic_set_mac_address(struct slic_device *sdev) { - u8 *addr = sdev->netdev->dev_addr; + const u8 *addr = sdev->netdev->dev_addr; u32 val; val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24; @@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev) goto free_eeprom; } /* set mac address */ - ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]); + eth_hw_addr_set(sdev->netdev, mac[devfn]); free_eeprom: dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 037baea1c73881..800ee022388fd5 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - 
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev-> dev_addr[2], db->membase + EMAC_MAC_A1_REG); @@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev) } /* Read MAC-address from DT */ - ret = of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { /* if the MAC address is invalid get a random one */ eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 9dc12b13061f81..732da15a382743 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -869,6 +869,7 @@ static int ace_init(struct net_device *dev) int board_idx, ecode = 0; short i; unsigned char cache_size; + u8 addr[ETH_ALEN]; ap = netdev_priv(dev); regs = ap->regs; @@ -988,12 +989,13 @@ static int ace_init(struct net_device *dev) writel(mac1, ®s->MacAddrHi); writel(mac2, ®s->MacAddrLo); - dev->dev_addr[0] = (mac1 >> 8) & 0xff; - dev->dev_addr[1] = mac1 & 0xff; - dev->dev_addr[2] = (mac2 >> 24) & 0xff; - dev->dev_addr[3] = (mac2 >> 16) & 0xff; - dev->dev_addr[4] = (mac2 >> 8) & 0xff; - dev->dev_addr[5] = mac2 & 0xff; + addr[0] = (mac1 >> 8) & 0xff; + addr[1] = mac1 & 0xff; + addr[2] = (mac2 >> 24) & 0xff; + addr[3] = (mac2 >> 16) & 0xff; + addr[4] = (mac2 >> 8) & 0xff; + addr[5] = mac2 & 0xff; + eth_hw_addr_set(dev, addr); printk("MAC: %pM\n", dev->dev_addr); @@ -2712,15 +2714,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p) struct ace_private *ap = netdev_priv(dev); struct ace_regs __iomem *regs = ap->regs; struct sockaddr *addr=p; - u8 *da; + const u8 *da; struct cmd cmd; if(netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); - da = (u8 *)dev->dev_addr; + da = (const u8 *)dev->dev_addr; writel(da[0] << 8 | da[1], ®s->MacAddrHi); writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 1c00d719e5d766..d75d95a97dd932 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev) return 0; } -static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) +static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr) { u32 msb; u32 lsb; @@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; /* get default MAC address from device tree */ - ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr); + ret = of_get_ethdev_address(pdev->dev.of_node, ndev); if (ret) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0e43000614abd9..7d5d885d85d5ed 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter, ether_addr_copy(adapter->mac_addr, netdev->dev_addr); } else { ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); - ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + eth_hw_addr_set(netdev, adapter->mac_addr); } /* Set offload features */ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 
4786f0504691d6..899c8a2a34b6ba 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -168,7 +168,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM + depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 92e4246dc359cf..9421afb950f791 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) int i; struct sockaddr *addr = p; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&lp->lock); /* Setting the MAC address to the device */ for (i = 0; i < ETH_ALEN; i++) @@ -1743,6 +1743,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, unsigned long reg_addr, reg_len; struct amd8111e_priv *lp; struct net_device *dev; + u8 addr[ETH_ALEN]; err = pci_enable_device(pdev); if (err) { @@ -1809,7 +1810,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Initializing MAC address */ for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = readb(lp->mmio + PADR + i); + addr[i] = readb(lp->mmio + PADR + i); + eth_hw_addr_set(dev, addr); /* Setting user defined parametrs */ lp->ext_phy_option = speed_duplex[card_idx]; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 9d2f49fd945ed3..9c7d9690d00c4f 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, switch( lp->cardtype ) { case OLD_RIEBL: /* No ethernet address! (Set some default address) */ - memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN); + eth_hw_addr_set(dev, OldRieblDefHwaddr); break; case NEW_RIEBL: lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN); @@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr ) return -EIO; } - memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); + eth_hw_addr_set(dev, saddr->sa_data); for( i = 0; i < 6; i++ ) MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 ); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 9c1636222b99cc..c6f003975621b4 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev) aup->phy1_search_mac0 = 1; } else { if (is_valid_ether_addr(pd->mac)) { - memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac); } else { /* Set a random MAC since no valid provided by platform_data. */ eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 4019cab875051d..30ee5329bd7cb9 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, mace_init Resets the MACE chip. 
---------------------------------------------------------------------------- */ -static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) +static int mace_init(mace_private *lp, unsigned int ioaddr, + const char *enet_addr) { int i; int ct = 0; @@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link) kfree(buf); goto failed; } - memcpy(dev->dev_addr, buf, ETH_ALEN); + eth_hw_addr_set(dev, buf); kfree(buf); /* Verify configuration by reading the MACE ID. */ diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 70d76fdb9f569d..f5c50ff377ff70 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1595,6 +1595,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) struct net_device *dev; const struct pcnet32_access *a = NULL; u8 promaddr[ETH_ALEN]; + u8 addr[ETH_ALEN]; int ret = -ENODEV; /* reset the chip */ @@ -1760,9 +1761,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) unsigned int val; val = a->read_csr(ioaddr, i + 12) & 0x0ffff; /* There may be endianness issues here. */ - dev->dev_addr[2 * i] = val & 0x0ff; - dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + addr[2 * i] = val & 0x0ff; + addr[2 * i + 1] = (val >> 8) & 0x0ff; } + eth_hw_addr_set(dev, addr); /* read PROM address and compare with CSR address */ for (i = 0; i < ETH_ALEN; i++) @@ -1775,13 +1777,16 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) pr_cont(" warning: CSR address invalid,\n"); pr_info(" using instead PROM address of"); } - memcpy(dev->dev_addr, promaddr, ETH_ALEN); + eth_hw_addr_set(dev, promaddr); } } /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ - if (!is_valid_ether_addr(dev->dev_addr)) - eth_zero_addr(dev->dev_addr); + if (!is_valid_ether_addr(dev->dev_addr)) { + static const u8 zero_addr[ETH_ALEN] = {}; + + eth_hw_addr_set(dev, zero_addr); + } if (pcnet32_debug & NETIF_MSG_PROBE) { pr_cont(" %pM", dev->dev_addr); diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 4a845bc071b2d6..007bd77872916b 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev) unsigned long ioaddr; struct lance_private *lp; - int i; static int did_version; volatile unsigned short *ioaddr_probe; unsigned short tmp1, tmp2; @@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev) dev->irq); /* copy in the ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* tell the card it's ether address, bytes swapped */ MEM->init.hwaddr[0] = dev->dev_addr[1]; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index ddece276ae238b..22d609563af83a 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op, struct device_node *dp = op->dev.of_node; struct lance_private *lp; struct net_device *dev; - int i; dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) @@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op, * will copy the address in the device structure to the lance * initialization block. 
*/ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* Get the IO region */ lp->lregs = of_ioremap(&op->resource[0], 0, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b2cd3bdba9f89b..533b8519ec3528 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1331,6 +1331,10 @@ #define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 #endif +#ifndef MDIO_VEND2_PMA_MISC_CTRL0 +#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1389,6 +1393,10 @@ #define XGBE_PMA_RX_RST_0_RESET_ON 0x10 #define XGBE_PMA_RX_RST_0_RESET_OFF 0x00 +#define XGBE_PMA_PLL_CTRL_MASK BIT(15) +#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15) +#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index d5fd49dd25f336..3936543a74d8f7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) return 0; } -static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr) +static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 17a585adfb49ce..30d24d19f40d11 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev) clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); - flush_workqueue(pdata->an_workqueue); destroy_workqueue(pdata->an_workqueue); - flush_workqueue(pdata->dev_workqueue); destroy_workqueue(pdata->dev_workqueue); set_bit(XGBE_DOWN, &pdata->dev_state); @@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, saddr->sa_data); hw_if->set_mac_address(pdata, netdev->dev_addr); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index bafc51c34e0be3..94879cf8b42001 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -369,9 +369,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported); - bitmap_and(advertising, - cmd->link_modes.advertising, lks->link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(advertising, cmd->link_modes.advertising, + lks->link_modes.supported); if ((cmd->base.autoneg == AUTONEG_ENABLE) && bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) { @@ -384,8 +383,7 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, pdata->phy.autoneg = cmd->base.autoneg; pdata->phy.speed = speed; pdata->phy.duplex = cmd->base.duplex; - bitmap_copy(lks->link_modes.advertising, advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(lks->link_modes.advertising, 
advertising); if (cmd->base.autoneg == AUTONEG_ENABLE) XGBE_SET_ADV(lks, Autoneg); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index a218dc6f2edd56..0e8698928e4d73 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; - memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, pdata->mac_addr); /* Initialize ECC timestamps */ pdata->tx_sec_period = jiffies; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 18e48b3bc402b5..213769054391c1 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -1977,12 +1977,26 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata) } } +static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable) +{ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, + XGBE_PMA_PLL_CTRL_MASK, + enable ? XGBE_PMA_PLL_CTRL_ENABLE + : XGBE_PMA_PLL_CTRL_DISABLE); + + /* Wait for command to complete */ + usleep_range(100, 200); +} + static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, unsigned int sub_cmd) { unsigned int s0 = 0; unsigned int wait; + /* Disable PLL re-initialization during FW command processing */ + xgbe_phy_pll_ctrl(pdata, false); + /* Log if a previous command did not complete */ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) { netif_dbg(pdata, link, pdata->netdev, @@ -2003,7 +2017,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, wait = XGBE_RATECHANGE_COUNT; while (wait--) { if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) - return; + goto reenable_pll; usleep_range(1000, 2000); } @@ -2013,6 +2027,10 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, /* Reset on error */ xgbe_phy_rx_reset(pdata); + +reenable_pll: + /* Enable PLL re-initialization */ + xgbe_phy_pll_ctrl(pdata, true); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 3305979a9f7c1f..607a2c90513b52 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -729,7 +729,7 @@ struct xgbe_ext_stats { struct xgbe_hw_if { int (*tx_complete)(struct xgbe_ring_desc *); - int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr); + int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr); int (*config_rx_mode)(struct xgbe_prv_data *); int (*enable_rx_csum)(struct xgbe_prv_data *); diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c index 2da979e4fad147..6423e22e05b244 100644 --- a/drivers/net/ethernet/apm/xgene-v2/mac.c +++ b/drivers/net/ethernet/apm/xgene-v2/mac.c @@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata) void xge_mac_set_station_addr(struct xge_pdata *pdata) { - u8 *dev_addr = pdata->ndev->dev_addr; + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 80399c8980bd30..d022b6db9e0661 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -36,7 +36,7 @@ static int 
xge_get_resources(struct xge_pdata *pdata) return -ENOMEM; } - if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) + if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 5f657879134ed2..e641dbbea1e27e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr) static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) { + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = pdata->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5f1fc6582d74a2..220dc42af31ae1 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) xgene_get_port_id_acpi(dev, pdata); #endif - if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) + if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index f482ced2cadd9c..72b5e8eb0ec7d8 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p) static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) { + const u8 *dev_addr = p->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = p->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 304b5d43f2369d..86607b79c09f70 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata) static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) { + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = pdata->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index a989d2df59ad0b..9a650d1c1bdd1d 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile unsigned short regValue; - unsigned short *pWord16; + const unsigned short *pWord16; int i; /* XXDEBUG(("bmac: enter init_registers\n")); */ @@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev) bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ - pWord16 = (unsigned short *)dev->dev_addr; + pWord16 = (const unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); @@ -521,19 +521,16 @@ static int 
bmac_resume(struct macio_dev *mdev) static int bmac_set_address(struct net_device *dev, void *addr) { struct bmac_data *bp = netdev_priv(dev); - unsigned char *p = addr; - unsigned short *pWord16; + const unsigned short *pWord16; unsigned long flags; - int i; XXDEBUG(("bmac: enter set_address\n")); spin_lock_irqsave(&bp->lock, flags); - for (i = 0; i < 6; ++i) { - dev->dev_addr[i] = p[i]; - } + eth_hw_addr_set(dev, addr); + /* load up the hardware address */ - pWord16 = (unsigned short *)dev->dev_addr; + pWord16 = (const unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index bed481816ea313..062a300a566a55 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -217,7 +217,7 @@ struct aq_hw_ops { int (*hw_ring_tx_head_update)(struct aq_hw_s *self, struct aq_ring_s *aq_ring); - int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); + int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr); int (*hw_soft_reset)(struct aq_hw_s *self); @@ -226,7 +226,7 @@ struct aq_hw_ops { int (*hw_reset)(struct aq_hw_s *self); - int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr); + int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr); int (*hw_start)(struct aq_hw_s *self); @@ -373,7 +373,7 @@ struct aq_fw_ops { int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable); int (*set_power)(struct aq_hw_s *self, unsigned int power_state, - u8 *mac); + const u8 *mac); int (*send_fw_request)(struct aq_hw_s *self, const struct hw_fw_request_iface *fw_req, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 4a6dfac857ca96..02058fe79f52b1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic); static int aq_apply_secy_cfg(struct aq_nic_s *nic, const struct macsec_secy *secy); -static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac) +static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac) { u32 tmp[2] = { 0 }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 6c049864dac081..1acf544afeb444 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -300,6 +300,7 @@ static bool aq_nic_is_valid_ether_addr(const u8 *addr) int aq_nic_ndev_register(struct aq_nic_s *self) { + u8 addr[ETH_ALEN]; int err = 0; if (!self->ndev) { @@ -316,12 +317,13 @@ int aq_nic_ndev_register(struct aq_nic_s *self) #endif mutex_lock(&self->fwreq_mutex); - err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, - self->ndev->dev_addr); + err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); mutex_unlock(&self->fwreq_mutex); if (err) goto err_exit; + eth_hw_addr_set(self->ndev, addr); + if (!is_valid_ether_addr(self->ndev->dev_addr) || !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) { netdev_warn(self->ndev, "MAC is invalid, will use random."); @@ -332,7 +334,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) { static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; - ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + eth_hw_addr_set(self->ndev, mac_addr_permanent); } #endif diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 611875ef2cd15b..4625ccb7949971 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr) { unsigned int h = 0U; unsigned int l = 0U; @@ -348,7 +348,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) return err; } -static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 9f1b15077e7d6c..d875ce3ec759bb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr) { unsigned int h = 0U; unsigned int l = 0U; @@ -558,7 +558,7 @@ int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) return err; } -static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index d8db972113ec1f..5298846dd9f772 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring); void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self); -int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr); +int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr); int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc); int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 404cbf60d3f2ff..fc0e6600664464 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self) } static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled, - u8 *mac) + const u8 *mac) { struct hw_atl_utils_fw_rpc *prpc = NULL; unsigned int rpc_size = 0U; @@ -987,7 +987,7 @@ static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled, } static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, - u8 *mac) + const u8 *mac) { struct hw_atl_utils_fw_rpc *prpc = NULL; unsigned int rpc_size = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index ee0c22d0493540..eac631c45c565a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) return 0; } -static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac) { struct hw_atl_utils_fw_rpc *rpc = NULL; struct offload_info *info = NULL; @@ -404,7 +404,7 @@ static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac) } static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, - u8 *mac) + const u8 *mac) { int err = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 92f64048bf691e..c98708bb044cad 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -516,7 +516,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl2_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 92a79c4ffa2c7b..0a67612af22814 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -26,7 +26,7 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on ARC || COMPILE_TEST help On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x @@ -36,7 +36,7 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR + depends on OF_IRQ && REGULATOR depends on ARCH_ROCKCHIP || COMPILE_TEST help Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. 
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 38c288ec905901..c642c3d3e60060 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); arc_emac_set_address_internal(ndev); @@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) } /* Get MAC address from device tree */ - err = of_get_mac_address(dev->of_node, ndev->dev_addr); + err = of_get_ethdev_address(dev->of_node, ndev); if (err) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 54cdafdd067db3..9acf589b1178bb 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset", GPIOD_OUT_LOW); if (IS_ERR(data->reset_gpio)) { - error = PTR_ERR(data->reset_gpio); - dev_err(priv->dev, "Failed to request gpio: %d\n", error); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio), + "Failed to request gpio\n"); } of_property_read_u32(np, "phy-reset-duration", &data->msec); @@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) error = of_mdiobus_register(bus, priv->dev->of_node); if (error) { - dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, error, + "cannot register MDIO bus %s\n", bus->name); } return 0; diff --git a/drivers/net/ethernet/asix/Kconfig b/drivers/net/ethernet/asix/Kconfig new file mode 100644 index 00000000000000..eed02453314ce2 --- /dev/null +++ b/drivers/net/ethernet/asix/Kconfig @@ -0,0 +1,35 @@ +# +# Asix network device configuration +# + +config NET_VENDOR_ASIX + bool "Asix devices" + default y + help + If you have a network (Ethernet, non-USB, not NE2000 compatible) + interface based on a chip from ASIX, say Y. + +if NET_VENDOR_ASIX + +config SPI_AX88796C + tristate "Asix AX88796C-SPI support" + select PHYLIB + depends on SPI + depends on GPIOLIB + help + Say Y here if you intend to use ASIX AX88796C attached in SPI mode. + +config SPI_AX88796C_COMPRESSION + bool "SPI transfer compression" + default n + depends on SPI_AX88796C + help + Say Y here to enable SPI transfer compression. It saves up + to 24 dummy cycles during each transfer which may noticeably + speed up short transfers. This sets the default value that is + inherited by network interfaces during probe. It can be + changed at run time via spi-compression ethtool tunable. + + If unsure say N. + +endif # NET_VENDOR_ASIX diff --git a/drivers/net/ethernet/asix/Makefile b/drivers/net/ethernet/asix/Makefile new file mode 100644 index 00000000000000..0bfbbb0426340b --- /dev/null +++ b/drivers/net/ethernet/asix/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Asix network device drivers. 
+# + +obj-$(CONFIG_SPI_AX88796C) += ax88796c.o +ax88796c-y := ax88796c_main.o ax88796c_ioctl.o ax88796c_spi.o diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c new file mode 100644 index 00000000000000..916ae380a00441 --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 Samsung Electronics Co., Ltd. + * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#define pr_fmt(fmt) "ax88796c: " fmt + +#include +#include +#include +#include + +#include "ax88796c_main.h" +#include "ax88796c_ioctl.h" + +static const char ax88796c_priv_flag_names[][ETH_GSTRING_LEN] = { + "SPICompression", +}; + +static void +ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) +{ + /* Inherit standard device info */ + strncpy(info->driver, DRV_NAME, sizeof(info->driver)); +} + +static u32 ax88796c_get_msglevel(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + return ax_local->msg_enable; +} + +static void ax88796c_set_msglevel(struct net_device *ndev, u32 level) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + ax_local->msg_enable = level; +} + +static void +ax88796c_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + pause->tx_pause = !!(ax_local->flowctrl & AX_FC_TX); + pause->rx_pause = !!(ax_local->flowctrl & AX_FC_RX); + pause->autoneg = (ax_local->flowctrl & AX_FC_ANEG) ? + AUTONEG_ENABLE : + AUTONEG_DISABLE; +} + +static int +ax88796c_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + int fc; + + /* The following logic comes from phylink_ethtool_set_pauseparam() */ + fc = pause->tx_pause ? AX_FC_TX : 0; + fc |= pause->rx_pause ? AX_FC_RX : 0; + fc |= pause->autoneg ? AX_FC_ANEG : 0; + + ax_local->flowctrl = fc; + + if (pause->autoneg) { + phy_set_asym_pause(ax_local->phydev, pause->tx_pause, + pause->rx_pause); + } else { + int maccr = 0; + + phy_set_asym_pause(ax_local->phydev, 0, 0); + maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0; + maccr |= (ax_local->flowctrl & AX_FC_TX) ? 
MACCR_TXFC_ENABLE : 0; + + mutex_lock(&ax_local->spi_lock); + + maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) & + ~(MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE); + AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR); + + mutex_unlock(&ax_local->spi_lock); + } + + return 0; +} + +static int ax88796c_get_regs_len(struct net_device *ndev) +{ + return AX88796C_REGDUMP_LEN + AX88796C_PHY_REGDUMP_LEN; +} + +static void +ax88796c_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *_p) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + int offset, i; + u16 *p = _p; + + memset(p, 0, ax88796c_get_regs_len(ndev)); + + mutex_lock(&ax_local->spi_lock); + + for (offset = 0; offset < AX88796C_REGDUMP_LEN; offset += 2) { + if (!test_bit(offset / 2, ax88796c_no_regs_mask)) + *p = AX_READ(&ax_local->ax_spi, offset); + p++; + } + + mutex_unlock(&ax_local->spi_lock); + + for (i = 0; i < AX88796C_PHY_REGDUMP_LEN / 2; i++) { + *p = phy_read(ax_local->phydev, i); + p++; + } +} + +static void +ax88796c_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + memcpy(data, ax88796c_priv_flag_names, + sizeof(ax88796c_priv_flag_names)); + break; + } +} + +static int +ax88796c_get_sset_count(struct net_device *ndev, int stringset) +{ + int ret = 0; + + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + ret = ARRAY_SIZE(ax88796c_priv_flag_names); + break; + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int ax88796c_set_priv_flags(struct net_device *ndev, u32 flags) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + if (flags & ~AX_PRIV_FLAGS_MASK) + return -EOPNOTSUPP; + + if ((ax_local->priv_flags ^ flags) & AX_CAP_COMP) + if (netif_running(ndev)) + return -EBUSY; + + ax_local->priv_flags = flags; + + return 0; +} + +static u32 ax88796c_get_priv_flags(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + return ax_local->priv_flags; +} + +int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc) +{ + struct ax88796c_device *ax_local = mdiobus->priv; + int ret; + + mutex_lock(&ax_local->spi_lock); + AX_WRITE(&ax_local->ax_spi, MDIOCR_RADDR(loc) + | MDIOCR_FADDR(phy_id) | MDIOCR_READ, P2_MDIOCR); + + ret = read_poll_timeout(AX_READ, ret, + (ret != 0), + 0, jiffies_to_usecs(HZ / 100), false, + &ax_local->ax_spi, P2_MDIOCR); + if (!ret) + ret = AX_READ(&ax_local->ax_spi, P2_MDIODR); + + mutex_unlock(&ax_local->spi_lock); + + return ret; +} + +int +ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val) +{ + struct ax88796c_device *ax_local = mdiobus->priv; + int ret; + + mutex_lock(&ax_local->spi_lock); + AX_WRITE(&ax_local->ax_spi, val, P2_MDIODR); + + AX_WRITE(&ax_local->ax_spi, + MDIOCR_RADDR(loc) | MDIOCR_FADDR(phy_id) + | MDIOCR_WRITE, P2_MDIOCR); + + ret = read_poll_timeout(AX_READ, ret, + ((ret & MDIOCR_VALID) != 0), 0, + jiffies_to_usecs(HZ / 100), false, + &ax_local->ax_spi, P2_MDIOCR); + mutex_unlock(&ax_local->spi_lock); + + return ret; +} + +const struct ethtool_ops ax88796c_ethtool_ops = { + .get_drvinfo = ax88796c_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = ax88796c_get_msglevel, + .set_msglevel = ax88796c_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_pauseparam = ax88796c_get_pauseparam, + .set_pauseparam = ax88796c_set_pauseparam, + .get_regs_len = ax88796c_get_regs_len, + 
.get_regs = ax88796c_get_regs, + .get_strings = ax88796c_get_strings, + .get_sset_count = ax88796c_get_sset_count, + .get_priv_flags = ax88796c_get_priv_flags, + .set_priv_flags = ax88796c_set_priv_flags, +}; + +int ax88796c_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + int ret; + + ret = phy_mii_ioctl(ndev->phydev, ifr, cmd); + + return ret; +} diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.h b/drivers/net/ethernet/asix/ax88796c_ioctl.h new file mode 100644 index 00000000000000..34d2a7dcc5eff4 --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_ioctl.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 Samsung Electronics Co., Ltd. + * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#ifndef _AX88796C_IOCTL_H +#define _AX88796C_IOCTL_H + +#include +#include + +#include "ax88796c_main.h" + +extern const struct ethtool_ops ax88796c_ethtool_ops; + +bool ax88796c_check_power(const struct ax88796c_device *ax_local); +bool ax88796c_check_power_and_wake(struct ax88796c_device *ax_local); +void ax88796c_set_power_saving(struct ax88796c_device *ax_local, u8 ps_level); +int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc); +int ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val); +int ax88796c_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); + +#endif diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c new file mode 100644 index 00000000000000..4b0c5a09fd57ff --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_main.c @@ -0,0 +1,1164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
+ * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#define pr_fmt(fmt) "ax88796c: " fmt + +#include "ax88796c_main.h" +#include "ax88796c_ioctl.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION); +static int msg_enable = NETIF_MSG_PROBE | + NETIF_MSG_LINK | + NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR; + +static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000"; +unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)]; + +module_param(msg_enable, int, 0444); +MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)"); + +static int ax88796c_soft_reset(struct ax88796c_device *ax_local) +{ + u16 temp; + int ret; + + lockdep_assert_held(&ax_local->spi_lock); + + AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR); + AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR); + + ret = read_poll_timeout(AX_READ, ret, + (ret & PSR_DEV_READY), + 0, jiffies_to_usecs(160 * HZ / 1000), false, + &ax_local->ax_spi, P0_PSR); + if (ret) + return ret; + + temp = AX_READ(&ax_local->ax_spi, P4_SPICR); + if (ax_local->priv_flags & AX_CAP_COMP) { + AX_WRITE(&ax_local->ax_spi, + (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR); + ax_local->ax_spi.comp = 1; + } else { + AX_WRITE(&ax_local->ax_spi, + (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR); + ax_local->ax_spi.comp = 0; + } + + return 0; +} + +static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local) +{ + int ret; + + lockdep_assert_held(&ax_local->spi_lock); + + AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR); + + ret = read_poll_timeout(AX_READ, ret, + (ret & PSR_DEV_READY), + 0, jiffies_to_usecs(2 * HZ / 1000), false, + &ax_local->ax_spi, P0_PSR); + if (ret) { + dev_err(&ax_local->spi->dev, + "timeout waiting for reload eeprom\n"); + return ret; + } + + return 0; +} + +static void ax88796c_set_hw_multicast(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + int mc_count = netdev_mc_count(ndev); + u16 rx_ctl = RXCR_AB; + + lockdep_assert_held(&ax_local->spi_lock); + + memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE); + + if (ndev->flags & IFF_PROMISC) { + rx_ctl |= RXCR_PRO; + + } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) { + rx_ctl |= RXCR_AMALL; + + } else if (mc_count == 0) { + /* just broadcast and directed */ + } else { + u32 crc_bits; + int i; + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, ndev) { + crc_bits = ether_crc(ETH_ALEN, ha->addr); + ax_local->multi_filter[crc_bits >> 29] |= + (1 << ((crc_bits >> 26) & 7)); + } + + for (i = 0; i < 4; i++) { + AX_WRITE(&ax_local->ax_spi, + ((ax_local->multi_filter[i * 2 + 1] << 8) | + ax_local->multi_filter[i * 2]), P3_MFAR(i)); + } + } + + AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR); +} + +static void ax88796c_set_mac_addr(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + lockdep_assert_held(&ax_local->spi_lock); + + AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) | + (u16)ndev->dev_addr[5]), P3_MACASR0); + AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) | + (u16)ndev->dev_addr[3]), P3_MACASR1); + AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) | + (u16)ndev->dev_addr[1]), P3_MACASR2); +} + +static void ax88796c_load_mac_addr(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + u16 temp; + + 
lockdep_assert_held(&ax_local->spi_lock); + + /* Try the device tree first */ + if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) && + is_valid_ether_addr(ndev->dev_addr)) { + if (netif_msg_probe(ax_local)) + dev_info(&ax_local->spi->dev, + "MAC address read from device tree\n"); + return; + } + + /* Read the MAC address from AX88796C */ + temp = AX_READ(&ax_local->ax_spi, P3_MACASR0); + ndev->dev_addr[5] = (u8)temp; + ndev->dev_addr[4] = (u8)(temp >> 8); + + temp = AX_READ(&ax_local->ax_spi, P3_MACASR1); + ndev->dev_addr[3] = (u8)temp; + ndev->dev_addr[2] = (u8)(temp >> 8); + + temp = AX_READ(&ax_local->ax_spi, P3_MACASR2); + ndev->dev_addr[1] = (u8)temp; + ndev->dev_addr[0] = (u8)(temp >> 8); + + if (is_valid_ether_addr(ndev->dev_addr)) { + if (netif_msg_probe(ax_local)) + dev_info(&ax_local->spi->dev, + "MAC address read from ASIX chip\n"); + return; + } + + /* Use random address if none found */ + if (netif_msg_probe(ax_local)) + dev_info(&ax_local->spi->dev, "Use random MAC address\n"); + eth_hw_addr_random(ndev); +} + +static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed) +{ + u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR); + + /* Prepare SOP header */ + info->sop.flags_len = info->pkt_len | + ((ip_summed == CHECKSUM_NONE) || + (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0); + + info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM) + | pkt_len_bar; + cpu_to_be16s(&info->sop.flags_len); + cpu_to_be16s(&info->sop.seq_lenbar); + + /* Prepare Segment header */ + info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS + | info->pkt_len; + + info->seg.eo_so_seglenbar = pkt_len_bar; + + cpu_to_be16s(&info->seg.flags_seqnum_seglen); + cpu_to_be16s(&info->seg.eo_so_seglenbar); + + /* Prepare EOP header */ + info->eop.seq_len = ((info->seq_num << 11) & + TX_HDR_EOP_SEQNUM) | info->pkt_len; + info->eop.seqbar_lenbar = ((~info->seq_num << 11) & + TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar; + + cpu_to_be16s(&info->eop.seq_len); + cpu_to_be16s(&info->eop.seqbar_lenbar); +} + +static int +ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages) +{ + u8 free_pages; + u16 tmp; + + lockdep_assert_held(&ax_local->spi_lock); + + free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK; + if (free_pages < need_pages) { + /* schedule free page interrupt */ + tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR) + & TFBFCR_SCHE_FREE_PAGE; + AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET | + TFBFCR_SET_FREE_PAGE(need_pages), + P0_TFBFCR); + return -ENOMEM; + } + + return 0; +} + +static struct sk_buff * +ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + u8 spi_len = ax_local->ax_spi.comp ? 
1 : 4; + struct sk_buff *skb; + struct tx_pkt_info info; + struct skb_data *entry; + u16 pkt_len; + u8 padlen, seq_num; + u8 need_pages; + int headroom; + int tailroom; + + if (skb_queue_empty(q)) + return NULL; + + skb = skb_peek(q); + pkt_len = skb->len; + need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7; + if (ax88796c_check_free_pages(ax_local, need_pages) != 0) + return NULL; + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + padlen = round_up(pkt_len, 4) - pkt_len; + seq_num = ++ax_local->seq_num & 0x1F; + + info.pkt_len = pkt_len; + + if (skb_cloned(skb) || + (headroom < (TX_OVERHEAD + spi_len)) || + (tailroom < (padlen + TX_EOP_SIZE))) { + size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0); + size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0); + + if (pskb_expand_head(skb, h, t, GFP_KERNEL)) + return NULL; + } + + info.seq_num = seq_num; + ax88796c_proc_tx_hdr(&info, skb->ip_summed); + + /* SOP and SEG header */ + memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD); + + /* Write SPI TXQ header */ + memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len); + + /* Make 32-bit alignment */ + skb_put(skb, padlen); + + /* EOP header */ + memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE); + + skb_unlink(skb, q); + + entry = (struct skb_data *)skb->cb; + memset(entry, 0, sizeof(*entry)); + entry->len = pkt_len; + + if (netif_msg_pktdata(ax_local)) { + char pfx[IFNAMSIZ + 7]; + + snprintf(pfx, sizeof(pfx), "%s: ", ndev->name); + + netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n", + pkt_len, skb->len, seq_num); + + netdev_info(ndev, " SPI Header:\n"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + skb->data, 4, 0); + + netdev_info(ndev, " TX SOP:\n"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + skb->data + 4, TX_OVERHEAD, 0); + + netdev_info(ndev, " TX packet:\n"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + skb->data + 4 + TX_OVERHEAD, + skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0); + + netdev_info(ndev, " TX EOP:\n"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + skb->data + skb->len - 4, 4, 0); + } + + return skb; +} + +static int ax88796c_hard_xmit(struct ax88796c_device *ax_local) +{ + struct ax88796c_pcpu_stats *stats; + struct sk_buff *tx_skb; + struct skb_data *entry; + unsigned long flags; + + lockdep_assert_held(&ax_local->spi_lock); + + stats = this_cpu_ptr(ax_local->stats); + tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q); + + if (!tx_skb) { + this_cpu_inc(ax_local->stats->tx_dropped); + return 0; + } + entry = (struct skb_data *)tx_skb->cb; + + AX_WRITE(&ax_local->ax_spi, + (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR); + + axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len); + + if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) || + ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) { + /* Ack tx error int */ + AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR); + + this_cpu_inc(ax_local->stats->tx_dropped); + + if (net_ratelimit()) + netif_err(ax_local, tx_err, ax_local->ndev, + "TX FIFO error, re-initialize the TX bridge\n"); + + /* Reinitial tx bridge */ + AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT | + AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR); + ax_local->seq_num = 0; + } else { + flags = u64_stats_update_begin_irqsave(&stats->syncp); + u64_stats_inc(&stats->tx_packets); + u64_stats_add(&stats->tx_bytes, entry->len); + u64_stats_update_end_irqrestore(&stats->syncp, flags); + } + + entry->state = tx_done; + 
dev_kfree_skb(tx_skb); + + return 1; +} + +static int +ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + skb_queue_tail(&ax_local->tx_wait_q, skb); + if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER) + netif_stop_queue(ndev); + + set_bit(EVENT_TX, &ax_local->flags); + schedule_work(&ax_local->ax_work); + + return NETDEV_TX_OK; +} + +static void +ax88796c_skb_return(struct ax88796c_device *ax_local, + struct sk_buff *skb, struct rx_header *rxhdr) +{ + struct net_device *ndev = ax_local->ndev; + struct ax88796c_pcpu_stats *stats; + unsigned long flags; + int status; + + stats = this_cpu_ptr(ax_local->stats); + + do { + if (!(ndev->features & NETIF_F_RXCSUM)) + break; + + /* checksum error bit is set */ + if ((rxhdr->flags & RX_HDR3_L3_ERR) || + (rxhdr->flags & RX_HDR3_L4_ERR)) + break; + + /* Other types may be indicated by more than one bit. */ + if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) || + (rxhdr->flags & RX_HDR3_L4_TYPE_UDP)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } while (0); + + flags = u64_stats_update_begin_irqsave(&stats->syncp); + u64_stats_inc(&stats->rx_packets); + u64_stats_add(&stats->rx_bytes, skb->len); + u64_stats_update_end_irqrestore(&stats->syncp, flags); + + skb->dev = ndev; + skb->protocol = eth_type_trans(skb, ax_local->ndev); + + netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n", + skb->len + sizeof(struct ethhdr), skb->protocol); + + status = netif_rx_ni(skb); + if (status != NET_RX_SUCCESS && net_ratelimit()) + netif_info(ax_local, rx_err, ndev, + "netif_rx status %d\n", status); +} + +static void +ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb) +{ + struct rx_header *rxhdr = (struct rx_header *)rx_skb->data; + struct net_device *ndev = ax_local->ndev; + u16 len; + + be16_to_cpus(&rxhdr->flags_len); + be16_to_cpus(&rxhdr->seq_lenbar); + be16_to_cpus(&rxhdr->flags); + + if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) != + (~rxhdr->seq_lenbar & 0x7FF)) { + netif_err(ax_local, rx_err, ndev, "Header error\n"); + + this_cpu_inc(ax_local->stats->rx_frame_errors); + kfree_skb(rx_skb); + return; + } + + if ((rxhdr->flags_len & RX_HDR1_MII_ERR) || + (rxhdr->flags_len & RX_HDR1_CRC_ERR)) { + netif_err(ax_local, rx_err, ndev, "CRC or MII error\n"); + + this_cpu_inc(ax_local->stats->rx_crc_errors); + kfree_skb(rx_skb); + return; + } + + len = rxhdr->flags_len & RX_HDR1_PKT_LEN; + if (netif_msg_pktdata(ax_local)) { + char pfx[IFNAMSIZ + 7]; + + snprintf(pfx, sizeof(pfx), "%s: ", ndev->name); + netdev_info(ndev, "RX data, total len %d, packet len %d\n", + rx_skb->len, len); + + netdev_info(ndev, " Dump RX packet header:"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + rx_skb->data, sizeof(*rxhdr), 0); + + netdev_info(ndev, " Dump RX packet:"); + print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1, + rx_skb->data + sizeof(*rxhdr), len, 0); + } + + skb_pull(rx_skb, sizeof(*rxhdr)); + pskb_trim(rx_skb, len); + + ax88796c_skb_return(ax_local, rx_skb, rxhdr); +} + +static int ax88796c_receive(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + struct skb_data *entry; + u16 w_count, pkt_len; + struct sk_buff *skb; + u8 pkt_cnt; + + lockdep_assert_held(&ax_local->spi_lock); + + /* check rx packet and total word count */ + AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR) + | RTWCR_RX_LATCH, P0_RTWCR); + + pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK; + 
if (!pkt_cnt) + return 0; + + pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF; + + w_count = round_up(pkt_len + 6, 4) >> 1; + + skb = netdev_alloc_skb(ndev, w_count * 2); + if (!skb) { + AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1); + this_cpu_inc(ax_local->stats->rx_dropped); + return 0; + } + entry = (struct skb_data *)skb->cb; + + AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1); + + axspi_read_rxq(&ax_local->ax_spi, + skb_put(skb, w_count * 2), skb->len); + + /* Check if rx bridge is idle */ + if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) { + if (net_ratelimit()) + netif_err(ax_local, rx_err, ndev, + "Rx Bridge is not idle\n"); + AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2); + + entry->state = rx_err; + } else { + entry->state = rx_done; + } + + AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR); + + ax88796c_rx_fixup(ax_local, skb); + + return 1; +} + +static int ax88796c_process_isr(struct ax88796c_device *ax_local) +{ + struct net_device *ndev = ax_local->ndev; + int todo = 0; + u16 isr; + + lockdep_assert_held(&ax_local->spi_lock); + + isr = AX_READ(&ax_local->ax_spi, P0_ISR); + AX_WRITE(&ax_local->ax_spi, isr, P0_ISR); + + netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr); + + if (isr & ISR_TXERR) { + netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n"); + AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR); + ax_local->seq_num = 0x1f; + } + + if (isr & ISR_TXPAGES) { + netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n"); + set_bit(EVENT_TX, &ax_local->flags); + } + + if (isr & ISR_LINK) { + netif_dbg(ax_local, intr, ndev, " Link change interrupt\n"); + phy_mac_interrupt(ax_local->ndev->phydev); + } + + if (isr & ISR_RXPKT) { + netif_dbg(ax_local, intr, ndev, " RX interrupt\n"); + todo = ax88796c_receive(ax_local->ndev); + } + + return todo; +} + +static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance) +{ + struct ax88796c_device *ax_local; + struct net_device *ndev; + + ndev = dev_instance; + if (!ndev) { + pr_err("irq %d for unknown device.\n", irq); + return IRQ_RETVAL(0); + } + ax_local = to_ax88796c_device(ndev); + + disable_irq_nosync(irq); + + netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n"); + + set_bit(EVENT_INTR, &ax_local->flags); + schedule_work(&ax_local->ax_work); + + return IRQ_HANDLED; +} + +static void ax88796c_work(struct work_struct *work) +{ + struct ax88796c_device *ax_local = + container_of(work, struct ax88796c_device, ax_work); + + mutex_lock(&ax_local->spi_lock); + + if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) { + ax88796c_set_hw_multicast(ax_local->ndev); + clear_bit(EVENT_SET_MULTI, &ax_local->flags); + } + + if (test_bit(EVENT_INTR, &ax_local->flags)) { + AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR); + + while (ax88796c_process_isr(ax_local)) + /* nothing */; + + clear_bit(EVENT_INTR, &ax_local->flags); + + AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR); + + enable_irq(ax_local->ndev->irq); + } + + if (test_bit(EVENT_TX, &ax_local->flags)) { + while (skb_queue_len(&ax_local->tx_wait_q)) { + if (!ax88796c_hard_xmit(ax_local)) + break; + } + + clear_bit(EVENT_TX, &ax_local->flags); + + if (netif_queue_stopped(ax_local->ndev) && + (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER)) + netif_wake_queue(ax_local->ndev); + } + + mutex_unlock(&ax_local->spi_lock); +} + +static void ax88796c_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + u32 
rx_frame_errors = 0, rx_crc_errors = 0; + u32 rx_dropped = 0, tx_dropped = 0; + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct ax88796c_pcpu_stats *s; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + s = per_cpu_ptr(ax_local->stats, cpu); + + do { + start = u64_stats_fetch_begin_irq(&s->syncp); + rx_packets = u64_stats_read(&s->rx_packets); + rx_bytes = u64_stats_read(&s->rx_bytes); + tx_packets = u64_stats_read(&s->tx_packets); + tx_bytes = u64_stats_read(&s->tx_bytes); + } while (u64_stats_fetch_retry_irq(&s->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + + rx_dropped += s->rx_dropped; + tx_dropped += s->tx_dropped; + rx_frame_errors += s->rx_frame_errors; + rx_crc_errors += s->rx_crc_errors; + } + + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; + stats->rx_frame_errors = rx_frame_errors; + stats->rx_crc_errors = rx_crc_errors; +} + +static void ax88796c_set_mac(struct ax88796c_device *ax_local) +{ + u16 maccr; + + maccr = (ax_local->link) ? MACCR_RXEN : 0; + + switch (ax_local->speed) { + case SPEED_100: + maccr |= MACCR_SPEED_100; + break; + case SPEED_10: + case SPEED_UNKNOWN: + break; + default: + return; + } + + switch (ax_local->duplex) { + case DUPLEX_FULL: + maccr |= MACCR_SPEED_100; + break; + case DUPLEX_HALF: + case DUPLEX_UNKNOWN: + break; + default: + return; + } + + if (ax_local->flowctrl & AX_FC_ANEG && + ax_local->phydev->autoneg) { + maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0; + maccr |= !ax_local->pause != !ax_local->asym_pause ? + MACCR_TXFC_ENABLE : 0; + } else { + maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0; + maccr |= (ax_local->flowctrl & AX_FC_TX) ? 
MACCR_TXFC_ENABLE : 0; + } + + mutex_lock(&ax_local->spi_lock); + + maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) & + ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 | + MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE); + AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR); + + mutex_unlock(&ax_local->spi_lock); +} + +static void ax88796c_handle_link_change(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + struct phy_device *phydev = ndev->phydev; + bool update = false; + + if (phydev->link && (ax_local->speed != phydev->speed || + ax_local->duplex != phydev->duplex || + ax_local->pause != phydev->pause || + ax_local->asym_pause != phydev->asym_pause)) { + ax_local->speed = phydev->speed; + ax_local->duplex = phydev->duplex; + ax_local->pause = phydev->pause; + ax_local->asym_pause = phydev->asym_pause; + update = true; + } + + if (phydev->link != ax_local->link) { + if (!phydev->link) { + ax_local->speed = SPEED_UNKNOWN; + ax_local->duplex = DUPLEX_UNKNOWN; + } + + ax_local->link = phydev->link; + update = true; + } + + if (update) + ax88796c_set_mac(ax_local); + + if (net_ratelimit()) + phy_print_status(ndev->phydev); +} + +static void ax88796c_set_csums(struct ax88796c_device *ax_local) +{ + struct net_device *ndev = ax_local->ndev; + + lockdep_assert_held(&ax_local->spi_lock); + + if (ndev->features & NETIF_F_RXCSUM) { + AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0); + AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1); + } else { + AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0); + AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1); + } + + if (ndev->features & NETIF_F_HW_CSUM) { + AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0); + AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1); + } else { + AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0); + AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1); + } +} + +static int +ax88796c_open(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + unsigned long irq_flag = 0; + int fc = AX_FC_NONE; + int ret; + u16 t; + + ret = request_irq(ndev->irq, ax88796c_interrupt, + irq_flag, ndev->name, ndev); + if (ret) { + netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n", + ndev->irq, ret); + return ret; + } + + mutex_lock(&ax_local->spi_lock); + + ret = ax88796c_soft_reset(ax_local); + if (ret < 0) { + free_irq(ndev->irq, ndev); + mutex_unlock(&ax_local->spi_lock); + return ret; + } + ax_local->seq_num = 0x1f; + + ax88796c_set_mac_addr(ndev); + ax88796c_set_csums(ax_local); + + /* Disable stuffing packet */ + t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR); + t &= ~RXBSPCR_STUF_ENABLE; + AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR); + + /* Enable RX packet process */ + AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER); + + t = AX_READ(&ax_local->ax_spi, P0_FER); + t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL; + AX_WRITE(&ax_local->ax_spi, t, P0_FER); + + /* Setup LED mode */ + AX_WRITE(&ax_local->ax_spi, + (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN | + LCR_LED1_100MODE), P2_LCR0); + AX_WRITE(&ax_local->ax_spi, + (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) | + LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1); + + /* Disable PHY auto-polling */ + AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR); + + /* Enable MAC interrupts */ + AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR); + + mutex_unlock(&ax_local->spi_lock); + + /* Setup flow-control configuration */ + phy_support_asym_pause(ax_local->phydev); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + 
ax_local->phydev->advertising) || + linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ax_local->phydev->advertising)) + fc |= AX_FC_ANEG; + + fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + ax_local->phydev->advertising) ? AX_FC_RX : 0; + fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + ax_local->phydev->advertising) != + linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ax_local->phydev->advertising)) ? AX_FC_TX : 0; + ax_local->flowctrl = fc; + + phy_start(ax_local->ndev->phydev); + + netif_start_queue(ndev); + + spi_message_init(&ax_local->ax_spi.rx_msg); + + return 0; +} + +static int +ax88796c_close(struct net_device *ndev) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + + phy_stop(ndev->phydev); + + /* We lock the mutex early not only to protect the device + * against concurrent access, but also avoid waking up the + * queue in ax88796c_work(). phy_stop() needs to be called + * before because it locks the mutex to access SPI. + */ + mutex_lock(&ax_local->spi_lock); + + netif_stop_queue(ndev); + + /* No more work can be scheduled now. Make any pending work, + * including one already waiting for the mutex to be unlocked, + * NOP. + */ + netif_dbg(ax_local, ifdown, ndev, "clearing bits\n"); + clear_bit(EVENT_SET_MULTI, &ax_local->flags); + clear_bit(EVENT_INTR, &ax_local->flags); + clear_bit(EVENT_TX, &ax_local->flags); + + /* Disable MAC interrupts */ + AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR); + __skb_queue_purge(&ax_local->tx_wait_q); + ax88796c_soft_reset(ax_local); + + mutex_unlock(&ax_local->spi_lock); + + cancel_work_sync(&ax_local->ax_work); + + free_irq(ndev->irq, ndev); + + return 0; +} + +static int +ax88796c_set_features(struct net_device *ndev, netdev_features_t features) +{ + struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + netdev_features_t changed = features ^ ndev->features; + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))) + return 0; + + ndev->features = features; + + if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)) + ax88796c_set_csums(ax_local); + + return 0; +} + +static const struct net_device_ops ax88796c_netdev_ops = { + .ndo_open = ax88796c_open, + .ndo_stop = ax88796c_close, + .ndo_start_xmit = ax88796c_start_xmit, + .ndo_get_stats64 = ax88796c_get_stats64, + .ndo_do_ioctl = ax88796c_ioctl, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = ax88796c_set_features, +}; + +static int ax88796c_hard_reset(struct ax88796c_device *ax_local) +{ + struct device *dev = (struct device *)&ax_local->spi->dev; + struct gpio_desc *reset_gpio; + + /* reset info */ + reset_gpio = gpiod_get(dev, "reset", 0); + if (IS_ERR(reset_gpio)) { + dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio)); + return PTR_ERR(reset_gpio); + } + + /* set reset */ + gpiod_direction_output(reset_gpio, 1); + msleep(100); + gpiod_direction_output(reset_gpio, 0); + gpiod_put(reset_gpio); + msleep(20); + + return 0; +} + +static int ax88796c_probe(struct spi_device *spi) +{ + char phy_id[MII_BUS_ID_SIZE + 3]; + struct ax88796c_device *ax_local; + struct net_device *ndev; + u16 temp; + int ret; + + ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &spi->dev); + + ax_local = to_ax88796c_device(ndev); + + dev_set_drvdata(&spi->dev, ax_local); + ax_local->spi = spi; + ax_local->ax_spi.spi = spi; + + ax_local->stats = + devm_netdev_alloc_pcpu_stats(&spi->dev, + struct ax88796c_pcpu_stats); + if (!ax_local->stats) + return -ENOMEM; + + ax_local->ndev 
= ndev; + ax_local->priv_flags |= comp ? AX_CAP_COMP : 0; + ax_local->msg_enable = msg_enable; + mutex_init(&ax_local->spi_lock); + + ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev); + if (!ax_local->mdiobus) + return -ENOMEM; + + ax_local->mdiobus->priv = ax_local; + ax_local->mdiobus->read = ax88796c_mdio_read; + ax_local->mdiobus->write = ax88796c_mdio_write; + ax_local->mdiobus->name = "ax88976c-mdiobus"; + ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID); + ax_local->mdiobus->parent = &spi->dev; + + snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE, + "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select); + + ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus); + if (ret < 0) { + dev_err(&spi->dev, "Could not register MDIO bus\n"); + return ret; + } + + if (netif_msg_probe(ax_local)) { + dev_info(&spi->dev, "AX88796C-SPI Configuration:\n"); + dev_info(&spi->dev, " Compression : %s\n", + ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF"); + } + + ndev->irq = spi->irq; + ndev->netdev_ops = &ax88796c_netdev_ops; + ndev->ethtool_ops = &ax88796c_ethtool_ops; + ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + ndev->needed_headroom = TX_OVERHEAD; + ndev->needed_tailroom = TX_EOP_SIZE; + + mutex_lock(&ax_local->spi_lock); + + /* ax88796c gpio reset */ + ax88796c_hard_reset(ax_local); + + /* Reset AX88796C */ + ret = ax88796c_soft_reset(ax_local); + if (ret < 0) { + ret = -ENODEV; + mutex_unlock(&ax_local->spi_lock); + goto err; + } + /* Check board revision */ + temp = AX_READ(&ax_local->ax_spi, P2_CRIR); + if ((temp & 0xF) != 0x0) { + dev_err(&spi->dev, "spi read failed: %d\n", temp); + ret = -ENODEV; + mutex_unlock(&ax_local->spi_lock); + goto err; + } + + /*Reload EEPROM*/ + ax88796c_reload_eeprom(ax_local); + + ax88796c_load_mac_addr(ndev); + + if (netif_msg_probe(ax_local)) + dev_info(&spi->dev, + "irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", + ndev->irq, + ndev->dev_addr[0], ndev->dev_addr[1], + ndev->dev_addr[2], ndev->dev_addr[3], + ndev->dev_addr[4], ndev->dev_addr[5]); + + /* Disable power saving */ + AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR) + & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR); + + mutex_unlock(&ax_local->spi_lock); + + INIT_WORK(&ax_local->ax_work, ax88796c_work); + + skb_queue_head_init(&ax_local->tx_wait_q); + + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + ax_local->mdiobus->id, AX88796C_PHY_ID); + ax_local->phydev = phy_connect(ax_local->ndev, phy_id, + ax88796c_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(ax_local->phydev)) { + ret = PTR_ERR(ax_local->phydev); + goto err; + } + ax_local->phydev->irq = PHY_POLL; + + ret = devm_register_netdev(&spi->dev, ndev); + if (ret) { + dev_err(&spi->dev, "failed to register a network device\n"); + goto err_phy_dis; + } + + netif_info(ax_local, probe, ndev, "%s %s registered\n", + dev_driver_string(&spi->dev), + dev_name(&spi->dev)); + phy_attached_info(ax_local->phydev); + + return 0; + +err_phy_dis: + phy_disconnect(ax_local->phydev); +err: + return ret; +} + +static int ax88796c_remove(struct spi_device *spi) +{ + struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev); + struct net_device *ndev = ax_local->ndev; + + phy_disconnect(ndev->phydev); + + netif_info(ax_local, probe, ndev, "removing network device %s %s\n", + dev_driver_string(&spi->dev), + dev_name(&spi->dev)); + + return 0; +} + +static const struct of_device_id ax88796c_dt_ids[] = { + { .compatible = "asix,ax88796c" }, + {}, +}; 
+MODULE_DEVICE_TABLE(of, ax88796c_dt_ids); + +static const struct spi_device_id asix_id[] = { + { "ax88796c", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, asix_id); + +static struct spi_driver ax88796c_spi_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(ax88796c_dt_ids), + }, + .probe = ax88796c_probe, + .remove = ax88796c_remove, + .id_table = asix_id, +}; + +static __init int ax88796c_spi_init(void) +{ + int ret; + + bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN); + ret = bitmap_parse(no_regs_list, 35, + ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN); + if (ret) { + bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN); + pr_err("Invalid bitmap description, masking all registers\n"); + } + + return spi_register_driver(&ax88796c_spi_driver); +} + +static __exit void ax88796c_spi_exit(void) +{ + spi_unregister_driver(&ax88796c_spi_driver); +} + +module_init(ax88796c_spi_init); +module_exit(ax88796c_spi_exit); + +MODULE_AUTHOR("Łukasz Stelmach "); +MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h new file mode 100644 index 00000000000000..80263c3cef7575 --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_main.h @@ -0,0 +1,568 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 Samsung Electronics + * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#ifndef _AX88796C_MAIN_H +#define _AX88796C_MAIN_H + +#include +#include + +#include "ax88796c_spi.h" + +/* These identify the driver base version and may not be removed. */ +#define DRV_NAME "ax88796c" +#define ADP_NAME "ASIX AX88796C SPI Ethernet Adapter" + +#define TX_QUEUE_HIGH_WATER 45 /* Tx queue high water mark */ +#define TX_QUEUE_LOW_WATER 20 /* Tx queue low water mark */ + +#define AX88796C_REGDUMP_LEN 256 +#define AX88796C_PHY_REGDUMP_LEN 14 +#define AX88796C_PHY_ID 0x10 + +#define TX_OVERHEAD 8 +#define TX_EOP_SIZE 4 + +#define AX_MCAST_FILTER_SIZE 8 +#define AX_MAX_MCAST 64 +#define AX_MAX_CLK 80000000 +#define TX_HDR_SOP_DICF 0x8000 +#define TX_HDR_SOP_CPHI 0x4000 +#define TX_HDR_SOP_INT 0x2000 +#define TX_HDR_SOP_MDEQ 0x1000 +#define TX_HDR_SOP_PKTLEN 0x07FF +#define TX_HDR_SOP_SEQNUM 0xF800 +#define TX_HDR_SOP_PKTLENBAR 0x07FF + +#define TX_HDR_SEG_FS 0x8000 +#define TX_HDR_SEG_LS 0x4000 +#define TX_HDR_SEG_SEGNUM 0x3800 +#define TX_HDR_SEG_SEGLEN 0x0700 +#define TX_HDR_SEG_EOFST 0xC000 +#define TX_HDR_SEG_SOFST 0x3800 +#define TX_HDR_SEG_SEGLENBAR 0x07FF + +#define TX_HDR_EOP_SEQNUM 0xF800 +#define TX_HDR_EOP_PKTLEN 0x07FF +#define TX_HDR_EOP_SEQNUMBAR 0xF800 +#define TX_HDR_EOP_PKTLENBAR 0x07FF + +/* Rx header fields mask */ +#define RX_HDR1_MCBC 0x8000 +#define RX_HDR1_STUFF_PKT 0x4000 +#define RX_HDR1_MII_ERR 0x2000 +#define RX_HDR1_CRC_ERR 0x1000 +#define RX_HDR1_PKT_LEN 0x07FF + +#define RX_HDR2_SEQ_NUM 0xF800 +#define RX_HDR2_PKT_LEN_BAR 0x7FFF + +#define RX_HDR3_PE 0x8000 +#define RX_HDR3_L3_TYPE_IPV4V6 0x6000 +#define RX_HDR3_L3_TYPE_IP 0x4000 +#define RX_HDR3_L3_TYPE_IPV6 0x2000 +#define RX_HDR3_L4_TYPE_ICMPV6 0x1400 +#define RX_HDR3_L4_TYPE_TCP 0x1000 +#define RX_HDR3_L4_TYPE_IGMP 0x0c00 +#define RX_HDR3_L4_TYPE_ICMP 0x0800 +#define RX_HDR3_L4_TYPE_UDP 0x0400 +#define RX_HDR3_L3_ERR 0x0200 +#define RX_HDR3_L4_ERR 0x0100 +#define RX_HDR3_PRIORITY(x) ((x) << 4) +#define RX_HDR3_STRIP 0x0008 +#define RX_HDR3_VLAN_ID 0x0007 + +struct ax88796c_pcpu_stats { + u64_stats_t 
rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + struct u64_stats_sync syncp; + u32 rx_dropped; + u32 tx_dropped; + u32 rx_frame_errors; + u32 rx_crc_errors; +}; + +struct ax88796c_device { + struct spi_device *spi; + struct net_device *ndev; + struct ax88796c_pcpu_stats __percpu *stats; + + struct work_struct ax_work; + + struct mutex spi_lock; /* device access */ + + struct sk_buff_head tx_wait_q; + + struct axspi_data ax_spi; + + struct mii_bus *mdiobus; + struct phy_device *phydev; + + int msg_enable; + + u16 seq_num; + + u8 multi_filter[AX_MCAST_FILTER_SIZE]; + + int link; + int speed; + int duplex; + int pause; + int asym_pause; + int flowctrl; + #define AX_FC_NONE 0 + #define AX_FC_RX BIT(0) + #define AX_FC_TX BIT(1) + #define AX_FC_ANEG BIT(2) + + u32 priv_flags; + #define AX_CAP_COMP BIT(0) + #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP) + + unsigned long flags; + #define EVENT_INTR BIT(0) + #define EVENT_TX BIT(1) + #define EVENT_SET_MULTI BIT(2) + +}; + +#define to_ax88796c_device(ndev) ((struct ax88796c_device *)netdev_priv(ndev)) + +enum skb_state { + illegal = 0, + tx_done, + rx_done, + rx_err, +}; + +struct skb_data { + enum skb_state state; + size_t len; +}; + +/* A88796C register definition */ + /* Definition of PAGE0 */ +#define P0_PSR (0x00) + #define PSR_DEV_READY BIT(7) + #define PSR_RESET (0 << 15) + #define PSR_RESET_CLR BIT(15) +#define P0_BOR (0x02) +#define P0_FER (0x04) + #define FER_IPALM BIT(0) + #define FER_DCRC BIT(1) + #define FER_RH3M BIT(2) + #define FER_HEADERSWAP BIT(7) + #define FER_WSWAP BIT(8) + #define FER_BSWAP BIT(9) + #define FER_INTHI BIT(10) + #define FER_INTLO (0 << 10) + #define FER_IRQ_PULL BIT(11) + #define FER_RXEN BIT(14) + #define FER_TXEN BIT(15) +#define P0_ISR (0x06) + #define ISR_RXPKT BIT(0) + #define ISR_MDQ BIT(4) + #define ISR_TXT BIT(5) + #define ISR_TXPAGES BIT(6) + #define ISR_TXERR BIT(8) + #define ISR_LINK BIT(9) +#define P0_IMR (0x08) + #define IMR_RXPKT BIT(0) + #define IMR_MDQ BIT(4) + #define IMR_TXT BIT(5) + #define IMR_TXPAGES BIT(6) + #define IMR_TXERR BIT(8) + #define IMR_LINK BIT(9) + #define IMR_MASKALL (0xFFFF) + #define IMR_DEFAULT (IMR_TXERR) +#define P0_WFCR (0x0A) + #define WFCR_PMEIND BIT(0) /* PME indication */ + #define WFCR_PMETYPE BIT(1) /* PME I/O type */ + #define WFCR_PMEPOL BIT(2) /* PME polarity */ + #define WFCR_PMERST BIT(3) /* Reset PME */ + #define WFCR_SLEEP BIT(4) /* Enable sleep mode */ + #define WFCR_WAKEUP BIT(5) /* Enable wakeup mode */ + #define WFCR_WAITEVENT BIT(6) /* Reserved */ + #define WFCR_CLRWAKE BIT(7) /* Clear wakeup */ + #define WFCR_LINKCH BIT(8) /* Enable link change */ + #define WFCR_MAGICP BIT(9) /* Enable magic packet */ + #define WFCR_WAKEF BIT(10) /* Enable wakeup frame */ + #define WFCR_PMEEN BIT(11) /* Enable PME pin */ + #define WFCR_LINKCHS BIT(12) /* Link change status */ + #define WFCR_MAGICPS BIT(13) /* Magic packet status */ + #define WFCR_WAKEFS BIT(14) /* Wakeup frame status */ + #define WFCR_PMES BIT(15) /* PME pin status */ +#define P0_PSCR (0x0C) + #define PSCR_PS_MASK (0xFFF0) + #define PSCR_PS_D0 (0) + #define PSCR_PS_D1 BIT(0) + #define PSCR_PS_D2 BIT(1) + #define PSCR_FPS BIT(3) /* Enable fiber mode PS */ + #define PSCR_SWPS BIT(4) /* Enable software */ + /* PS control */ + #define PSCR_WOLPS BIT(5) /* Enable WOL PS */ + #define PSCR_SWWOL BIT(6) /* Enable software select */ + /* WOL PS */ + #define PSCR_PHYOSC BIT(7) /* Internal PHY OSC control */ + #define PSCR_FOFEF BIT(8) /* Force PHY generate FEF */ + #define PSCR_FOF 
BIT(9) /* Force PHY in fiber mode */ + #define PSCR_PHYPD BIT(10) /* PHY power down. */ + /* Active high */ + #define PSCR_PHYRST BIT(11) /* PHY reset signal. */ + /* Active low */ + #define PSCR_PHYCSIL BIT(12) /* PHY cable energy detect */ + #define PSCR_PHYCOFF BIT(13) /* PHY cable off */ + #define PSCR_PHYLINK BIT(14) /* PHY link status */ + #define PSCR_EEPOK BIT(15) /* EEPROM load complete */ +#define P0_MACCR (0x0E) + #define MACCR_RXEN BIT(0) /* Enable RX */ + #define MACCR_DUPLEX_FULL BIT(1) /* 1: Full, 0: Half */ + #define MACCR_SPEED_100 BIT(2) /* 1: 100Mbps, 0: 10Mbps */ + #define MACCR_RXFC_ENABLE BIT(3) + #define MACCR_RXFC_MASK 0xFFF7 + #define MACCR_TXFC_ENABLE BIT(4) + #define MACCR_TXFC_MASK 0xFFEF + #define MACCR_PSI BIT(6) /* Software Cable-Off */ + /* Power Saving Interrupt */ + #define MACCR_PF BIT(7) + #define MACCR_PMM_BITS 8 + #define MACCR_PMM_MASK (0x1F00) + #define MACCR_PMM_RESET BIT(8) + #define MACCR_PMM_WAIT (2 << 8) + #define MACCR_PMM_READY (3 << 8) + #define MACCR_PMM_D1 (4 << 8) + #define MACCR_PMM_D2 (5 << 8) + #define MACCR_PMM_WAKE (7 << 8) + #define MACCR_PMM_D1_WAKE (8 << 8) + #define MACCR_PMM_D2_WAKE (9 << 8) + #define MACCR_PMM_SLEEP (10 << 8) + #define MACCR_PMM_PHY_RESET (11 << 8) + #define MACCR_PMM_SOFT_D1 (16 << 8) + #define MACCR_PMM_SOFT_D2 (17 << 8) +#define P0_TFBFCR (0x10) + #define TFBFCR_SCHE_FREE_PAGE 0xE07F + #define TFBFCR_FREE_PAGE_BITS 0x07 + #define TFBFCR_FREE_PAGE_LATCH BIT(6) + #define TFBFCR_SET_FREE_PAGE(x) (((x) & 0x3F) << TFBFCR_FREE_PAGE_BITS) + #define TFBFCR_TX_PAGE_SET BIT(13) + #define TFBFCR_MANU_ENTX BIT(15) + #define TX_FREEBUF_MASK 0x003F + #define TX_DPTSTART 0x4000 + +#define P0_TSNR (0x12) + #define TXNR_TXB_ERR BIT(5) + #define TXNR_TXB_IDLE BIT(6) + #define TSNR_PKT_CNT(x) (((x) & 0x3F) << 8) + #define TXNR_TXB_REINIT BIT(14) + #define TSNR_TXB_START BIT(15) +#define P0_RTDPR (0x14) +#define P0_RXBCR1 (0x16) + #define RXBCR1_RXB_DISCARD BIT(14) + #define RXBCR1_RXB_START BIT(15) +#define P0_RXBCR2 (0x18) + #define RXBCR2_PKT_MASK (0xFF) + #define RXBCR2_RXPC_MASK (0x7F) + #define RXBCR2_RXB_READY BIT(13) + #define RXBCR2_RXB_IDLE BIT(14) + #define RXBCR2_RXB_REINIT BIT(15) +#define P0_RTWCR (0x1A) + #define RTWCR_RXWC_MASK (0x3FFF) + #define RTWCR_RX_LATCH BIT(15) +#define P0_RCPHR (0x1C) + + /* Definition of PAGE1 */ +#define P1_RPPER (0x22) + #define RPPER_RXEN BIT(0) +#define P1_MRCR (0x28) +#define P1_MDR (0x2A) +#define P1_RMPR (0x2C) +#define P1_TMPR (0x2E) +#define P1_RXBSPCR (0x30) + #define RXBSPCR_STUF_WORD_CNT(x) (((x) & 0x7000) >> 12) + #define RXBSPCR_STUF_ENABLE BIT(15) +#define P1_MCR (0x32) + #define MCR_SBP BIT(8) + #define MCR_SM BIT(9) + #define MCR_CRCENLAN BIT(11) + #define MCR_STP BIT(12) + /* Definition of PAGE2 */ +#define P2_CIR (0x42) +#define P2_PCR (0x44) + #define PCR_POLL_EN BIT(0) + #define PCR_POLL_FLOWCTRL BIT(1) + #define PCR_POLL_BMCR BIT(2) + #define PCR_PHYID(x) ((x) << 8) +#define P2_PHYSR (0x46) +#define P2_MDIODR (0x48) +#define P2_MDIOCR (0x4A) + #define MDIOCR_RADDR(x) ((x) & 0x1F) + #define MDIOCR_FADDR(x) (((x) & 0x1F) << 8) + #define MDIOCR_VALID BIT(13) + #define MDIOCR_READ BIT(14) + #define MDIOCR_WRITE BIT(15) +#define P2_LCR0 (0x4C) + #define LCR_LED0_EN BIT(0) + #define LCR_LED0_100MODE BIT(1) + #define LCR_LED0_DUPLEX BIT(2) + #define LCR_LED0_LINK BIT(3) + #define LCR_LED0_ACT BIT(4) + #define LCR_LED0_COL BIT(5) + #define LCR_LED0_10MODE BIT(6) + #define LCR_LED0_DUPCOL BIT(7) + #define LCR_LED1_EN BIT(8) + #define LCR_LED1_100MODE BIT(9) + #define 
LCR_LED1_DUPLEX BIT(10) + #define LCR_LED1_LINK BIT(11) + #define LCR_LED1_ACT BIT(12) + #define LCR_LED1_COL BIT(13) + #define LCR_LED1_10MODE BIT(14) + #define LCR_LED1_DUPCOL BIT(15) +#define P2_LCR1 (0x4E) + #define LCR_LED2_MASK (0xFF00) + #define LCR_LED2_EN BIT(0) + #define LCR_LED2_100MODE BIT(1) + #define LCR_LED2_DUPLEX BIT(2) + #define LCR_LED2_LINK BIT(3) + #define LCR_LED2_ACT BIT(4) + #define LCR_LED2_COL BIT(5) + #define LCR_LED2_10MODE BIT(6) + #define LCR_LED2_DUPCOL BIT(7) +#define P2_IPGCR (0x50) +#define P2_CRIR (0x52) +#define P2_FLHWCR (0x54) +#define P2_RXCR (0x56) + #define RXCR_PRO BIT(0) + #define RXCR_AMALL BIT(1) + #define RXCR_SEP BIT(2) + #define RXCR_AB BIT(3) + #define RXCR_AM BIT(4) + #define RXCR_AP BIT(5) + #define RXCR_ARP BIT(6) +#define P2_JLCR (0x58) +#define P2_MPLR (0x5C) + + /* Definition of PAGE3 */ +#define P3_MACASR0 (0x62) + #define P3_MACASR(x) (P3_MACASR0 + 2 * (x)) + #define MACASR_LOWBYTE_MASK 0x00FF + #define MACASR_HIGH_BITS 0x08 +#define P3_MACASR1 (0x64) +#define P3_MACASR2 (0x66) +#define P3_MFAR01 (0x68) +#define P3_MFAR_BASE (0x68) + #define P3_MFAR(x) (P3_MFAR_BASE + 2 * (x)) + +#define P3_MFAR23 (0x6A) +#define P3_MFAR45 (0x6C) +#define P3_MFAR67 (0x6E) +#define P3_VID0FR (0x70) +#define P3_VID1FR (0x72) +#define P3_EECSR (0x74) +#define P3_EEDR (0x76) +#define P3_EECR (0x78) + #define EECR_ADDR_MASK (0x00FF) + #define EECR_READ_ACT BIT(8) + #define EECR_WRITE_ACT BIT(9) + #define EECR_WRITE_DISABLE BIT(10) + #define EECR_WRITE_ENABLE BIT(11) + #define EECR_EE_READY BIT(13) + #define EECR_RELOAD BIT(14) + #define EECR_RESET BIT(15) +#define P3_TPCR (0x7A) + #define TPCR_PATT_MASK (0xFF) + #define TPCR_RAND_PKT_EN BIT(14) + #define TPCR_FIXED_PKT_EN BIT(15) +#define P3_TPLR (0x7C) + /* Definition of PAGE4 */ +#define P4_SPICR (0x8A) + #define SPICR_RCEN BIT(0) + #define SPICR_QCEN BIT(1) + #define SPICR_RBRE BIT(3) + #define SPICR_PMM BIT(4) + #define SPICR_LOOPBACK BIT(8) + #define SPICR_CORE_RES_CLR BIT(10) + #define SPICR_SPI_RES_CLR BIT(11) +#define P4_SPIISMR (0x8C) + +#define P4_COERCR0 (0x92) + #define COERCR0_RXIPCE BIT(0) + #define COERCR0_RXIPVE BIT(1) + #define COERCR0_RXV6PE BIT(2) + #define COERCR0_RXTCPE BIT(3) + #define COERCR0_RXUDPE BIT(4) + #define COERCR0_RXICMP BIT(5) + #define COERCR0_RXIGMP BIT(6) + #define COERCR0_RXICV6 BIT(7) + + #define COERCR0_RXTCPV6 BIT(8) + #define COERCR0_RXUDPV6 BIT(9) + #define COERCR0_RXICMV6 BIT(10) + #define COERCR0_RXIGMV6 BIT(11) + #define COERCR0_RXICV6V6 BIT(12) + + #define COERCR0_DEFAULT (COERCR0_RXIPCE | COERCR0_RXV6PE | \ + COERCR0_RXTCPE | COERCR0_RXUDPE | \ + COERCR0_RXTCPV6 | COERCR0_RXUDPV6) +#define P4_COERCR1 (0x94) + #define COERCR1_IPCEDP BIT(0) + #define COERCR1_IPVEDP BIT(1) + #define COERCR1_V6VEDP BIT(2) + #define COERCR1_TCPEDP BIT(3) + #define COERCR1_UDPEDP BIT(4) + #define COERCR1_ICMPDP BIT(5) + #define COERCR1_IGMPDP BIT(6) + #define COERCR1_ICV6DP BIT(7) + #define COERCR1_RX64TE BIT(8) + #define COERCR1_RXPPPE BIT(9) + #define COERCR1_TCP6DP BIT(10) + #define COERCR1_UDP6DP BIT(11) + #define COERCR1_IC6DP BIT(12) + #define COERCR1_IG6DP BIT(13) + #define COERCR1_ICV66DP BIT(14) + #define COERCR1_RPCE BIT(15) + + #define COERCR1_DEFAULT (COERCR1_RXPPPE) + +#define P4_COETCR0 (0x96) + #define COETCR0_TXIP BIT(0) + #define COETCR0_TXTCP BIT(1) + #define COETCR0_TXUDP BIT(2) + #define COETCR0_TXICMP BIT(3) + #define COETCR0_TXIGMP BIT(4) + #define COETCR0_TXICV6 BIT(5) + #define COETCR0_TXTCPV6 BIT(8) + #define COETCR0_TXUDPV6 BIT(9) + #define 
COETCR0_TXICMV6 BIT(10) + #define COETCR0_TXIGMV6 BIT(11) + #define COETCR0_TXICV6V6 BIT(12) + + #define COETCR0_DEFAULT (COETCR0_TXIP | COETCR0_TXTCP | \ + COETCR0_TXUDP | COETCR0_TXTCPV6 | \ + COETCR0_TXUDPV6) +#define P4_COETCR1 (0x98) + #define COETCR1_TX64TE BIT(0) + #define COETCR1_TXPPPE BIT(1) + +#define P4_COECEDR (0x9A) +#define P4_L2CECR (0x9C) + + /* Definition of PAGE5 */ +#define P5_WFTR (0xA2) + #define WFTR_2MS (0x01) + #define WFTR_4MS (0x02) + #define WFTR_8MS (0x03) + #define WFTR_16MS (0x04) + #define WFTR_32MS (0x05) + #define WFTR_64MS (0x06) + #define WFTR_128MS (0x07) + #define WFTR_256MS (0x08) + #define WFTR_512MS (0x09) + #define WFTR_1024MS (0x0A) + #define WFTR_2048MS (0x0B) + #define WFTR_4096MS (0x0C) + #define WFTR_8192MS (0x0D) + #define WFTR_16384MS (0x0E) + #define WFTR_32768MS (0x0F) +#define P5_WFCCR (0xA4) +#define P5_WFCR03 (0xA6) + #define WFCR03_F0_EN BIT(0) + #define WFCR03_F1_EN BIT(4) + #define WFCR03_F2_EN BIT(8) + #define WFCR03_F3_EN BIT(12) +#define P5_WFCR47 (0xA8) + #define WFCR47_F4_EN BIT(0) + #define WFCR47_F5_EN BIT(4) + #define WFCR47_F6_EN BIT(8) + #define WFCR47_F7_EN BIT(12) +#define P5_WF0BMR0 (0xAA) +#define P5_WF0BMR1 (0xAC) +#define P5_WF0CR (0xAE) +#define P5_WF0OBR (0xB0) +#define P5_WF1BMR0 (0xB2) +#define P5_WF1BMR1 (0xB4) +#define P5_WF1CR (0xB6) +#define P5_WF1OBR (0xB8) +#define P5_WF2BMR0 (0xBA) +#define P5_WF2BMR1 (0xBC) + + /* Definition of PAGE6 */ +#define P6_WF2CR (0xC2) +#define P6_WF2OBR (0xC4) +#define P6_WF3BMR0 (0xC6) +#define P6_WF3BMR1 (0xC8) +#define P6_WF3CR (0xCA) +#define P6_WF3OBR (0xCC) +#define P6_WF4BMR0 (0xCE) +#define P6_WF4BMR1 (0xD0) +#define P6_WF4CR (0xD2) +#define P6_WF4OBR (0xD4) +#define P6_WF5BMR0 (0xD6) +#define P6_WF5BMR1 (0xD8) +#define P6_WF5CR (0xDA) +#define P6_WF5OBR (0xDC) + +/* Definition of PAGE7 */ +#define P7_WF6BMR0 (0xE2) +#define P7_WF6BMR1 (0xE4) +#define P7_WF6CR (0xE6) +#define P7_WF6OBR (0xE8) +#define P7_WF7BMR0 (0xEA) +#define P7_WF7BMR1 (0xEC) +#define P7_WF7CR (0xEE) +#define P7_WF7OBR (0xF0) +#define P7_WFR01 (0xF2) +#define P7_WFR23 (0xF4) +#define P7_WFR45 (0xF6) +#define P7_WFR67 (0xF8) +#define P7_WFPC0 (0xFA) +#define P7_WFPC1 (0xFC) + +/* Tx headers structure */ +struct tx_sop_header { + /* bit 15-11: flags, bit 10-0: packet length */ + u16 flags_len; + /* bit 15-11: sequence number, bit 11-0: packet length bar */ + u16 seq_lenbar; +}; + +struct tx_segment_header { + /* bit 15-14: flags, bit 13-11: segment number */ + /* bit 10-0: segment length */ + u16 flags_seqnum_seglen; + /* bit 15-14: end offset, bit 13-11: start offset */ + /* bit 10-0: segment length bar */ + u16 eo_so_seglenbar; +}; + +struct tx_eop_header { + /* bit 15-11: sequence number, bit 10-0: packet length */ + u16 seq_len; + /* bit 15-11: sequence number bar, bit 10-0: packet length bar */ + u16 seqbar_lenbar; +}; + +struct tx_pkt_info { + struct tx_sop_header sop; + struct tx_segment_header seg; + struct tx_eop_header eop; + u16 pkt_len; + u16 seq_num; +}; + +/* Rx headers structure */ +struct rx_header { + u16 flags_len; + u16 seq_lenbar; + u16 flags; +}; + +extern unsigned long ax88796c_no_regs_mask[]; + +#endif /* #ifndef _AX88796C_MAIN_H */ diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c new file mode 100644 index 00000000000000..94df4f96d2be21 --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_spi.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 
Samsung Electronics Co., Ltd. + * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#define pr_fmt(fmt) "ax88796c: " fmt + +#include +#include + +#include "ax88796c_spi.h" + +const u8 ax88796c_rx_cmd_buf[5] = {AX_SPICMD_READ_RXQ, 0xFF, 0xFF, 0xFF, 0xFF}; +const u8 ax88796c_tx_cmd_buf[4] = {AX_SPICMD_WRITE_TXQ, 0xFF, 0xFF, 0xFF}; + +/* driver bus management functions */ +int axspi_wakeup(struct axspi_data *ax_spi) +{ + int ret; + + ax_spi->cmd_buf[0] = AX_SPICMD_EXIT_PWD; /* OP */ + ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 1); + if (ret) + dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); + return ret; +} + +int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status) +{ + int ret; + + /* OP */ + ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS; + ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3); + if (ret) + dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); + else + le16_to_cpus(&status->isr); + + return ret; +} + +int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len) +{ + struct spi_transfer *xfer = ax_spi->spi_rx_xfer; + int ret; + + memcpy(ax_spi->cmd_buf, ax88796c_rx_cmd_buf, 5); + + xfer->tx_buf = ax_spi->cmd_buf; + xfer->rx_buf = NULL; + xfer->len = ax_spi->comp ? 2 : 5; + xfer->bits_per_word = 8; + spi_message_add_tail(xfer, &ax_spi->rx_msg); + + xfer++; + xfer->rx_buf = data; + xfer->tx_buf = NULL; + xfer->len = len; + xfer->bits_per_word = 8; + spi_message_add_tail(xfer, &ax_spi->rx_msg); + ret = spi_sync(ax_spi->spi, &ax_spi->rx_msg); + if (ret) + dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); + + return ret; +} + +int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len) +{ + return spi_write(ax_spi->spi, data, len); +} + +u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg) +{ + int ret; + int len = ax_spi->comp ? 3 : 4; + + ax_spi->cmd_buf[0] = 0x03; /* OP code read register */ + ax_spi->cmd_buf[1] = reg; /* register address */ + ax_spi->cmd_buf[2] = 0xFF; /* dummy cycle */ + ax_spi->cmd_buf[3] = 0xFF; /* dummy cycle */ + ret = spi_write_then_read(ax_spi->spi, + ax_spi->cmd_buf, len, + ax_spi->rx_buf, 2); + if (ret) { + dev_err(&ax_spi->spi->dev, + "%s() failed: ret = %d\n", __func__, ret); + return 0xFFFF; + } + + le16_to_cpus((u16 *)ax_spi->rx_buf); + + return *(u16 *)ax_spi->rx_buf; +} + +int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value) +{ + int ret; + + memset(ax_spi->cmd_buf, 0, sizeof(ax_spi->cmd_buf)); + ax_spi->cmd_buf[0] = AX_SPICMD_WRITE_REG; /* OP code write register */ + ax_spi->cmd_buf[1] = reg; /* register address */ + ax_spi->cmd_buf[2] = value; + ax_spi->cmd_buf[3] = value >> 8; + + ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 4); + if (ret) + dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); + return ret; +} + diff --git a/drivers/net/ethernet/asix/ax88796c_spi.h b/drivers/net/ethernet/asix/ax88796c_spi.h new file mode 100644 index 00000000000000..5bcf91f603fb22 --- /dev/null +++ b/drivers/net/ethernet/asix/ax88796c_spi.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010 ASIX Electronics Corporation + * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * + * ASIX AX88796C SPI Fast Ethernet Linux driver + */ + +#ifndef _AX88796C_SPI_H +#define _AX88796C_SPI_H + +#include +#include + +/* Definition of SPI command */ +#define AX_SPICMD_WRITE_TXQ 0x02 +#define AX_SPICMD_READ_REG 0x03 +#define AX_SPICMD_READ_STATUS 0x05 +#define AX_SPICMD_READ_RXQ 0x0B +#define AX_SPICMD_BIDIR_WRQ 0xB2 +#define AX_SPICMD_WRITE_REG 0xD8 +#define AX_SPICMD_EXIT_PWD 0xAB + +extern const u8 ax88796c_rx_cmd_buf[]; +extern const u8 ax88796c_tx_cmd_buf[]; + +struct axspi_data { + struct spi_device *spi; + struct spi_message rx_msg; + struct spi_transfer spi_rx_xfer[2]; + u8 cmd_buf[6]; + u8 rx_buf[6]; + u8 comp; +}; + +struct spi_status { + u16 isr; + u8 status; +# define AX_STATUS_READY 0x80 +}; + +int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len); +int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len); +u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg); +int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value); +int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status); +int axspi_wakeup(struct axspi_data *ax_spi); + +static inline u16 AX_READ(struct axspi_data *ax_spi, u8 offset) +{ + return axspi_read_reg(ax_spi, offset); +} + +static inline int AX_WRITE(struct axspi_data *ax_spi, u16 value, u8 offset) +{ + return axspi_write_reg(ax_spi, offset, value); +} + +static inline int AX_READ_STATUS(struct axspi_data *ax_spi, + struct spi_status *status) +{ + return axspi_read_status(ax_spi, status); +} + +static inline int AX_WAKEUP(struct axspi_data *ax_spi) +{ + return axspi_wakeup(ax_spi); +} +#endif diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 02ae98aabf91c8..88d2ab7483994a 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1082,14 +1082,12 @@ static void ag71xx_mac_validate(struct phylink_config *config, phylink_set(mask, 1000baseX_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); return; unsupported: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); } static void ag71xx_mac_pcs_get_state(struct phylink_config *config, @@ -1968,10 +1966,10 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc->ctrl = 0; ag->stop_desc->next = (u32)ag->stop_desc_dma; - err = of_get_mac_address(np, ndev->dev_addr); + err = of_get_ethdev_address(np, ndev); if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); } err = of_get_phy_mode(np, &ag->phy_if_mode); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4ea157efca868c..4ad3fc72e74e32 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data) if (netdev->addr_assign_type & NET_ADDR_RANDOM) netdev->addr_assign_type ^= NET_ADDR_RANDOM; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); alx_set_macaddr(hw, hw->mac_addr); @@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id 
*ent) } memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); - memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, hw->mac_addr); memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); hw->mdio.prtad = 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3b51b172b3172b..da595242bc13a5 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); @@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, buffer_info->skb = NULL; buffer_info->length = 0; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); - netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed"); break; } buffer_info->dma = mapping; @@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable device (incl. PCI PM wakeup and hotplug setup) */ err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1c chip can DMA to 64-bit addresses, but it uses a single @@ -2769,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ netdev->addr_assign_type = NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (netif_msg_probe(adapter)) dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 753973ac922e9b..56e5f440e6665e 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1e_hw_set_mac_addr(&adapter->hw); @@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err = 0; err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1e chip can DMA to 64-bit addresses, but it uses a single @@ -2392,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); INIT_WORK(&adapter->reset_task, atl1e_reset_task); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 68f6c0bbd945c9..b4c9e805e98106 100644 --- 
a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* mark random mac */ netdev->addr_assign_type = NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index b69298ddb647fe..bbc4d7b08a498f 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl2_set_mac_addr(&adapter->hw); @@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* copy the MAC address out of the EEPROM */ atl2_read_mac_addr(&adapter->hw); - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; goto err_eeprom; diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index 0941d07d083397..e8cfbf4ff1b53d 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atlx_set_mac_addr(&adapter->hw); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index fa784953c60188..969591bbc06636 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index data[1] = (val >> 0) & 0xFF; } -static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +static inline void __b44_cam_write(struct b44 *bp, + const unsigned char *data, int index) { u32 val; @@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { - /* Allocation may have failed due to pci_alloc_consistent + /* Allocation may have failed due to dma_alloc_coherent insisting on use of GFP_DMA, which is more restrictive than necessary... */ struct dma_desc *rx_ring; @@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&bp->lock); @@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) } } -static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) +static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask, + int offset) { int magicsync = 6; int k, j, len = offset; @@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp) * valid PHY address. 
*/ bp->phy_addr &= 0x1F; - memcpy(bp->dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, addr); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ pr_err("Invalid MAC address found in EEPROM\n"); diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 02a569500234cd..7cc5213c575a1d 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet, goto err_free_buf_descs; } - ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL); + ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL); if (!ring->slots) goto err_free_buf_descs; @@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev) return err; SET_NETDEV_DEV(netdev, &pdev->dev); - err = of_get_mac_address(dev->of_node, netdev->dev_addr); + err = of_get_ethdev_address(dev->of_node, netdev); if (err) eth_hw_addr_random(netdev); netdev->netdev_ops = &bcm4908_enet_netdev_ops; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index d56886300ecf85..a568994a03a632 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) u32 val; priv = netdev_priv(dev); - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); /* use perfect match register 0 to store my mac address */ val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | @@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev) pd = dev_get_platdata(&pdev->dev); if (pd) { - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac_addr); priv->has_phy = pd->has_phy; priv->phy_id = pd->phy_id; priv->has_phy_interrupt = pd->has_phy_interrupt; @@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) pd = dev_get_platdata(&pdev->dev); if (pd) { - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac_addr); memcpy(priv->used_ports, pd->used_ports, sizeof(pd->used_ports)); priv->num_ports = pd->num_ports; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7fa1b695400d78..40933bf5a71007 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) } static void umac_set_hw_addr(struct bcm_sysport_priv *priv, - unsigned char *addr) + const unsigned char *addr) { u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; @@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* interface is disabled, changes to MAC will be reflected on next * open call @@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) } /* Initialize netdevice members */ - ret = of_get_mac_address(dn, dev->dev_addr); + ret = of_get_ethdev_address(dn, dev); if (ret) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 
6ce80cbcb48e30..086739e4f40a95 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -10,6 +10,7 @@ #include #include +#include #include "bgmac.h" static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, @@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; + struct device_node *np; int err; mii_bus = mdiobus_alloc(); @@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) mii_bus->parent = &core->dev; mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - err = mdiobus_register(mii_bus); + np = of_get_child_by_name(core->dev.of_node, "mdio"); + + err = of_mdiobus_register(mii_bus, np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 9513cfb5ba58c3..e6f48786949c0f 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "bgmac.h" @@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac) struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; + /* DT info should be the most accurate */ + phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node, + bgmac_adjust_link); + if (phy_dev) + return 0; + /* Connect to the PHY */ - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, - bgmac->phyaddr); - phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(phy_dev)) { - dev_err(bgmac->dev, "PHY connection failed\n"); - return PTR_ERR(phy_dev); + if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) { + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, + bgmac->phyaddr); + phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phy_dev)) { + dev_err(bgmac->dev, "PHY connection failed\n"); + return PTR_ERR(phy_dev); + } + + return 0; } - return 0; + /* Assume a fixed link to the switch port */ + return bgmac_phy_connect_direct(bgmac); } static const struct bcma_device_id bgmac_bcma_tbl[] = { @@ -128,7 +140,7 @@ static int bgmac_probe(struct bcma_device *core) bcma_set_drvdata(core, bgmac); - err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); + err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev); if (err == -EPROBE_DEFER) return err; @@ -150,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core) err = -ENOTSUPP; goto err; } - ether_addr_copy(bgmac->net_dev->dev_addr, mac); + eth_hw_addr_set(bgmac->net_dev, mac); } /* On BCM4706 we need common core to access PHY */ @@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core) bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; - if (bgmac->mii_bus) - bgmac->phy_connect = bcma_phy_connect; - else - bgmac->phy_connect = bgmac_phy_connect_direct; + bgmac->phy_connect = bcma_phy_connect; err = bgmac_enet_probe(bgmac); if (err) diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index df8ff839cc6214..c6412c523637b9 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -191,7 +191,7 @@ 
static int bgmac_probe(struct platform_device *pdev) bgmac->dev = &pdev->dev; bgmac->dma_dev = &pdev->dev; - ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); + ret = of_get_ethdev_address(np, bgmac->net_dev); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index fe4d99abd54876..7b525c65bacb98 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set, udelay(2); } -static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr) +static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr) { u32 tmp; @@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) if (ret < 0) return ret; - ether_addr_copy(net_dev->dev_addr, sa->sa_data); + eth_hw_addr_set(net_dev, sa->sa_data); bgmac_write_mac_address(bgmac, net_dev->dev_addr); eth_commit_mac_addr_change(net_dev, addr); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8c83973adca57d..babc955ba64e26 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) } static void -bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) +bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos) { u32 val; @@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); @@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_kdump_kernel()) bnx2_wait_dma_complete(bp); - memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, bp->mac_addr); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e789430f407c3a..2b06d78baa0869 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp); * operation has been successfully scheduled and a negative - if a requested * operations has failed. 
*/ -int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, +int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b5d954cb409ae4..e8e8c2d593c558 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) return rc; } - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) rc = bnx2x_set_eth_mac(bp, true); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ae87296ae1ffa4..aec666e976831e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8417,7 +8417,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp) * Init service functions */ -int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, +int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags) { @@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) else if (bp->wol) { u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u8 *mac_addr = bp->dev->dev_addr; + const u8 *mac_addr = bp->dev->dev_addr; struct pci_dev *pdev = bp->pdev; u32 val; u16 pmc; @@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) * as the SAN mac was copied from the primary MAC. */ if (IS_MF_FCOE_AFEX(bp)) - memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, fip_mac); } else { val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 
iscsi_mac_upper); @@ -11823,9 +11823,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) u32 val, val2; int func = BP_ABS_FUNC(bp); int port = BP_PORT(bp); + u8 addr[ETH_ALEN] = {}; /* Zero primary MAC configuration */ - eth_zero_addr(bp->dev->dev_addr); + eth_hw_addr_set(bp->dev, addr); if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); @@ -11834,8 +11835,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && - (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { + bnx2x_set_mac_buf(addr, val, val2); + eth_hw_addr_set(bp->dev, addr); + } if (CNIC_SUPPORT(bp)) bnx2x_get_cnic_mac_hwinfo(bp); @@ -11843,7 +11846,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) /* in SF read MACs from port configuration */ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + bnx2x_set_mac_buf(addr, val, val2); + eth_hw_addr_set(bp->dev, addr); if (CNIC_SUPPORT(bp)) bnx2x_get_cnic_mac_hwinfo(bp); @@ -12291,7 +12295,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (rc) return rc; } else { - eth_zero_addr(bp->dev->dev_addr); + static const u8 zero_addr[ETH_ALEN] = {}; + + eth_hw_addr_set(bp->dev, zero_addr); } bnx2x_set_modes_bitmap(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6fbf735fca31c2..74a8931ce1d1d5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID && !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { /* update new mac to net device */ - memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, bulletin->mac); } if (bulletin->valid_bitmap & (1 << LINK_VALID)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 966d5722c5e2fa..8c2cf551978780 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading); -int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, + bool set); int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index ea0e9394f89868..c9129b9ba446b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) sizeof(bp->fw_ver)); if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) - memcpy(bp->dev->dev_addr, - bp->acquire_resp.resc.current_mac_addr, - ETH_ALEN); + eth_hw_addr_set(bp->dev, + bp->acquire_resp.resc.current_mac_addr); out: 
bnx2x_vfpf_finalize(bp, &req->first_tlv); @@ -722,7 +721,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) } /* request pf to add a mac for the vf */ -int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) +int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set) { struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; @@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) "vfpf SET MAC failed. Check bulletin board for new posts\n"); /* copy mac from bulletin to device */ - memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, bulletin.mac); /* check if bulletin board was updated */ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index c6ef7ec2c11515..2bc2b707d6eee2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 62f84cc91e4d10..c04ea83188e22d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -49,8 +49,6 @@ #include #include #include -#include -#include #include #include #include @@ -85,55 +83,7 @@ MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); #define BNXT_TX_PUSH_THRESH 164 -enum board_idx { - BCM57301, - BCM57302, - BCM57304, - BCM57417_NPAR, - BCM58700, - BCM57311, - BCM57312, - BCM57402, - BCM57404, - BCM57406, - BCM57402_NPAR, - BCM57407, - BCM57412, - BCM57414, - BCM57416, - BCM57417, - BCM57412_NPAR, - BCM57314, - BCM57417_SFP, - BCM57416_SFP, - BCM57404_NPAR, - BCM57406_NPAR, - BCM57407_SFP, - BCM57407_NPAR, - BCM57414_NPAR, - BCM57416_NPAR, - BCM57452, - BCM57454, - BCM5745x_NPAR, - BCM57508, - BCM57504, - BCM57502, - BCM57508_NPAR, - BCM57504_NPAR, - BCM57502_NPAR, - BCM58802, - BCM58804, - BCM58808, - NETXTREME_E_VF, - NETXTREME_C_VF, - NETXTREME_S_VF, - NETXTREME_C_VF_HV, - NETXTREME_E_VF_HV, - NETXTREME_E_P5_VF, - NETXTREME_E_P5_VF_HV, -}; - -/* indexed by enum above */ +/* indexed by enum board_idx */ static const struct { char *name; } board_info[] = { @@ -2172,7 +2122,7 @@ static int bnxt_async_event_process(struct bnxt *bp, set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); break; case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { - char *fatal_str = "non-fatal"; + char *type_str = "Solicited"; if (!bp->fw_health) goto async_event_process_exit; @@ -2184,13 +2134,21 @@ static int bnxt_async_event_process(struct bnxt *bp, bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); if (!bp->fw_reset_max_dsecs) bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; - if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { - fatal_str = "fatal"; + if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { + set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); + } else if 
(EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { + type_str = "Fatal"; + bp->fw_health->fatalities++; set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + } else if (data2 && BNXT_FW_STATUS_HEALTHY != + EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { + type_str = "Non-fatal"; + bp->fw_health->survivals++; + set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); } netif_warn(bp, hw, bp->dev, - "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", - fatal_str, data1, data2, + "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", + type_str, data1, data2, bp->fw_reset_min_dsecs * 100, bp->fw_reset_max_dsecs * 100); set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); @@ -2198,17 +2156,18 @@ static int bnxt_async_event_process(struct bnxt *bp, } case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { struct bnxt_fw_health *fw_health = bp->fw_health; + char *status_desc = "healthy"; + u32 status; if (!fw_health) goto async_event_process_exit; if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { fw_health->enabled = false; - netif_info(bp, drv, bp->dev, - "Error recovery info: error recovery[0]\n"); + netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); break; } - fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); + fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); fw_health->tmr_multiplier = DIV_ROUND_UP(fw_health->polling_dsecs * HZ, bp->current_interval * 10); @@ -2218,10 +2177,13 @@ static int bnxt_async_event_process(struct bnxt *bp, bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); fw_health->last_fw_reset_cnt = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (status != BNXT_FW_STATUS_HEALTHY) + status_desc = "unhealthy"; netif_info(bp, drv, bp->dev, - "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", - fw_health->master, fw_health->last_fw_reset_cnt, - bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG)); + "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", + fw_health->primary ? "primary" : "backup", status, + status_desc, fw_health->last_fw_reset_cnt); if (!fw_health->enabled) { /* Make sure tmr_counter is set and visible to * bnxt_health_check() before setting enabled to true. 
@@ -4651,7 +4613,7 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, return rc; } -static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) +int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { struct hwrm_func_drv_unrgtr_input *req; int rc; @@ -4869,7 +4831,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, #endif static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, - u8 *mac_addr) + const u8 *mac_addr) { struct hwrm_cfa_l2_filter_alloc_output *resp; struct hwrm_cfa_l2_filter_alloc_input *req; @@ -6366,7 +6328,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (rx_rings != bp->rx_nr_rings) { netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", rx_rings, bp->rx_nr_rings); - if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && + if (netif_is_rxfh_configured(bp->dev) && (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != bnxt_get_nr_rss_ctxs(bp, rx_rings) || bnxt_get_max_rss_ring(bp) >= rx_rings)) { @@ -7192,7 +7154,7 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, ctx_pg->nr_pages = 0; } -static void bnxt_free_ctx_mem(struct bnxt *bp) +void bnxt_free_ctx_mem(struct bnxt *bp) { struct bnxt_ctx_mem_info *ctx = bp->ctx; int i; @@ -7518,12 +7480,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; flags_ext = le32_to_cpu(resp->flags_ext); if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -7579,6 +7547,32 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) return rc; } +static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) +{ + struct hwrm_dbg_qcaps_output *resp; + struct hwrm_dbg_qcaps_input *req; + int rc; + + bp->fw_dbg_cap = 0; + if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) + return; + + rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); + if (rc) + return; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_dbg_qcaps_exit; + + bp->fw_dbg_cap = le32_to_cpu(resp->flags); + +hwrm_dbg_qcaps_exit: + hwrm_req_drop(bp, req); +} + static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); static int bnxt_hwrm_func_qcaps(struct bnxt *bp) @@ -7588,6 +7582,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) rc = __bnxt_hwrm_func_qcaps(bp); if (rc) return rc; + + bnxt_hwrm_dbg_qcaps(bp); + rc = bnxt_hwrm_queue_qportcfg(bp); if (rc) { netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); @@ -7642,6 +7639,7 @@ static int __bnxt_alloc_fw_health(struct bnxt *bp) if (!bp->fw_health) return -ENOMEM; + mutex_init(&bp->fw_health->lock); return 0; } @@ -7688,12 +7686,16 @@ static void bnxt_inv_fw_health_reg(struct bnxt *bp) struct bnxt_fw_health *fw_health = bp->fw_health; u32 reg_type; - if (!fw_health || !fw_health->status_reliable) + if (!fw_health) return; reg_type = 
BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) fw_health->status_reliable = false; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) + fw_health->resets_reliable = false; } static void bnxt_try_map_fw_health_reg(struct bnxt *bp) @@ -7750,6 +7752,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) int i; bp->fw_health->status_reliable = false; + bp->fw_health->resets_reliable = false; /* Only pre-map the monitoring GRC registers using window 3 */ for (i = 0; i < 4; i++) { u32 reg = fw_health->regs[i]; @@ -7763,6 +7766,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); } bp->fw_health->status_reliable = true; + bp->fw_health->resets_reliable = true; if (reg_base == 0xffffffff) return 0; @@ -8208,6 +8212,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) if (!rc) { bp->fw_rx_stats_ext_size = le16_to_cpu(resp_qs->rx_stat_size) / 8; + if (BNXT_FW_MAJ(bp) < 220 && + bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) + bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; + bp->fw_tx_stats_ext_size = tx_stat_size ? le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; } else { @@ -9246,7 +9254,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info) } } -static void bnxt_report_link(struct bnxt *bp) +void bnxt_report_link(struct bnxt *bp) { if (bp->link_info.link_up) { const char *signal = ""; @@ -9691,8 +9699,6 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_req_send(bp, req); } -static int bnxt_fw_init_one(struct bnxt *bp); - static int bnxt_fw_reset_via_optee(struct bnxt *bp) { #ifdef CONFIG_TEE_BNXT_FW @@ -9739,6 +9745,33 @@ static int bnxt_try_recover_fw(struct bnxt *bp) return -ENODEV; } +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int rc; + + if (!BNXT_NEW_RM(bp)) + return 0; /* no resource reservations required */ + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (rc) + netdev_err(bp->dev, "resc_qcaps failed\n"); + + hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; + hw_resc->resv_irqs = 0; + hw_resc->resv_tx_rings = 0; + hw_resc->resv_rx_rings = 0; + hw_resc->resv_hw_ring_grps = 0; + hw_resc->resv_vnics = 0; + if (!fw_reset) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } + + return rc; +} + static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct hwrm_func_drv_if_change_output *resp; @@ -9822,25 +9855,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) return rc; } } - if (BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - - rc = bnxt_hwrm_func_resc_qcaps(bp, true); - if (rc) - netdev_err(bp->dev, "resc_qcaps failed\n"); - - hw_resc->resv_cp_rings = 0; - hw_resc->resv_stat_ctxs = 0; - hw_resc->resv_irqs = 0; - hw_resc->resv_tx_rings = 0; - hw_resc->resv_rx_rings = 0; - hw_resc->resv_hw_ring_grps = 0; - hw_resc->resv_vnics = 0; - if (!fw_reset) { - bp->tx_nr_rings = 0; - bp->rx_nr_rings = 0; - } - } + rc = bnxt_cancel_reservations(bp, fw_reset); } return rc; } @@ -10318,7 +10333,7 @@ void bnxt_half_close_nic(struct bnxt *bp) bnxt_free_mem(bp, false); } -static void bnxt_reenable_sriov(struct bnxt *bp) +void bnxt_reenable_sriov(struct bnxt *bp) { if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; @@ -11295,14 +11310,18 @@ static void bnxt_fw_health_check(struct bnxt *bp) } val = bnxt_fw_health_readl(bp, 
BNXT_FW_HEARTBEAT_REG); - if (val == fw_health->last_fw_heartbeat) + if (val == fw_health->last_fw_heartbeat) { + fw_health->arrests++; goto fw_reset; + } fw_health->last_fw_heartbeat = val; val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - if (val != fw_health->last_fw_reset_cnt) + if (val != fw_health->last_fw_reset_cnt) { + fw_health->discoveries++; goto fw_reset; + } fw_health->tmr_counter = fw_health->tmr_multiplier; return; @@ -11508,7 +11527,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp) } bnxt_fw_reset_close(bp); wait_dsecs = fw_health->master_func_wait_dsecs; - if (fw_health->master) { + if (fw_health->primary) { if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) wait_dsecs = 0; bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; @@ -11772,13 +11791,17 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) bnxt_rx_ring_reset(bp); - if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) - bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || + test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) + bnxt_devlink_health_fw_report(bp); + else + bnxt_fw_reset(bp); + } if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { if (!is_bnxt_fw_ok(bp)) - bnxt_devlink_health_report(bp, - BNXT_FW_EXCEPTION_SP_EVENT); + bnxt_devlink_health_fw_report(bp); } smp_mb__before_atomic(); @@ -11989,7 +12012,7 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp) static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); -static int bnxt_fw_init_one(struct bnxt *bp) +int bnxt_fw_init_one(struct bnxt *bp) { int rc; @@ -12051,6 +12074,27 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) } } +bool bnxt_hwrm_reset_permitted(struct bnxt *bp) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + bool result = true; /* firmware will enforce if unknown */ + + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return result; + + if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) + return result; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + if (!hwrm_req_send(bp, req)) + result = !!(le16_to_cpu(resp->flags) & + FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); + hwrm_req_drop(bp, req); + return result; +} + static void bnxt_reset_all(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -12093,7 +12137,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { bnxt_ulp_start(bp, rc); - bnxt_dl_health_status_update(bp, false); + bnxt_dl_health_fw_status_update(bp, false); } bp->fw_reset_state = 0; dev_close(bp->dev); @@ -12159,7 +12203,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) return; } - if (!bp->fw_health->master) { + if (!bp->fw_health->primary) { u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; @@ -12192,6 +12236,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } } clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); + if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && + !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) + bnxt_dl_remote_reload(bp); if (pci_enable_device(bp->pdev)) { netdev_err(bp->dev, "Cannot re-enable PCI device\n"); rc = 
-ENODEV; @@ -12241,8 +12289,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); bnxt_ptp_reapply_pps(bp); - bnxt_dl_health_recovery_done(bp); - bnxt_dl_health_status_update(bp, true); + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { + bnxt_dl_health_fw_recovery_done(bp); + bnxt_dl_health_fw_status_update(bp, true); + } rtnl_unlock(); break; } @@ -12369,7 +12420,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (rc) return rc; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) { bnxt_close_nic(bp, false, false); rc = bnxt_open_nic(bp, false, false); @@ -13103,7 +13154,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) int rc = 0; if (BNXT_PF(bp)) { - memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, bp->pf.mac_addr); } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -13111,7 +13162,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) if (is_valid_ether_addr(vf->mac_addr)) { /* overwrite netdev dev_addr with admin VF MAC */ - memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, vf->mac_addr); /* Older PF driver or firmware may not approve this * correctly. */ @@ -13186,6 +13237,15 @@ static int bnxt_map_db_bar(struct bnxt *bp) return 0; } +void bnxt_print_device_info(struct bnxt *bp) +{ + netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", + board_info[bp->board_idx].name, + (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); + + pcie_print_link_status(bp->pdev); +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; @@ -13209,10 +13269,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; bp = netdev_priv(dev); + bp->board_idx = ent->driver_data; bp->msg_enable = BNXT_DEF_MSG_ENABLE; bnxt_set_max_func_irqs(bp, max_irqs); - if (bnxt_vf_pciid(ent->driver_data)) + if (bnxt_vf_pciid(bp->board_idx)) bp->flags |= BNXT_FLAG_VF; if (pdev->msix_cap) @@ -13370,7 +13431,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_inv_fw_health_reg(bp); - bnxt_dl_register(bp); + rc = bnxt_dl_register(bp); + if (rc) + goto init_err_dl; rc = register_netdev(dev); if (rc) @@ -13380,16 +13443,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) devlink_port_type_eth_set(&bp->dl_port, bp->dev); bnxt_dl_fw_reporters_create(bp); - netdev_info(dev, "%s found at mem %lx, node addr %pM\n", - board_info[ent->driver_data].name, - (long)pci_resource_start(pdev, 0), dev->dev_addr); - pcie_print_link_status(pdev); + bnxt_print_device_info(bp); pci_save_state(pdev); return 0; init_err_cleanup: bnxt_dl_unregister(bp); +init_err_dl: bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 19fe6478e9b4bc..d0d5da9b78f8b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -489,6 +489,15 @@ struct rx_tpa_end_cmp_ext { ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) +#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + 
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION) + +#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK) + #define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \ !!((data1) & \ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC) @@ -1514,6 +1523,21 @@ struct bnxt_ctx_mem_info { struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX]; }; +enum bnxt_health_severity { + SEVERITY_NORMAL = 0, + SEVERITY_WARNING, + SEVERITY_RECOVERABLE, + SEVERITY_FATAL, +}; + +enum bnxt_health_remedy { + REMEDY_DEVLINK_RECOVER, + REMEDY_POWER_CYCLE_DEVICE, + REMEDY_POWER_CYCLE_HOST, + REMEDY_FW_UPDATE, + REMEDY_HW_REPLACE, +}; + struct bnxt_fw_health { u32 flags; u32 polling_dsecs; @@ -1531,9 +1555,9 @@ struct bnxt_fw_health { u32 last_fw_heartbeat; u32 last_fw_reset_cnt; u8 enabled:1; - u8 master:1; - u8 fatal:1; + u8 primary:1; u8 status_reliable:1; + u8 resets_reliable:1; u8 tmr_multiplier; u8 tmr_counter; u8 fw_reset_seq_cnt; @@ -1543,12 +1567,15 @@ struct bnxt_fw_health { u32 echo_req_data1; u32 echo_req_data2; struct devlink_health_reporter *fw_reporter; - struct devlink_health_reporter *fw_reset_reporter; - struct devlink_health_reporter *fw_fatal_reporter; -}; - -struct bnxt_fw_reporter_ctx { - unsigned long sp_event; + /* Protects severity and remedy */ + struct mutex lock; + enum bnxt_health_severity severity; + enum bnxt_health_remedy remedy; + u32 arrests; + u32 discoveries; + u32 survivals; + u32 fatalities; + u32 diagnoses; }; #define BNXT_FW_HEALTH_REG_TYPE_MASK 3 @@ -1586,6 +1613,54 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_RETRY 5 #define BNXT_FW_IF_RETRY 10 +enum board_idx { + BCM57301, + BCM57302, + BCM57304, + BCM57417_NPAR, + BCM58700, + BCM57311, + BCM57312, + BCM57402, + BCM57404, + BCM57406, + BCM57402_NPAR, + BCM57407, + BCM57412, + BCM57414, + BCM57416, + BCM57417, + BCM57412_NPAR, + BCM57314, + BCM57417_SFP, + BCM57416_SFP, + BCM57404_NPAR, + BCM57406_NPAR, + BCM57407_SFP, + BCM57407_NPAR, + BCM57414_NPAR, + BCM57416_NPAR, + BCM57452, + BCM57454, + BCM5745x_NPAR, + BCM57508, + BCM57504, + BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, + BCM58802, + BCM58804, + BCM58808, + NETXTREME_E_VF, + NETXTREME_C_VF, + NETXTREME_S_VF, + NETXTREME_C_VF_HV, + NETXTREME_E_VF_HV, + NETXTREME_E_P5_VF, + NETXTREME_E_P5_VF_HV, +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1840,6 +1915,10 @@ struct bnxt { #define BNXT_STATE_DRV_REGISTERED 7 #define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8 #define BNXT_STATE_NAPI_DISABLED 9 +#define BNXT_STATE_FW_ACTIVATE 11 +#define BNXT_STATE_RECOVER 12 +#define BNXT_STATE_FW_NON_FATAL_COND 13 +#define BNXT_STATE_FW_ACTIVATE_RESET 14 #define BNXT_NO_FW_ACCESS(bp) \ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ @@ -1879,8 +1958,13 @@ struct bnxt { #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 + #define BNXT_FW_CAP_LIVEPATCH 0x08000000 #define BNXT_FW_CAP_PTP_PPS 0x10000000 + #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000 #define BNXT_FW_CAP_RING_MONITOR 0x40000000 + #define BNXT_FW_CAP_DBG_QCAPS 0x80000000 + + u32 fw_dbg_cap; #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; @@ -2049,6 +2133,7 @@ struct bnxt { struct list_head tc_indr_block_list; struct dentry *debugfs_pdev; struct device *hwmon_dev; + enum board_idx board_idx; }; #define BNXT_NUM_RX_RING_STATS 8 @@ -2090,6 +2175,9 @@ struct bnxt { 
#define BNXT_RX_STATS_EXT_OFFSET(counter) \ (offsetof(struct rx_port_stats_ext, counter) / 8) +#define BNXT_RX_STATS_EXT_NUM_LEGACY \ + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks) + #define BNXT_TX_STATS_EXT_OFFSET(counter) \ (offsetof(struct tx_port_stats_ext, counter) / 8) @@ -2181,11 +2269,13 @@ void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); +int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_nq_rings_in_use(struct bnxt *bp); int bnxt_hwrm_set_coal(struct bnxt *); +void bnxt_free_ctx_mem(struct bnxt *bp); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); @@ -2194,9 +2284,11 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); +void bnxt_report_link(struct bnxt *bp); int bnxt_update_link(struct bnxt *bp, bool chng_link_state); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); @@ -2205,6 +2297,7 @@ int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); +void bnxt_reenable_sriov(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); @@ -2212,6 +2305,8 @@ void bnxt_fw_exception(struct bnxt *bp); void bnxt_fw_reset(struct bnxt *bp); int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp); +int bnxt_fw_init_one(struct bnxt *bp); +bool bnxt_hwrm_reset_permitted(struct bnxt *bp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_restore_pf_fw_resources(struct bnxt *bp); @@ -2219,5 +2314,5 @@ int bnxt_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid); void bnxt_dim_work(struct work_struct *work); int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); - +void bnxt_print_device_info(struct bnxt *bp); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c new file mode 100644 index 00000000000000..d3cb2f21946dad --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -0,0 +1,444 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2021 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_coredump.h" + +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, + struct bnxt_hwrm_dbg_dma_info *info) +{ + struct hwrm_dbg_cmn_input *cmn_req = msg; + __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; + u16 seq = 0, len, segs_off; + dma_addr_t dma_handle; + void *dma_buf, *resp; + int rc, off = 0; + + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); + return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; + + segs_off = offsetof(struct hwrm_dbg_coredump_list_output, + total_segments); + cmn_req->host_dest_addr = cpu_to_le64(dma_handle); + cmn_req->host_buf_len = cpu_to_le32(info->dma_len); + while (1) { + *seq_ptr = cpu_to_le16(seq); + rc = hwrm_req_send(bp, msg); + if (rc) + break; + + len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); + if (!seq && + cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + info->segs = le16_to_cpu(*((__le16 *)(resp + + segs_off))); + if (!info->segs) { + rc = -EIO; + break; + } + + info->dest_buf_size = info->segs * + sizeof(struct coredump_segment_record); + info->dest_buf = kmalloc(info->dest_buf_size, + GFP_KERNEL); + if (!info->dest_buf) { + rc = -ENOMEM; + break; + } + } + + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } + + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) + info->dest_buf_size += len; + + if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) + break; + + seq++; + off += len; + } + hwrm_req_drop(bp, msg); + return rc; +} + +static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, + struct bnxt_coredump *coredump) +{ + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; + + info.dma_len = COREDUMP_LIST_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); + info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, + data_len); + + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); + if (!rc) { + coredump->data = info.dest_buf; + coredump->data_size = info.dest_buf_size; + coredump->total_segs = info.segs; + } + return rc; +} + +static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, + u16 segment_id) +{ + struct hwrm_dbg_coredump_initiate_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); + + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, + u16 segment_id, u32 *seg_len, + void *buf, u32 buf_len, u32 offset) +{ + struct hwrm_dbg_coredump_retrieve_input *req; + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); + + info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, + seq_no); + 
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, + data_len); + if (buf) { + info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } + + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); + if (!rc) + *seg_len = info.dest_buf_size; + + return rc; +} + +static void +bnxt_fill_coredump_seg_hdr(struct bnxt *bp, + struct bnxt_coredump_segment_hdr *seg_hdr, + struct coredump_segment_record *seg_rec, u32 seg_len, + int status, u32 duration, u32 instance) +{ + memset(seg_hdr, 0, sizeof(*seg_hdr)); + memcpy(seg_hdr->signature, "sEgM", 4); + if (seg_rec) { + seg_hdr->component_id = (__force __le32)seg_rec->component_id; + seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; + seg_hdr->low_version = seg_rec->version_low; + seg_hdr->high_version = seg_rec->version_hi; + seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags); + } else { + /* For hwrm_ver_get response Component id = 2 + * and Segment id = 0 + */ + seg_hdr->component_id = cpu_to_le32(2); + seg_hdr->segment_id = 0; + } + seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); + seg_hdr->length = cpu_to_le32(seg_len); + seg_hdr->status = cpu_to_le32(status); + seg_hdr->duration = cpu_to_le32(duration); + seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); + seg_hdr->instance = cpu_to_le32(instance); +} + +static void bnxt_fill_cmdline(struct bnxt_coredump_record *record) +{ + struct mm_struct *mm = current->mm; + int i, len, last = 0; + + if (mm) { + len = min_t(int, mm->arg_end - mm->arg_start, + sizeof(record->commandline) - 1); + if (len && !copy_from_user(record->commandline, + (char __user *)mm->arg_start, len)) { + for (i = 0; i < len; i++) { + if (record->commandline[i]) + last = i; + else + record->commandline[i] = ' '; + } + record->commandline[last + 1] = 0; + return; + } + } + + strscpy(record->commandline, current->comm, TASK_COMM_LEN); +} + +static void +bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, + time64_t start, s16 start_utc, u16 total_segs, + int status) +{ + time64_t end = ktime_get_real_seconds(); + u32 os_ver_major = 0, os_ver_minor = 0; + struct tm tm; + + time64_to_tm(start, 0, &tm); + memset(record, 0, sizeof(*record)); + memcpy(record->signature, "cOrE", 4); + record->flags = 0; + record->low_version = 0; + record->high_version = 1; + record->asic_state = 0; + strscpy(record->system_name, utsname()->nodename, + sizeof(record->system_name)); + record->year = cpu_to_le16(tm.tm_year + 1900); + record->month = cpu_to_le16(tm.tm_mon + 1); + record->day = cpu_to_le16(tm.tm_mday); + record->hour = cpu_to_le16(tm.tm_hour); + record->minute = cpu_to_le16(tm.tm_min); + record->second = cpu_to_le16(tm.tm_sec); + record->utc_bias = cpu_to_le16(start_utc); + bnxt_fill_cmdline(record); + record->total_segments = cpu_to_le32(total_segs); + + if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2) + netdev_warn(bp->dev, "Unknown OS release in coredump\n"); + record->os_ver_major = cpu_to_le32(os_ver_major); + record->os_ver_minor = cpu_to_le32(os_ver_minor); + + strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name)); + time64_to_tm(end, 0, &tm); + record->end_year = cpu_to_le16(tm.tm_year + 1900); + record->end_month = cpu_to_le16(tm.tm_mon + 1); + record->end_day = cpu_to_le16(tm.tm_mday); + record->end_hour = cpu_to_le16(tm.tm_hour); + record->end_minute = cpu_to_le16(tm.tm_min); + record->end_second = cpu_to_le16(tm.tm_sec); + record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); + 
record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | + bp->ver_resp.chip_rev << 8 | + bp->ver_resp.chip_metal); + record->asic_id2 = 0; + record->coredump_status = cpu_to_le32(status); + record->ioctl_low_version = 0; + record->ioctl_high_version = 0; +} + +static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) +{ + u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; + struct coredump_segment_record *seg_record = NULL; + struct bnxt_coredump_segment_hdr seg_hdr; + struct bnxt_coredump coredump = {NULL}; + time64_t start_time; + u16 start_utc; + int rc = 0, i; + + if (buf) + buf_len = *dump_len; + + start_time = ktime_get_real_seconds(); + start_utc = sys_tz.tz_minuteswest * 60; + seg_hdr_len = sizeof(seg_hdr); + + /* First segment should be hwrm_ver_get response */ + *dump_len = seg_hdr_len + ver_get_resp_len; + if (buf) { + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, + 0, 0, 0); + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len; + memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); + offset += ver_get_resp_len; + } + + rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); + if (rc) { + netdev_err(bp->dev, "Failed to get coredump segment list\n"); + goto err; + } + + *dump_len += seg_hdr_len * coredump.total_segs; + + seg_record = (struct coredump_segment_record *)coredump.data; + seg_record_len = sizeof(*seg_record); + + for (i = 0; i < coredump.total_segs; i++) { + u16 comp_id = le16_to_cpu(seg_record->component_id); + u16 seg_id = le16_to_cpu(seg_record->segment_id); + u32 duration = 0, seg_len = 0; + unsigned long start, end; + + if (buf && ((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + + start = jiffies; + + rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); + if (rc) { + netdev_err(bp->dev, + "Failed to initiate coredump for seg = %d\n", + seg_record->segment_id); + goto next_seg; + } + + /* Write segment data into the buffer */ + rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, + &seg_len, buf, buf_len, + offset + seg_hdr_len); + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) + netdev_err(bp->dev, + "Failed to retrieve coredump for seg = %d\n", + seg_record->segment_id); + +next_seg: + end = jiffies; + duration = jiffies_to_msecs(end - start); + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, + rc, duration, 0); + + if (buf) { + /* Write segment header into the buffer */ + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len + seg_len; + } + + *dump_len += seg_len; + seg_record = + (struct coredump_segment_record *)((u8 *)seg_record + + seg_record_len); + } + +err: + if (buf) + bnxt_fill_coredump_record(bp, buf + offset, start_time, + start_utc, coredump.total_segs + 1, + rc); + kfree(coredump.data); + *dump_len += sizeof(struct bnxt_coredump_record); + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); + return rc; +} + +int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len) +{ + if (dump_type == BNXT_DUMP_CRASH) { +#ifdef CONFIG_TEE_BNXT_FW + return tee_bnxt_copy_coredump(buf, 0, *dump_len); +#else + return -EOPNOTSUPP; +#endif + } else { + return __bnxt_get_coredump(bp, buf, dump_len); + } +} + +static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) +{ + struct hwrm_dbg_qcfg_output *resp; + struct hwrm_dbg_qcfg_input *req; + int rc, hdr_len = 0; + + if (!(bp->fw_cap & 
BNXT_FW_CAP_DBG_QCAPS)) + return -EOPNOTSUPP; + + if (dump_type == BNXT_DUMP_CRASH && + !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + if (dump_type == BNXT_DUMP_CRASH) + req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto get_dump_len_exit; + + if (dump_type == BNXT_DUMP_CRASH) { + *dump_len = le32_to_cpu(resp->crashdump_size); + } else { + /* Driver adds coredump header and "HWRM_VER_GET response" + * segment additionally to coredump. + */ + hdr_len = sizeof(struct bnxt_coredump_segment_hdr) + + sizeof(struct hwrm_ver_get_output) + + sizeof(struct bnxt_coredump_record); + *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len; + } + if (*dump_len <= hdr_len) + rc = -EINVAL; + +get_dump_len_exit: + hwrm_req_drop(bp, req); + return rc; +} + +u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type) +{ + u32 len = 0; + + if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) { + if (dump_type == BNXT_DUMP_CRASH) + len = BNXT_CRASH_DUMP_LEN; + else + __bnxt_get_coredump(bp, NULL, &len); + } + return len; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h index 09c22f8fe39910..b1a1b2fffb194d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h @@ -10,6 +10,10 @@ #ifndef BNXT_COREDUMP_H #define BNXT_COREDUMP_H +#include +#include +#include + struct bnxt_coredump_segment_hdr { __u8 signature[4]; __le32 component_id; @@ -63,4 +67,51 @@ struct bnxt_coredump_record { __u8 ioctl_high_version; __le16 rsvd3[313]; }; + +#define BNXT_CRASH_DUMP_LEN (8 << 20) + +#define COREDUMP_LIST_BUF_LEN 2048 +#define COREDUMP_RETRIEVE_BUF_LEN 4096 + +struct bnxt_coredump { + void *data; + int data_size; + u16 total_segs; +}; + +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + +struct bnxt_hwrm_dbg_dma_info { + void *dest_buf; + int dest_buf_size; + u16 dma_len; + u16 seq_off; + u16 data_len_off; + u16 segs; + u32 seg_start; + u32 buf_len; +}; + +struct hwrm_dbg_cmn_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; +}; + +struct hwrm_dbg_cmn_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define HWRM_DBG_CMN_FLAGS_MORE 1 +}; + +int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len); +u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type); + #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 9576547df4aba2..ce790e9b45c302 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -16,6 +16,18 @@ #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_ethtool.h" +#include "bnxt_ulp.h" +#include "bnxt_ptp.h" +#include "bnxt_coredump.h" + +static void __bnxt_fw_recover(struct bnxt *bp) +{ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || + test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) + bnxt_fw_reset(bp); + else + bnxt_fw_exception(bp); +} static int bnxt_dl_flash_update(struct devlink *dl, @@ -40,146 +52,208 @@ bnxt_dl_flash_update(struct devlink *dl, return rc; } 
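
For reference, bnxt_fill_cmdline() above captures the invoking process's command line for the coredump record by copying the task's argument area and turning the NUL separators between arguments into spaces, falling back to current->comm when no mm is available. A minimal userspace sketch of the same transformation (illustration only, not part of this patch), using /proc/self/cmdline as the NUL-separated source:

/*
 * Userspace analog of bnxt_fill_cmdline(): /proc/<pid>/cmdline is a
 * NUL-separated argv, and the coredump record wants one space-separated,
 * NUL-terminated string. Buffer size and file source are illustrative.
 */
#include <stdio.h>
#include <string.h>

static void cmdline_to_record(char *buf, size_t len)
{
	size_t i, last = 0;

	for (i = 0; i < len; i++) {
		if (buf[i])
			last = i;	/* remember last non-NUL byte */
		else
			buf[i] = ' ';	/* argument separators become spaces */
	}
	buf[last + 1] = '\0';		/* trim trailing separators */
}

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/self/cmdline", "r");
	size_t len;

	if (!f)
		return 1;
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	if (!len)
		return 1;
	cmdline_to_record(buf, len);
	printf("commandline: \"%s\"\n", buf);
	return 0;
}

The driver's fallback to current->comm covers kernel threads and tasks whose argument pages cannot be copied.
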
-static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg, - struct netlink_ext_ack *extack) +static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset) +{ + struct hwrm_func_cfg_input *req; + int rc; + + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT); + if (remote_reset) + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS); + + return hwrm_req_send(bp, req); +} + +static char *bnxt_health_severity_str(enum bnxt_health_severity severity) +{ + switch (severity) { + case SEVERITY_NORMAL: return "normal"; + case SEVERITY_WARNING: return "warning"; + case SEVERITY_RECOVERABLE: return "recoverable"; + case SEVERITY_FATAL: return "fatal"; + default: return "unknown"; + } +} + +static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy) +{ + switch (remedy) { + case REMEDY_DEVLINK_RECOVER: return "devlink recover"; + case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle"; + case REMEDY_POWER_CYCLE_HOST: return "host power cycle"; + case REMEDY_FW_UPDATE: return "update firmware"; + case REMEDY_HW_REPLACE: return "replace hardware"; + default: return "unknown"; + } +} + +static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - u32 val; + struct bnxt_fw_health *h = bp->fw_health; + u32 fw_status, fw_resets; int rc; if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) - return 0; + return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); - val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (!h->status_reliable) + return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); - if (BNXT_FW_IS_BOOTING(val)) { - rc = devlink_fmsg_string_pair_put(fmsg, "Description", - "Not yet completed initialization"); + mutex_lock(&h->lock); + fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (BNXT_FW_IS_BOOTING(fw_status)) { + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); if (rc) - return rc; - } else if (BNXT_FW_IS_ERR(val)) { - rc = devlink_fmsg_string_pair_put(fmsg, "Description", - "Encountered fatal error and cannot recover"); + goto unlock; + } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) { + if (!h->severity) { + h->severity = SEVERITY_FATAL; + h->remedy = REMEDY_POWER_CYCLE_DEVICE; + h->diagnoses++; + devlink_health_report(h->fw_reporter, + "FW error diagnosed", h); + } + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error"); if (rc) - return rc; + goto unlock; + rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); + if (rc) + goto unlock; + } else { + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); + if (rc) + goto unlock; } - if (val >> 16) { - rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16); + rc = devlink_fmsg_string_pair_put(fmsg, "Severity", + bnxt_health_severity_str(h->severity)); + if (rc) + goto unlock; + + if (h->severity) { + rc = devlink_fmsg_string_pair_put(fmsg, "Remedy", + bnxt_health_remedy_str(h->remedy)); if (rc) - return rc; + goto unlock; + if (h->remedy == REMEDY_DEVLINK_RECOVER) { + rc = devlink_fmsg_string_pair_put(fmsg, "Impact", + "traffic+ntuple_cfg"); + if (rc) + goto unlock; + } } - val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - rc = 
devlink_fmsg_u32_pair_put(fmsg, "Reset count", val); - if (rc) +unlock: + mutex_unlock(&h->lock); + if (rc || !h->resets_reliable) return rc; - return 0; + fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); + if (rc) + return rc; + return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); } -static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { - .name = "fw", - .diagnose = bnxt_fw_reporter_diagnose, -}; - -static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter, - void *priv_ctx, - struct netlink_ext_ack *extack) +static int bnxt_fw_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); + u32 dump_len; + void *data; + int rc; - if (!priv_ctx) + /* TODO: no firmware dump support in devlink_health_report() context */ + if (priv_ctx) return -EOPNOTSUPP; - bnxt_fw_reset(bp); - return -EINPROGRESS; -} + dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE); + if (!dump_len) + return -EIO; -static const -struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = { - .name = "fw_reset", - .recover = bnxt_fw_reset_recover, -}; + data = vmalloc(dump_len); + if (!data) + return -ENOMEM; -static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, - void *priv_ctx, - struct netlink_ext_ack *extack) + rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len); + if (!rc) { + rc = devlink_fmsg_pair_nest_start(fmsg, "core"); + if (rc) + goto exit; + rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); + if (rc) + goto exit; + rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); + if (rc) + goto exit; + rc = devlink_fmsg_pair_nest_end(fmsg); + } + +exit: + vfree(data); + return rc; +} + +static int bnxt_fw_recover(struct devlink_health_reporter *reporter, + void *priv_ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; - unsigned long event; - if (!priv_ctx) - return -EOPNOTSUPP; + if (bp->fw_health->severity == SEVERITY_FATAL) + return -ENODEV; - bp->fw_health->fatal = true; - event = fw_reporter_ctx->sp_event; - if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT) - bnxt_fw_reset(bp); - else if (event == BNXT_FW_EXCEPTION_SP_EVENT) - bnxt_fw_exception(bp); + set_bit(BNXT_STATE_RECOVER, &bp->state); + __bnxt_fw_recover(bp); return -EINPROGRESS; } -static const -struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { - .name = "fw_fatal", - .recover = bnxt_fw_fatal_recover, +static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { + .name = "fw", + .diagnose = bnxt_fw_diagnose, + .dump = bnxt_fw_dump, + .recover = bnxt_fw_recover, }; void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health || health->fw_reporter) return; - if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) - goto err_recovery; - - health->fw_reset_reporter = - 
devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_reset_reporter_ops, + health->fw_reporter = + devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, 0, bp); - if (IS_ERR(health->fw_reset_reporter)) { - netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", - PTR_ERR(health->fw_reset_reporter)); - health->fw_reset_reporter = NULL; - bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; - } - -err_recovery: - if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) - return; - - if (!health->fw_reporter) { - health->fw_reporter = - devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_reporter_ops, - 0, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; - return; - } - } - - if (health->fw_fatal_reporter) - return; - - health->fw_fatal_reporter = - devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_fatal_reporter_ops, - 0, bp); - if (IS_ERR(health->fw_fatal_reporter)) { - netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", - PTR_ERR(health->fw_fatal_reporter)); - health->fw_fatal_reporter = NULL; + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } } @@ -188,15 +262,9 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health) return; - if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && - health->fw_reset_reporter) { - devlink_health_reporter_destroy(health->fw_reset_reporter); - health->fw_reset_reporter = NULL; - } - if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) return; @@ -204,82 +272,319 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) devlink_health_reporter_destroy(health->fw_reporter); health->fw_reporter = NULL; } - - if (health->fw_fatal_reporter) { - devlink_health_reporter_destroy(health->fw_fatal_reporter); - health->fw_fatal_reporter = NULL; - } } -void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) +void bnxt_devlink_health_fw_report(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; - struct bnxt_fw_reporter_ctx fw_reporter_ctx; - - fw_reporter_ctx.sp_event = event; - switch (event) { - case BNXT_FW_RESET_NOTIFY_SP_EVENT: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { - if (!fw_health->fw_fatal_reporter) - return; - - devlink_health_report(fw_health->fw_fatal_reporter, - "FW fatal async event received", - &fw_reporter_ctx); - return; - } - if (!fw_health->fw_reset_reporter) - return; + int rc; - devlink_health_report(fw_health->fw_reset_reporter, - "FW non-fatal reset event received", - &fw_reporter_ctx); + if (!fw_health) return; - case BNXT_FW_EXCEPTION_SP_EVENT: - if (!fw_health->fw_fatal_reporter) - return; - - devlink_health_report(fw_health->fw_fatal_reporter, - "FW fatal error reported", - &fw_reporter_ctx); + if (!fw_health->fw_reporter) { + __bnxt_fw_recover(bp); return; } + + mutex_lock(&fw_health->lock); + fw_health->severity = SEVERITY_RECOVERABLE; + fw_health->remedy = REMEDY_DEVLINK_RECOVER; + mutex_unlock(&fw_health->lock); + rc = devlink_health_report(fw_health->fw_reporter, "FW error reported", + fw_health); + if (rc == -ECANCELED) + __bnxt_fw_recover(bp); } -void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) 
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy) { - struct bnxt_fw_health *health = bp->fw_health; + struct bnxt_fw_health *fw_health = bp->fw_health; u8 state; - if (healthy) + mutex_lock(&fw_health->lock); + if (healthy) { + fw_health->severity = SEVERITY_NORMAL; state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; - else + } else { + fw_health->severity = SEVERITY_FATAL; + fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE; state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; - - if (health->fatal) - devlink_health_reporter_state_update(health->fw_fatal_reporter, - state); - else - devlink_health_reporter_state_update(health->fw_reset_reporter, - state); - - health->fatal = false; + } + mutex_unlock(&fw_health->lock); + devlink_health_reporter_state_update(fw_health->fw_reporter, state); } -void bnxt_dl_health_recovery_done(struct bnxt *bp) +void bnxt_dl_health_fw_recovery_done(struct bnxt *bp) { - struct bnxt_fw_health *hlth = bp->fw_health; + struct bnxt_dl *dl = devlink_priv(bp->dl); - if (hlth->fatal) - devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter); - else - devlink_health_reporter_recovery_done(hlth->fw_reset_reporter); + devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter); + bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset); } static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack); +static void +bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, + struct hwrm_fw_livepatch_output *resp) +{ + int err = ((struct hwrm_err_output *)resp)->cmd_err; + + switch (err) { + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE: + netdev_err(bp->dev, "Illegal live patch opcode"); + NL_SET_ERR_MSG_MOD(extack, "Invalid opcode"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED: + NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED: + NL_SET_ERR_MSG_MOD(extack, "Live patch not found"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED: + NL_SET_ERR_MSG_MOD(extack, + "Live patch deactivation failed. 
Firmware not patched."); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL: + NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER: + NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE: + NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED: + NL_SET_ERR_MSG_MOD(extack, "Live patch already applied"); + break; + default: + netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err); + NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch"); + break; + } +} + +static int +bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) +{ + struct hwrm_fw_livepatch_query_output *query_resp; + struct hwrm_fw_livepatch_query_input *query_req; + struct hwrm_fw_livepatch_output *patch_resp; + struct hwrm_fw_livepatch_input *patch_req; + u32 installed = 0; + u16 flags; + u8 target; + int rc; + + if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) { + NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY); + if (rc) + return rc; + query_resp = hwrm_req_hold(bp, query_req); + + rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH); + if (rc) { + hwrm_req_drop(bp, query_req); + return rc; + } + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; + patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; + patch_resp = hwrm_req_hold(bp, patch_req); + + for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) { + query_req->fw_target = target; + rc = hwrm_req_send(bp, query_req); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query packages"); + break; + } + + flags = le16_to_cpu(query_resp->status_flags); + if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) + continue; + if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) && + !strncmp(query_resp->active_ver, query_resp->install_ver, + sizeof(query_resp->active_ver))) + continue; + + patch_req->fw_target = target; + rc = hwrm_req_send(bp, patch_req); + if (rc) { + bnxt_dl_livepatch_report_err(bp, extack, patch_resp); + break; + } + installed++; + } + + if (!rc && !installed) { + NL_SET_ERR_MSG_MOD(extack, "No live patches found"); + rc = -ENOENT; + } + hwrm_req_drop(bp, query_req); + hwrm_req_drop(bp, patch_req); + return rc; +} + +static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc = 0; + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + if (BNXT_PF(bp) && bp->pf.active_vfs) { + NL_SET_ERR_MSG_MOD(extack, + "reload is unsupported when VFs are allocated\n"); + return -EOPNOTSUPP; + } + rtnl_lock(); + if (bp->dev->reg_state == NETREG_UNREGISTERED) { + rtnl_unlock(); + return -ENODEV; + } + bnxt_ulp_stop(bp); + if (netif_running(bp->dev)) { + rc = bnxt_close_nic(bp, true, true); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to close"); + dev_close(bp->dev); + rtnl_unlock(); + break; + } + } + bnxt_vf_reps_free(bp); + rc = bnxt_hwrm_func_drv_unrgtr(bp); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to deregister"); + if (netif_running(bp->dev)) + dev_close(bp->dev); + rtnl_unlock(); + break; + } + bnxt_cancel_reservations(bp, false); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; + break; + } + case 
DEVLINK_RELOAD_ACTION_FW_ACTIVATE: { + if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) + return bnxt_dl_livepatch_activate(bp, extack); + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) { + NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot"); + return -EOPNOTSUPP; + } + if (!bnxt_hwrm_reset_permitted(bp)) { + NL_SET_ERR_MSG_MOD(extack, + "Reset denied by firmware, it may be inhibited by remote driver"); + return -EPERM; + } + rtnl_lock(); + if (bp->dev->reg_state == NETREG_UNREGISTERED) { + rtnl_unlock(); + return -ENODEV; + } + if (netif_running(bp->dev)) + set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + rc = bnxt_hwrm_firmware_reset(bp->dev, + FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, + FW_RESET_REQ_FLAGS_RESET_GRACEFUL | + FW_RESET_REQ_FLAGS_FW_ACTIVATION); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware"); + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + rtnl_unlock(); + } + break; + } + default: + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action, + enum devlink_reload_limit limit, u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc = 0; + + *actions_performed = 0; + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + bnxt_fw_init_one(bp); + bnxt_vf_reps_alloc(bp); + if (netif_running(bp->dev)) + rc = bnxt_open_nic(bp, true, true); + bnxt_ulp_start(bp, rc); + if (!rc) { + bnxt_reenable_sriov(bp); + bnxt_ptp_reapply_pps(bp); + } + break; + } + case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: { + unsigned long start = jiffies; + unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10; + + if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) + break; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10; + if (!netif_running(bp->dev)) + NL_SET_ERR_MSG_MOD(extack, + "Device is closed, not waiting for reset notice that will never come"); + rtnl_unlock(); + while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) { + if (time_after(jiffies, timeout)) { + NL_SET_ERR_MSG_MOD(extack, "Activation incomplete"); + rc = -ETIMEDOUT; + break; + } + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + NL_SET_ERR_MSG_MOD(extack, "Activation aborted"); + rc = -ENODEV; + break; + } + msleep(50); + } + rtnl_lock(); + if (!rc) + *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + break; + } + default: + return -EOPNOTSUPP; + } + + if (!rc) { + bnxt_print_device_info(bp); + if (netif_running(bp->dev)) { + mutex_lock(&bp->link_lock); + bnxt_report_link(bp); + mutex_unlock(&bp->link_lock); + } + *actions_performed |= BIT(action); + } else if (netif_running(bp->dev)) { + dev_close(bp->dev); + } + rtnl_unlock(); + return rc; +} + static const struct devlink_ops bnxt_dl_ops = { #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, @@ -287,6 +592,11 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ .info_get = bnxt_dl_info_get, .flash_update = bnxt_dl_flash_update, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET), + .reload_down = bnxt_dl_reload_down, + .reload_up = bnxt_dl_reload_up, }; static const struct devlink_ops bnxt_vf_dl_ops; @@ -430,6 +740,57 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req, 
return 0; } +#define BNXT_FW_SRT_PATCH "fw.srt.patch" +#define BNXT_FW_CRT_PATCH "fw.crt.patch" + +static int bnxt_dl_livepatch_info_put(struct bnxt *bp, + struct devlink_info_req *req, + const char *key) +{ + struct hwrm_fw_livepatch_query_input *query; + struct hwrm_fw_livepatch_query_output *resp; + u16 flags; + int rc; + + if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) + return 0; + + rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY); + if (rc) + return rc; + + if (!strcmp(key, BNXT_FW_SRT_PATCH)) + query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW; + else if (!strcmp(key, BNXT_FW_CRT_PATCH)) + query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW; + else + goto exit; + + resp = hwrm_req_hold(bp, query); + rc = hwrm_req_send(bp, query); + if (rc) + goto exit; + + flags = le16_to_cpu(resp->status_flags); + if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) { + resp->active_ver[sizeof(resp->active_ver) - 1] = '\0'; + rc = devlink_info_version_running_put(req, key, resp->active_ver); + if (rc) + goto exit; + } + + if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) { + resp->install_ver[sizeof(resp->install_ver) - 1] = '\0'; + rc = devlink_info_version_stored_put(req, key, resp->install_ver); + if (rc) + goto exit; + } + +exit: + hwrm_req_drop(bp, query); + return rc; +} + #define HWRM_FW_VER_STR_LEN 16 static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, @@ -554,8 +915,13 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info); if (rc || - !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) + !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) { + if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf))) + return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); return 0; + } buf[0] = 0; strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN); @@ -583,8 +949,16 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor, nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch); - return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, - DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + return rc; + + rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); + if (rc) + return rc; + return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH); + } static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, @@ -712,6 +1086,32 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, return 0; } +static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return -EOPNOTSUPP; + + ctx->val.vbool = bnxt_dl_get_remote_reset(dl); + return 0; +} + +static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; + + rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool); + if (rc) + return rc; + + bnxt_dl_set_remote_reset(dl, ctx->val.vbool); + return rc; +} + static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), @@ -734,53 +1134,49 @@ static 
const struct devlink_param bnxt_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, NULL), -}; - -static const struct devlink_param bnxt_dl_port_params[] = { + /* keep REMOTE_DEV_RESET last, it is excluded based on caps */ + DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + bnxt_remote_dev_reset_get, + bnxt_remote_dev_reset_set, NULL), }; static int bnxt_dl_params_register(struct bnxt *bp) { + int num_params = ARRAY_SIZE(bnxt_dl_params); int rc; if (bp->hwrm_spec_code < 0x10600) return 0; - rc = devlink_params_register(bp->dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - if (rc) { + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + num_params--; + + rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params); + if (rc) netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n", rc); - return rc; - } - rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed\n"); - devlink_params_unregister(bp->dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - return rc; - } - devlink_params_publish(bp->dl); - - return 0; + return rc; } static void bnxt_dl_params_unregister(struct bnxt *bp) { + int num_params = ARRAY_SIZE(bnxt_dl_params); + if (bp->hwrm_spec_code < 0x10600) return; - devlink_params_unregister(bp->dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + num_params--; + + devlink_params_unregister(bp->dl, bnxt_dl_params, num_params); } int bnxt_dl_register(struct bnxt *bp) { const struct devlink_ops *devlink_ops; struct devlink_port_attrs attrs = {}; + struct bnxt_dl *bp_dl; struct devlink *dl; int rc; @@ -795,21 +1191,18 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOMEM; } - bnxt_link_bp_to_dl(bp, dl); + bp->dl = dl; + bp_dl = devlink_priv(dl); + bp_dl->bp = bp; + bnxt_dl_set_remote_reset(dl, true); /* Add switchdev eswitch mode setting, if SRIOV supported */ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl); - if (rc) { - netdev_warn(bp->dev, "devlink_register failed. 
rc=%d\n", rc); - goto err_dl_free; - } - if (!BNXT_PF(bp)) - return 0; + goto out; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = bp->pf.port_id; @@ -819,21 +1212,20 @@ int bnxt_dl_register(struct bnxt *bp) rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed\n"); - goto err_dl_unreg; + goto err_dl_free; } rc = bnxt_dl_params_register(bp); if (rc) goto err_dl_port_unreg; +out: + devlink_register(dl); return 0; err_dl_port_unreg: devlink_port_unregister(&bp->dl_port); -err_dl_unreg: - devlink_unregister(dl); err_dl_free: - bnxt_link_bp_to_dl(bp, NULL); devlink_free(dl); return rc; } @@ -842,13 +1234,10 @@ void bnxt_dl_unregister(struct bnxt *bp) { struct devlink *dl = bp->dl; - if (!dl) - return; - + devlink_unregister(dl); if (BNXT_PF(bp)) { bnxt_dl_params_unregister(bp); devlink_port_unregister(&bp->dl_port); } - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index d889f240da2b2c..a715458abc30e7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -13,6 +13,7 @@ /* Struct to hold housekeeping info needed by devlink interface */ struct bnxt_dl { struct bnxt *bp; /* back ptr to the controlling dev */ + bool remote_reset; }; static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) @@ -20,17 +21,21 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) return ((struct bnxt_dl *)devlink_priv(dl))->bp; } -/* To clear devlink pointer from bp, pass NULL dl */ -static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) +static inline void bnxt_dl_remote_reload(struct bnxt *bp) { - bp->dl = dl; + devlink_remote_reload_actions_performed(bp->dl, 0, + BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); +} - /* add a back pointer in dl to bp */ - if (dl) { - struct bnxt_dl *bp_dl = devlink_priv(dl); +static inline bool bnxt_dl_get_remote_reset(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset; +} - bp_dl->bp = bp; - } +static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value) +{ + ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value; } #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 @@ -66,9 +71,9 @@ enum bnxt_dl_version_type { BNXT_VERSION_STORED, }; -void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); -void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); -void bnxt_dl_health_recovery_done(struct bnxt *bp); +void bnxt_devlink_health_fw_report(struct bnxt *bp); +void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_health_fw_recovery_done(struct bnxt *bp); void bnxt_dl_fw_reporters_create(struct bnxt *bp); void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 7260910e75fb2a..8188d55722e4b3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -427,6 +427,8 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, + BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks), + BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks), }; 
static const struct { @@ -909,7 +911,7 @@ static int bnxt_set_channels(struct net_device *dev, if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && - (dev->priv_flags & IFF_RXFH_CONFIGURED)) { + netif_is_rxfh_configured(dev)) { netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); return -EINVAL; } @@ -2180,13 +2182,18 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, return rc; } -static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, - u8 self_reset, u8 flags) +int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags) { struct bnxt *bp = netdev_priv(dev); struct hwrm_fw_reset_input *req; int rc; + if (!bnxt_hwrm_reset_permitted(bp)) { + netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver"); + return -EPERM; + } + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); if (rc) return rc; @@ -2825,39 +2832,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) return retval; } -static void bnxt_get_pkgver(struct net_device *dev) +int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size) { struct bnxt *bp = netdev_priv(dev); u16 index = 0; char *pkgver; u32 pkglen; u8 *pkgbuf; - int len; + int rc; - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, NULL, &pkglen) != 0) - return; + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, NULL, &pkglen); + if (rc) + return rc; pkgbuf = kzalloc(pkglen, GFP_KERNEL); if (!pkgbuf) { dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", pkglen); - return; + return -ENOMEM; } - if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) + rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf); + if (rc) goto err; pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, pkglen); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) + strscpy(ver, pkgver, size); + else + rc = -ENOENT; + +err: + kfree(pkgbuf); + + return rc; +} + +static void bnxt_get_pkgver(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + char buf[FW_VER_STR_LEN]; + int len; + + if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { len = strlen(bp->fw_ver_str); snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, - "/pkg %s", pkgver); + "/pkg %s", buf); } -err: - kfree(pkgbuf); } static int bnxt_get_eeprom(struct net_device *dev, @@ -3609,337 +3633,6 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return 0; } -static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, - struct bnxt_hwrm_dbg_dma_info *info) -{ - struct hwrm_dbg_cmn_input *cmn_req = msg; - __le16 *seq_ptr = msg + info->seq_off; - struct hwrm_dbg_cmn_output *cmn_resp; - u16 seq = 0, len, segs_off; - dma_addr_t dma_handle; - void *dma_buf, *resp; - int rc, off = 0; - - dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); - if (!dma_buf) { - hwrm_req_drop(bp, msg); - return -ENOMEM; - } - - hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); - cmn_resp = hwrm_req_hold(bp, msg); - resp = cmn_resp; - - segs_off = offsetof(struct hwrm_dbg_coredump_list_output, - total_segments); - cmn_req->host_dest_addr = cpu_to_le64(dma_handle); - cmn_req->host_buf_len = cpu_to_le32(info->dma_len); - while (1) { - *seq_ptr = cpu_to_le16(seq); - rc = hwrm_req_send(bp, msg); - if (rc) - break; - - 
len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); - if (!seq && - cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { - info->segs = le16_to_cpu(*((__le16 *)(resp + - segs_off))); - if (!info->segs) { - rc = -EIO; - break; - } - - info->dest_buf_size = info->segs * - sizeof(struct coredump_segment_record); - info->dest_buf = kmalloc(info->dest_buf_size, - GFP_KERNEL); - if (!info->dest_buf) { - rc = -ENOMEM; - break; - } - } - - if (info->dest_buf) { - if ((info->seg_start + off + len) <= - BNXT_COREDUMP_BUF_LEN(info->buf_len)) { - memcpy(info->dest_buf + off, dma_buf, len); - } else { - rc = -ENOBUFS; - break; - } - } - - if (cmn_req->req_type == - cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) - info->dest_buf_size += len; - - if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) - break; - - seq++; - off += len; - } - hwrm_req_drop(bp, msg); - return rc; -} - -static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, - struct bnxt_coredump *coredump) -{ - struct bnxt_hwrm_dbg_dma_info info = {NULL}; - struct hwrm_dbg_coredump_list_input *req; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); - if (rc) - return rc; - - info.dma_len = COREDUMP_LIST_BUF_LEN; - info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); - info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, - data_len); - - rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); - if (!rc) { - coredump->data = info.dest_buf; - coredump->data_size = info.dest_buf_size; - coredump->total_segs = info.segs; - } - return rc; -} - -static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, - u16 segment_id) -{ - struct hwrm_dbg_coredump_initiate_input *req; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); - if (rc) - return rc; - - hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); - req->component_id = cpu_to_le16(component_id); - req->segment_id = cpu_to_le16(segment_id); - - return hwrm_req_send(bp, req); -} - -static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, - u16 segment_id, u32 *seg_len, - void *buf, u32 buf_len, u32 offset) -{ - struct hwrm_dbg_coredump_retrieve_input *req; - struct bnxt_hwrm_dbg_dma_info info = {NULL}; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); - if (rc) - return rc; - - req->component_id = cpu_to_le16(component_id); - req->segment_id = cpu_to_le16(segment_id); - - info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; - info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, - seq_no); - info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, - data_len); - if (buf) { - info.dest_buf = buf + offset; - info.buf_len = buf_len; - info.seg_start = offset; - } - - rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); - if (!rc) - *seg_len = info.dest_buf_size; - - return rc; -} - -static void -bnxt_fill_coredump_seg_hdr(struct bnxt *bp, - struct bnxt_coredump_segment_hdr *seg_hdr, - struct coredump_segment_record *seg_rec, u32 seg_len, - int status, u32 duration, u32 instance) -{ - memset(seg_hdr, 0, sizeof(*seg_hdr)); - memcpy(seg_hdr->signature, "sEgM", 4); - if (seg_rec) { - seg_hdr->component_id = (__force __le32)seg_rec->component_id; - seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; - seg_hdr->low_version = seg_rec->version_low; - seg_hdr->high_version = seg_rec->version_hi; - } else { - /* For hwrm_ver_get response Component id = 2 - * and Segment id = 0 - */ - seg_hdr->component_id = cpu_to_le32(2); - seg_hdr->segment_id = 0; - } - seg_hdr->function_id = 
cpu_to_le16(bp->pdev->devfn); - seg_hdr->length = cpu_to_le32(seg_len); - seg_hdr->status = cpu_to_le32(status); - seg_hdr->duration = cpu_to_le32(duration); - seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); - seg_hdr->instance = cpu_to_le32(instance); -} - -static void -bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, - time64_t start, s16 start_utc, u16 total_segs, - int status) -{ - time64_t end = ktime_get_real_seconds(); - u32 os_ver_major = 0, os_ver_minor = 0; - struct tm tm; - - time64_to_tm(start, 0, &tm); - memset(record, 0, sizeof(*record)); - memcpy(record->signature, "cOrE", 4); - record->flags = 0; - record->low_version = 0; - record->high_version = 1; - record->asic_state = 0; - strlcpy(record->system_name, utsname()->nodename, - sizeof(record->system_name)); - record->year = cpu_to_le16(tm.tm_year + 1900); - record->month = cpu_to_le16(tm.tm_mon + 1); - record->day = cpu_to_le16(tm.tm_mday); - record->hour = cpu_to_le16(tm.tm_hour); - record->minute = cpu_to_le16(tm.tm_min); - record->second = cpu_to_le16(tm.tm_sec); - record->utc_bias = cpu_to_le16(start_utc); - strcpy(record->commandline, "ethtool -w"); - record->total_segments = cpu_to_le32(total_segs); - - sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); - record->os_ver_major = cpu_to_le32(os_ver_major); - record->os_ver_minor = cpu_to_le32(os_ver_minor); - - strlcpy(record->os_name, utsname()->sysname, 32); - time64_to_tm(end, 0, &tm); - record->end_year = cpu_to_le16(tm.tm_year + 1900); - record->end_month = cpu_to_le16(tm.tm_mon + 1); - record->end_day = cpu_to_le16(tm.tm_mday); - record->end_hour = cpu_to_le16(tm.tm_hour); - record->end_minute = cpu_to_le16(tm.tm_min); - record->end_second = cpu_to_le16(tm.tm_sec); - record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); - record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | - bp->ver_resp.chip_rev << 8 | - bp->ver_resp.chip_metal); - record->asic_id2 = 0; - record->coredump_status = cpu_to_le32(status); - record->ioctl_low_version = 0; - record->ioctl_high_version = 0; -} - -static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) -{ - u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); - u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; - struct coredump_segment_record *seg_record = NULL; - struct bnxt_coredump_segment_hdr seg_hdr; - struct bnxt_coredump coredump = {NULL}; - time64_t start_time; - u16 start_utc; - int rc = 0, i; - - if (buf) - buf_len = *dump_len; - - start_time = ktime_get_real_seconds(); - start_utc = sys_tz.tz_minuteswest * 60; - seg_hdr_len = sizeof(seg_hdr); - - /* First segment should be hwrm_ver_get response */ - *dump_len = seg_hdr_len + ver_get_resp_len; - if (buf) { - bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, - 0, 0, 0); - memcpy(buf + offset, &seg_hdr, seg_hdr_len); - offset += seg_hdr_len; - memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); - offset += ver_get_resp_len; - } - - rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); - if (rc) { - netdev_err(bp->dev, "Failed to get coredump segment list\n"); - goto err; - } - - *dump_len += seg_hdr_len * coredump.total_segs; - - seg_record = (struct coredump_segment_record *)coredump.data; - seg_record_len = sizeof(*seg_record); - - for (i = 0; i < coredump.total_segs; i++) { - u16 comp_id = le16_to_cpu(seg_record->component_id); - u16 seg_id = le16_to_cpu(seg_record->segment_id); - u32 duration = 0, seg_len = 0; - unsigned long start, end; - - if (buf && ((offset + 
seg_hdr_len) > - BNXT_COREDUMP_BUF_LEN(buf_len))) { - rc = -ENOBUFS; - goto err; - } - - start = jiffies; - - rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); - if (rc) { - netdev_err(bp->dev, - "Failed to initiate coredump for seg = %d\n", - seg_record->segment_id); - goto next_seg; - } - - /* Write segment data into the buffer */ - rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, buf_len, - offset + seg_hdr_len); - if (rc && rc == -ENOBUFS) - goto err; - else if (rc) - netdev_err(bp->dev, - "Failed to retrieve coredump for seg = %d\n", - seg_record->segment_id); - -next_seg: - end = jiffies; - duration = jiffies_to_msecs(end - start); - bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, - rc, duration, 0); - - if (buf) { - /* Write segment header into the buffer */ - memcpy(buf + offset, &seg_hdr, seg_hdr_len); - offset += seg_hdr_len + seg_len; - } - - *dump_len += seg_len; - seg_record = - (struct coredump_segment_record *)((u8 *)seg_record + - seg_record_len); - } - -err: - if (buf) - bnxt_fill_coredump_record(bp, buf + offset, start_time, - start_utc, coredump.total_segs + 1, - rc); - kfree(coredump.data); - *dump_len += sizeof(struct bnxt_coredump_record); - if (rc == -ENOBUFS) - netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); - return rc; -} - static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) { struct bnxt *bp = netdev_priv(dev); @@ -3971,10 +3664,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) bp->ver_resp.hwrm_fw_rsvd_8b; dump->flag = bp->dump_flag; - if (bp->dump_flag == BNXT_DUMP_CRASH) - dump->len = BNXT_CRASH_DUMP_LEN; - else - bnxt_get_coredump(bp, NULL, &dump->len); + dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); return 0; } @@ -3989,15 +3679,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, memset(buf, 0, dump->len); dump->flag = bp->dump_flag; - if (dump->flag == BNXT_DUMP_CRASH) { -#ifdef CONFIG_TEE_BNXT_FW - return tee_bnxt_copy_coredump(buf, 0, dump->len); -#endif - } else { - return bnxt_get_coredump(bp, buf, &dump->len); - } - - return 0; + return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); } static int bnxt_get_ts_info(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 0a57cb6a4a4bf0..6aa44840f13a5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -22,49 +22,6 @@ struct bnxt_led_cfg { u8 rsvd; }; -#define COREDUMP_LIST_BUF_LEN 2048 -#define COREDUMP_RETRIEVE_BUF_LEN 4096 - -struct bnxt_coredump { - void *data; - int data_size; - u16 total_segs; -}; - -#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) - -struct bnxt_hwrm_dbg_dma_info { - void *dest_buf; - int dest_buf_size; - u16 dma_len; - u16 seq_off; - u16 data_len_off; - u16 segs; - u32 seg_start; - u32 buf_len; -}; - -struct hwrm_dbg_cmn_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_dest_addr; - __le32 host_buf_len; -}; - -struct hwrm_dbg_cmn_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 flags; - #define HWRM_DBG_CMN_FLAGS_MORE 1 -}; - -#define BNXT_CRASH_DUMP_LEN (8 << 20) - #define BNXT_LED_DFLT_ENA \ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ @@ -94,8 +51,11 @@ u32 
bnxt_fw_to_ethtool_speed(u16); u16 bnxt_get_fw_auto_link_speeds(u32); int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info); +int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags); int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, u32 install_type); +int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size); void bnxt_ethtool_init(struct bnxt *bp); void bnxt_ethtool_free(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 94d07a9f70343b..ea86c54247c71a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -532,8 +532,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 52 -#define HWRM_VERSION_STR "1.10.2.52" +#define HWRM_VERSION_RSVD 63 +#define HWRM_VERSION_STR "1.10.2.63" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1587,6 +1587,8 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL u8 max_schqs; u8 mpc_chnls_cap; #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL @@ -1956,6 +1958,18 @@ struct hwrm_func_cfg_output { u8 valid; }; +/* hwrm_func_cfg_cmd_err (size:64b/8B) */ +struct hwrm_func_cfg_cmd_err { + u8 code; + #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL + #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT + u8 unused_0[7]; +}; + /* hwrm_func_qstats_input (size:192b/24B) */ struct hwrm_func_qstats_input { __le16 req_type; @@ -3601,7 +3615,15 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL - #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -4040,7 +4062,7 @@ struct tx_port_stats_ext { __le64 pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:3648b/456B) */ +/* rx_port_stats_ext (size:3776b/472B) */ struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -4099,6 +4121,8 @@ struct rx_port_stats_ext { __le64 rx_discard_packets_cos5; __le64 rx_discard_packets_cos6; 
__le64 rx_discard_packets_cos7; + __le64 rx_fec_corrected_blocks; + __le64 rx_fec_uncorrectable_blocks; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -4372,7 +4396,10 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL - u8 unused_0[3]; + __le16 flags2; + #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL + u8 unused_0[1]; u8 valid; }; @@ -6076,6 +6103,11 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -6206,7 +6238,15 @@ struct hwrm_vnic_rss_cfg_input { __le64 ring_grp_tbl_addr; __le64 hash_key_tbl_addr; __le16 rss_ctx_idx; - u8 unused_1[6]; + u8 flags; + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + u8 rss_hash_function; + #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL + #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL + #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL + #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM + u8 unused_1[4]; }; /* hwrm_vnic_rss_cfg_output (size:128b/16B) */ @@ -6331,7 +6371,24 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ - u8 unused_0; + u8 cmpl_coal_cnt; + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX __le16 flags; #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL __le64 page_tbl_addr; @@ -7099,6 +7156,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define 
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -7234,6 +7292,7 @@ struct hwrm_cfa_ntuple_filter_cfg_input { __le32 flags; #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -7834,11 +7893,11 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL u8 unused_0[3]; u8 valid; }; -/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_query_input { __le16 req_type; __le16 cmpl_ring; @@ -8414,6 +8473,86 @@ struct hwrm_fw_get_structured_data_cmd_err { u8 unused_0[7]; }; +/* hwrm_fw_livepatch_query_input (size:192b/24B) */ +struct hwrm_fw_livepatch_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 fw_target; + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW + u8 unused_0[7]; +}; + +/* hwrm_fw_livepatch_query_output (size:640b/80B) */ +struct hwrm_fw_livepatch_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + char install_ver[32]; + char active_ver[32]; + __le16 status_flags; + #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL + #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_fw_livepatch_input (size:256b/32B) */ +struct hwrm_fw_livepatch_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 opcode; + #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL + #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL + #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE + u8 fw_target; + #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL + #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL + #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW + u8 loadtype; + #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL + #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL + #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT + u8 flags; + __le32 patch_len; + __le64 host_addr; +}; + +/* hwrm_fw_livepatch_output (size:128b/16B) */ +struct hwrm_fw_livepatch_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */ +struct hwrm_fw_livepatch_cmd_err { + u8 code; + #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL + #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL + #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL + #define 
FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED + u8 unused_0[7]; +}; + /* hwrm_exec_fwd_resp_input (size:1024b/128B) */ struct hwrm_exec_fwd_resp_input { __le16 req_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f0aa480799ca4b..8388be119f9a07 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -11,9 +11,7 @@ #include #include #include -#include #include -#include #include #include #include "bnxt_hsi.h" diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index fa5f05708e6df5..7c528e1f8713ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -10,6 +10,9 @@ #ifndef BNXT_PTP_H #define BNXT_PTP_H +#include +#include + #define BNXT_PTP_GRC_WIN 6 #define BNXT_PTP_GRC_WIN_BASE 0x6000 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 70d8ca3039dcb9..1d177fed44a6c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) } } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) { struct hwrm_func_vf_cfg_input *req; int rc = 0; @@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) /* overwrite netdev dev_addr with admin VF MAC */ if (is_valid_ether_addr(bp->vf.mac_addr)) - memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, bp->vf.mac_addr); update_vf_mac_exit: hwrm_req_drop(bp, req); if (inform_pf) @@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) { } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) { return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 995535e4c11be0..9a4bacba477b14 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); -int bnxt_approve_mac(struct bnxt *, u8 *, bool); +int bnxt_approve_mac(struct bnxt *, const u8 *, bool); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 6b4d2556a6df68..54d59f681b8632 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -11,8 +11,7 @@ #define BNXT_ULP_H #define BNXT_ROCE_ULP 0 -#define BNXT_OTHER_ULP 1 -#define BNXT_MAX_ULP 2 +#define BNXT_MAX_ULP 1 #define BNXT_MIN_ROCE_CP_RINGS 2 #define BNXT_MIN_ROCE_STAT_CTXS 1 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 9401936b74fa29..8eb28e08858206 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->features |= pf_dev->features; bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, dev->perm_addr); - ether_addr_copy(dev->dev_addr, 
dev->perm_addr); + eth_hw_addr_set(dev, dev->perm_addr); /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */ if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) dev->max_mtu = max_mtu; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 23c7595d2a1d3f..226f4403cfed3c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev, return 0; } +static void bcmgenet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv; + u32 umac_cmd; + + priv = netdev_priv(dev); + + epause->autoneg = priv->autoneg_pause; + + if (netif_carrier_ok(dev)) { + /* report active state when link is up */ + umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD); + epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); + epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); + } else { + /* otherwise report stored settings */ + epause->tx_pause = priv->tx_pause; + epause->rx_pause = priv->rx_pause; + } +} + +static int bcmgenet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev) + return -ENODEV; + + if (!phy_validate_pause(dev->phydev, epause)) + return -EINVAL; + + priv->autoneg_pause = !!epause->autoneg; + priv->tx_pause = !!epause->tx_pause; + priv->rx_pause = !!epause->rx_pause; + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + return 0; +} + /* standard ethtool support functions. */ enum bcmgenet_stat_type { BCMGENET_STAT_NETDEV = -1, @@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = bcmgenet_get_rxnfc, .set_rxnfc = bcmgenet_set_rxnfc, + .get_pauseparam = bcmgenet_get_pauseparam, + .set_pauseparam = bcmgenet_set_pauseparam, }; /* Power down the unimac, based on mode. 
*/ @@ -1609,7 +1653,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, /* Power down LED */ if (priv->hw_params->flags & GENET_HAS_EXT) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - if (GENET_IS_V5(priv)) + if (GENET_IS_V5(priv) && !priv->ephy_16nm) reg |= EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | EXT_PWR_DOWN_PHY_SD | @@ -1646,7 +1690,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, case GENET_POWER_PASSIVE: reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK); - if (GENET_IS_V5(priv)) { + if (GENET_IS_V5(priv) && !priv->ephy_16nm) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | EXT_PWR_DOWN_PHY_SD | @@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) } static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, - unsigned char *addr) + const unsigned char *addr) { bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); @@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq1; } + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + bcmgenet_netif_start(dev); netif_tx_start_all_queues(dev); @@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev) */ cancel_work_sync(&priv->bcmgenet_irq_work); - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; - /* tx reclaim */ bcmgenet_tx_reclaim_all(dev); bcmgenet_fini_dma(priv); @@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) #define MAX_MDF_FILTER 17 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, - unsigned char *addr, + const unsigned char *addr, int *i) { bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], @@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -3869,6 +3910,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) struct bcmgenet_plat_data { enum bcmgenet_version version; u32 dma_max_burst_length; + bool ephy_16nm; }; static const struct bcmgenet_plat_data v1_plat_data = { @@ -3901,6 +3943,12 @@ static const struct bcmgenet_plat_data bcm2711_plat_data = { .dma_max_burst_length = 0x08, }; +static const struct bcmgenet_plat_data bcm7712_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .ephy_16nm = true, +}; + static const struct of_device_id bcmgenet_match[] = { { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, @@ -3908,6 +3956,7 @@ static const struct of_device_id bcmgenet_match[] = { { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data }, { }, }; MODULE_DEVICE_TABLE(of, bcmgenet_match); @@ -3950,6 +3999,11 @@ static int bcmgenet_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); + /* Set default pause parameters */ + priv->autoneg_pause = 1; + priv->tx_pause = 1; + priv->rx_pause = 1; + SET_NETDEV_DEV(dev, &pdev->dev); dev_set_drvdata(&pdev->dev, dev); dev->watchdog_timeo = 2 * HZ; @@ -3983,6 +4037,7 @@ static int bcmgenet_probe(struct platform_device *pdev) if (pdata) { priv->version = 
pdata->version; priv->dma_max_burst_length = pdata->dma_max_burst_length; + priv->ephy_16nm = pdata->ephy_16nm; } else { priv->version = pd->genet_version; priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; @@ -4036,11 +4091,15 @@ static int bcmgenet_probe(struct platform_device *pdev) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); if (pd && !IS_ERR_OR_NULL(pd->mac_address)) - ether_addr_copy(dev->dev_addr, pd->mac_address); + eth_hw_addr_set(dev, pd->mac_address); else - if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) - if (has_acpi_companion(&pdev->dev)) - bcmgenet_get_hw_addr(priv, dev->dev_addr); + if (device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; + + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } if (!is_valid_ether_addr(dev->dev_addr)) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0a6d91b0f0aa23..946f6e283c4e6b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -329,6 +329,7 @@ struct bcmgenet_mib_counters { #define EXT_CFG_IDDQ_BIAS (1 << 0) #define EXT_CFG_PWR_DOWN (1 << 1) #define EXT_CK25_DIS (1 << 4) +#define EXT_CFG_IDDQ_GLOBAL_PWR (1 << 3) #define EXT_GPHY_RESET (1 << 5) /* DMA rings size */ @@ -594,6 +595,9 @@ struct bcmgenet_priv { /* other misc variables */ struct bcmgenet_hw_params *hw_params; + unsigned autoneg_pause:1; + unsigned tx_pause:1; + unsigned rx_pause:1; /* MDIO bus variables */ wait_queue_head_t wq; @@ -606,13 +610,10 @@ struct bcmgenet_priv { bool clk_eee_enabled; /* PHY device variables */ - int old_link; - int old_speed; - int old_duplex; - int old_pause; phy_interface_t phy_interface; int phy_addr; int ext_phy; + bool ephy_16nm; /* Interrupt variables */ struct work_struct bcmgenet_irq_work; @@ -690,6 +691,7 @@ int bcmgenet_mii_init(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 89d16c587bb7dc..5f259641437a72 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -25,92 +25,80 @@ #include "bcmgenet.h" -/* setup netdev link state when PHY link status change and - * update UMAC and RGMII block when link up - */ -void bcmgenet_mii_setup(struct net_device *dev) +static void bcmgenet_mac_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; - bool status_changed = false; - if (priv->old_link != phydev->link) { - status_changed = true; - priv->old_link = phydev->link; - } + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; - if (phydev->link) { - /* check speed/duplex/pause changes */ - if (priv->old_speed != phydev->speed) { - status_changed = true; - priv->old_speed = phydev->speed; - } + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) { + cmd_bits |= 
CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + } else { + /* pause capability defaults to Symmetric */ + if (priv->autoneg_pause) { + bool tx_pause = 0, rx_pause = 0; - if (priv->old_duplex != phydev->duplex) { - status_changed = true; - priv->old_duplex = phydev->duplex; - } + if (phydev->autoneg) + phy_get_pause(phydev, &tx_pause, &rx_pause); - if (priv->old_pause != phydev->pause) { - status_changed = true; - priv->old_pause = phydev->pause; + if (!tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + if (!rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; } - /* done if nothing has changed */ - if (!status_changed) - return; - - /* speed */ - if (phydev->speed == SPEED_1000) - cmd_bits = CMD_SPEED_1000; - else if (phydev->speed == SPEED_100) - cmd_bits = CMD_SPEED_100; - else - cmd_bits = CMD_SPEED_10; - cmd_bits <<= CMD_SPEED_SHIFT; - - /* duplex */ - if (phydev->duplex != DUPLEX_FULL) - cmd_bits |= CMD_HD_EN; - - /* pause capability */ - if (!phydev->pause) - cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - - /* - * Program UMAC and RGMII block based on established - * link speed, duplex, and pause. The speed set in - * umac->cmd tell RGMII block which clock to use for - * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). - * Receive clock is provided by the PHY. - */ - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg &= ~OOB_DISABLE; - reg |= RGMII_LINK; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + /* Manual override */ + if (!priv->rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + if (!priv->tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + } - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | - CMD_HD_EN | - CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); - reg |= cmd_bits; - if (reg & CMD_SW_RESET) { - reg &= ~CMD_SW_RESET; - bcmgenet_umac_writel(priv, reg, UMAC_CMD); - udelay(2); - reg |= CMD_TX_EN | CMD_RX_EN; - } + /* Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - } else { - /* done if nothing has changed */ - if (!status_changed) - return; - - /* needed for MoCA fixed PHY to reflect correct link status */ - netif_carrier_off(dev); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct phy_device *phydev = dev->phydev; + + if (phydev->link) + bcmgenet_mac_config(dev); phy_print_status(phydev); } @@ -130,20 +118,36 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev, return 0; } +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx) +{ + struct phy_device *phydev = dev->phydev; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising, + rx | tx); + phy_start_aneg(phydev); + + mutex_lock(&phydev->lock); + if (phydev->link) + bcmgenet_mac_config(dev); + mutex_unlock(&phydev->lock); +} + void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 reg = 0; /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ - if (GENET_IS_V4(priv)) { + if (GENET_IS_V4(priv) || priv->ephy_16nm) { reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); if (enable) { reg &= ~EXT_CK25_DIS; bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); mdelay(1); - reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_CFG_IDDQ_GLOBAL_PWR); reg |= EXT_GPHY_RESET; bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); mdelay(1); @@ -151,7 +155,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) reg &= ~EXT_GPHY_RESET; } else { reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | - EXT_GPHY_RESET; + EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR; bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); mdelay(1); reg |= EXT_CK25_DIS; @@ -286,23 +290,53 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct device_node *dn = kdev->of_node; + phy_interface_t phy_iface = priv->phy_interface; struct phy_device *phydev; - u32 phy_flags = 0; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; int ret; /* Communicate the integrated PHY revision */ if (priv->internal_phy) phy_flags = priv->gphy_rev; - /* Initialize link state variables that bcmgenet_mii_setup() uses */ - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; + /* This is an ugly quirk but we have not been correctly interpreting + * the phy_interface values and we have done that across different + * drivers, so at least we are consistent in our mistakes. + * + * When the Generic PHY driver is in use either the PHY has been + * strapped or programmed correctly by the boot loader so we should + * stick to our incorrect interpretation since we have validated it. 
+ * + * Now when a dedicated PHY driver is in use, we need to reverse the + * meaning of the phy_interface_mode values to something that the PHY + * driver will interpret and act on such that we have two mistakes + * canceling themselves so to speak. We only do this for the two + * modes that GENET driver officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other + * modes are not *officially* supported with the boot loader and the + * scripted environment generating Device Tree blobs for those + * platforms. + * + * Note that internal PHY, MoCA and fixed-link configurations are not + * affected because they use different phy_interface_t values or the + * Generic PHY driver. + */ + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } if (dn) { phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); + phy_flags, phy_iface); if (!phydev) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -332,7 +366,7 @@ int bcmgenet_mii_probe(struct net_device *dev) phydev->dev_flags = phy_flags; ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, - priv->phy_interface); + phy_iface); if (ret) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -350,8 +384,6 @@ int bcmgenet_mii_probe(struct net_device *dev) return ret; } - linkmode_copy(phydev->advertising, phydev->supported); - /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs. On GENETv5 there is a hardware issue * that prevents the signaling of link UP interrupts when diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5e0e0e70d80146..b1328c5524b535 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp) } /* tp->lock is held. 
*/ -static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) +static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, + int index) { u32 addr_high, addr_low; @@ -5746,7 +5747,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); udelay(40); - current_link_up = false; tp->link_config.rmt_adv = 0; mac_status = tr32(MAC_STATUS); @@ -9366,7 +9366,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) return 0; @@ -10273,8 +10273,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_asic_rev(tp) == ASIC_REV_5705 && tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { - if (tg3_flag(tp, TSO_CAPABLE) && - tg3_asic_rev(tp) == ASIC_REV_5705) { + if (tg3_flag(tp, TSO_CAPABLE)) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && !tg3_flag(tp, IS_5788)) { @@ -11213,12 +11212,8 @@ static void tg3_reset_task(struct work_struct *work) } tg3_netif_start(tp); - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); - + tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); out: rtnl_unlock(); @@ -16915,19 +16910,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) return err; } -static int tg3_get_device_address(struct tg3 *tp) +static int tg3_get_device_address(struct tg3 *tp, u8 *addr) { - struct net_device *dev = tp->dev; u32 hi, lo, mac_offset; int addr_ok = 0; int err; - if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr)) + if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) return 0; if (tg3_flag(tp, IS_SSB_CORE)) { - err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); - if (!err && is_valid_ether_addr(&dev->dev_addr[0])) + err = ssb_gige_get_macaddr(tp->pdev, addr); + if (!err && is_valid_ether_addr(addr)) return 0; } @@ -16951,41 +16945,41 @@ static int tg3_get_device_address(struct tg3 *tp) /* First try to get it from MAC address mailbox. */ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); if ((hi >> 16) == 0x484b) { - dev->dev_addr[0] = (hi >> 8) & 0xff; - dev->dev_addr[1] = (hi >> 0) & 0xff; + addr[0] = (hi >> 8) & 0xff; + addr[1] = (hi >> 0) & 0xff; tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[5] = (lo >> 0) & 0xff; + addr[2] = (lo >> 24) & 0xff; + addr[3] = (lo >> 16) & 0xff; + addr[4] = (lo >> 8) & 0xff; + addr[5] = (lo >> 0) & 0xff; /* Some old bootcode may report a 0 MAC address in SRAM */ - addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + addr_ok = is_valid_ether_addr(addr); } if (!addr_ok) { /* Next, try NVRAM. */ if (!tg3_flag(tp, NO_NVRAM) && !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { - memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); - memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + memcpy(&addr[0], ((char *)&hi) + 2, 2); + memcpy(&addr[2], (char *)&lo, sizeof(lo)); } /* Finally just fetch it out of the MAC control regs. 
*/ else { hi = tr32(MAC_ADDR_0_HIGH); lo = tr32(MAC_ADDR_0_LOW); - dev->dev_addr[5] = lo & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[1] = hi & 0xff; - dev->dev_addr[0] = (hi >> 8) & 0xff; + addr[5] = lo & 0xff; + addr[4] = (lo >> 8) & 0xff; + addr[3] = (lo >> 16) & 0xff; + addr[2] = (lo >> 24) & 0xff; + addr[1] = hi & 0xff; + addr[0] = (hi >> 8) & 0xff; } } - if (!is_valid_ether_addr(&dev->dev_addr[0])) + if (!is_valid_ether_addr(addr)) return -EINVAL; return 0; } @@ -17561,6 +17555,7 @@ static int tg3_init_one(struct pci_dev *pdev, char str[40]; u64 dma_mask, persist_dma_mask; netdev_features_t features = 0; + u8 addr[ETH_ALEN] __aligned(2); err = pci_enable_device(pdev); if (err) { @@ -17783,12 +17778,13 @@ static int tg3_init_one(struct pci_dev *pdev, tp->rx_pending = 63; } - err = tg3_get_device_address(tp); + err = tg3_get_device_address(tp, addr); if (err) { dev_err(&pdev->dev, "Could not obtain valid ethernet address, aborting\n"); goto err_out_apeunmap; } + eth_hw_addr_set(dev, addr); intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ba47777d9cff7e..bbdc829c352433 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad) ether_addr_copy(netdev->perm_addr, bnad->perm_addr); if (is_zero_ether_addr(netdev->dev_addr)) - ether_addr_copy(netdev->dev_addr, bnad->perm_addr); + eth_hw_addr_set(netdev, bnad->perm_addr); } /* Control Path Handlers */ @@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr) err = bnad_mac_addr_set_locked(bnad, sa->sa_data); if (!err) - ether_addr_copy(netdev->dev_addr, sa->sa_data); + eth_hw_addr_set(netdev, sa->sa_data); spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -3515,7 +3515,6 @@ static void bnad_uninit(struct bnad *bnad) { if (bnad->work_q) { - flush_workqueue(bnad->work_q); destroy_workqueue(bnad->work_q); bnad->work_q = NULL; } diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d8d87213697cde..5620b97b3482b1 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -243,9 +243,11 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 -#define MACB_SRTSM_OFFSET 15 -#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ +#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */ +#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ #define MACB_OSSMODE_SIZE 1 +#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */ +#define MACB_MIIONRGMII_SIZE 1 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -713,6 +715,7 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_MIIONRGMII 0x00000200 #define MACB_CAPS_CLK_HW_CHG 0x04000000 #define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d13fb1d3182155..ffce528aa00e4a 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -313,7 +313,7 @@ static void 
macb_get_hwaddr(struct macb *bp) addr[5] = (top >> 8) & 0xff; if (is_valid_ether_addr(addr)) { - memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + eth_hw_addr_set(bp->dev, addr); return; } } @@ -522,21 +522,21 @@ static void macb_validate(struct phylink_config *config, state->interface != PHY_INTERFACE_MODE_SGMII && state->interface != PHY_INTERFACE_MODE_10GBASER && !phy_interface_mode_is_rgmii(state->interface)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } if (!macb_is_gem(bp) && (state->interface == PHY_INTERFACE_MODE_GMII || phy_interface_mode_is_rgmii(state->interface))) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } if (state->interface == PHY_INTERFACE_MODE_10GBASER && !(bp->caps & MACB_CAPS_HIGH_SPEED && bp->caps & MACB_CAPS_PCS)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config, if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && (state->interface == PHY_INTERFACE_MODE_NA || state->interface == PHY_INTERFACE_MODE_10GBASER)) { - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseER_Full); + phylink_set_10g_modes(mask); phylink_set(mask, 10000baseKR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseT_Full); if (state->interface != PHY_INTERFACE_MODE_NA) goto out; } @@ -575,9 +570,8 @@ static void macb_validate(struct phylink_config *config, phylink_set(mask, 1000baseT_Half); } out: - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, @@ -684,6 +678,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { ctrl |= GEM_BIT(PCSSEL); ncr |= GEM_BIT(ENABLE_HS_MAC); + } else if (bp->caps & MACB_CAPS_MIIONRGMII && + bp->phy_interface == PHY_INTERFACE_MODE_MII) { + ncr |= MACB_BIT(MIIONRGMII); } } @@ -900,6 +897,17 @@ static int macb_mdiobus_register(struct macb *bp) { struct device_node *child, *np = bp->pdev->dev.of_node; + /* If we have a child named mdio, probe it instead of looking for PHYs + * directly under the MAC node + */ + child = of_get_child_by_name(np, "mdio"); + if (child) { + int ret = of_mdiobus_register(bp->mii_bus, child); + + of_node_put(child); + return ret; + } + if (of_phy_is_fixed_link(np)) return mdiobus_register(bp->mii_bus); @@ -4594,7 +4602,8 @@ static const struct macb_config zynq_config = { }; static const struct macb_config sama7g5_gem_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | + MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4602,7 +4611,8 @@ static const struct macb_config sama7g5_gem_config = { }; static const struct macb_config sama7g5_emac_config = { - .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | + MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = 
macb_init, @@ -4774,7 +4784,7 @@ static int macb_probe(struct platform_device *pdev) if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) bp->rx_intr_mask |= MACB_BIT(RXUBR); - err = of_get_mac_address(np, bp->dev->dev_addr); + err = of_get_ethdev_address(np, bp->dev); if (err == -EPROBE_DEFER) goto err_out_free_netdev; else if (err) diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index c2e1f163bb14f0..095c5a2144a71e 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, return NULL; } -static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct macb *bp = container_of(ptp, struct macb, ptp_clock_info); unsigned long flags; @@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) u32 secl, sech; spin_lock_irqsave(&bp->tsu_clk_lock, flags); + ptp_read_system_prets(sts); first = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); second = gem_readl(bp, TN); @@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) /* if so, use later read & re-read seconds * (assume all done within 1s) */ + ptp_read_system_prets(sts); ts->tv_nsec = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); } else { @@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } if (delta > TSU_NSEC_MAX_VAL) { - gem_tsu_get_time(&bp->ptp_clock_info, &now); + gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL); now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, @@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = { .pps = 1, .adjfine = gem_ptp_adjfine, .adjtime = gem_ptp_adjtime, - .gettime64 = gem_tsu_get_time, + .gettimex64 = gem_tsu_get_time, .settime64 = gem_tsu_set_time, .enable = gem_ptp_enable, }; @@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1, * The timestamp only contains lower few bits of seconds, * so add value from 1588 timer */ - gem_tsu_get_time(&bp->ptp_clock_info, &tsu); + gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL); /* If the top bit is set in the timestamp, * but not in 1588 timer, it has rolled over, diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index b6a066404f4bfb..457cb71210003e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr) writel(value, ioaddr + XGMAC_CONTROL); } -static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, +static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr, int num) { u32 data; @@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); @@ -1693,6 +1693,7 @@ static int xgmac_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev = NULL; struct xgmac_priv *priv = NULL; + u8 addr[ETH_ALEN]; u32 uid; res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1785,7 +1786,8 @@ static int xgmac_probe(struct platform_device *pdev) ndev->max_mtu = XGMAC_MAX_MTU; /* Get the MAC address */ - xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); + xgmac_get_mac_addr(priv->base, addr, 0); + eth_hw_addr_set(ndev, addr); if (!is_valid_ether_addr(ndev->dev_addr)) netdev_warn(ndev, "MAC address %pM not valid", ndev->dev_addr); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 2a0d64e5797c87..73cb03266549ca 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) if (!ether_addr_equal(netdev->dev_addr, mac)) { macaddr_changed = true; - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac); call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev); } @@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) wq = &lio->rxq_status_wq[q_no]; if (wq->wq) { cancel_delayed_work_sync(&wq->wk.work); - flush_workqueue(wq->wq); destroy_workqueue(wq->wq); wq->wq = NULL; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 2907e13b9df697..12eee2bc7f5c88 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -892,12 +892,11 @@ liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused * bus = pdev->bus->number; device = PCI_SLOT(pdev->devfn); function = PCI_FUNC(pdev->devfn); - oct_dev->watchdog_task = kthread_create( - liquidio_watchdog, oct_dev, - "liowd/%02hhx:%02hhx.%hhx", bus, device, function); - if (!IS_ERR(oct_dev->watchdog_task)) { - wake_up_process(oct_dev->watchdog_task); - } else { + oct_dev->watchdog_task = kthread_run(liquidio_watchdog, + oct_dev, + "liowd/%02hhx:%02hhx.%hhx", + bus, device, function); + if (IS_ERR(oct_dev->watchdog_task)) { oct_dev->watchdog_task = NULL; dev_err(&oct_dev->pci_dev->dev, "failed to create kernel_thread\n"); @@ -1279,6 +1278,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) struct lio *lio; dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + device_lock(&oct->pci_dev->dev); + if (oct->devlink) { + devlink_unregister(oct->devlink); + devlink_free(oct->devlink); + oct->devlink = NULL; + } + device_unlock(&oct->pci_dev->dev); + if (!oct->ifcount) { dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); return 1; @@ -1300,12 +1307,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) liquidio_destroy_nic_device(oct, i); - if (oct->devlink) { - devlink_unregister(oct->devlink); - devlink_free(oct->devlink); - oct->devlink = NULL; - } - dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); return 0; } @@ -2022,7 +2023,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return -EIO; } - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); return 0; @@ -3632,7 +3633,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Copy MAC Address to OS network device structure */ - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); /* By default all interfaces on a single Octeon uses the same 
* tx and rx queues @@ -3749,10 +3750,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } } + device_lock(&octeon_dev->pci_dev->dev); devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv), &octeon_dev->pci_dev->dev); if (!devlink) { + device_unlock(&octeon_dev->pci_dev->dev); dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); goto setup_nic_dev_free; } @@ -3760,15 +3763,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio_devlink = devlink_priv(devlink); lio_devlink->oct = octeon_dev; - if (devlink_register(devlink)) { - devlink_free(devlink); - dev_err(&octeon_dev->pci_dev->dev, - "devlink registration failed\n"); - goto setup_nic_dev_free; - } - octeon_dev->devlink = devlink; octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_register(devlink); + device_unlock(&octeon_dev->pci_dev->dev); return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index f6396ac64006c7..c607756b731f07 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return -EPERM; } - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); return 0; @@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); /* Copy MAC Address to OS network device structure */ - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); if (liquidio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq, diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 30463a6d1f8c99..4e39d712e121e5 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; - result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + result = of_get_ethdev_address(pdev->dev.of_node, netdev); if (result) eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 0fbecd093fa1f0..f2f1ce81fd9ccd 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a27227aeae8800..bb45d5df2856fc 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->node = mbx.nic_cfg.node_id; if (!nic->set_mac_pending) - ether_addr_copy(nic->netdev->dev_addr, - mbx.nic_cfg.mac_addr); + 
eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr); nic->sqs_mode = mbx.nic_cfg.sqs_mode; nic->loopback_supported = mbx.nic_cfg.loopback_supported; nic->link_up = false; @@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); if (nic->pdev->msix_enabled) { if (nicvf_hw_set_mac_addr(nic, netdev)) @@ -2119,10 +2118,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = pci_enable_device(pdev); - if (err) { - dev_err(dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index c36fed9c3d737d..574a32f23f966b 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, u8 *dst) { u8 mac[ETH_ALEN]; - u8 *addr; + int ret; - addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN); - if (!addr) { + ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac); + if (ret) { dev_err(dev, "MAC address invalid: %pM\n", mac); return -EINVAL; } @@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pcim_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index d246eee4b6d5c1..609820e214a343 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p) if (!mac->ops->macaddress_set) return -EOPNOTSUPP; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); mac->ops->macaddress_set(mac, dev->dev_addr); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h index dfa77491a910d7..5913eaf442b5c1 100644 --- a/drivers/net/ethernet/chelsio/cxgb/gmac.h +++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h @@ -117,7 +117,7 @@ struct cmac_ops { const struct cmac_statistics *(*statistics_update)(struct cmac *, int); int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); - int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); + int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]); }; typedef struct _cmac_instance cmac_instance; diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index c27908e66f5edb..0bb37e4680c788 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) return 0; } -static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) +static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6]) { u32 val, lo, mid, hi, enabled = cmac->instance->enabled; diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c 
b/drivers/net/ethernet/chelsio/cxgb/subr.c index 310add28fcf590..007c591b8bf5f8 100644 --- a/drivers/net/ethernet/chelsio/cxgb/subr.c +++ b/drivers/net/ethernet/chelsio/cxgb/subr.c @@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi) adapter->port[i].dev->name); goto error; } - memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); + eth_hw_addr_set(adapter->port[i].dev, hw_addr); init_link_config(&adapter->port[i].link_config, bi); } diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index 873c1c7b4ca0f5..2ad3efb550c293 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac) } /* Expect MAC address to be in network byte order. */ -static int mac_set_address(struct cmac* mac, u8 addr[6]) +static int mac_set_address(struct cmac* mac, const u8 addr[6]) { u32 val; int port = mac->instance->index; @@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac) } hw_stats[] = { #define HW_STAT(reg, stat_name) \ - { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } + { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) } /* Rx stats */ HW_STAT(RxUnicast, RxUnicastFramesOK), diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h index b706f2fbe4f487..a309016f7f8cb0 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which); int t3_mac_disable(struct cmac *mac, int which); int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); +int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]); int t3_mac_set_num_ucast(struct cmac *mac, int n); const struct mac_stats *t3_mac_update_stats(struct cmac *mac); int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 38e47703f9abd9..9cf9e33664e4f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); if (offload_running(adapter)) write_smt_entry(adapter, pi->port_id); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 7ff31d1026fb27..53feac8da503f6 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include #include "common.h" #include "regs.h" #include "sge_defs.h" @@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, memcpy(hw_addr, adapter->params.vpd.eth_base, 5); hw_addr[5] = adapter->params.vpd.eth_base[5] + i; - memcpy(adapter->port[i]->dev_addr, hw_addr, - ETH_ALEN); + eth_hw_addr_set(adapter->port[i], hw_addr); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1); diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c index 3af19a55037245..1bdc6cad1e49a2 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c +++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c @@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr) } /* Set one of the station's unicast MAC addresses. */ -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) +int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]) { if (idx >= mac->nucast) return -EINVAL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecea3cdd30b3f7..5657ac8cfca001 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx, u8 hw_addr[]) { - ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr); + eth_hw_addr_set(adapter->port[port_idx], hw_addr); ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d9cda4ab30374..dde1cf51d0abf4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (ret < 0) return ret; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 64144b6171d722..e7b4e3ed056c72 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) if (ret) return ret; - memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(adap->port[i], addr); j++; } return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index f55105a4112fd6..03cb1410d6fc31 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -40,6 +40,7 @@ #ifndef __CXGB4VF_ADAPTER_H__ #define __CXGB4VF_ADAPTER_H__ +#include #include #include #include @@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx) static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, u8 hw_addr[]) { - memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); + eth_hw_addr_set(adapter->port[pidx], hw_addr); } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 49b76fd47daa0f..64479c464b4ec5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1218,7 
+1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) if (ret < 0) return ret; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize generic PCI device state. */ err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * Reserve PCI resources for the device. If we can't get them some diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index bcad69c480740b..4af5561cbfc54c 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent) * created only after 3 way handshake is done. */ sock_orphan(child); - percpu_counter_inc((child)->sk_prot->orphan_count); + INC_ORPHAN_COUNT(child); chtls_release_resources(child); chtls_conn_done(child); } else { diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h index b1161bdeda4dc7..f61ca657601cae 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h @@ -95,7 +95,7 @@ struct deferred_skb_cb { #define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok) #define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok) #define SACK_OK(tp) ((tp)->rx_opt.sack_ok) -#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count) +#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count) /* TLS SKB */ #define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld) diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index d0c4c8b7a15ab0..4a97aa8e1387d2 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr); @@ -1314,6 +1314,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) int tmp; unsigned rev_type = 0; int eeprom_buff[CHKSUM_LEN]; + u8 addr[ETH_ALEN]; int retval; /* Initialize the device structure. */ @@ -1387,9 +1388,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) for (i = 0; i < ETH_ALEN / 2; i++) { unsigned int Addr; Addr = readreg(dev, PP_IA + i * 2); - dev->dev_addr[i * 2] = Addr & 0xFF; - dev->dev_addr[i * 2 + 1] = Addr >> 8; + addr[i * 2] = Addr & 0xFF; + addr[i * 2 + 1] = Addr >> 8; } + eth_hw_addr_set(dev, addr); /* Load the Adapter Configuration. 
* Note: Barring any more specific information from some @@ -1464,9 +1466,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) /* eeprom_buff has 32-bit ints, so we can't just memcpy it */ /* store the initial memory base address */ for (i = 0; i < ETH_ALEN / 2; i++) { - dev->dev_addr[i * 2] = eeprom_buff[i]; - dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8; + addr[i * 2] = eeprom_buff[i]; + addr[i * 2 + 1] = eeprom_buff[i] >> 8; } + eth_hw_addr_set(dev, addr); cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n", dev->name, lp->adapter_cnf); } diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 072fac5f5d2434..21ba6e893072cd 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) if (dev == NULL) return NULL; - memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); + eth_hw_addr_set(dev, data->dev_addr); dev->ethtool_ops = &ep93xx_ethtool_ops; dev->netdev_ops = &ep93xx_netdev_ops; diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 6324e80960c3e1..84251b85fc9317 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, saddr->sa_data); netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr); /* set the Ethernet address */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 12ffc14fbecd8e..6ded4d9fa32a07 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev, int err; err = enic_dev_fw_info(enic, &fw_info); - /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + /* return only when dma_alloc_coherent fails in vnic_dev_fw_info * For other failures, like devcmd failure, we return previously * recorded info. */ @@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &vstats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d0a8f7106958bc..aacf141986d55c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &stats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. 
*/ @@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) return -EADDRNOTAVAIL; } - memcpy(netdev->dev_addr, addr, netdev->addr_len); + eth_hw_addr_set(netdev, addr); return 0; } @@ -1098,6 +1098,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) static int enic_set_vf_port(struct net_device *netdev, int vf, struct nlattr *port[]) { + static const u8 zero_addr[ETH_ALEN] = {}; struct enic *enic = netdev_priv(netdev); struct enic_port_profile prev_pp; struct enic_port_profile *pp; @@ -1162,7 +1163,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, } else { memset(pp, 0, sizeof(*pp)); if (vf == PORT_SELF_VF) - eth_zero_addr(netdev->dev_addr); + eth_hw_addr_set(netdev, zero_addr); } } else { /* Set flag to indicate that the port assoc/disassoc @@ -1174,7 +1175,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, if (pp->request == PORT_REQUEST_DISASSOCIATE) { eth_zero_addr(pp->mac_addr); if (vf == PORT_SELF_VF) - eth_zero_addr(netdev->dev_addr); + eth_hw_addr_set(netdev, zero_addr); } } diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c index e6a83198c3dda2..80f46dbd5117b5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_pp.c +++ b/drivers/net/ethernet/cisco/enic/enic_pp.c @@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf) struct vic_provinfo *vp; const u8 oui[3] = VIC_PROVINFO_CISCO_OUI; const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX); + const u8 *client_mac; char uuid_str[38]; char client_mac_str[18]; - u8 *client_mac; int err; ENIC_PP_BY_INDEX(enic, vf, pp, &err); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6e745ca4c4333d..941f175fb911e2 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr) { struct sockaddr *sa = addr; - memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, sa->sa_data); gmac_write_mac_address(netdev); return 0; @@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) DEFAULT_NAPI_WEIGHT); if (is_valid_ether_addr((void *)port->mac_addr)) { - memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)port->mac_addr); } else { dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n", port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - eth_random_addr(netdev->dev_addr); + eth_hw_addr_random(netdev); } gmac_write_mac_address(netdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index e842de6f66356b..0985ab216566b1 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1425,6 +1425,7 @@ dm9000_probe(struct platform_device *pdev) enum of_gpio_flags flags; struct regulator *power; bool inv_mac_addr = false; + u8 addr[ETH_ALEN]; power = devm_regulator_get(dev, "vcc"); if (IS_ERR(power)) { @@ -1666,11 +1667,12 @@ dm9000_probe(struct platform_device *pdev) /* try reading the node address from the attached EEPROM */ for (i = 0; i < 6; i += 2) - dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); + dm9000_read_eeprom(db, i / 2, addr + i); + eth_hw_addr_set(ndev, addr); if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { mac_src = "platform data"; - memcpy(ndev->dev_addr, pdata->dev_addr, 
ETH_ALEN); + eth_hw_addr_set(ndev, pdata->dev_addr); } if (!is_valid_ether_addr(ndev->dev_addr)) { @@ -1678,7 +1680,8 @@ dm9000_probe(struct platform_device *pdev) mac_src = "chip"; for (i = 0; i < 6; i++) - ndev->dev_addr[i] = ior(db, i+DM9000_PAR); + addr[i] = ior(db, i + DM9000_PAR); + eth_hw_addr_set(ndev, addr); } if (!is_valid_ether_addr(ndev->dev_addr)) { diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 117c26fa590950..d51b3d24a0c87b 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) struct de_private *de = netdev_priv(dev); u16 hash_table[32]; struct netdev_hw_addr *ha; + const u16 *eaddrs; int i; - u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); __set_bit_le(255, hash_table); /* Broadcast entry */ @@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) setup_frm = &de->setup_frame[13*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); struct netdev_hw_addr *ha; - u16 *eaddrs; + const u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ @@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) setup_frm = &de->setup_frame[15*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = { static void de21040_get_mac_address(struct de_private *de) { + u8 addr[ETH_ALEN]; unsigned i; dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */ @@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de) value = dr32(ROMCmd); rmb(); } while (value < 0 && --boguscnt > 0); - de->dev->dev_addr[i] = value; + addr[i] = value; udelay(1); if (boguscnt <= 0) pr_warn("timeout reading 21040 MAC address byte %u\n", i); } + eth_hw_addr_set(de->dev, addr); } static void de21040_get_media_info(struct de_private *de) @@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de) #endif /* store MAC address */ - for (i = 0; i < 6; i ++) - de->dev->dev_addr[i] = ee_data[i + sa_offset]; + eth_hw_addr_set(de->dev, &ee_data[sa_offset]); /* get offset of controller 0 info leaf. ignore 2nd byte.
*/ ofs = ee_data[SROMC0InfoLeaf]; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 36ab4cbf2ad08a..13121c4dcfe6f4 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev) int broken, i, k, tmp, status = 0; u_short j,chksum; struct de4x5_private *lp = netdev_priv(dev); + u8 addr[ETH_ALEN]; broken = de4x5_bad_srom(lp); @@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev) if (lp->chipset == DC21040) { while ((tmp = inl(DE4X5_APROM)) < 0); k += (u_char) tmp; - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; while ((tmp = inl(DE4X5_APROM)) < 0); k += (u_short) (tmp << 8); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; } else if (!broken) { - dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; - dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; } else if ((broken == SMC) || (broken == ACCTON)) { - dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; - dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; + addr[i] = *((u_char *)&lp->srom + i); i++; + addr[i] = *((u_char *)&lp->srom + i); i++; } } else { k += (u_char) (tmp = inb(EISA_APROM)); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; k += (u_short) ((tmp = inb(EISA_APROM)) << 8); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; } if (k > 0xffff) k-=0xffff; } if (k == 0xffff) k=0; + eth_hw_addr_set(dev, addr); + if (lp->bus == PCI) { if (lp->chipset == DC21040) { while ((tmp = inl(DE4X5_APROM)) < 0); @@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev) int x = dev->dev_addr[i]; x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4); x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2); - dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); + addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); } + eth_hw_addr_set(dev, addr); } #endif /* CONFIG_PPC_PMAC */ @@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status) if ((tmp == 0) || (tmp == 0x5fa)) { if ((lp->chipset == last.chipset) && (lp->bus_num == last.bus) && (lp->bus_num > 0)) { - for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; - for (i=ETH_ALEN-1; i>2; --i) { - dev->dev_addr[i] += 1; - if (dev->dev_addr[i] != 0) break; - } - for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i]; + eth_addr_inc(last.addr); + eth_hw_addr_set(dev, last.addr); + if (!an_exception(lp)) { dev->irq = last.irq; } @@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data if (netif_queue_stopped(dev)) return -EBUSY; netif_stop_queue(dev); - for (i=0; i<ETH_ALEN; i++) { - dev->dev_addr[i] = tmp.addr[i]; - } + eth_hw_addr_set(dev, tmp.addr); build_setup_frame(dev, PHYS_ADDR_ONLY); /* Set up the descriptor and give ownership to the card */ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index c763b692e16430..83f1727d1423be 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Set Node address */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = db->srom[20 + i]; + eth_hw_addr_set(dev, &db->srom[20]); err = register_netdev (dev); if (err) @@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void __iomem *ioaddr) static void dm9132_id_table(struct net_device *dev) { +
const u16 *addrptr = (const u16 *)dev->dev_addr; struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr + 0xc0; - u16 *addrptr = (u16 *)dev->dev_addr; struct netdev_hw_addr *ha; u16 i, hash_table[4]; @@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev) struct dmfe_board_info *db = netdev_priv(dev); struct netdev_hw_addr *ha; struct tx_desc *txptr; - u16 * addrptr; + const u16 * addrptr; u32 * suptr; int i; @@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev) suptr = (u32 *) txptr->tx_buf_ptr; /* Node address */ - addrptr = (u16 *) dev->dev_addr; + addrptr = (const u16 *) dev->dev_addr; *suptr++ = addrptr[0]; *suptr++ = addrptr[1]; *suptr++ = addrptr[2]; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index fcedd733bacbf1..79df5a72877b83 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev) } } else { /* This is set_rx_mode(), but without starting the transmitter. */ - u16 *eaddrs = (u16 *)dev->dev_addr; + const u16 *eaddrs = (const u16 *)dev->dev_addr; u16 *setup_frm = &tp->setup_frame[15*6]; dma_addr_t mapping; @@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) struct tulip_private *tp = netdev_priv(dev); u16 hash_table[32]; struct netdev_hw_addr *ha; + const u16 *eaddrs; int i; - u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); __set_bit_le(255, hash_table); /* Broadcast entry */ @@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) setup_frm = &tp->setup_frame[13*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); struct netdev_hw_addr *ha; - u16 *eaddrs; + const u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ @@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) setup_frm = &tp->setup_frame[15*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int chip_idx = ent->driver_data; const char *chip_name = tulip_tbl[chip_idx].chip_name; unsigned int eeprom_missing = 0; + u8 addr[ETH_ALEN] __aligned(2); unsigned int force_csr0 = 0; board_idx++; @@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) do { value = ioread32(ioaddr + CSR9); } while (value < 0 && --boguscnt > 0); - put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); + put_unaligned_le16(value, ((__le16 *)addr) + i); sum += value & 0xffff; } + eth_hw_addr_set(dev, addr); } else if (chip_idx == COMET) { /* No need to read the EEPROM. 
*/ - put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); - put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); + put_unaligned_le32(ioread32(ioaddr + 0xA4), addr); + put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4); + eth_hw_addr_set(dev, addr); for (i = 0; i < 6; i ++) sum += dev->dev_addr[i]; } else { @@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif for (i = 0; i < 6; i ++) { - dev->dev_addr[i] = ee_data[i + sa_offset]; + addr[i] = ee_data[i + sa_offset]; sum += ee_data[i + sa_offset]; } + eth_hw_addr_set(dev, addr); } /* Lite-On boards have the address byte-swapped. */ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02) && - dev->dev_addr[1] == 0x00) + dev->dev_addr[1] == 0x00) { for (i = 0; i < 6; i+=2) { - char tmp = dev->dev_addr[i]; - dev->dev_addr[i] = dev->dev_addr[i+1]; - dev->dev_addr[i+1] = tmp; + addr[i] = dev->dev_addr[i+1]; + addr[i+1] = dev->dev_addr[i]; } + eth_hw_addr_set(dev, addr); + } + /* On the Zynx 315 Etherarray and other multiport boards only the first Tulip has an EEPROM. On Sparc systems the mac address is held in the OBP property @@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (sum == 0 || sum == 6*0xff) { #if defined(CONFIG_SPARC) struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; + const unsigned char *addr2; int len; #endif eeprom_missing = 1; for (i = 0; i < 5; i++) - dev->dev_addr[i] = last_phys_addr[i]; - dev->dev_addr[i] = last_phys_addr[i] + 1; + addr[i] = last_phys_addr[i]; + addr[i] = last_phys_addr[i] + 1; + eth_hw_addr_set(dev, addr); #if defined(CONFIG_SPARC) - addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == ETH_ALEN) - memcpy(dev->dev_addr, addr, ETH_ALEN); + addr2 = of_get_property(dp, "local-mac-address", &len); + if (addr2 && len == ETH_ALEN) + eth_hw_addr_set(dev, addr2); #endif #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. 
*/ if (last_irq) diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index d67ef7d02d6b85..77d9058431e3a6 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev, struct uli526x_board_info *db; /* board information structure */ struct net_device *dev; void __iomem *ioaddr; + u8 addr[ETH_ALEN]; int i, err; ULI526X_DBUG(0, "uli526x_init_one()", 0); @@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev, uw32(DCR13, 0x1b0); //Select ID Table access port //Read MAC address from CR14 for (i = 0; i < 6; i++) - dev->dev_addr[i] = ur32(DCR14); + addr[i] = ur32(DCR14); //Read end uw32(DCR13, 0); //Clear CR13 uw32(DCR0, 0); //Clear CR0 @@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev, else /*Exist SROM*/ { for (i = 0; i < 6; i++) - dev->dev_addr[i] = db->srom[20 + i]; + addr[i] = db->srom[20 + i]; } + eth_hw_addr_set(dev, addr); + err = register_netdev (dev); if (err) goto err_out_unmap; @@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) void __iomem *ioaddr = db->ioaddr; struct netdev_hw_addr *ha; struct tx_desc *txptr; - u16 * addrptr; + const u16 * addrptr; u32 * suptr; int i; @@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) suptr = (u32 *) txptr->tx_buf_ptr; /* Node address */ - addrptr = (u16 *) dev->dev_addr; + addrptr = (const u16 *) dev->dev_addr; *suptr++ = addrptr[0] << FLT_SHIFT; *suptr++ = addrptr[1] << FLT_SHIFT; *suptr++ = addrptr[2] << FLT_SHIFT; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 85b99099c6b946..86b1d23eba833c 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) int chip_idx = ent->driver_data; int irq; int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + __le16 addr[ETH_ALEN / 2]; void __iomem *ioaddr; i = pcim_enable_device(pdev); @@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_netdev; for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); + addr[i] = cpu_to_le16(eeprom_read(ioaddr, i)); + eth_hw_addr_set(dev, (u8 *)addr); /* Reset the chip to erase previous misconfiguration. No hold time required! */ @@ -877,7 +879,7 @@ static void init_registers(struct net_device *dev) 8000 16 longwords 0200 2 longwords 2000 32 longwords C000 32 longwords 0400 4 longwords */ -#if defined (__i386__) && !defined(MODULE) +#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML) /* When not a module we can work around broken '486 PCI boards. 
*/ if (boot_cpu_data.x86 <= 4) { i |= 0x4800; diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index a8de7935557800..8759f9f76b6217 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card) xw32(CSR10, i + 3); data_count = xr32(CSR9); if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { + u8 addr[ETH_ALEN]; int j; for (j = 0; j < 6; j++) { xw32(CSR10, i + j + 4); - card->dev->dev_addr[j] = xr32(CSR9) & 0xff; + addr[j] = xr32(CSR9) & 0xff; } + eth_hw_addr_set(card->dev, addr); break; } else if (link == 0) { break; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 202ecb1320534c..a301f7e6a4405c 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev) } /* Set MAC address */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = psrom->mac_addr[i]; + eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { np->led_mode = psrom->led_mode; @@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev) */ for (i = 0; i < 3; i++) dw16(StationAddr0 + 2 * i, - cpu_to_le16(((u16 *)dev->dev_addr)[i])); + cpu_to_le16(((const u16 *)dev->dev_addr)[i])); set_multicast (dev); if (np->coalesce) { diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index c36d186dffed13..c710dc17be90da 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev, int bar = 1; #endif int phy, phy_end, phy_idx = 0; + __le16 addr[ETH_ALEN / 2]; if (pci_enable_device(pdev)) return -EIO; @@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev, goto err_out_res; for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = + addr[i] = cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); + eth_hw_addr_set(dev, (u8 *)addr); np = netdev_priv(dev); np->ndev = dev; @@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); __set_mac_addr(dev); return 0; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 6c51cf991dad5f..92462ed87bc438 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp) { u16 tmp; - tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); + tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); - tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); + tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); - tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); + tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); } @@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp) *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); if (is_valid_ether_addr(addr)) - memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + eth_hw_addr_set(bp->dev, addr); } static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) diff --git a/drivers/net/ethernet/ec_bhf.c 
b/drivers/net/ethernet/ec_bhf.c index b2d4fb3feb7448..46e3a05e958248 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -479,6 +479,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) struct net_device *net_dev; struct ec_bhf_priv *priv; void __iomem *dma_io; + u8 addr[ETH_ALEN]; void __iomem *io; int err = 0; @@ -539,7 +540,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err < 0) goto err_free_net_dev; - memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); + memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN); + eth_hw_addr_set(net_dev, addr); err = register_netdev(net_dev); if (err < 0) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 649c5c429bd7cf..528eb0f223b176 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1080,7 +1080,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, } /* Uses synchronous MCCQ */ -int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, +int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index c30d6d6f0f3a0e..db1f3b908582ef 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter); int be_fw_wait_ready(struct be_adapter *adapter); int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, bool permanent, u32 if_handle, u32 pmac_id); -int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, +int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain); int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 361c1c87c18306..d51f24c9e1b804 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) iowrite32(val, adapter->db + DB_CQ_OFFSET); } -static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac) +static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac) { int i; @@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) /* Remember currently programmed MAC */ ether_addr_copy(adapter->dev_mac, addr->sa_data); done: - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: @@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter) if (status) return status; - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(adapter->netdev, mac); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); /* Initial MAC for BE3 VFs is already programmed by PF */ @@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void) if (!be_err_recovery_workq) return; - flush_workqueue(be_err_recovery_workq); destroy_workqueue(be_err_recovery_workq); be_err_recovery_workq = NULL; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ed1ed48e748388..b1c8ffea6ad299 100644 
--- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev) else phy = phy_find_first(priv->mdio); - if (!phy) { - dev_err(&dev->dev, "no PHY found\n"); - return -ENXIO; - } + if (!phy) + return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n"); priv->old_duplex = -1; priv->old_link = -1; err = phy_connect_direct(dev, phy, ethoc_mdio_poll, PHY_INTERFACE_MODE_GMII); - if (err) { - dev_err(&dev->dev, "could not attach to PHY\n"); - return err; - } + if (err) + return dev_err_probe(&dev->dev, err, "could not attach to PHY\n"); phy_set_max_speed(phy, SPEED_100); @@ -806,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void ethoc_do_set_mac_address(struct net_device *dev) { + const unsigned char *mac = dev->dev_addr; struct ethoc *priv = netdev_priv(dev); - unsigned char *mac = dev->dev_addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); @@ -820,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); ethoc_do_set_mac_address(dev); return 0; } @@ -1148,18 +1144,22 @@ static int ethoc_probe(struct platform_device *pdev) /* Allow the platform setup code to pass in a MAC address. */ if (pdata) { - ether_addr_copy(netdev->dev_addr, pdata->hwaddr); + eth_hw_addr_set(netdev, pdata->hwaddr); priv->phy_id = pdata->phy_id; } else { - of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + of_get_ethdev_address(pdev->dev.of_node, netdev); priv->phy_id = -1; } /* Check that the given MAC address is valid. If it isn't, read the * current MAC from the controller. */ - if (!is_valid_ether_addr(netdev->dev_addr)) - ethoc_get_mac_address(netdev, netdev->dev_addr); + if (!is_valid_ether_addr(netdev->dev_addr)) { + u8 addr[ETH_ALEN]; + + ethoc_get_mac_address(netdev, addr); + eth_hw_addr_set(netdev, addr); + } /* Check the MAC again for validity, if it still isn't choose and * program a random one. diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 38aa824efb25de..9241b9b1c7a366 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on HAS_IOMEM help Simple LAN device for debug or management purposes. 
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f9a288a6ec8cca..323340826dabdc 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p) res = eth_mac_addr(ndev, p); if (!res) { - ether_addr_copy(ndev->dev_addr, addr->sa_data); + eth_hw_addr_set(ndev, addr->sa_data); nps_enet_set_hw_mac_address(ndev); } @@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base); /* set kernel MAC address to dev */ - err = of_get_mac_address(dev->of_node, ndev->dev_addr); + err = of_get_ethdev_address(dev->of_node, ndev); if (err) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ff76e401a014bb..97c5d70de76e07 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv) u8 mac[ETH_ALEN]; unsigned int m; unsigned int l; - void *addr; - addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); - if (addr) { - ether_addr_copy(priv->netdev->dev_addr, mac); + if (!device_get_ethdev_address(priv->dev, priv->netdev)) { dev_info(priv->dev, "Read MAC address %pM from device tree\n", - mac); + priv->netdev->dev_addr); return; } @@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv) mac[5] = l & 0xff; if (is_valid_ether_addr(mac)) { - ether_addr_copy(priv->netdev->dev_addr, mac); + eth_hw_addr_set(priv->netdev, mac); dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); } else { eth_hw_addr_random(priv->netdev); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 25c91b3c5fd30c..b3939a5f7b0332 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -482,6 +482,7 @@ static int fealnx_init_one(struct pci_dev *pdev, struct net_device *dev; void *ring_space; dma_addr_t ring_dma; + u8 addr[ETH_ALEN]; #ifdef USE_IO_OPS int bar = 0; #else @@ -525,7 +526,8 @@ static int fealnx_init_one(struct pci_dev *pdev, /* read ethernet id */ for (i = 0; i < 6; ++i) - dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i); + addr[i] = ioread8(ioaddr + PAR0 + i); + eth_hw_addr_set(dev, addr); /* Reset the chip to erase previous misconfiguration. 
*/ iowrite32(0x00000001, ioaddr + BCR); @@ -827,7 +829,7 @@ static int netdev_open(struct net_device *dev) return -EAGAIN; for (i = 0; i < 3; i++) - iowrite16(((unsigned short*)dev->dev_addr)[i], + iowrite16(((const unsigned short *)dev->dev_addr)[i], ioaddr + PAR0 + i*2); init_ring(dev); @@ -857,7 +859,7 @@ static int netdev_open(struct net_device *dev) np->bcrvalue |= 0x04; /* big-endian */ #endif -#if defined(__i386__) && !defined(MODULE) +#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML) if (boot_cpu_data.x86 <= 4) np->crvalue = 0xa00; else diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 685d2d8a3b3662..6b2927d863e2cc 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev, if (is_valid_ether_addr(mac_addr)) { memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else { eth_hw_addr_random(net_dev); err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac, - (enet_addr_t *)net_dev->dev_addr); + (const enet_addr_t *)net_dev->dev_addr); if (err) { dev_err(dev, "Failed to set random MAC address\n"); return -EINVAL; @@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) mac_dev = priv->mac_dev; err = mac_dev->change_addr(mac_dev->fman_mac, - (enet_addr_t *)net_dev->dev_addr); + (const enet_addr_t *)net_dev->dev_addr); if (err < 0) { netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", err); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 605a39f892b949..7fefe1574b6a02 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = { .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set, }; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; struct dpaa2_eth_devlink_priv *dl_priv; - int err; priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev); @@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) } dl_priv = devlink_priv(priv->devlink); dl_priv->dpaa2_priv = priv; - - err = devlink_register(priv->devlink); - if (err) { - dev_err(dev, "devlink_register() = %d\n", err); - goto devlink_free; - } - return 0; +} -devlink_free: +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv) +{ devlink_free(priv->devlink); +} - return err; + +void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +{ + devlink_register(priv->devlink); } void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv) { devlink_unregister(priv->devlink); - devlink_free(priv->devlink); } int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 7065c71ed7b86e..714e961e7a77a2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -533,6 +533,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, percpu_stats->rx_packets++; percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + 
ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); list_add_tail(&skb->list, ch->rx_list); @@ -641,6 +642,7 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, fq->stats.frames += cleaned; ch->stats.frames += cleaned; + ch->stats.frames_per_cdan += cleaned; /* A dequeue operation only pulls frames from a single queue * into the store. Return the frame queue as an out param. @@ -1264,7 +1266,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) /* Tx confirmation frame processing routine */ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch __always_unused, + struct dpaa2_eth_channel *ch, const struct dpaa2_fd *fd, struct dpaa2_eth_fq *fq) { @@ -1279,6 +1281,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, percpu_extras = this_cpu_ptr(priv->percpu_extras); percpu_extras->tx_conf_frames++; percpu_extras->tx_conf_bytes += fd_len; + ch->stats.bytes_per_cdan += fd_len; /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; @@ -1601,6 +1604,12 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) } } while (store_cleaned); + /* Update NET DIM with the values for this CDAN */ + dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan, + ch->stats.bytes_per_cdan); + ch->stats.frames_per_cdan = 0; + ch->stats.bytes_per_cdan = 0; + /* We didn't consume the entire budget, so finish napi and * re-enable data availability notifications */ @@ -4013,7 +4022,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) return err; } } - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else if (is_zero_ether_addr(dpni_mac_addr)) { /* No MAC address configured, fill in net_dev->dev_addr * with a random one @@ -4038,7 +4047,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) /* NET_ADDR_PERM is default, all we have to do is * fill in the device addr. 
*/ - memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, dpni_mac_addr); } return 0; @@ -4431,7 +4440,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_connect_mac; - err = dpaa2_eth_dl_register(priv); + err = dpaa2_eth_dl_alloc(priv); if (err) goto err_dl_register; @@ -4453,6 +4462,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_dbg_add(priv); #endif + dpaa2_eth_dl_register(priv); dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; @@ -4461,7 +4471,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) err_dl_port_add: dpaa2_eth_dl_traps_unregister(priv); err_dl_trap_register: - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); err_dl_register: dpaa2_eth_disconnect_mac(priv); err_connect_mac: @@ -4508,6 +4518,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); + dpaa2_eth_dl_unregister(priv); + #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif @@ -4519,7 +4531,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); if (priv->do_link_poll) kthread_stop(priv->poll_thread); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index cdb623d5f2c1a5..2085844227fe53 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -384,6 +384,8 @@ struct dpaa2_eth_ch_stats { __u64 xdp_redirect; /* Must be last, does not show up in ethtool stats */ __u64 frames; + __u64 frames_per_cdan; + __u64 bytes_per_cdan; }; /* Maximum number of queues associated with a DPNI */ @@ -725,7 +727,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv); +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv); + +void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv); int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 2da5f881f63023..adb8ce5306ee84 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -820,7 +820,63 @@ static int dpaa2_eth_set_tunable(struct net_device *net_dev, return err; } +static int dpaa2_eth_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_io *dpio = priv->channel[0]->dpio; + + dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs); + ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio); + + return 0; +} + +static int dpaa2_eth_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_io *dpio; + int prev_adaptive; + u32 prev_rx_usecs; + int i, j, err; + + /* Keep track of the previous value, just in case we fail */ + dpio = priv->channel[0]->dpio; + dpaa2_io_get_irq_coalescing(dpio, 
&prev_rx_usecs); + prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio); + + /* Setup new value for rx coalescing */ + for (i = 0; i < priv->num_channels; i++) { + dpio = priv->channel[i]->dpio; + + dpaa2_io_set_adaptive_coalescing(dpio, + ic->use_adaptive_rx_coalesce); + err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs); + if (err) + goto restore_rx_usecs; + } + + return 0; + +restore_rx_usecs: + for (j = 0; j < i; j++) { + dpio = priv->channel[j]->dpio; + + dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs); + dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive); + } + + return err; +} + const struct ethtool_ops dpaa2_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_drvinfo = dpaa2_eth_get_drvinfo, .nway_reset = dpaa2_eth_nway_reset, .get_link = ethtool_op_get_link, @@ -836,4 +892,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_ts_info = dpaa2_eth_get_ts_info, .get_tunable = dpaa2_eth_get_tunable, .set_tunable = dpaa2_eth_set_tunable, + .get_coalesce = dpaa2_eth_get_coalesce, + .set_coalesce = dpaa2_eth_set_coalesce, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index ae6d382d87352a..ef8f0a05502423 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -139,7 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_NA: case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_USXGMII: - phylink_set(mask, 10000baseT_Full); + phylink_set_10g_modes(mask); if (state->interface == PHY_INTERFACE_MODE_10GBASER) break; phylink_set(mask, 5000baseT_Full); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 175f15c46842e3..d039457928b0dd 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) /* First check if firmware has any address configured by bootloader */ if (!is_zero_ether_addr(mac_addr)) { - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else { /* No MAC address configured, fill in net_dev->dev_addr * with a random one diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 042327b9981fad..504e12554079e3 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -7,7 +7,9 @@ #include #include #include +#include #include +#include static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) { @@ -314,12 +316,261 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) return 0; } +static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) +{ + union enetc_tx_bd txbd_tmp; + u8 flags = 0, e_flags = 0; + dma_addr_t addr; + + enetc_clear_tx_bd(&txbd_tmp); + addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; + + if (skb_vlan_tag_present(skb)) + flags |= ENETC_TXBD_FLAGS_EX; + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(hdr_len); + + /* first BD needs frm_len and offload flags set */ + txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); + txbd_tmp.flags = flags; + + /* For 
the TSO header we do not set the dma address since we do not + * want it unmapped when we do cleanup. We still set len so that we + * count the bytes sent. + */ + tx_swbd->len = hdr_len; + tx_swbd->do_twostep_tstamp = false; + tx_swbd->check_wb = false; + + /* Actually write the header in the BD */ + *txbd = txbd_tmp; + + /* Add extension BD for VLAN */ + if (flags & ENETC_TXBD_FLAGS_EX) { + /* Get the next BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + /* Setup the VLAN fields */ + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + txbd_tmp.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + + /* Write the BD */ + txbd_tmp.ext.e_flags = e_flags; + *txbd = txbd_tmp; + } +} + +static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, char *data, + int size, bool last_bd) +{ + union enetc_tx_bd txbd_tmp; + dma_addr_t addr; + u8 flags = 0; + + enetc_clear_tx_bd(&txbd_tmp); + + addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -ENOMEM; + } + + if (last_bd) { + flags |= ENETC_TXBD_FLAGS_F; + tx_swbd->is_eof = 1; + } + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(size); + txbd_tmp.flags = flags; + + tx_swbd->dma = addr; + tx_swbd->len = size; + tx_swbd->dir = DMA_TO_DEVICE; + + *txbd = txbd_tmp; + + return 0; +} + +static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, + char *hdr, int hdr_len, int *l4_hdr_len) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + int mac_hdr_len = skb_network_offset(skb); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = 0; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = 0; + } + + /* Compute the IP checksum. This is necessary since tso_build_hdr() + * already incremented the IP ID field. + */ + if (!tso->ipv6) { + struct iphdr *iph = (void *)(hdr + mac_hdr_len); + + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + + /* Compute the checksum over the L4 header. */ + *l4_hdr_len = hdr_len - skb_transport_offset(skb); + return csum_partial(l4_hdr, *l4_hdr_len, 0); +} + +static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, + struct sk_buff *skb, char *hdr, int len, + __wsum sum) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + __sum16 csum_final; + + /* Complete the L4 checksum by appending the pseudo-header to the + * already computed checksum. 
+ */ + if (!tso->ipv6) + csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + len, ip_hdr(skb)->protocol, sum); + else + csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + len, ipv6_hdr(skb)->nexthdr, sum); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = csum_final; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = csum_final; + } +} + +static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + int hdr_len, total_len, data_len; + struct enetc_tx_swbd *tx_swbd; + union enetc_tx_bd *txbd; + struct tso_t tso; + __wsum csum, csum2; + int count = 0, pos; + int err, i, bd_data_num; + + /* Initialize the TSO handler, and prepare the first payload */ + hdr_len = tso_start(skb, &tso); + total_len = skb->len - hdr_len; + i = tx_ring->next_to_use; + + while (total_len > 0) { + char *hdr; + + /* Get the BD */ + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Determine the length of this packet */ + data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_len; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); + + /* compute the csum over the L4 header */ + csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); + enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + bd_data_num = 0; + count++; + + while (data_len > 0) { + int size; + + size = min_t(int, tso.size, data_len); + + /* Advance the index in the BDR */ + enetc_bdr_idx_inc(tx_ring, &i); + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Compute the checksum over this segment of data and + * add it to the csum already computed (over the L4 + * header and possible other data segments). 
+ */ + csum2 = csum_partial(tso.data, size, 0); + csum = csum_block_add(csum, csum2, pos); + pos += size; + + err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, + tso.data, size, + size == data_len); + if (err) + goto err_map_data; + + data_len -= size; + count++; + bd_data_num++; + tso_build_data(skb, &tso, size); + + if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) + goto err_chained_bd; + } + + enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); + + if (total_len == 0) + tx_swbd->skb = skb; + + /* Go to the next BD */ + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + enetc_update_tx_ring_tail(tx_ring); + + return count; + +err_map_data: + dev_err(tx_ring->dev, "DMA map error"); + +err_chained_bd: + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_bdr *tx_ring; - int count; + int count, err; /* Queue one-step Sync packet if already locked */ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { @@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, tx_ring = priv->tx_ring[skb->queue_mapping]; - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) - if (unlikely(skb_linearize(skb))) - goto drop_packet_err; + if (skb_is_gso(skb)) { + if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { - netif_stop_subqueue(ndev, tx_ring->index); - return NETDEV_TX_BUSY; - } + enetc_lock_mdio(); + count = enetc_map_tx_tso_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } else { + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - enetc_lock_mdio(); - count = enetc_map_tx_buffs(tx_ring, skb); - enetc_unlock_mdio(); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + err = skb_checksum_help(skb); + if (err) + goto drop_packet_err; + } + enetc_lock_mdio(); + count = enetc_map_tx_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } if (unlikely(!count)) goto drop_packet_err; @@ -546,10 +813,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) bool is_eof = tx_swbd->is_eof; if (unlikely(tx_swbd->check_wb)) { - struct enetc_ndev_priv *priv = netdev_priv(ndev); - union enetc_tx_bd *txbd; - - txbd = ENETC_TXBD(*tx_ring, i); + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); if (txbd->flags & ENETC_TXBD_FLAGS_W && tx_swbd->do_twostep_tstamp) { @@ -567,8 +831,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if (xdp_frame) { xdp_return_frame(xdp_frame); } else if (skb) { - if (unlikely(tx_swbd->skb->cb[0] & - ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { + if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { /* Start work to release lock for next one-step * timestamping packet. And send one skb in * tx_skbs queue if has. 
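The software TSO path above assembles each segment's L4 checksum incrementally: csum_partial() over the rewritten TCP/UDP header, csum_block_add() for every payload chunk mapped into the ring, and finally csum_tcpudp_magic() or csum_ipv6_magic() to fold in the pseudo-header. A minimal sketch of that arithmetic for the IPv4/TCP case follows; the helper name is hypothetical and not part of the patch, and it assumes the TCP check field was already zeroed, as enetc_tso_hdr_csum() does.

	/* Illustrative only: build one segment's TCP checksum in software. */
	static __sum16 example_tso_seg_csum(struct sk_buff *skb, char *l4_hdr,
					    int l4_len, char *data, int data_len)
	{
		/* checksum of the L4 header alone (check field must be zero) */
		__wsum csum = csum_partial(l4_hdr, l4_len, 0);

		/* add the payload checksum at its byte offset in the segment */
		csum = csum_block_add(csum, csum_partial(data, data_len, 0), l4_len);

		/* fold in the IPv4 pseudo-header and return the final value */
		return csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					 l4_len + data_len, IPPROTO_TCP, csum);
	}
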
@@ -1493,15 +1756,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr) return -ENOMEM; err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); - if (err) { - vfree(txr->tx_swbd); - return err; + if (err) + goto err_alloc_bdr; + + txr->tso_headers = dma_alloc_coherent(txr->dev, + txr->bd_count * TSO_HEADER_SIZE, + &txr->tso_headers_dma, + GFP_KERNEL); + if (!txr->tso_headers) { + err = -ENOMEM; + goto err_alloc_tso; } txr->next_to_clean = 0; txr->next_to_use = 0; return 0; + +err_alloc_tso: + dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd), + txr->bd_base, txr->bd_dma_base); + txr->bd_base = NULL; +err_alloc_bdr: + vfree(txr->tx_swbd); + txr->tx_swbd = NULL; + + return err; } static void enetc_free_txbdr(struct enetc_bdr *txr) @@ -1513,6 +1793,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr) size = txr->bd_count * sizeof(union enetc_tx_bd); + dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE, + txr->tso_headers, txr->tso_headers_dma); + txr->tso_headers = NULL; + dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); txr->bd_base = NULL; @@ -2607,10 +2891,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) pcie_flr(pdev); err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 08b283347d9ced..fb39e406b7fc00 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -112,6 +112,10 @@ struct enetc_bdr { dma_addr_t bd_dma_base; u8 tsd_enable; /* Time specific departure */ bool ext_en; /* enable h/w descriptor extensions */ + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; } ____cacheline_aligned_in_smp; static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 0f5f081a5bafe8..1514e6a4a3ff78 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -635,10 +635,14 @@ struct enetc_cmd_rfse { #define ENETC_RFSE_EN BIT(15) #define ENETC_RFSE_MODE_BD 2 -static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) +static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, + struct net_device *ndev) { + u8 addr[ETH_ALEN] __aligned(4); + *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); + eth_hw_addr_set(ndev, addr); } #define ENETC_SI_INT_IDX 0 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index d522bd5c90b498..0e87c7043b7726 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, saddr->sa_data); enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); return 0; @@ -762,10 +762,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct 
net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; @@ -782,7 +786,7 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, } /* pick up primary MAC address from SI */ - enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); + enetc_load_primary_mac_addr(&si->hw, ndev); } static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) @@ -806,10 +810,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); err = of_mdiobus_register(bus, np); - if (err) { - dev_err(dev, "cannot register MDIO bus\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "cannot register MDIO bus\n"); pf->mdio = bus; @@ -940,7 +942,7 @@ static void enetc_pl_mac_validate(struct phylink_config *config, state->interface != PHY_INTERFACE_MODE_2500BASEX && state->interface != PHY_INTERFACE_MODE_USXGMII && !phy_interface_mode_is_rgmii(state->interface)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -963,10 +965,8 @@ static void enetc_pl_mac_validate(struct phylink_config *config, phylink_set(mask, 2500baseX_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void enetc_pl_mac_config(struct phylink_config *config, @@ -1218,10 +1218,8 @@ static int enetc_pf_probe(struct pci_dev *pdev, ERR_PTR(err)); err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); if (!si->hw.port || !si->hw.global) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index bc594892507acb..36b4f51dd297c2 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, } err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 4577226d3c6adb..0536d2c76fbc40 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -486,14 +486,16 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, data_size = sizeof(struct streamid_data); si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); + if (!si_data) + return -ENOMEM; 
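Several hunks in this series also swap direct memcpy() writes into ndev->dev_addr for eth_hw_addr_set() and related helpers, which prepares for the address field becoming read-only to drivers. A minimal sketch of the resulting pattern, with a hypothetical function name and assuming <linux/etherdevice.h>:

	/* Hypothetical example, not part of the patch: assign a MAC address
	 * read from hardware, falling back to a random one if it is invalid.
	 */
	static void example_assign_mac(struct net_device *ndev, const u8 *mac)
	{
		if (is_valid_ether_addr(mac))
			eth_hw_addr_set(ndev, mac);	/* copies into the netdev */
		else
			eth_hw_addr_random(ndev);	/* random locally-admin MAC */
	}
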
cbd.length = cpu_to_le16(data_size); dma = dma_map_single(&priv->si->pdev->dev, si_data, data_size, DMA_FROM_DEVICE); if (dma_mapping_error(&priv->si->pdev->dev, dma)) { netdev_err(priv->si->ndev, "DMA mapping failed!\n"); - kfree(si_data); - return -ENOMEM; + err = -ENOMEM; + goto out; } cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); @@ -512,12 +514,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, err = enetc_send_cmd(priv->si, &cbd); if (err) - return -EINVAL; + goto out; - if (!enable) { - kfree(si_data); - return 0; - } + if (!enable) + goto out; /* Enable the entry overwrite again incase space flushed by hardware */ memset(&cbd, 0, sizeof(cbd)); @@ -560,6 +560,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, } err = enetc_send_cmd(priv->si, &cbd); +out: + if (!dma_mapping_error(&priv->si->pdev->dev, dma)) + dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE); + kfree(si_data); return err; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 1a9d1e8b772ce7..17924305afa2f1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -122,16 +122,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; /* pick up primary MAC address from SI */ - enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); + enetc_load_primary_mac_addr(&si->hw, ndev); } static int enetc_vf_probe(struct pci_dev *pdev, @@ -143,10 +147,8 @@ static int enetc_vf_probe(struct pci_dev *pdev, int err; err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ec87b370bba1fb..bc418b910999fc 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1768,11 +1768,8 @@ static int fec_get_mac(struct net_device *ndev) return 0; } - memcpy(ndev->dev_addr, iap, ETH_ALEN); - /* Adjust MAC if using macaddr */ - if (iap == macaddr) - ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; + eth_hw_addr_gen(ndev, iap, iap == macaddr ? 
fep->dev_id : 0); return 0; } @@ -3326,7 +3323,7 @@ fec_set_mac_address(struct net_device *ndev, void *p) if (addr) { if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); } /* Add netif status check here to avoid system hang in below case: diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 73ff359a15f1aa..bbbde9f701c27a 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sock = addr; - memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sock->sa_data); mpc52xx_fec_set_paddr(dev, sock->sa_data); return 0; @@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) * * First try to read MAC address from DT */ - rv = of_get_mac_address(np, ndev->dev_addr); + rv = of_get_ethdev_address(np, ndev); if (rv) { struct mpc52xx_fec __iomem *fec = priv->fec; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index bce3c9398887c8..1950a8936bc0cb 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg) cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME; } -static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr) +static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr) { u32 tmp; @@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, if (addr) { MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr); - set_mac_address(regs, (u8 *)eth_addr); + set_mac_address(regs, (const u8 *)eth_addr); } /* HASH */ @@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) return 0; } -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) +int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; enum comm_mode mode = COMM_MODE_NONE; @@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) * Station address have to be swapped (big endian to little endian */ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); - set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + set_mac_address(dtsec->regs, (const u8 *)(*enet_addr)); graceful_start(dtsec, mode); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 5149d96ec2c15e..68512c3bd6e528 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -37,7 +37,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params); int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val); -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr); +int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr); int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed); int dtsec_restart_autoneg(struct fman_mac *dtsec); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 62f42921933d6b..2216b7f51d26ef 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ 
b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -354,7 +354,7 @@ struct fman_mac { bool allmulti_enabled; }; -static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr, +static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr, u8 paddr_num) { u32 tmp0, tmp1; @@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr) +int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr) { if (!is_init_done(memac->memac_drv_param)) return -EINVAL; - add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0); + add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0); return 0; } @@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac) /* MAC Address */ if (memac->addr != 0) { MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr); - add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0); + add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0); } fixed_link = memac_drv_param->fixed_link; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b2c671ec0ce790..3820f7a229834d 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -40,7 +40,7 @@ struct fman_mac *memac_config(struct fman_mac_params *params); int memac_set_promiscuous(struct fman_mac *memac, bool new_val); -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr); +int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr); int memac_adjust_link(struct fman_mac *memac, u16 speed); int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val); int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable); diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 41946b16f6c724..311c1906e0446f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -221,7 +221,7 @@ struct fman_mac { bool allmulti_enabled; }; -static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr) +static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr) { u32 tmp0, tmp1; @@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) return 0; } -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr) +int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr) { if (!is_init_done(tgec->cfg)) return -EINVAL; tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr); - set_mac_address(tgec->regs, (u8 *)(*p_enet_addr)); + set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr)); return 0; } @@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec) if (tgec->addr) { MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr); - set_mac_address(tgec->regs, (u8 *)eth_addr); + set_mac_address(tgec->regs, (const u8 *)eth_addr); } /* interrupts */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index 3bfd1062b386de..b28b20b2614829 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -37,7 +37,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params); int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val); -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr); +int 
tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr); int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val); int tgec_enable(struct fman_mac *tgec, enum comm_mode mode); int tgec_disable(struct fman_mac *tgec, enum comm_mode mode); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 824a81a9f35072..daa285a9b8b25b 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -66,7 +66,7 @@ struct mac_device { int (*stop)(struct mac_device *mac_dev); void (*adjust_link)(struct mac_device *mac_dev); int (*set_promisc)(struct fman_mac *mac_dev, bool enable); - int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); + int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 2db6e38a772e7d..bacf25318f87ae 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev) spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); - of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + of_get_ethdev_address(ofdev->dev.of_node, ndev); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index af6ad94bf24a47..acab58fd3db38c 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; - err = of_get_mac_address(np, dev->dev_addr); + err = of_get_ethdev_address(np, dev); if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 3eb288d10b0c32..823221c912ab13 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* * If device is not running, we will set mac addr register @@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) goto err_free_netdev; } - of_get_mac_address(np, dev->dev_addr); + of_get_ethdev_address(np, dev); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 62c0bed82cedbc..b0d733e9a7c66a 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -334,6 +334,7 @@ static int fmvj18x_config(struct pcmcia_device *link) u8 *buf; size_t len; u_char buggybuf[32]; + u8 addr[ETH_ALEN]; dev_dbg(&link->dev, "fmvj18x_config\n"); @@ -468,8 +469,7 @@ static int fmvj18x_config(struct pcmcia_device *link) goto failed; } /* Read MACID from CIS */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = buf[i + 
5]; + eth_hw_addr_set(dev, &buf[5]); kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) @@ -490,7 +490,8 @@ static int fmvj18x_config(struct pcmcia_device *link) case UNGERMANN: /* Read MACID from register */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); + addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); + eth_hw_addr_set(dev, addr); card_name = "Access/CARD"; break; case XXX10304: @@ -499,16 +500,15 @@ static int fmvj18x_config(struct pcmcia_device *link) pr_notice("unable to read hardware net address\n"); goto failed; } - for (i = 0 ; i < 6; i++) { - dev->dev_addr[i] = buggybuf[i]; - } + eth_hw_addr_set(dev, buggybuf); card_name = "FMV-J182"; break; case MBH10302: default: /* Read MACID from register */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + MAC_ID + i); + addr[i] = inb(ioaddr + MAC_ID + i); + eth_hw_addr_set(dev, addr); card_name = "FMV-J181"; break; } diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 92dc18a4bcc41c..b719f72281c44b 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -30,7 +30,7 @@ #define GVE_MIN_MSIX 3 /* Numbers of gve tx/rx stats in stats report. */ -#define GVE_TX_STATS_REPORT_NUM 5 +#define GVE_TX_STATS_REPORT_NUM 6 #define GVE_RX_STATS_REPORT_NUM 2 /* Interval to schedule a stats report update, 20000ms. */ @@ -142,6 +142,19 @@ struct gve_index_list { s16 tail; }; +/* A single received packet split across multiple buffers may be + * reconstructed using the information in this structure. + */ +struct gve_rx_ctx { + /* head and tail of skb chain for the current packet or NULL if none */ + struct sk_buff *skb_head; + struct sk_buff *skb_tail; + u16 total_expected_size; + u8 expected_frag_cnt; + u8 curr_frag_cnt; + u8 reuse_frags; +}; + /* Contains datapath state used to represent an RX queue. */ struct gve_rx_ring { struct gve_priv *gve; @@ -153,6 +166,7 @@ struct gve_rx_ring { /* threshold for posting new buffs and descs */ u32 db_threshold; + u16 packet_buffer_size; }; /* DQO fields. */ @@ -200,15 +214,16 @@ struct gve_rx_ring { u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ + u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ + u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ + u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */ u32 q_num; /* queue index */ u32 ntfy_id; /* notification block index */ struct gve_queue_resources *q_resources; /* head and tail pointer idx */ dma_addr_t q_resources_bus; /* dma address for the queue resources */ struct u64_stats_sync statss; /* sync stats for 32bit archs */ - /* head and tail of skb chain for the current packet or NULL if none */ - struct sk_buff *skb_head; - struct sk_buff *skb_tail; + struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */ }; /* A TX desc ring entry */ @@ -224,11 +239,6 @@ struct gve_tx_iovec { u32 iov_padding; /* padding associated with this segment */ }; -struct gve_tx_dma_buf { - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); -}; - /* Tracks the memory in the fifo occupied by the skb. 
Mapped 1:1 to a desc * ring entry but only used for a pkt_desc not a seg_desc */ @@ -236,7 +246,10 @@ struct gve_tx_buffer_state { struct sk_buff *skb; /* skb for this pkt */ union { struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ - struct gve_tx_dma_buf buf; + struct { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + }; }; }; @@ -280,7 +293,8 @@ struct gve_tx_pending_packet_dqo { * All others correspond to `skb`'s frags and should be unmapped with * `dma_unmap_page`. */ - struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1]; + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); u16 num_bufs; /* Linked list index to next element in the list, or -1 if none */ @@ -342,8 +356,8 @@ struct gve_tx_ring { union { /* GQI fields */ struct { - /* NIC tail pointer */ - __be32 last_nic_done; + /* Spinlock for when cleanup in progress */ + spinlock_t clean_lock; }; /* DQO fields. */ @@ -414,7 +428,9 @@ struct gve_tx_ring { u32 q_num ____cacheline_aligned; /* queue idx */ u32 stop_queue; /* count of queue stops */ u32 wake_queue; /* count of queue wakes */ + u32 queue_timeout; /* count of queue timeouts */ u32 ntfy_id; /* notification block index */ + u32 last_kick_msec; /* Last time the queue was kicked */ dma_addr_t bus; /* dma address of the descr ring */ dma_addr_t q_resources_bus; /* dma address of the queue resources */ dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ @@ -822,15 +838,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); bool gve_tx_poll(struct gve_notify_block *block, int budget); int gve_tx_alloc_rings(struct gve_priv *priv); void gve_tx_free_rings_gqi(struct gve_priv *priv); -__be32 gve_tx_load_event_counter(struct gve_priv *priv, - struct gve_tx_ring *tx); +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx); +bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); /* rx handling */ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); -bool gve_rx_poll(struct gve_notify_block *block, int budget); +int gve_rx_poll(struct gve_notify_block *block, int budget); +bool gve_rx_work_pending(struct gve_rx_ring *rx); int gve_rx_alloc_rings(struct gve_priv *priv); void gve_rx_free_rings_gqi(struct gve_priv *priv); -bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, - netdev_features_t feat); /* Reset */ void gve_schedule_reset(struct gve_priv *priv); int gve_reset(struct gve_priv *priv, bool attempt_teardown); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index f089d33dd48e06..83ae56c310d3b9 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -38,7 +38,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option *option, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, - struct gve_device_option_dqo_rda **dev_op_dqo_rda) + struct gve_device_option_dqo_rda **dev_op_dqo_rda, + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -111,6 +112,24 @@ void gve_parse_device_option(struct gve_priv *priv, } *dev_op_dqo_rda = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_JUMBO_FRAMES: + if (option_length < sizeof(**dev_op_jumbo_frames) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) 
{ + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Jumbo Frames", + (int)sizeof(**dev_op_jumbo_frames), + GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_jumbo_frames)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Jumbo Frames"); + } + *dev_op_jumbo_frames = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * without doing anything. @@ -126,7 +145,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_descriptor *descriptor, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, - struct gve_device_option_dqo_rda **dev_op_dqo_rda) + struct gve_device_option_dqo_rda **dev_op_dqo_rda, + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -146,7 +166,7 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, - dev_op_dqo_rda); + dev_op_dqo_rda, dev_op_jumbo_frames); dev_opt = next_opt; } @@ -530,6 +550,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cpu_to_be64(rx->data.data_bus), cmd.create_rx_queue.index = cpu_to_be32(queue_index); cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { cmd.create_rx_queue.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt); @@ -660,12 +681,31 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv, return 0; } +static void gve_enable_supported_features(struct gve_priv *priv, + u32 supported_features_mask, + const struct gve_device_option_jumbo_frames + *dev_op_jumbo_frames) +{ + /* Before control reaches this point, the page-size-capped max MTU from + * the gve_device_descriptor field has already been stored in + * priv->dev->max_mtu. We overwrite it with the true max MTU below. 
+ */ + if (dev_op_jumbo_frames && + (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) { + dev_info(&priv->pdev->dev, + "JUMBO FRAMES device option enabled.\n"); + priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu); + } +} + int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; struct gve_device_descriptor *descriptor; + u32 supported_features_mask = 0; union gve_adminq_command cmd; dma_addr_t descriptor_bus; int err = 0; @@ -689,7 +729,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) goto free_device_descriptor; err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, - &dev_op_gqi_qpl, &dev_op_dqo_rda); + &dev_op_gqi_qpl, &dev_op_dqo_rda, + &dev_op_jumbo_frames); if (err) goto free_device_descriptor; @@ -704,12 +745,19 @@ int gve_adminq_describe_device(struct gve_priv *priv) priv->queue_format = GVE_DQO_RDA_FORMAT; dev_info(&priv->pdev->dev, "Driver is running with DQO RDA queue format.\n"); + supported_features_mask = + be32_to_cpu(dev_op_dqo_rda->supported_features_mask); } else if (dev_op_gqi_rda) { priv->queue_format = GVE_GQI_RDA_FORMAT; dev_info(&priv->pdev->dev, "Driver is running with GQI RDA queue format.\n"); + supported_features_mask = + be32_to_cpu(dev_op_gqi_rda->supported_features_mask); } else { priv->queue_format = GVE_GQI_QPL_FORMAT; + if (dev_op_gqi_qpl) + supported_features_mask = + be32_to_cpu(dev_op_gqi_qpl->supported_features_mask); dev_info(&priv->pdev->dev, "Driver is running with GQI QPL queue format.\n"); } @@ -733,7 +781,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) } priv->dev->max_mtu = mtu; priv->num_event_counters = be16_to_cpu(descriptor->counters); - ether_addr_copy(priv->dev->dev_addr, descriptor->mac); + eth_hw_addr_set(priv->dev, descriptor->mac); mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); @@ -746,6 +794,9 @@ int gve_adminq_describe_device(struct gve_priv *priv) } priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); + gve_enable_supported_features(priv, supported_features_mask, + dev_op_jumbo_frames); + free_device_descriptor: dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, descriptor_bus); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 47c3d8f313fcf6..83c0b40cd2d95e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -108,6 +108,14 @@ struct gve_device_option_dqo_rda { static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); +struct gve_device_option_jumbo_frames { + __be32 supported_features_mask; + __be16 max_mtu; + u8 padding[2]; +}; + +static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -121,6 +129,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_GQI_RDA = 0x2, GVE_DEV_OPT_ID_GQI_QPL = 0x3, GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, }; enum gve_dev_opt_req_feat_mask { @@ -128,6 +137,11 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, + 
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, +}; + +enum gve_sup_feature_mask { + GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -270,6 +284,7 @@ enum gve_stat_names { TX_LAST_COMPLETION_PROCESSED = 5, RX_NEXT_EXPECTED_SEQUENCE = 6, RX_BUFFERS_POSTED = 7, + TX_TIMEOUT_CNT = 8, // stats from NIC RX_QUEUE_DROP_CNT = 65, RX_NO_BUFFERS_POSTED = 66, diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h index 05ae6300e98465..4d225a18d8ce97 100644 --- a/drivers/net/ethernet/google/gve/gve_desc.h +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -90,12 +90,13 @@ union gve_rx_data_slot { /* GVE Recive Packet Descriptor Flags */ #define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x))) -#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */ -#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */ -#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */ -#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */ -#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */ -#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */ +#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */ +#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */ +#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */ +#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */ +#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */ +#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */ +#define GVE_RXF_PKT_CONT GVE_RXFLG(10) /* Multi Fragment RX packet */ /* GVE IRQ */ #define GVE_IRQ_ACK BIT(31) diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 716e6240305d93..c8df47a97fa4f9 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]", + "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", @@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start)); data[i++] = tmp_rx_bytes; + data[i++] = rx->rx_cont_packet_cnt; + data[i++] = rx->rx_frag_flip_cnt; + data[i++] = rx->rx_frag_copy_cnt; /* rx dropped packets */ data[i++] = tmp_rx_skb_alloc_fail + tmp_rx_buf_alloc_fail + @@ -330,8 +334,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = tmp_tx_bytes; data[i++] = tx->wake_queue; data[i++] = tx->stop_queue; - data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv, - tx)); + data[i++] = gve_tx_load_event_counter(priv, tx); data[i++] = tx->dma_mapping_error; /* stats from NIC */ if (skip_nic_stats) { diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index bf8a4a7c43f780..6b02ef432eda6b 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -24,6 +24,9 @@ #define GVE_VERSION "1.0.0" #define GVE_VERSION_PREFIX "GVE-" +// Minimum amount of time between queue kicks in msec (10 seconds) +#define MIN_TX_TIMEOUT_GAP (1000 * 10) + const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) __be32 
__iomem *irq_doorbell; bool reschedule = false; struct gve_priv *priv; + int work_done = 0; block = container_of(napi, struct gve_notify_block, napi); priv = block->priv; if (block->tx) reschedule |= gve_tx_poll(block, budget); - if (block->rx) - reschedule |= gve_rx_poll(block, budget); + if (block->rx) { + work_done = gve_rx_poll(block, budget); + reschedule |= work_done == budget; + } if (reschedule) return budget; - napi_complete(napi); - irq_doorbell = gve_irq_doorbell(priv, block); - iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); + /* Complete processing - don't unmask irq if busy polling is enabled */ + if (likely(napi_complete_done(napi, work_done))) { + irq_doorbell = gve_irq_doorbell(priv, block); + iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); - /* Double check we have no extra work. - * Ensure unmask synchronizes with checking for work. - */ - mb(); - if (block->tx) - reschedule |= gve_tx_poll(block, -1); - if (block->rx) - reschedule |= gve_rx_poll(block, -1); - if (reschedule && napi_reschedule(napi)) - iowrite32be(GVE_IRQ_MASK, irq_doorbell); + /* Ensure IRQ ACK is visible before we check pending work. + * If queue had issued updates, it would be truly visible. + */ + mb(); - return 0; + if (block->tx) + reschedule |= gve_tx_clean_pending(priv, block->tx); + if (block->rx) + reschedule |= gve_rx_work_pending(block->rx); + + if (reschedule && napi_reschedule(napi)) + iowrite32be(GVE_IRQ_MASK, irq_doorbell); + } + return work_done; } static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) @@ -279,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) int i, j; int err; - priv->msix_vectors = kvzalloc(num_vecs_requested * + priv->msix_vectors = kvcalloc(num_vecs_requested, sizeof(*priv->msix_vectors), GFP_KERNEL); if (!priv->msix_vectors) return -ENOMEM; @@ -640,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv) int err; /* Setup tx rings */ - priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx), + priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), GFP_KERNEL); if (!priv->tx) return -ENOMEM; @@ -653,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv) goto free_tx; /* Setup rx rings */ - priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx), + priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), GFP_KERNEL); if (!priv->rx) { err = -ENOMEM; @@ -776,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, qpl->id = id; qpl->num_entries = 0; - qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); + qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); /* caller handles clean up */ if (!qpl->pages) return -ENOMEM; - qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses), - GFP_KERNEL); + qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); /* caller handles clean up */ if (!qpl->page_buses) return -ENOMEM; @@ -840,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) if (priv->queue_format == GVE_GQI_RDA_FORMAT) return 0; - priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); + priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; @@ -859,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * sizeof(unsigned long) * BITS_PER_BYTE; - priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) * + priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), sizeof(unsigned long), 
GFP_KERNEL); if (!priv->qpl_cfg.qpl_id_map) { err = -ENOMEM; @@ -1116,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv) static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) { - struct gve_priv *priv = netdev_priv(dev); + struct gve_notify_block *block; + struct gve_tx_ring *tx = NULL; + struct gve_priv *priv; + u32 last_nic_done; + u32 current_time; + u32 ntfy_idx; + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + if (txqueue > priv->tx_cfg.num_queues) + goto reset; + + ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); + if (ntfy_idx > priv->num_ntfy_blks) + goto reset; + + block = &priv->ntfy_blocks[ntfy_idx]; + tx = block->tx; + + current_time = jiffies_to_msecs(jiffies); + if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + goto reset; + + /* Check to see if there are missed completions, which will allow us to + * kick the queue. + */ + last_nic_done = gve_tx_load_event_counter(priv, tx); + if (last_nic_done - tx->done) { + netdev_info(dev, "Kicking queue %d", txqueue); + iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); + napi_schedule(&block->napi); + tx->last_kick_msec = current_time; + goto out; + } // Else reset. + +reset: gve_schedule_reset(priv); + +out: + if (tx) + tx->queue_timeout++; priv->tx_timeo_cnt++; } @@ -1247,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv) .value = cpu_to_be64(last_completion), .queue_id = cpu_to_be32(idx), }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_TIMEOUT_CNT), + .value = cpu_to_be64(priv->tx[idx].queue_timeout), + .queue_id = cpu_to_be32(idx), + }; } } /* rx stats */ @@ -1320,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) "Could not get device information: err=%d\n", err); goto err; } - if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) { - priv->dev->max_mtu = PAGE_SIZE; - err = gve_adminq_set_mtu(priv, priv->dev->mtu); - if (err) { - dev_err(&priv->pdev->dev, "Could not set mtu"); - goto err; - } - } priv->dev->mtu = priv->dev->max_mtu; num_ntfy = pci_msix_vec_count(priv->pdev); if (num_ntfy <= 0) { diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 94941d4e474492..c8500babbd1db9 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev, dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK); + page_ref_sub(page_info->page, page_info->pagecnt_bias - 1); gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); } static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) { - if (rx->data.raw_addressing) { - u32 slots = rx->mask + 1; - int i; + u32 slots = rx->mask + 1; + int i; + if (rx->data.raw_addressing) { for (i = 0; i < slots; i++) gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], &rx->data.data_ring[i]); } else { + for (i = 0; i < slots; i++) + page_ref_sub(rx->data.page_info[i].page, + rx->data.page_info[i].pagecnt_bias - 1); gve_unassign_qpl(priv, rx->data.qpl->id); rx->data.qpl = NULL; } @@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, page_info->page_offset = 0; page_info->page_address = page_address(page); *slot_addr = cpu_to_be64(addr); + /* The page already has 1 ref */ + page_ref_add(page, INT_MAX - 1); + page_info->pagecnt_bias = INT_MAX; } static int gve_rx_alloc_buffer(struct gve_priv *priv, 
struct device *dev, @@ -136,6 +143,16 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) return err; } +static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx) +{ + ctx->curr_frag_cnt = 0; + ctx->total_expected_size = 0; + ctx->expected_frag_cnt = 0; + ctx->skb_head = NULL; + ctx->skb_tail = NULL; + ctx->reuse_frags = false; +} + static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; @@ -202,6 +219,12 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) rx->cnt = 0; rx->db_threshold = priv->rx_desc_cnt / 2; rx->desc.seqno = 1; + + /* Allocating half-page buffers allows page-flipping which is faster + * than copying or allocating new pages. + */ + rx->packet_buffer_size = PAGE_SIZE / 2; + gve_rx_ctx_clear(&rx->ctx); gve_rx_add_to_block(priv, idx); return 0; @@ -268,18 +291,28 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) return PKT_HASH_TYPE_L2; } +static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx) +{ + return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0; +} + static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, - u16 len) + u16 packet_buffer_size, u16 len, + struct gve_rx_ctx *ctx) { - struct sk_buff *skb = napi_get_frags(napi); + u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx); + struct sk_buff *skb; - if (unlikely(!skb)) + if (!ctx->skb_head) + ctx->skb_head = napi_get_frags(napi); + + if (unlikely(!ctx->skb_head)) return NULL; - skb_add_rx_frag(skb, 0, page_info->page, - page_info->page_offset + - GVE_RX_PAD, len, PAGE_SIZE / 2); + skb = ctx->skb_head; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, + offset, len, packet_buffer_size); return skb; } @@ -293,23 +326,18 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl *(slot_addr) ^= offset; } -static bool gve_rx_can_flip_buffers(struct net_device *netdev) -{ - return PAGE_SIZE == 4096 - ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false; -} - -static int gve_rx_can_recycle_buffer(struct page *page) +static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info) { - int pagecount = page_count(page); + int pagecount = page_count(page_info->page); /* This page is not being used by any SKBs - reuse */ - if (pagecount == 1) + if (pagecount == page_info->pagecnt_bias) return 1; /* This page is still being used by an SKB - we can't reuse */ - else if (pagecount >= 2) + else if (pagecount > page_info->pagecnt_bias) return 0; - WARN(pagecount < 1, "Pagecount should never be < 1"); + WARN(pagecount < page_info->pagecnt_bias, + "Pagecount should never be less than the bias."); return -1; } @@ -317,19 +345,19 @@ static struct sk_buff * gve_rx_raw_addressing(struct device *dev, struct net_device *netdev, struct gve_rx_slot_page_info *page_info, u16 len, struct napi_struct *napi, - union gve_rx_data_slot *data_slot) + union gve_rx_data_slot *data_slot, + u16 packet_buffer_size, struct gve_rx_ctx *ctx) { - struct sk_buff *skb; + struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx); - skb = gve_rx_add_frags(napi, page_info, len); if (!skb) return NULL; - /* Optimistically stop the kernel from freeing the page by increasing - * the page bias. We will check the refcount in refill to determine if - * we need to alloc a new page. + /* Optimistically stop the kernel from freeing the page. + * We will check again in refill to determine if we need to alloc a + * new page. 
*/ - get_page(page_info->page); + gve_dec_pagecnt_bias(page_info); return skb; } @@ -340,6 +368,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev, u16 len, struct napi_struct *napi, union gve_rx_data_slot *data_slot) { + struct gve_rx_ctx *ctx = &rx->ctx; struct sk_buff *skb; /* if raw_addressing mode is not enabled gvnic can only receive into @@ -347,116 +376,259 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev, * choice is to copy the data out of it so that we can return it to the * device. */ - if (page_info->can_flip) { - skb = gve_rx_add_frags(napi, page_info, len); + if (ctx->reuse_frags) { + skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); /* No point in recycling if we didn't get the skb */ if (skb) { /* Make sure that the page isn't freed. */ - get_page(page_info->page); + gve_dec_pagecnt_bias(page_info); gve_rx_flip_buff(page_info, &data_slot->qpl_offset); } } else { - skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD); + const u16 padding = gve_rx_ctx_padding(ctx); + + skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx); if (skb) { u64_stats_update_begin(&rx->statss); - rx->rx_copied_pkt++; + rx->rx_frag_copy_cnt++; u64_stats_update_end(&rx->statss); } } return skb; } -static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, - netdev_features_t feat, u32 idx) +#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x)) +static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc) { + return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx); +} + +static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx) +{ + bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false; + bool buffer_error = false, desc_error = false, seqno_error = false; struct gve_rx_slot_page_info *page_info; struct gve_priv *priv = rx->gve; - struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; - struct net_device *dev = priv->dev; - union gve_rx_data_slot *data_slot; - struct sk_buff *skb = NULL; - dma_addr_t page_bus; - u16 len; + u32 idx = rx->cnt & rx->mask; + bool reuse_frags, can_flip; + struct gve_rx_desc *desc; + u16 packet_size = 0; + u16 n_frags = 0; + int recycle; + + /** In QPL mode, we only flip buffers when all buffers containing the packet + * can be flipped. RDA can_flip decisions will be made later, per frag. 
+ */ + can_flip = qpl_mode; + reuse_frags = can_flip; + do { + u16 frag_size; + + n_frags++; + desc = &rx->desc.desc_ring[idx]; + desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error; + if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) { + seqno_error = true; + netdev_warn(priv->dev, + "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.", + rx->desc.seqno, GVE_SEQNO(desc->flags_seq)); + } + frag_size = be16_to_cpu(desc->len); + packet_size += frag_size; + if (frag_size > rx->packet_buffer_size) { + packet_size_error = true; + netdev_warn(priv->dev, + "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.", + rx->packet_buffer_size, be16_to_cpu(desc->len)); + } + page_info = &rx->data.page_info[idx]; + if (can_flip) { + recycle = gve_rx_can_recycle_buffer(page_info); + reuse_frags = reuse_frags && recycle > 0; + buffer_error = buffer_error || unlikely(recycle < 0); + } + idx = (idx + 1) & rx->mask; + rx->desc.seqno = gve_next_seqno(rx->desc.seqno); + } while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq)); + + prefetch(rx->desc.desc_ring + idx); + + ctx->curr_frag_cnt = 0; + ctx->total_expected_size = packet_size - GVE_RX_PAD; + ctx->expected_frag_cnt = n_frags; + ctx->skb_head = NULL; + ctx->reuse_frags = reuse_frags; - /* drop this packet */ - if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) { + if (ctx->expected_frag_cnt > 1) { u64_stats_update_begin(&rx->statss); - rx->rx_desc_err_dropped_pkt++; + rx->rx_cont_packet_cnt++; u64_stats_update_end(&rx->statss); + } + if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) { + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + u64_stats_update_end(&rx->statss); + } + + if (unlikely(buffer_error || seqno_error || packet_size_error)) { + gve_schedule_reset(priv); return false; } - len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; - page_info = &rx->data.page_info[idx]; + if (unlikely(desc_error)) { + u64_stats_update_begin(&rx->statss); + rx->rx_desc_err_dropped_pkt++; + u64_stats_update_end(&rx->statss); + return false; + } + return true; +} - data_slot = &rx->data.data_ring[idx]; - page_bus = (rx->data.raw_addressing) ? 
- be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK : - rx->data.qpl->page_buses[idx]; - dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, - PAGE_SIZE, DMA_FROM_DEVICE); +static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_slot_page_info *page_info, struct napi_struct *napi, + u16 len, union gve_rx_data_slot *data_slot) +{ + struct net_device *netdev = priv->dev; + struct gve_rx_ctx *ctx = &rx->ctx; + struct sk_buff *skb = NULL; - if (len <= priv->rx_copybreak) { + if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) { /* Just copy small packets */ - skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD); - u64_stats_update_begin(&rx->statss); - rx->rx_copied_pkt++; - rx->rx_copybreak_pkt++; - u64_stats_update_end(&rx->statss); + skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx); + if (skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + rx->rx_frag_copy_cnt++; + rx->rx_copybreak_pkt++; + } u64_stats_update_end(&rx->statss); } else { - u8 can_flip = gve_rx_can_flip_buffers(dev); - int recycle = 0; + if (rx->data.raw_addressing) { + int recycle = gve_rx_can_recycle_buffer(page_info); - if (can_flip) { - recycle = gve_rx_can_recycle_buffer(page_info->page); - if (recycle < 0) { - if (!rx->data.raw_addressing) - gve_schedule_reset(priv); - return false; + if (unlikely(recycle < 0)) { + gve_schedule_reset(priv); + return NULL; } - } - - page_info->can_flip = can_flip && recycle; - if (rx->data.raw_addressing) { - skb = gve_rx_raw_addressing(&priv->pdev->dev, dev, + page_info->can_flip = recycle; + if (page_info->can_flip) { + u64_stats_update_begin(&rx->statss); + rx->rx_frag_flip_cnt++; + u64_stats_update_end(&rx->statss); + } + skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev, page_info, len, napi, - data_slot); + data_slot, + rx->packet_buffer_size, ctx); } else { - skb = gve_rx_qpl(&priv->pdev->dev, dev, rx, + if (ctx->reuse_frags) { + u64_stats_update_begin(&rx->statss); + rx->rx_frag_flip_cnt++; + u64_stats_update_end(&rx->statss); + } + skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, page_info, len, napi, data_slot); } } + return skb; +} - if (!skb) { - u64_stats_update_begin(&rx->statss); - rx->rx_skb_alloc_fail++; - u64_stats_update_end(&rx->statss); - return false; +static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, + u64 *packet_size_bytes, u32 *work_done) +{ + struct gve_rx_slot_page_info *page_info; + struct gve_rx_ctx *ctx = &rx->ctx; + union gve_rx_data_slot *data_slot; + struct gve_priv *priv = rx->gve; + struct gve_rx_desc *first_desc; + struct sk_buff *skb = NULL; + struct gve_rx_desc *desc; + struct napi_struct *napi; + dma_addr_t page_bus; + u32 work_cnt = 0; + void *va; + u32 idx; + u16 len; + + idx = rx->cnt & rx->mask; + first_desc = &rx->desc.desc_ring[idx]; + desc = first_desc; + napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + + if (unlikely(!gve_rx_ctx_init(ctx, rx))) + goto skb_alloc_fail; + + while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) { + /* Prefetch two packet buffers ahead, we will need it soon. */ + page_info = &rx->data.page_info[(idx + 2) & rx->mask]; + va = page_info->page_address + page_info->page_offset; + + prefetch(page_info->page); /* Kernel page struct. */ + prefetch(va); /* Packet header. */ + prefetch(va + 64); /* Next cacheline too. */ + + len = gve_rx_get_fragment_size(ctx, desc); + + page_info = &rx->data.page_info[idx]; + data_slot = &rx->data.data_ring[idx]; + page_bus = rx->data.raw_addressing ? 
+ be64_to_cpu(data_slot->addr) - page_info->page_offset : + rx->data.qpl->page_buses[idx]; + dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE); + + skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot); + if (!skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_skb_alloc_fail++; + u64_stats_update_end(&rx->statss); + goto skb_alloc_fail; + } + + ctx->curr_frag_cnt++; + rx->cnt++; + idx = rx->cnt & rx->mask; + work_cnt++; + desc = &rx->desc.desc_ring[idx]; } if (likely(feat & NETIF_F_RXCSUM)) { /* NIC passes up the partial sum */ - if (rx_desc->csum) + if (first_desc->csum) skb->ip_summed = CHECKSUM_COMPLETE; else skb->ip_summed = CHECKSUM_NONE; - skb->csum = csum_unfold(rx_desc->csum); + skb->csum = csum_unfold(first_desc->csum); } /* parse flags & pass relevant info up */ if (likely(feat & NETIF_F_RXHASH) && - gve_needs_rss(rx_desc->flags_seq)) - skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash), - gve_rss_type(rx_desc->flags_seq)); + gve_needs_rss(first_desc->flags_seq)) + skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash), + gve_rss_type(first_desc->flags_seq)); + *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0); + *work_done = work_cnt; if (skb_is_nonlinear(skb)) napi_gro_frags(napi); else napi_gro_receive(napi, skb); + + gve_rx_ctx_clear(ctx); return true; + +skb_alloc_fail: + if (napi->skb) + napi_free_frags(napi); + *packet_size_bytes = 0; + *work_done = ctx->expected_frag_cnt; + while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) { + rx->cnt++; + ctx->curr_frag_cnt++; + } + gve_rx_ctx_clear(ctx); + return false; } -static bool gve_rx_work_pending(struct gve_rx_ring *rx) +bool gve_rx_work_pending(struct gve_rx_ring *rx) { struct gve_rx_desc *desc; __be16 flags_seq; @@ -499,7 +671,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) * owns half the page it is impossible to tell which half. Either * the whole page is free or it needs to be replaced. 
*/ - int recycle = gve_rx_can_recycle_buffer(page_info->page); + int recycle = gve_rx_can_recycle_buffer(page_info); if (recycle < 0) { if (!rx->data.raw_addressing) @@ -511,11 +683,15 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) union gve_rx_data_slot *data_slot = &rx->data.data_ring[idx]; struct device *dev = &priv->pdev->dev; - gve_rx_free_buffer(dev, page_info, data_slot); page_info->page = NULL; - if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot)) + if (gve_rx_alloc_buffer(priv, dev, page_info, + data_slot)) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); break; + } } } fill_cnt++; @@ -524,19 +700,20 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) return true; } -bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, - netdev_features_t feat) +static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, + netdev_features_t feat) { + u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0; struct gve_priv *priv = rx->gve; - u32 work_done = 0, packets = 0; + u32 idx = rx->cnt & rx->mask; struct gve_rx_desc *desc; - u32 cnt = rx->cnt; - u32 idx = cnt & rx->mask; u64 bytes = 0; - desc = rx->desc.desc_ring + idx; + desc = &rx->desc.desc_ring[idx]; while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && work_done < budget) { + u64 packet_size_bytes = 0; + u32 work_cnt = 0; bool dropped; netif_info(priv, rx_status, priv->dev, @@ -546,56 +723,57 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, "[%d] seqno=%d rx->desc.seqno=%d\n", rx->q_num, GVE_SEQNO(desc->flags_seq), rx->desc.seqno); - dropped = !gve_rx(rx, desc, feat, idx); + + dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt); if (!dropped) { - bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; - packets++; + bytes += packet_size_bytes; + ok_packet_cnt++; } - cnt++; - idx = cnt & rx->mask; - desc = rx->desc.desc_ring + idx; - rx->desc.seqno = gve_next_seqno(rx->desc.seqno); - work_done++; + total_packet_cnt++; + idx = rx->cnt & rx->mask; + desc = &rx->desc.desc_ring[idx]; + work_done += work_cnt; } - if (!work_done && rx->fill_cnt - cnt > rx->db_threshold) - return false; + if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) + return 0; - u64_stats_update_begin(&rx->statss); - rx->rpackets += packets; - rx->rbytes += bytes; - u64_stats_update_end(&rx->statss); - rx->cnt = cnt; + if (work_done) { + u64_stats_update_begin(&rx->statss); + rx->rpackets += ok_packet_cnt; + rx->rbytes += bytes; + u64_stats_update_end(&rx->statss); + } /* restock ring slots */ if (!rx->data.raw_addressing) { /* In QPL mode buffs are refilled as the desc are processed */ rx->fill_cnt += work_done; - } else if (rx->fill_cnt - cnt <= rx->db_threshold) { + } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { /* In raw addressing mode buffs are only refilled if the avail * falls below a threshold. */ if (!gve_rx_refill_buffers(priv, rx)) - return false; + return 0; /* If we were not able to completely refill buffers, we'll want * to schedule this queue for work again to refill buffers. 
*/ - if (rx->fill_cnt - cnt <= rx->db_threshold) { + if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { gve_rx_write_doorbell(priv, rx); - return true; + return budget; } } gve_rx_write_doorbell(priv, rx); - return gve_rx_work_pending(rx); + return total_packet_cnt; } -bool gve_rx_poll(struct gve_notify_block *block, int budget) +int gve_rx_poll(struct gve_notify_block *block, int budget) { struct gve_rx_ring *rx = block->rx; netdev_features_t feat; - bool repoll = false; + int work_done = 0; feat = block->napi.dev->features; @@ -604,8 +782,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget) budget = INT_MAX; if (budget > 0) - repoll |= gve_clean_rx_done(rx, budget, feat); - else - repoll |= gve_rx_work_pending(rx); - return repoll; + work_done = gve_clean_rx_done(rx, budget, feat); + + return work_done; } diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 8500621b2cd412..beb8bb079023cc 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -240,8 +240,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) rx->dqo.bufq.mask = buffer_queue_slots - 1; rx->dqo.complq.num_free_slots = completion_queue_slots; rx->dqo.complq.mask = completion_queue_slots - 1; - rx->skb_head = NULL; - rx->skb_tail = NULL; + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4); rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, @@ -467,12 +467,12 @@ static void gve_rx_skb_hash(struct sk_buff *skb, static void gve_rx_free_skb(struct gve_rx_ring *rx) { - if (!rx->skb_head) + if (!rx->ctx.skb_head) return; - dev_kfree_skb_any(rx->skb_head); - rx->skb_head = NULL; - rx->skb_tail = NULL; + dev_kfree_skb_any(rx->ctx.skb_head); + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; } /* Chains multi skbs for single rx packet. @@ -483,7 +483,7 @@ static int gve_rx_append_frags(struct napi_struct *napi, u16 buf_len, struct gve_rx_ring *rx, struct gve_priv *priv) { - int num_frags = skb_shinfo(rx->skb_tail)->nr_frags; + int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; if (unlikely(num_frags == MAX_SKB_FRAGS)) { struct sk_buff *skb; @@ -492,17 +492,17 @@ static int gve_rx_append_frags(struct napi_struct *napi, if (!skb) return -1; - skb_shinfo(rx->skb_tail)->frag_list = skb; - rx->skb_tail = skb; + skb_shinfo(rx->ctx.skb_tail)->frag_list = skb; + rx->ctx.skb_tail = skb; num_frags = 0; } - if (rx->skb_tail != rx->skb_head) { - rx->skb_head->len += buf_len; - rx->skb_head->data_len += buf_len; - rx->skb_head->truesize += priv->data_buffer_size_dqo; + if (rx->ctx.skb_tail != rx->ctx.skb_head) { + rx->ctx.skb_head->len += buf_len; + rx->ctx.skb_head->data_len += buf_len; + rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; } - skb_add_rx_frag(rx->skb_tail, num_frags, + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); @@ -556,7 +556,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, buf_len, DMA_FROM_DEVICE); /* Append to current skb if one exists. 
*/ - if (rx->skb_head) { + if (rx->ctx.skb_head) { if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, priv)) != 0) { goto error; @@ -567,11 +567,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, } if (eop && buf_len <= priv->rx_copybreak) { - rx->skb_head = gve_rx_copy(priv->dev, napi, - &buf_state->page_info, buf_len, 0); - if (unlikely(!rx->skb_head)) + rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, + &buf_state->page_info, buf_len, 0, NULL); + if (unlikely(!rx->ctx.skb_head)) goto error; - rx->skb_tail = rx->skb_head; + rx->ctx.skb_tail = rx->ctx.skb_head; u64_stats_update_begin(&rx->statss); rx->rx_copied_pkt++; @@ -583,12 +583,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } - rx->skb_head = napi_get_frags(napi); - if (unlikely(!rx->skb_head)) + rx->ctx.skb_head = napi_get_frags(napi); + if (unlikely(!rx->ctx.skb_head)) goto error; - rx->skb_tail = rx->skb_head; + rx->ctx.skb_tail = rx->ctx.skb_head; - skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page, + skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); gve_dec_pagecnt_bias(&buf_state->page_info); @@ -635,27 +635,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; int err; - skb_record_rx_queue(rx->skb_head, rx->q_num); + skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); if (feat & NETIF_F_RXHASH) - gve_rx_skb_hash(rx->skb_head, desc, ptype); + gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); if (feat & NETIF_F_RXCSUM) - gve_rx_skb_csum(rx->skb_head, desc, ptype); + gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); /* RSC packets must set gso_size otherwise the TCP stack will complain * that packets are larger than MTU. */ if (desc->rsc) { - err = gve_rx_complete_rsc(rx->skb_head, desc, ptype); + err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); if (err < 0) return err; } - if (skb_headlen(rx->skb_head) == 0) + if (skb_headlen(rx->ctx.skb_head) == 0) napi_gro_frags(napi); else - napi_gro_receive(napi, rx->skb_head); + napi_gro_receive(napi, rx->ctx.skb_head); return 0; } @@ -717,18 +717,18 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) /* Free running counter of completed descriptors */ rx->cnt++; - if (!rx->skb_head) + if (!rx->ctx.skb_head) continue; if (!compl_desc->end_of_packet) continue; work_done++; - pkt_bytes = rx->skb_head->len; + pkt_bytes = rx->ctx.skb_head->len; /* The ethernet header (first ETH_HLEN bytes) is snipped off * by eth_type_trans. 
*/ - if (skb_headlen(rx->skb_head)) + if (skb_headlen(rx->ctx.skb_head)) pkt_bytes += ETH_HLEN; /* gve_rx_complete_skb() will consume skb if successful */ @@ -741,8 +741,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) } bytes += pkt_bytes; - rx->skb_head = NULL; - rx->skb_tail = NULL; + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; } gve_rx_post_buffers_dqo(rx); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 665ac795a1adf8..a9cb241fedf412 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) gve_tx_remove_from_block(priv, idx); slots = tx->mask + 1; - gve_clean_tx_done(priv, tx, tx->req, false); + gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); netdev_tx_reset_queue(tx->netdev_txq); dma_free_coherent(hdev, sizeof(*tx->q_resources), @@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) /* Make sure everything is zeroed to start */ memset(tx, 0, sizeof(*tx)); + spin_lock_init(&tx->clean_lock); tx->q_num = idx; tx->mask = slots - 1; @@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) { if (info->skb) { - dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_single(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } else { - dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_page(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } } @@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); } +static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED); + /* Stops the queue if the skb cannot be transmitted. */ -static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) +static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) { int bytes_required = 0; + u32 nic_done; + u32 to_do; + int ret; if (!tx->raw_addressing) bytes_required = gve_skb_fifo_bytes_required(tx, skb); @@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) if (likely(gve_can_tx(tx, bytes_required))) return 0; - /* No space, so stop the queue */ - tx->stop_queue++; - netif_tx_stop_queue(tx->netdev_txq); - smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */ - - /* Now check for resources again, in case gve_clean_tx_done() freed - * resources after we checked and we stopped the queue after - * gve_clean_tx_done() checked. - * - * gve_maybe_stop_tx() gve_clean_tx_done() - * nsegs/can_alloc test failed - * gve_tx_free_fifo() - * if (tx queue stopped) - * netif_tx_queue_wake() - * netif_tx_stop_queue() - * Need to check again for space here! 
- */ - if (likely(!gve_can_tx(tx, bytes_required))) - return -EBUSY; + ret = -EBUSY; + spin_lock(&tx->clean_lock); + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = nic_done - tx->done; - netif_tx_start_queue(tx->netdev_txq); - tx->wake_queue++; - return 0; + /* Only try to clean if there is hope for TX */ + if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { + if (to_do > 0) { + to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT); + gve_clean_tx_done(priv, tx, to_do, false); + } + if (likely(gve_can_tx(tx, bytes_required))) + ret = 0; + } + if (ret) { + /* No space, so stop the queue */ + tx->stop_queue++; + netif_tx_stop_queue(tx->netdev_txq); + } + spin_unlock(&tx->clean_lock); + + return ret; } static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, @@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct gve_tx_buffer_state *info; bool is_gso = skb_is_gso(skb); u32 idx = tx->req & tx->mask; - struct gve_tx_dma_buf *buf; u64 addr; u32 len; int i; @@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto drop; } - buf = &info->buf; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(info, len, len); + dma_unmap_addr_set(info, dma, addr); payload_nfrags = shinfo->nr_frags; if (hlen < len) { @@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto unmap_drop; } - buf = &tx->info[idx].buf; tx->info[idx].skb = NULL; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], dma, addr); gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); } @@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues, "skb queue index out of range"); tx = &priv->tx[skb_get_queue_mapping(skb)]; - if (unlikely(gve_maybe_stop_tx(tx, skb))) { + if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { /* We need to ring the txq doorbell -- we have stopped the Tx * queue for want of resources, but prior calls to gve_tx() * may have added descriptors without ringing the doorbell. @@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, return pkts; } -__be32 gve_tx_load_event_counter(struct gve_priv *priv, - struct gve_tx_ring *tx) +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx) { - u32 counter_index = be32_to_cpu((tx->q_resources->counter_index)); + u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); + __be32 counter = READ_ONCE(priv->counter_array[counter_index]); - return READ_ONCE(priv->counter_array[counter_index]); + return be32_to_cpu(counter); } bool gve_tx_poll(struct gve_notify_block *block, int budget) { struct gve_priv *priv = block->priv; struct gve_tx_ring *tx = block->tx; - bool repoll = false; u32 nic_done; u32 to_do; @@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget) if (budget == 0) budget = INT_MAX; + /* The TX path may try to clean completed packets in order to xmit more. + * To avoid a cleaning conflict, use spin_lock(); it yields better + * concurrency between xmit and clean than netif's lock.
+ */ + spin_lock(&tx->clean_lock); /* Find out how much work there is to be done */ - tx->last_nic_done = gve_tx_load_event_counter(priv, tx); - nic_done = be32_to_cpu(tx->last_nic_done); - if (budget > 0) { - /* Do as much work as we have that the budget will - * allow - */ - to_do = min_t(u32, (nic_done - tx->done), budget); - gve_clean_tx_done(priv, tx, to_do, true); - } + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = min_t(u32, (nic_done - tx->done), budget); + gve_clean_tx_done(priv, tx, to_do, true); + spin_unlock(&tx->clean_lock); /* If we still have work we want to repoll */ - repoll |= (nic_done != tx->done); - return repoll; + return nic_done != tx->done; +} + +bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) +{ + u32 nic_done = gve_tx_load_event_counter(priv, tx); + + return nic_done != tx->done; } diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 05ddb6a75c38f3..ec394d9916681a 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) int j; for (j = 0; j < cur_state->num_bufs; j++) { - struct gve_tx_dma_buf *buf = &cur_state->bufs[j]; - if (j == 0) { dma_unmap_single(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } else { dma_unmap_page(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } } if (cur_state->skb) { @@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, const bool is_gso = skb_is_gso(skb); u32 desc_idx = tx->dqo_tx.tail; - struct gve_tx_pending_packet_dqo *pending_packet; + struct gve_tx_pending_packet_dqo *pkt; struct gve_tx_metadata_dqo metadata; s16 completion_tag; int i; - pending_packet = gve_alloc_pending_packet(tx); - pending_packet->skb = skb; - pending_packet->num_bufs = 0; - completion_tag = pending_packet - tx->dqo.pending_packets; + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + pkt->num_bufs = 0; + completion_tag = pkt - tx->dqo.pending_packets; gve_extract_tx_metadata_dqo(skb, &metadata); if (is_gso) { @@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, /* Map the linear portion of skb */ { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; u32 len = skb_headlen(skb); dma_addr_t addr; @@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, @@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, } for (i = 0; i < shinfo->nr_frags; i++) { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; const skb_frag_t *frag = &shinfo->frags[i]; bool is_eop = i == (shinfo->nr_frags - 1); u32 len = skb_frag_size(frag); @@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - 
dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, is_eop, is_gso); @@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, return 0; err: - for (i = 0; i < pending_packet->num_bufs; i++) { - struct gve_tx_dma_buf *buf = &pending_packet->bufs[i]; - + for (i = 0; i < pkt->num_bufs; i++) { if (i == 0) { - dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } else { - dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); } } - pending_packet->skb = NULL; - pending_packet->num_bufs = 0; - gve_free_pending_packet(tx, pending_packet); + pkt->skb = NULL; + pkt->num_bufs = 0; + gve_free_pending_packet(tx, pkt); return -1; } @@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, static void remove_from_list(struct gve_tx_ring *tx, struct gve_index_list *list, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { s16 prev_index, next_index; - prev_index = pending_packet->prev; - next_index = pending_packet->next; + prev_index = pkt->prev; + next_index = pkt->next; if (prev_index == -1) { /* Node is head */ @@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx, } static void gve_unmap_packet(struct device *dev, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { - struct gve_tx_dma_buf *buf; int i; /* SKB linear portion is guaranteed to be mapped */ - buf = &pending_packet->bufs[0]; - dma_unmap_single(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); - for (i = 1; i < pending_packet->num_bufs; i++) { - buf = &pending_packet->bufs[i]; - dma_unmap_page(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); + for (i = 1; i < pkt->num_bufs; i++) { + dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } - pending_packet->num_bufs = 0; + pkt->num_bufs = 0; } /* Completion types and expected behavior: diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 93f3dcbeeea9dd..88ca49cbc1e298 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx) void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx) { + unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2, + num_online_cpus()); int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; struct gve_tx_ring *tx = &priv->tx[queue_idx]; block->tx = tx; tx->ntfy_id = ntfy_idx; + netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus), + queue_idx); } void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) @@ -46,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) struct sk_buff 
*gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len, - u16 pad) + u16 padding, struct gve_rx_ctx *ctx) { - struct sk_buff *skb = napi_alloc_skb(napi, len); - void *va = page_info->page_address + pad + - page_info->page_offset; - - if (unlikely(!skb)) - return NULL; - + void *va = page_info->page_address + padding + page_info->page_offset; + int skb_linear_offset = 0; + bool set_protocol = false; + struct sk_buff *skb; + + if (ctx) { + if (!ctx->skb_head) + ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size); + + if (unlikely(!ctx->skb_head)) + return NULL; + skb = ctx->skb_head; + skb_linear_offset = skb->len; + set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1; + } else { + skb = napi_alloc_skb(napi, len); + set_protocol = true; + } __skb_put(skb, len); + skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len); - skb_copy_to_linear_data(skb, va, len); - - skb->protocol = eth_type_trans(skb, dev); + if (set_protocol) + skb->protocol = eth_type_trans(skb, dev); return skb; } diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h index 79595940b35118..6d98e69fd3b8ee 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.h +++ b/drivers/net/ethernet/google/gve/gve_utils.h @@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len, - u16 pad); + u16 pad, struct gve_rx_ctx *ctx); /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */ void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 37b605fed32c31..c84ef494bd6077 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 22bf914f2dbd0d..a6c18b6527f9ae 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv) } static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv, - unsigned char *mac) + const unsigned char *mac) { u32 reg; @@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(skaddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, skaddr->sa_data); dev->addr_assign_type &= ~NET_ADDR_RANDOM; hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); @@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) (unsigned long)phy->phy_id, phy_modes(phy->interface)); - ret = of_get_mac_address(node, ndev->dev_addr); + ret = of_get_ethdev_address(node, ndev); if (ret) { eth_hw_addr_random(ndev); dev_warn(dev, "using random MAC address %pM\n", diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c1aae0fca5e988..d7e62eca050f46 100644 --- 
a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv) static void hix5hd2_hw_set_mac_addr(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); - unsigned char *mac = dev->dev_addr; + const unsigned char *mac = dev->dev_addr; u32 val; val = mac[1] | (mac[0] << 8); @@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) goto out_phy_node; } - ret = of_get_mac_address(node, ndev->dev_addr); + ret = of_get_ethdev_address(node, ndev); if (ret) { eth_hw_addr_random(ndev); netdev_warn(ndev, "using random MAC address %pM\n", diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 2b7db1c22321e2..d72657444ef3aa 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -499,7 +499,7 @@ struct hnae_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); int (*get_mac_addr)(struct hnae_handle *handle, void **p); - int (*set_mac_addr)(struct hnae_handle *handle, void *p); + int (*set_mac_addr)(struct hnae_handle *handle, const void *p); int (*add_uc_addr)(struct hnae_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae_handle *handle, @@ -558,7 +558,7 @@ struct hnae_handle { enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ struct hnae_buf_ops *bops; /* operation for the buffer */ - struct hnae_queue **qs; /* array base of all queues */ + struct hnae_queue *qs[]; /* flexible array of all queues */ }; #define ring_to_dev(ring) ((ring)->q->dev->dev) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 75e4ec569da84d..bc3e406f01390c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id); qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id); - vf_cb = kzalloc(sizeof(*vf_cb) + - qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL); + vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf), + GFP_KERNEL); if (unlikely(!vf_cb)) { dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n"); ae_handle = ERR_PTR(-ENOMEM); @@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, goto vf_id_err; } - ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1); for (i = 0; i < qnum_per_vf; i++) { ae_handle->qs[i] = &ring_pair_cb->q; ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i]; @@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q) hns_rcb_reset_ring_hw(q); } -static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p) +static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p) { int ret; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index f387a859a2010d..8f391e2adcc0b9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv) += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG); } -static void hns_gmac_set_mac_addr(void *mac_drv, 
char *mac_addr) +static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index f41379de21865a..7edf8569514ccd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) *@addr:mac address */ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, - u32 vmid, char *addr) + u32 vmid, const char *addr) { int ret; struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 8943ffab4418de..e3bb05959ba97c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -348,7 +348,7 @@ struct mac_driver { /*disable mac when disable nic or dsaf*/ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode); /* config mac address*/ - void (*set_mac_addr)(void *mac_drv, char *mac_addr); + void (*set_mac_addr)(void *mac_drv, const char *mac_addr); /*adjust mac mode of port,include speed and duplex*/ int (*adjust_link)(void *mac_drv, enum mac_speed speed, u32 full_duplex); @@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev); void mac_adjust_link(struct net_device *net_dev); bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); -int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); +int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, + const char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, u32 port_num, char *addr, bool enable); int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index cba04bfa0b3f8d..5526a10caac5a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -210,7 +210,7 @@ struct hnae_vf_cb { u8 port_index; struct hns_mac_cb *mac_cb; struct dsaf_device *dsaf_dev; - struct hnae_handle ae_handle; /* must be the last number */ + struct hnae_handle ae_handle; /* must be the last member */ }; struct dsaf_int_xge_src { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 401fef5f1d07d3..fc26ffaae62028 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } -static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) +static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 343c605c4be862..22a463e1567852 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p) return ret; } - 
memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, mac_addr->sa_data); return 0; } @@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) { + if (device_get_ethdev_address(priv->dev, ndev)) { eth_hw_addr_random(ndev); dev_warn(priv->dev, "No valid mac, use random mac %pM", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d701451596c825..3f7a9a4c59d566 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps) +#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -294,6 +298,8 @@ enum hnae3_dbg_cmd { HNAE3_DBG_CMD_MAC_TNL_STATUS, HNAE3_DBG_CMD_SERV_INFO, HNAE3_DBG_CMD_UMV_INFO, + HNAE3_DBG_CMD_PAGE_POOL_INFO, + HNAE3_DBG_CMD_COAL_INFO, HNAE3_DBG_CMD_UNKNOWN, }; @@ -341,6 +347,9 @@ struct hnae3_dev_specs { u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ u16 max_frm_size; u16 max_qset_num; + u16 umv_size; + u16 mc_mac_size; + u32 mac_stats_num; }; struct hnae3_client_ops { @@ -588,7 +597,7 @@ struct hnae3_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); - int (*set_mac_addr)(struct hnae3_handle *handle, void *p, + int (*set_mac_addr)(struct hnae3_handle *handle, const void *p, bool is_first); int (*do_ioctl)(struct hnae3_handle *handle, struct ifreq *ifr, int cmd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index e54f96251fea97..67364ab63a1ff9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -336,6 +336,20 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .buf_len = HNS3_DBG_READ_LEN, .init = hns3_dbg_common_file_init, }, + { + .name = "page_pool_info", + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "coalesce_info", + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, }; static struct hns3_dbg_cap_info hns3_dbg_cap[] = { @@ -384,6 +398,26 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = { } }; +static const struct hns3_dbg_item coal_info_items[] = { + { "VEC_ID", 2 }, + { "ALGO_STATE", 2 }, + { "PROFILE_ID", 2 }, + { "CQE_MODE", 2 }, + { "TUNE_STATE", 2 }, + { "STEPS_LEFT", 2 }, + { "STEPS_RIGHT", 2 }, + { "TIRED", 2 }, + { "SW_GL", 2 }, + { "SW_QL", 2 }, + { "HW_GL", 2 }, + { "HW_QL", 2 }, +}; + +static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" }; +static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" }; +static const char * const +dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" }; + static void 
hns3_dbg_fill_content(char *content, u16 len, const struct hns3_dbg_item *items, const char **result, u16 size) @@ -405,6 +439,94 @@ static void hns3_dbg_fill_content(char *content, u16 len, *pos++ = '\0'; } +static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, + char **result, int i, bool is_tx) +{ + unsigned int gl_offset, ql_offset; + struct hns3_enet_coalesce *coal; + unsigned int reg_val; + unsigned int j = 0; + struct dim *dim; + bool ql_enable; + + if (is_tx) { + coal = &tqp_vector->tx_group.coal; + dim = &tqp_vector->tx_group.dim; + gl_offset = HNS3_VECTOR_GL1_OFFSET; + ql_offset = HNS3_VECTOR_TX_QL_OFFSET; + ql_enable = tqp_vector->tx_group.coal.ql_enable; + } else { + coal = &tqp_vector->rx_group.coal; + dim = &tqp_vector->rx_group.dim; + gl_offset = HNS3_VECTOR_GL0_OFFSET; + ql_offset = HNS3_VECTOR_RX_QL_OFFSET; + ql_enable = tqp_vector->rx_group.coal.ql_enable; + } + + sprintf(result[j++], "%d", i); + sprintf(result[j++], "%s", dim_state_str[dim->state]); + sprintf(result[j++], "%u", dim->profile_ix); + sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]); + sprintf(result[j++], "%s", + dim_tune_stat_str[dim->tune_state]); + sprintf(result[j++], "%u", dim->steps_left); + sprintf(result[j++], "%u", dim->steps_right); + sprintf(result[j++], "%u", dim->tired); + sprintf(result[j++], "%u", coal->int_gl); + sprintf(result[j++], "%u", coal->int_ql); + reg_val = readl(tqp_vector->mask_addr + gl_offset) & + HNS3_VECTOR_GL_MASK; + sprintf(result[j++], "%u", reg_val); + if (ql_enable) { + reg_val = readl(tqp_vector->mask_addr + ql_offset) & + HNS3_VECTOR_QL_MASK; + sprintf(result[j++], "%u", reg_val); + } else { + sprintf(result[j++], "NA"); + } +} + +static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len, + int *pos, bool is_tx) +{ + char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(coal_info_items)]; + struct hns3_enet_tqp_vector *tqp_vector; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(coal_info_items); i++) + result[i] = &data_str[i][0]; + + *pos += scnprintf(buf + *pos, len - *pos, + "%s interrupt coalesce info:\n", + is_tx ? 
"tx" : "rx"); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + NULL, ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_get_coal_info(tqp_vector, result, i, is_tx); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + (const char **)result, + ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } +} + +static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len) +{ + int pos = 0; + + hns3_dump_coal_info(h, buf, len, &pos, true); + pos += scnprintf(buf + pos, len - pos, "\n"); + hns3_dump_coal_info(h, buf, len, &pos, false); + + return 0; +} + static const struct hns3_dbg_item tx_spare_info_items[] = { { "QUEUE_ID", 2 }, { "COPYBREAK", 2 }, @@ -924,6 +1046,12 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) dev_specs->max_tm_rate); *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); + *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", + dev_specs->mac_stats_num); } static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) @@ -937,6 +1065,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) return 0; } +static const struct hns3_dbg_item page_pool_info_items[] = { + { "QUEUE_ID", 2 }, + { "ALLOCATE_CNT", 2 }, + { "FREE_CNT", 6 }, + { "POOL_SIZE(PAGE_NUM)", 2 }, + { "ORDER", 2 }, + { "NUMA_ID", 2 }, + { "MAX_LEN", 2 }, +}; + +static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, + char **result, u32 index) +{ + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + atomic_read(&ring->page_pool->pages_state_release_cnt)); + sprintf(result[j++], "%u", ring->page_pool->p.pool_size); + sprintf(result[j++], "%u", ring->page_pool->p.order); + sprintf(result[j++], "%d", ring->page_pool->p.nid); + sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); +} + +static int +hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(page_pool_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items, + NULL, ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_page_pool_info(ring, result, i); + hns3_dbg_fill_content(content, sizeof(content), + page_pool_info_items, + (const char **)result, + ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + static int hns3_dbg_get_cmd_index(struct hns3_dbg_data 
*dbg_data, u32 *index) { u32 i; @@ -978,6 +1169,14 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = { .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, .dbg_dump = hns3_dbg_tx_queue_info, }, + { + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dbg_dump = hns3_dbg_page_pool_info, + }, + { + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dbg_dump = hns3_dbg_coal_info, + }, }; static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4b886a13e07970..a2b993d62822e1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2284,7 +2284,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return ret; } - ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + eth_hw_addr_set(netdev, mac_addr->sa_data); return 0; } @@ -4940,7 +4940,7 @@ static int hns3_init_mac_addr(struct net_device *netdev) dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { - ether_addr_copy(netdev->dev_addr, mac_addr_temp); + eth_hw_addr_set(netdev, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); } else { return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index f09a61d9c6264e..1715c98d906d5d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -189,12 +189,13 @@ enum hns3_nic_state { #define HNS3_MAX_TSO_SIZE 1048576U #define HNS3_MAX_NON_TSO_SIZE 9728U - +#define HNS3_VECTOR_GL_MASK GENMASK(11, 0) #define HNS3_VECTOR_GL0_OFFSET 0x100 #define HNS3_VECTOR_GL1_OFFSET 0x200 #define HNS3_VECTOR_GL2_OFFSET 0x300 #define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_VECTOR_QL_MASK GENMASK(9, 0) #define HNS3_VECTOR_TX_QL_OFFSET 0xe00 #define HNS3_VECTOR_RX_QL_OFFSET 0xf00 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 9c2eeaa822944f..c327df9dbac409 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -482,6 +482,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en) hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); if (hnae3_dev_phy_imp_supported(hdev)) hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); + hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1); req->compat = cpu_to_le32(compat); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 33244472e0d0e7..c38b57fc6c6a54 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1150,6 +1150,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { #define HCLGE_LINK_EVENT_REPORT_EN_B 0 #define HCLGE_NCSI_ERROR_REPORT_EN_B 1 #define HCLGE_PHY_IMP_EN_B 2 +#define HCLGE_MAC_STATS_EXT_EN_B 3 struct hclge_firmware_compat_cmd { __le32 compat; u8 rsv[20]; @@ -1188,7 +1189,10 @@ struct hclge_dev_specs_1_cmd { __le16 max_frm_size; __le16 max_qset_num; __le16 max_int_gl; - u8 rsv1[18]; + u8 rsv0[2]; + __le16 umv_size; + __le16 mc_mac_size; + u8 rsv1[12]; }; /* mac speed type defined in firmware command */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 9cda8b3562b895..4e0a8c2f7c0536 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1990,6 +1990,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) } mutex_unlock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c index e4aad695abcc18..4c441e6a5082da 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev) struct pci_dev *pdev = hdev->pdev; struct hclge_devlink_priv *priv; struct devlink *devlink; - int ret; devlink = devlink_alloc(&hclge_devlink_ops, sizeof(struct hclge_devlink_priv), &pdev->dev); @@ -120,28 +119,15 @@ int hclge_devlink_init(struct hclge_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - - devlink_reload_enable(devlink); - + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclge_devlink_uninit(struct hclge_dev *hdev) { struct devlink *devlink = hdev->devlink; - devlink_reload_disable(devlink); - devlink_unregister(devlink); devlink_free(devlink); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 93aa7f2bdc13bc..20e628c2bd4457 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1242,6 +1242,9 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { }, { .module_id = MODULE_MASTER, .msg = "MODULE_MASTER" + }, { + .module_id = MODULE_HIMAC, + .msg = "MODULE_HIMAC" }, { .module_id = MODULE_ROCEE_TOP, .msg = "MODULE_ROCEE_TOP" @@ -1315,13 +1318,22 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { }, { .type_id = GLB_ERROR, .msg = "glb_error" + }, { + .type_id = LINK_ERROR, + .msg = "link_error" + }, { + .type_id = PTP_ERROR, + .msg = "ptp_error" }, { .type_id = ROCEE_NORMAL_ERR, .msg = "rocee_normal_error" }, { .type_id = ROCEE_OVF_ERR, .msg = "rocee_ovf_error" - } + }, { + .type_id = ROCEE_BUS_ERR, + .msg = "rocee_bus_error" + }, }; static void hclge_log_error(struct device *dev, char *reg, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index d811eeefe2c054..86be6fb3299017 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -138,6 +138,7 @@ enum hclge_mod_name_list { MODULE_RCB_TX = 12, MODULE_TXDMA = 13, MODULE_MASTER = 14, + MODULE_HIMAC = 15, /* add new MODULE NAME for NIC here in order */ MODULE_ROCEE_TOP = 40, MODULE_ROCEE_TIMER = 41, @@ -166,9 +167,12 @@ enum hclge_err_type_list { ETS_ERROR = 10, NCSI_ERROR = 11, GLB_ERROR = 12, + LINK_ERROR = 13, + PTP_ERROR = 14, /* add new ERROR TYPE for NIC here in order */ ROCEE_NORMAL_ERR = 40, ROCEE_OVF_ERR = 41, + ROCEE_BUS_ERR = 42, /* add new ERROR TYPE for ROCEE here in order */ }; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d891390d492f69..2e41aa2d1df80f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -156,174 +156,210 @@ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { }; static const struct hclge_comm_stats_str g_mac_stats_string[] = { - {"mac_tx_mac_pause_num", + {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, - {"mac_rx_mac_pause_num", + {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, - {"mac_tx_control_pkt_num", + {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, + {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, + {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, - {"mac_rx_control_pkt_num", + {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, - {"mac_tx_pfc_pkt_num", + {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, - {"mac_tx_pfc_pri0_pkt_num", + {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, - {"mac_tx_pfc_pri1_pkt_num", + {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, - {"mac_tx_pfc_pri2_pkt_num", + {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, - {"mac_tx_pfc_pri3_pkt_num", + {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, - {"mac_tx_pfc_pri4_pkt_num", + {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, - {"mac_tx_pfc_pri5_pkt_num", + {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, - {"mac_tx_pfc_pri6_pkt_num", + {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, - {"mac_tx_pfc_pri7_pkt_num", + {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, - {"mac_rx_pfc_pkt_num", + {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, + {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, + {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, + {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, + {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, + {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, + {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, + {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, + {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, - {"mac_rx_pfc_pri0_pkt_num", + 
{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, - {"mac_rx_pfc_pri1_pkt_num", + {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, - {"mac_rx_pfc_pri2_pkt_num", + {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, - {"mac_rx_pfc_pri3_pkt_num", + {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, - {"mac_rx_pfc_pri4_pkt_num", + {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, - {"mac_rx_pfc_pri5_pkt_num", + {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, - {"mac_rx_pfc_pri6_pkt_num", + {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, - {"mac_rx_pfc_pri7_pkt_num", + {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, - {"mac_tx_total_pkt_num", + {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, + {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, + {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, + {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, + {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, + {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, + {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, + {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, + {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, - {"mac_tx_total_oct_num", + {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, - {"mac_tx_good_pkt_num", + {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, - {"mac_tx_bad_pkt_num", + {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, - {"mac_tx_good_oct_num", + {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, - {"mac_tx_bad_oct_num", + {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, - {"mac_tx_uni_pkt_num", + {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, - {"mac_tx_multi_pkt_num", + {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, - {"mac_tx_broad_pkt_num", + {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, - {"mac_tx_undersize_pkt_num", + {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, - {"mac_tx_oversize_pkt_num", + {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, - {"mac_tx_64_oct_pkt_num", + {"mac_tx_64_oct_pkt_num", 
HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, - {"mac_tx_65_127_oct_pkt_num", + {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, - {"mac_tx_128_255_oct_pkt_num", + {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, - {"mac_tx_256_511_oct_pkt_num", + {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, - {"mac_tx_512_1023_oct_pkt_num", + {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, - {"mac_tx_1024_1518_oct_pkt_num", + {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, - {"mac_tx_1519_2047_oct_pkt_num", + {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, - {"mac_tx_2048_4095_oct_pkt_num", + {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, - {"mac_tx_4096_8191_oct_pkt_num", + {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, - {"mac_tx_8192_9216_oct_pkt_num", + {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, - {"mac_tx_9217_12287_oct_pkt_num", + {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, - {"mac_tx_12288_16383_oct_pkt_num", + {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, - {"mac_tx_1519_max_good_pkt_num", + {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, - {"mac_tx_1519_max_bad_pkt_num", + {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, - {"mac_rx_total_pkt_num", + {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, - {"mac_rx_total_oct_num", + {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, - {"mac_rx_good_pkt_num", + {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, - {"mac_rx_bad_pkt_num", + {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, - {"mac_rx_good_oct_num", + {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, - {"mac_rx_bad_oct_num", + {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, - {"mac_rx_uni_pkt_num", + {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, - {"mac_rx_multi_pkt_num", + {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, - {"mac_rx_broad_pkt_num", + {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, - {"mac_rx_undersize_pkt_num", + {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, - {"mac_rx_oversize_pkt_num", + {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, - 
{"mac_rx_64_oct_pkt_num", + {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, - {"mac_rx_65_127_oct_pkt_num", + {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, - {"mac_rx_128_255_oct_pkt_num", + {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, - {"mac_rx_256_511_oct_pkt_num", + {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, - {"mac_rx_512_1023_oct_pkt_num", + {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, - {"mac_rx_1024_1518_oct_pkt_num", + {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, - {"mac_rx_1519_2047_oct_pkt_num", + {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, - {"mac_rx_2048_4095_oct_pkt_num", + {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, - {"mac_rx_4096_8191_oct_pkt_num", + {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, - {"mac_rx_8192_9216_oct_pkt_num", + {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, - {"mac_rx_9217_12287_oct_pkt_num", + {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, - {"mac_rx_12288_16383_oct_pkt_num", + {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, - {"mac_rx_1519_max_good_pkt_num", + {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, - {"mac_rx_1519_max_bad_pkt_num", + {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, - {"mac_tx_fragment_pkt_num", + {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, - {"mac_tx_undermin_pkt_num", + {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, - {"mac_tx_jabber_pkt_num", + {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, - {"mac_tx_err_all_pkt_num", + {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, - {"mac_tx_from_app_good_pkt_num", + {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, - {"mac_tx_from_app_bad_pkt_num", + {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, - {"mac_rx_fragment_pkt_num", + {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, - {"mac_rx_undermin_pkt_num", + {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, - {"mac_rx_jabber_pkt_num", + {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, - {"mac_rx_fcs_err_pkt_num", + {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, - {"mac_rx_send_app_good_pkt_num", + {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, - {"mac_rx_send_app_bad_pkt_num", + {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} }; @@ -451,8 +487,9 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; __le64 *desc_data; - int i, k, n; + u32 data_size; int ret; + u32 i; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); @@ -463,33 +500,37 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) return ret; } - for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { - /* for special opcode 0032, only the first desc has the head */ - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + /* The first desc has a 64-bit header, so data size need to minus 1 */ + data_size = sizeof(desc) / (sizeof(u64)) - 1; - for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous becase only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; } return 0; } -static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) { +#define HCLGE_REG_NUM_PER_DESC 4 + + u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc *desc; __le64 *desc_data; - u16 i, k, n; + u32 data_size; + u32 desc_num; int ret; + u32 i; + + /* The first desc has a 64-bit header, so need to consider it */ + desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1; /* This may be called inside atomic sections, * so GFP_ATOMIC is more suitalbe here @@ -505,21 +546,16 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) return ret; } - for (i = 0; i < desc_num; i++) { - /* for special opcode 0034, only the first desc has the head */ - if (i == 0) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); - for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous becase only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; } kfree(desc); @@ -527,42 +563,37 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) return 0; } -static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) { struct hclge_desc desc; - __le32 *desc_data; - u32 reg_num; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query mac statistic reg number, ret = %d\n", 
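A minimal userspace sketch of the descriptor arithmetic used by the rewritten stats path above: the firmware packs HCLGE_REG_NUM_PER_DESC (4) statistics registers per descriptor, and the first descriptor also carries a 64-bit header, so one extra descriptor is requested. The register count below is only an example value, not something queried from hardware.

#include <stdio.h>

#define HCLGE_REG_NUM_PER_DESC 4

int main(void)
{
	unsigned int reg_num = 105;	/* e.g. the V2 register count */
	/* the first descriptor also holds a 64-bit header, hence "+ 1" */
	unsigned int desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	printf("%u stats registers -> %u descriptors\n", reg_num, desc_num);
	return 0;
}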
+ ret); return ret; + } - desc_data = (__le32 *)(&desc.data[0]); - reg_num = le32_to_cpu(*desc_data); - - *desc_num = 1 + ((reg_num - 3) >> 2) + - (u32)(((reg_num - 3) & 0x3) ? 1 : 0); + *reg_num = le32_to_cpu(desc.data[0]); + if (*reg_num == 0) { + dev_err(&hdev->pdev->dev, + "mac statistic reg number is invalid!\n"); + return -ENODATA; + } return 0; } static int hclge_mac_update_stats(struct hclge_dev *hdev) { - u32 desc_num; - int ret; - - ret = hclge_mac_query_reg_num(hdev, &desc_num); /* The firmware supports the new statistics acquisition method */ - if (!ret) - ret = hclge_mac_update_stats_complete(hdev, desc_num); - else if (ret == -EOPNOTSUPP) - ret = hclge_mac_update_stats_defective(hdev); + if (hdev->ae_dev->dev_specs.mac_stats_num) + return hclge_mac_update_stats_complete(hdev); else - dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); - - return ret; + return hclge_mac_update_stats_defective(hdev); } static int hclge_tqps_update_stats(struct hnae3_handle *handle) @@ -670,20 +701,39 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) return buff; } -static u64 *hclge_comm_get_stats(const void *comm_stats, +static int hclge_comm_get_count(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + u32 size) +{ + int count = 0; + u32 i; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) + count++; + + return count; +} + +static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, const struct hclge_comm_stats_str strs[], int size, u64 *data) { u64 *buf = data; u32 i; - for (i = 0; i < size; i++) - buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; - return buf + size; + *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); + buf++; + } + + return buf; } -static u8 *hclge_comm_get_strings(u32 stringset, +static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, const struct hclge_comm_stats_str strs[], int size, u8 *data) { @@ -694,6 +744,9 @@ static u8 *hclge_comm_get_strings(u32 stringset, return buff; for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); buff = buff + ETH_GSTRING_LEN; } @@ -785,7 +838,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } } else if (stringset == ETH_SS_STATS) { - count = ARRAY_SIZE(g_mac_stats_string) + + count = hclge_comm_get_count(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string)) + hclge_tqps_get_sset_count(handle, stringset); } @@ -795,12 +849,14 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, u8 *data) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; u8 *p = (char *)data; int size; if (stringset == ETH_SS_STATS) { size = ARRAY_SIZE(g_mac_stats_string); - p = hclge_comm_get_strings(stringset, g_mac_stats_string, + p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, size, p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { @@ -834,7 +890,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) struct hclge_dev *hdev = vport->back; u64 *p; - p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string, + p = hclge_comm_get_stats(hdev, 
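The new hclge_comm_get_count()/hclge_comm_get_stats() pair simply skips table entries whose stats_num exceeds the firmware-reported mac_stats_num. A standalone model of that filter, reusing the V1/V2 thresholds from the patch but with otherwise made-up entries and a made-up firmware value:

#include <stdio.h>

struct stat_desc {
	const char *name;
	unsigned int stats_num;	/* minimum register count needed */
};

static const struct stat_desc table[] = {
	{ "mac_tx_mac_pause_num",   84 },	/* V1 counter */
	{ "mac_tx_pause_xoff_time", 105 },	/* V2-only counter */
};

int main(void)
{
	unsigned int mac_stats_num = 84;	/* as reported by firmware */
	unsigned int i, count = 0;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].stats_num <= mac_stats_num)
			count++;

	printf("firmware exposes %u of %zu counters\n",
	       count, sizeof(table) / sizeof(table[0]));
	return 0;
}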
g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); p = hclge_tqps_get_stats(handle, p); } @@ -1037,96 +1093,100 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) return -EINVAL; } -static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_sr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - mac->supported); + link_mode); } -static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_lr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit( ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - mac->supported); + link_mode); } -static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_cr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - mac->supported); + link_mode); } -static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_kr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_1G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_10G_BIT) 
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - mac->supported); + link_mode); } static void hclge_convert_setting_fec(struct hclge_mac *mac) @@ -1170,9 +1230,9 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, mac->supported); - hclge_convert_setting_sr(mac, speed_ability); - hclge_convert_setting_lr(mac, speed_ability); - hclge_convert_setting_cr(mac, speed_ability); + hclge_convert_setting_sr(speed_ability, mac->supported); + hclge_convert_setting_lr(speed_ability, mac->supported); + hclge_convert_setting_cr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); @@ -1188,7 +1248,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, { struct hclge_mac *mac = &hdev->hw.mac; - hclge_convert_setting_kr(mac, speed_ability); + hclge_convert_setting_kr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac); @@ -1342,8 +1402,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_UMV_TBL_SPACE_M, HCLGE_CFG_UMV_TBL_SPACE_S); - if (!cfg->umv_space) - cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), HCLGE_CFG_PF_RSS_SIZE_M, @@ -1419,6 +1477,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@ -1440,6 +1499,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -1460,6 +1521,21 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static int hclge_query_mac_stats_num(struct hclge_dev *hdev) +{ + u32 reg_num = 0; + int ret; + + ret = hclge_mac_query_reg_num(hdev, ®_num); + if (ret && ret != -EOPNOTSUPP) + return ret; + + hdev->ae_dev->dev_specs.mac_stats_num = reg_num; + return 0; } static int hclge_query_dev_specs(struct hclge_dev *hdev) @@ -1468,6 +1544,10 @@ static int 
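Passing a bare bitmap instead of struct hclge_mac lets the same hclge_convert_setting_*() decoders fill either the supported or the advertising mask. A rough userspace illustration of that shape; the bit values and mode names are stand-ins, not the real HCLGE_SUPPORT_* or ethtool encodings:

#include <stdio.h>

#define SUPPORT_10G (1U << 0)
#define SUPPORT_25G (1U << 1)

/* decode a speed-ability mask into a caller-supplied link-mode bitmap */
static void convert_setting_sr(unsigned int speed_ability, unsigned int *link_mode)
{
	if (speed_ability & SUPPORT_10G)
		*link_mode |= 1U << 0;	/* stands in for 10000baseSR_Full */
	if (speed_ability & SUPPORT_25G)
		*link_mode |= 1U << 1;	/* stands in for 25000baseSR_Full */
}

int main(void)
{
	unsigned int supported = 0, advertising = 0;

	convert_setting_sr(SUPPORT_10G | SUPPORT_25G, &supported);
	convert_setting_sr(SUPPORT_10G, &advertising);	/* current speed only */
	printf("supported=%#x advertising=%#x\n", supported, advertising);
	return 0;
}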
hclge_query_dev_specs(struct hclge_dev *hdev) int ret; int i; + ret = hclge_query_mac_stats_num(hdev); + if (ret) + return ret; + /* set default specifications as devices lower than version V3 do not * support querying specifications from firmware. */ @@ -1549,7 +1629,10 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; - hdev->wanted_umv_size = cfg.umv_space; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) @@ -2964,6 +3047,82 @@ static void hclge_update_link_status(struct hclge_dev *hdev) clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); } +static void hclge_update_speed_advertising(struct hclge_mac *mac) +{ + u32 speed_ability; + + if (hclge_get_speed_bit(mac->speed, &speed_ability)) + return; + + switch (mac->module_type) { + case HNAE3_MODULE_TYPE_FIBRE_LR: + hclge_convert_setting_lr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_FIBRE_SR: + case HNAE3_MODULE_TYPE_AOC: + hclge_convert_setting_sr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_CR: + hclge_convert_setting_cr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_KR: + hclge_convert_setting_kr(speed_ability, mac->advertising); + break; + default: + break; + } +} + +static void hclge_update_fec_advertising(struct hclge_mac *mac) +{ + if (mac->fec_mode & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->advertising); + else + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->advertising); +} + +static void hclge_update_pause_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + bool rx_en, tx_en; + + switch (hdev->fc_mode_last_time) { + case HCLGE_FC_RX_PAUSE: + rx_en = true; + tx_en = false; + break; + case HCLGE_FC_TX_PAUSE: + rx_en = false; + tx_en = true; + break; + case HCLGE_FC_FULL: + rx_en = true; + tx_en = true; + break; + default: + rx_en = false; + tx_en = false; + break; + } + + linkmode_set_pause(mac->advertising, tx_en, rx_en); +} + +static void hclge_update_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + linkmode_zero(mac->advertising); + hclge_update_speed_advertising(mac); + hclge_update_fec_advertising(mac); + hclge_update_pause_advertising(hdev); +} + static void hclge_update_port_capability(struct hclge_dev *hdev, struct hclge_mac *mac) { @@ -2986,7 +3145,7 @@ static void hclge_update_port_capability(struct hclge_dev *hdev, } else { linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); - linkmode_zero(mac->advertising); + hclge_update_advertising(hdev); } } @@ -8475,6 +8634,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + return 0; } @@ -8492,6 +8654,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; } static bool hclge_is_umv_space_full(struct hclge_vport 
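hclge_update_pause_advertising() reduces the last negotiated flow-control mode to a pair of rx/tx pause enables before writing them into the advertising mask. A self-contained restatement of that mapping with stand-in enum values:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static void pause_from_fc_mode(enum fc_mode mode, bool *rx_en, bool *tx_en)
{
	switch (mode) {
	case FC_RX_PAUSE:
		*rx_en = true;  *tx_en = false;
		break;
	case FC_TX_PAUSE:
		*rx_en = false; *tx_en = true;
		break;
	case FC_FULL:
		*rx_en = true;  *tx_en = true;
		break;
	default:
		*rx_en = false; *tx_en = false;
		break;
	}
}

int main(void)
{
	bool rx, tx;

	pause_from_fc_mode(FC_TX_PAUSE, &rx, &tx);
	printf("rx_en=%d tx_en=%d\n", rx, tx);
	return 0;
}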
*vport, bool need_lock) @@ -8746,6 +8910,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; + bool is_new_addr = false; int status; /* mac addr check */ @@ -8759,6 +8924,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + /* This mac addr do not exist, add new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); @@ -8768,12 +8940,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ - if (status == -ENOSPC && - !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) - dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++; return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + return -ENOSPC; } static int hclge_rm_mc_addr(struct hnae3_handle *handle, @@ -8810,12 +8988,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, if (status) return status; - if (hclge_is_all_function_id_zero(desc)) + if (hclge_is_all_function_id_zero(desc)) { /* All the vfid is zero, so need to delete this entry */ status = hclge_remove_mac_vlan_tbl(vport, &req); - else + if (!status) + hdev->used_mc_mac_num--; + } else { /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } } else if (status == -ENOENT) { status = 0; } @@ -9391,7 +9572,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 69cd8f87b4c864..9e1eede599ec8f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -403,8 +403,13 @@ struct hclge_tm_info { u8 pfc_en; /* PFC enabled or not for user priority */ }; +/* max number of mac statistics on each version */ +#define HCLGE_MAC_STATS_MAX_NUM_V1 84 +#define HCLGE_MAC_STATS_MAX_NUM_V2 105 + struct hclge_comm_stats_str { char desc[ETH_GSTRING_LEN]; + u32 stats_num; unsigned long offset; }; @@ -412,6 +417,7 @@ struct hclge_comm_stats_str { struct hclge_mac_stats { u64 mac_tx_mac_pause_num; u64 mac_rx_mac_pause_num; + u64 rsv0; u64 mac_tx_pfc_pri0_pkt_num; u64 mac_tx_pfc_pri1_pkt_num; u64 mac_tx_pfc_pri2_pkt_num; @@ -448,7 +454,7 @@ struct hclge_mac_stats { u64 mac_tx_1519_2047_oct_pkt_num; u64 mac_tx_2048_4095_oct_pkt_num; u64 mac_tx_4096_8191_oct_pkt_num; - u64 rsv0; + u64 rsv1; u64 mac_tx_8192_9216_oct_pkt_num; u64 mac_tx_9217_12287_oct_pkt_num; u64 mac_tx_12288_16383_oct_pkt_num; @@ -475,7 +481,7 @@ struct hclge_mac_stats { u64 
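The multicast path now tracks used_mc_mac_num against the firmware-reported mc_mac_size, returning -ENOSPC for genuinely new addresses once the table is full and decrementing only when the last referencing function removes an entry. A toy model of that accounting with a made-up capacity:

#include <errno.h>
#include <stdio.h>

#define MC_MAC_SIZE 4	/* hypothetical dev_specs.mc_mac_size */

static unsigned int used_mc_mac_num;

static int add_mc_addr(int already_present)
{
	if (!already_present && used_mc_mac_num >= MC_MAC_SIZE)
		return -ENOSPC;		/* table full; caller rate-limits the log */
	if (!already_present)
		used_mc_mac_num++;	/* count only brand-new entries */
	return 0;
}

static void rm_mc_addr(int last_reference)
{
	if (last_reference && used_mc_mac_num)
		used_mc_mac_num--;	/* entry really deleted from the table */
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < 5; i++)
		ret = add_mc_addr(0);
	printf("last add returned %d, %u entries in use\n", ret, used_mc_mac_num);
	rm_mc_addr(1);
	printf("after removal: %u entries in use\n", used_mc_mac_num);
	return 0;
}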
mac_rx_1519_2047_oct_pkt_num; u64 mac_rx_2048_4095_oct_pkt_num; u64 mac_rx_4096_8191_oct_pkt_num; - u64 rsv1; + u64 rsv2; u64 mac_rx_8192_9216_oct_pkt_num; u64 mac_rx_9217_12287_oct_pkt_num; u64 mac_rx_12288_16383_oct_pkt_num; @@ -498,6 +504,28 @@ struct hclge_mac_stats { u64 mac_rx_pfc_pause_pkt_num; u64 mac_tx_ctrl_pkt_num; u64 mac_rx_ctrl_pkt_num; + + /* duration of pfc */ + u64 mac_tx_pfc_pri0_xoff_time; + u64 mac_tx_pfc_pri1_xoff_time; + u64 mac_tx_pfc_pri2_xoff_time; + u64 mac_tx_pfc_pri3_xoff_time; + u64 mac_tx_pfc_pri4_xoff_time; + u64 mac_tx_pfc_pri5_xoff_time; + u64 mac_tx_pfc_pri6_xoff_time; + u64 mac_tx_pfc_pri7_xoff_time; + u64 mac_rx_pfc_pri0_xoff_time; + u64 mac_rx_pfc_pri1_xoff_time; + u64 mac_rx_pfc_pri2_xoff_time; + u64 mac_rx_pfc_pri3_xoff_time; + u64 mac_rx_pfc_pri4_xoff_time; + u64 mac_rx_pfc_pri5_xoff_time; + u64 mac_rx_pfc_pri6_xoff_time; + u64 mac_rx_pfc_pri7_xoff_time; + + /* duration of pause */ + u64 mac_tx_pause_xoff_time; + u64 mac_rx_pause_xoff_time; }; #define HCLGE_STATS_TIMER_INTERVAL 300UL @@ -938,6 +966,8 @@ struct hclge_dev { u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num; DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c index f478770299c6cd..fdc19868b818e2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; struct hclgevf_devlink_priv *priv; struct devlink *devlink; - int ret; devlink = devlink_alloc(&hclgevf_devlink_ops, @@ -122,28 +121,15 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - - devlink_reload_enable(devlink); - + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclgevf_devlink_uninit(struct hclgevf_dev *hdev) { struct devlink *devlink = hdev->devlink; - devlink_reload_disable(devlink); - devlink_unregister(devlink); devlink_free(devlink); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index cf00ad7bb881f3..645b2c0011e6bc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1349,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 6e11ee339f12f5..60ae8bfc5f69a7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int 
hinic_devlink_register(struct hinic_devlink_priv *priv) +void hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); - return devlink_register(devlink); + devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index 9e315011015c56..46760d607b9b08 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -110,7 +110,7 @@ struct host_image_st { struct devlink *hinic_devlink_alloc(struct device *dev); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct hinic_devlink_priv *priv); +void hinic_devlink_register(struct hinic_devlink_priv *priv); void hinic_devlink_unregister(struct hinic_devlink_priv *priv); int hinic_health_reporters_create(struct hinic_devlink_priv *priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index b431c300ef1b9d..a85667078b724b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -322,12 +322,10 @@ static int hinic_get_link_ksettings(struct net_device *netdev, } } - bitmap_copy(link_ksettings->link_modes.supported, - (unsigned long *)&settings.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_copy(link_ksettings->link_modes.advertising, - (unsigned long *)&settings.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(link_ksettings->link_modes.supported, + (unsigned long *)&settings.supported); + linkmode_copy(link_ksettings->link_modes.advertising, + (unsigned long *)&settings.advertising); return 0; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 56b6b04e209b3e..657a15447bd0e3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } - err = hinic_devlink_register(hwdev->devlink_dev); - if (err) { - dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); - return err; - } - err = hinic_func_to_func_init(hwdev); if (err) { dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); - hinic_devlink_unregister(hwdev->devlink_dev); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); return err; } @@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) } hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); - + hinic_devlink_register(hwdev->devlink_dev); return 0; } @@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + hinic_devlink_unregister(hwdev->devlink_dev); hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); if (!HINIC_IS_VF(hwdev->hwif)) { @@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) hinic_func_to_func_free(hwdev); - hinic_devlink_unregister(hwdev->devlink_dev); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index ae707e305684b2..f9a766b8ac4348 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void 
*addr) err = change_mac_addr(netdev, new_mac); if (!err) - memcpy(netdev->dev_addr, new_mac, ETH_ALEN); + eth_hw_addr_set(netdev, new_mac); return err; } @@ -1181,6 +1181,7 @@ static int nic_dev_init(struct pci_dev *pdev) struct net_device *netdev; struct hinic_hwdev *hwdev; struct devlink *devlink; + u8 addr[ETH_ALEN]; int err, num_qps; devlink = hinic_devlink_alloc(&pdev->dev); @@ -1259,11 +1260,12 @@ static int nic_dev_init(struct pci_dev *pdev) pci_set_drvdata(pdev, netdev); - err = hinic_port_get_mac(nic_dev, netdev->dev_addr); + err = hinic_port_get_mac(nic_dev, addr); if (err) { dev_err(&pdev->dev, "Failed to get mac address\n"); goto err_get_mac; } + eth_hw_addr_set(netdev, addr); if (!is_valid_ether_addr(netdev->dev_addr)) { if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { @@ -1379,10 +1381,8 @@ static int hinic_probe(struct pci_dev *pdev, { int err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, HINIC_DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 0696f723228a17..3909c6a0af89f9 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = { static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) { - int i, size, retval; + int size, retval; if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) return -EBUSY; /* copy in the ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); @@ -461,7 +460,7 @@ static int init586(struct net_device *dev) ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); ias_cmd->cmd_link = 0xffff; - memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); + memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN); p->scb->cbl_offset = make16(ias_cmd); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index d5df131b183c75..bad94e4d50f4e7 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) goto out_free; } - memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, mac_addr->sa_data); /* Deregister old MAC in pHYP */ if (port->state == EHEA_PORT_UP) { @@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, SET_NETDEV_DEV(dev, port_dev); /* initialize net_device structure */ - memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, (u8 *)&port->mac_addr); dev->netdev_ops = &ehea_netdev_ops; ehea_set_ethtool_ops(dev); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 664a91af662df8..6b3fc8823c5410 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1013,7 +1013,7 @@ static int emac_set_mac_address(struct net_device *ndev, void *sa) mutex_lock(&dev->link_lock); - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); emac_rx_disable(dev); emac_tx_disable(dev); @@ -2848,7 +2848,6 @@ static int 
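Several of the conversions above replace direct writes to netdev->dev_addr with eth_hw_addr_set(), so only the networking core touches the (now effectively read-only) address storage. A generic accessor of the same shape, using simplified types that are not the real struct net_device:

#include <stdio.h>
#include <string.h>

struct net_device {
	unsigned char dev_addr[6];	/* treat as private to the core */
};

/* model of eth_hw_addr_set(): the one sanctioned way to update dev_addr */
static void eth_hw_addr_set_model(struct net_device *dev, const unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, sizeof(dev->dev_addr));
}

int main(void)
{
	struct net_device dev;
	const unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	eth_hw_addr_set_model(&dev, mac);
	printf("%02x:...:%02x\n", dev.dev_addr[0], dev.dev_addr[5]);
	return 0;
}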
emac_init_phy(struct emac_instance *dev) static int emac_init_config(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; - const void *p; int err; /* Read config from device-tree */ @@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev) } /* Read MAC-address */ - p = of_get_property(np, "local-mac-address", NULL); - if (p == NULL) { - printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", - np); - return -ENXIO; + err = of_get_ethdev_address(np, dev->ndev); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n"); + return err; } - memcpy(dev->ndev->dev_addr, p, ETH_ALEN); /* IAHT and GAHT filter parameterization */ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3d9b4f99d357f7..45ba40cf4d075e 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -483,17 +483,6 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, return rc; } -static u64 ibmveth_encode_mac_addr(u8 *mac) -{ - int i; - u64 encoded = 0; - - for (i = 0; i < ETH_ALEN; i++) - encoded = (encoded << 8) | mac[i]; - - return encoded; -} - static int ibmveth_open(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); @@ -553,7 +542,7 @@ static int ibmveth_open(struct net_device *netdev) adapter->rx_queue.num_slots = rxq_entries; adapter->rx_queue.toggle = 1; - mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); + mac_address = ether_addr_to_u64(netdev->dev_addr); rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; @@ -605,17 +594,13 @@ static int ibmveth_open(struct net_device *netdev) } rc = -ENOMEM; - adapter->bounce_buffer = - kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); - if (!adapter->bounce_buffer) - goto out_free_irq; - adapter->bounce_buffer_dma = - dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, - netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - netdev_err(netdev, "unable to map bounce buffer\n"); - goto out_free_bounce_buffer; + adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev, + netdev->mtu + IBMVETH_BUFF_OH, + &adapter->bounce_buffer_dma, GFP_KERNEL); + if (!adapter->bounce_buffer) { + netdev_err(netdev, "unable to alloc bounce buffer\n"); + goto out_free_irq; } netdev_dbg(netdev, "initial replenish cycle\n"); @@ -627,8 +612,6 @@ static int ibmveth_open(struct net_device *netdev) return 0; -out_free_bounce_buffer: - kfree(adapter->bounce_buffer); out_free_irq: free_irq(netdev->irq, netdev); out_free_buffer_pools: @@ -702,10 +685,9 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); - dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, - adapter->netdev->mtu + IBMVETH_BUFF_OH, - DMA_BIDIRECTIONAL); - kfree(adapter->bounce_buffer); + dma_free_coherent(&adapter->vdev->dev, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + adapter->bounce_buffer, adapter->bounce_buffer_dma); netdev_dbg(netdev, "close complete\n"); @@ -1483,7 +1465,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) netdev_for_each_mc_addr(ha, netdev) { /* add the multicast address to the filter table */ u64 mcast_addr; - mcast_addr = ibmveth_encode_mac_addr(ha->addr); + mcast_addr = ether_addr_to_u64(ha->addr); lpar_rc = 
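The dropped ibmveth_encode_mac_addr() and its replacement ether_addr_to_u64() both fold the six address bytes into the low 48 bits of a u64, most significant byte first. A standalone restatement of that encoding (not the kernel helper itself), with an example address:

#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t mac[6])
{
	uint64_t encoded = 0;
	int i;

	for (i = 0; i < 6; i++)
		encoded = (encoded << 8) | mac[i];	/* big-endian packing */
	return encoded;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
	return 0;
}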
h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1613,14 +1595,14 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - mac_address = ibmveth_encode_mac_addr(addr->sa_data); + mac_address = ether_addr_to_u64(addr->sa_data); rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); if (rc) { netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); return rc; } - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -1727,7 +1709,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->min_mtu = IBMVETH_MIN_MTU; netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; - memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); + eth_hw_addr_set(netdev, mac_addr_p); if (firmware_has_feature(FW_FEATURE_CMO)) memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6aa6ff89a76511..3cca51735421a7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *tx_scrq); +static void free_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, return -ETIMEDOUT; } +/** + * reuse_ltb() - Check if a long term buffer can be reused + * @ltb: The long term buffer to be checked + * @size: The size of the long term buffer. + * + * An LTB can be reused unless its size has changed. + * + * Return: Return true if the LTB can be reused, false otherwise. + */ +static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size) +{ + return (ltb->buff && ltb->size == size); +} + +/** + * alloc_long_term_buff() - Allocate a long term buffer (LTB) + * + * @adapter: ibmvnic adapter associated to the LTB + * @ltb: container object for the LTB + * @size: size of the LTB + * + * Allocate an LTB of the specified size and notify VIOS. + * + * If the given @ltb already has the correct size, reuse it. Otherwise if + * its non-NULL, free it. Then allocate a new one of the correct size. + * Notify the VIOS either way since we may now be working with a new VIOS. + * + * Allocating larger chunks of memory during resets, specially LPM or under + * low memory situations can cause resets to fail/timeout and for LPAR to + * lose connectivity. So hold onto the LTB even if we fail to communicate + * with the VIOS and reuse it on next open. Free LTB when adapter is closed. + * + * Return: 0 if we were able to allocate the LTB and notify the VIOS and + * a negative value otherwise. 
+ */ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; int rc; - ltb->size = size; - ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, - GFP_KERNEL); + if (!reuse_ltb(ltb, size)) { + dev_dbg(dev, + "LTB size changed from 0x%llx to 0x%x, reallocating\n", + ltb->size, size); + free_long_term_buff(adapter, ltb); + } - if (!ltb->buff) { - dev_err(dev, "Couldn't alloc long term buffer\n"); - return -ENOMEM; + if (ltb->buff) { + dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); + } else { + ltb->buff = dma_alloc_coherent(dev, size, <b->addr, + GFP_KERNEL); + if (!ltb->buff) { + dev_err(dev, "Couldn't alloc long term buffer\n"); + return -ENOMEM; + } + ltb->size = size; + + ltb->map_id = find_first_zero_bit(adapter->map_ids, + MAX_MAP_ID); + bitmap_set(adapter->map_ids, ltb->map_id, 1); + + dev_dbg(dev, + "Allocated new LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); } - ltb->map_id = adapter->map_id; - adapter->map_id++; + + /* Ensure ltb is zeroed - specially when reusing it. */ + memset(ltb->buff, 0, ltb->size); mutex_lock(&adapter->fw_lock); adapter->fw_done_rc = 0; @@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); if (rc) { - dev_err(dev, - "Long term map request aborted or timed out,rc = %d\n", + dev_err(dev, "LTB map request aborted or timed out, rc = %d\n", rc); goto out; } if (adapter->fw_done_rc) { - dev_err(dev, "Couldn't map long term buffer,rc = %d\n", + dev_err(dev, "Couldn't map LTB, rc = %d\n", adapter->fw_done_rc); rc = -1; goto out; } rc = 0; out: - if (rc) { - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - ltb->buff = NULL; - } + /* don't free LTB on communication error - see function header */ mutex_unlock(&adapter->fw_lock); return rc; } @@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + ltb->buff = NULL; + /* mark this map_id free */ + bitmap_clear(adapter->map_ids, ltb->map_id, 1); ltb->map_id = 0; } -static int reset_long_term_buff(struct ibmvnic_adapter *adapter, - struct ibmvnic_long_term_buff *ltb) -{ - struct device *dev = &adapter->vdev->dev; - int rc; - - memset(ltb->buff, 0, ltb->size); - - mutex_lock(&adapter->fw_lock); - adapter->fw_done_rc = 0; - - reinit_completion(&adapter->fw_done); - rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - if (rc) { - mutex_unlock(&adapter->fw_lock); - return rc; - } - - rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); - if (rc) { - dev_info(dev, - "Reset failed, long term map request timed out or aborted\n"); - mutex_unlock(&adapter->fw_lock); - return rc; - } - - if (adapter->fw_done_rc) { - dev_info(dev, - "Reset failed, attempting to free and reallocate buffer\n"); - free_long_term_buff(adapter, ltb); - mutex_unlock(&adapter->fw_lock); - return alloc_long_term_buff(adapter, ltb, ltb->size); - } - mutex_unlock(&adapter->fw_lock); - return 0; -} - static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, * be 0. 
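Two ideas from the long-term-buffer rework, modelled in plain C: a buffer is reused only when its size is unchanged, and map IDs now come from a bitmap (find_first_zero_bit()/bitmap_set()/bitmap_clear() in the driver) so freed IDs can be recycled. The 32-entry bitmap and the buffer size here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define MAX_MAP_ID 32

static unsigned int map_ids;	/* bit i set => map id i in use */

static int alloc_map_id(void)
{
	int i;

	for (i = 0; i < MAX_MAP_ID; i++)
		if (!(map_ids & (1U << i))) {
			map_ids |= 1U << i;
			return i;
		}
	return -1;	/* no free id */
}

static void free_map_id(int id)
{
	map_ids &= ~(1U << id);
}

static bool reuse_ltb(const void *buff, unsigned long cur_size, unsigned long new_size)
{
	return buff && cur_size == new_size;	/* reuse unless size changed */
}

int main(void)
{
	char ltb[4096];
	int id = alloc_map_id();

	printf("got map id %d, reuse=%d\n", id,
	       reuse_ltb(ltb, sizeof(ltb), sizeof(ltb)));
	free_map_id(id);
	return 0;
}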
*/ for (i = ind_bufp->index; i < count; ++i) { - skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); + index = pool->free_map[pool->next_free]; + + /* We maybe reusing the skb from earlier resets. Allocate + * only if necessary. But since the LTB may have changed + * during reset (see init_rx_pools()), update LTB below + * even if reusing skb. + */ + skb = pool->rx_buff[index].skb; if (!skb) { - dev_err(dev, "Couldn't replenish rx buff\n"); - adapter->replenish_no_mem++; - break; + skb = netdev_alloc_skb(adapter->netdev, + pool->buff_size); + if (!skb) { + dev_err(dev, "Couldn't replenish rx buff\n"); + adapter->replenish_no_mem++; + break; + } } - index = pool->free_map[pool->next_free]; - - if (pool->rx_buff[index].skb) - dev_err(dev, "Inconsistent free_map!\n"); + pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ offset = index * pool->buff_size; dst = pool->long_term_buff.buff + offset; memset(dst, 0, pool->buff_size); dma_addr = pool->long_term_buff.addr + offset; - pool->rx_buff[index].data = dst; - pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + /* add the skb to an rx_buff in the pool */ + pool->rx_buff[index].data = dst; pool->rx_buff[index].dma = dma_addr; pool->rx_buff[index].skb = skb; pool->rx_buff[index].pool_index = pool->index; pool->rx_buff[index].size = pool->buff_size; + /* queue the rx_buff for the next send_subcrq_indirect */ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; memset(sub_crq, 0, sizeof(*sub_crq)); sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; @@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, shift = 8; #endif sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); - pool->next_free = (pool->next_free + 1) % pool->size; + + /* if send_subcrq_indirect queue is full, flush to VIOS */ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || i == count - 1) { lpar_rc = @@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) return 0; } -static int reset_rx_pools(struct ibmvnic_adapter *adapter) -{ - struct ibmvnic_rx_pool *rx_pool; - u64 buff_size; - int rx_scrqs; - int i, j, rc; - - if (!adapter->rx_pool) - return -1; - - buff_size = adapter->cur_rx_buf_sz; - rx_scrqs = adapter->num_active_rx_pools; - for (i = 0; i < rx_scrqs; i++) { - rx_pool = &adapter->rx_pool[i]; - - netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); - - if (rx_pool->buff_size != buff_size) { - free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rc = alloc_long_term_buff(adapter, - &rx_pool->long_term_buff, - rx_pool->size * - rx_pool->buff_size); - } else { - rc = reset_long_term_buff(adapter, - &rx_pool->long_term_buff); - } - - if (rc) - return rc; - - for (j = 0; j < rx_pool->size; j++) - rx_pool->free_map[j] = j; - - memset(rx_pool->rx_buff, 0, - rx_pool->size * sizeof(struct ibmvnic_rx_buff)); - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; - rx_pool->active = 1; - } - - return 0; -} - +/** + * release_rx_pools() - Release any rx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. 
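replenish_rx_pool() now pulls slots through free_map[]/next_free and keeps any skb already attached to a slot instead of reallocating after a reset. A tiny model of that index ring (skbs and DMA setup elided):

#include <stdio.h>

#define POOL_SIZE 4
#define INVALID_MAP (-1)

static int free_map[POOL_SIZE] = { 0, 1, 2, 3 };
static int next_free;

static int take_free_index(void)
{
	int index = free_map[next_free];

	free_map[next_free] = INVALID_MAP;	/* slot handed to hardware */
	next_free = (next_free + 1) % POOL_SIZE;
	return index;
}

int main(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++)
		printf("replenish slot %d\n", take_free_index());
	return 0;
}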
+ */ static void release_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; @@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); kfree(rx_pool->free_map); + free_long_term_buff(adapter, &rx_pool->long_term_buff); if (!rx_pool->rx_buff) @@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->rx_pool); adapter->rx_pool = NULL; adapter->num_active_rx_pools = 0; + adapter->prev_rx_pool_size = 0; } +/** + * reuse_rx_pools() - Check if the existing rx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing rx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and size of each buffer) have not + * changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the rx pools can be reused, false otherwise. + */ +static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_buff_size, new_buff_size; + + if (!adapter->rx_pool) + return false; + + old_num_pools = adapter->num_active_rx_pools; + new_num_pools = adapter->req_rx_queues; + + old_pool_size = adapter->prev_rx_pool_size; + new_pool_size = adapter->req_rx_add_entries_per_subcrq; + + old_buff_size = adapter->prev_rx_buf_sz; + new_buff_size = adapter->cur_rx_buf_sz; + + /* Require buff size to be exactly same for now */ + if (old_buff_size != new_buff_size) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_rx_queues || + old_num_pools > adapter->max_rx_queues || + old_pool_size < adapter->min_rx_add_entries_per_subcrq || + old_pool_size > adapter->max_rx_add_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_rx_pools(): Initialize the set of receiver pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of receiver pools in the ibmvnic adapter associated + * with the net_device @netdev. If possible, reuse the existing rx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_rx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->vdev->dev; struct ibmvnic_rx_pool *rx_pool; - int rxadd_subcrqs; + u64 num_pools; + u64 pool_size; /* # of buffers in one pool */ u64 buff_size; int i, j; - rxadd_subcrqs = adapter->num_active_rx_scrqs; + pool_size = adapter->req_rx_add_entries_per_subcrq; + num_pools = adapter->req_rx_queues; buff_size = adapter->cur_rx_buf_sz; - adapter->rx_pool = kcalloc(rxadd_subcrqs, + if (reuse_rx_pools(adapter)) { + dev_dbg(dev, "Reusing rx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_rx_pools(adapter); + + adapter->rx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); if (!adapter->rx_pool) { @@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev) return -1; } - adapter->num_active_rx_pools = rxadd_subcrqs; + /* Set num_active_rx_pools early. If we fail below after partial + * allocation, release_rx_pools() will know how many to look for. 
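Condensed form of the reuse_rx_pools() test that follows: pools are kept when the buffer size is identical and either the pool count and depth are unchanged or the old values still fall inside the currently negotiated limits. All numbers below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct pool_params {
	unsigned long num_pools, pool_size, buff_size;
};

static bool reuse_pools(const struct pool_params *old,
			const struct pool_params *want,
			unsigned long min_pools, unsigned long max_pools,
			unsigned long min_depth, unsigned long max_depth)
{
	if (old->buff_size != want->buff_size)
		return false;			/* buffer size must match exactly */
	if (old->num_pools == want->num_pools &&
	    old->pool_size == want->pool_size)
		return true;			/* identical layout: reuse */
	return old->num_pools >= min_pools && old->num_pools <= max_pools &&
	       old->pool_size >= min_depth && old->pool_size <= max_depth;
}

int main(void)
{
	struct pool_params old = { 8, 512, 2048 }, want = { 8, 512, 2048 };

	printf("reuse=%d\n", reuse_pools(&old, &want, 1, 16, 64, 4096));
	return 0;
}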
+ */ + adapter->num_active_rx_pools = num_pools; - for (i = 0; i < rxadd_subcrqs; i++) { + for (i = 0; i < num_pools; i++) { rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", - i, adapter->req_rx_add_entries_per_subcrq, - buff_size); + i, pool_size, buff_size); - rx_pool->size = adapter->req_rx_add_entries_per_subcrq; + rx_pool->size = pool_size; rx_pool->index = i; rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rx_pool->active = 1; rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), GFP_KERNEL); if (!rx_pool->free_map) { - release_rx_pools(adapter); - return -1; + dev_err(dev, "Couldn't alloc free_map %d\n", i); + goto out_release; } rx_pool->rx_buff = kcalloc(rx_pool->size, @@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->rx_buff) { dev_err(dev, "Couldn't alloc rx buffers\n"); - release_rx_pools(adapter); - return -1; + goto out_release; } - - if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size)) { - release_rx_pools(adapter); - return -1; - } - - for (j = 0; j < rx_pool->size; ++j) - rx_pool->free_map[j] = j; - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; } - return 0; -} + adapter->prev_rx_pool_size = pool_size; + adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; -static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, - struct ibmvnic_tx_pool *tx_pool) -{ - int rc, i; - - rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); - if (rc) - return rc; - - memset(tx_pool->tx_buff, 0, - tx_pool->num_buffers * - sizeof(struct ibmvnic_tx_buff)); +update_ltb: + for (i = 0; i < num_pools; i++) { + rx_pool = &adapter->rx_pool[i]; + dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", + i, rx_pool->size, rx_pool->buff_size); - for (i = 0; i < tx_pool->num_buffers; i++) - tx_pool->free_map[i] = i; + if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size)) + goto out; - tx_pool->consumer_index = 0; - tx_pool->producer_index = 0; + for (j = 0; j < rx_pool->size; ++j) { + struct ibmvnic_rx_buff *rx_buff; - return 0; -} - -static int reset_tx_pools(struct ibmvnic_adapter *adapter) -{ - int tx_scrqs; - int i, rc; + rx_pool->free_map[j] = j; - if (!adapter->tx_pool) - return -1; + /* NOTE: Don't clear rx_buff->skb here - will leak + * memory! replenish_rx_pool() will reuse skbs or + * allocate as necessary. + */ + rx_buff = &rx_pool->rx_buff[j]; + rx_buff->dma = 0; + rx_buff->data = 0; + rx_buff->size = 0; + rx_buff->pool_index = 0; + } - tx_scrqs = adapter->num_active_tx_pools; - for (i = 0; i < tx_scrqs; i++) { - ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); - rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); - if (rc) - return rc; - rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); - if (rc) - return rc; + /* Mark pool "empty" so replenish_rx_pools() will + * update the LTB info for each buffer + */ + atomic_set(&rx_pool->available, 0); + rx_pool->next_alloc = 0; + rx_pool->next_free = 0; + /* replenish_rx_pool() may have called deactivate_rx_pools() + * on failover. Ensure pool is active now. + */ + rx_pool->active = 1; } - return 0; +out_release: + release_rx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. 
+ */ + return -1; } static void release_vpd_data(struct ibmvnic_adapter *adapter) @@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter, free_long_term_buff(adapter, &tx_pool->long_term_buff); } +/** + * release_tx_pools() - Release any tx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. + */ static void release_tx_pools(struct ibmvnic_adapter *adapter) { int i; + /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are + * both NULL or both non-NULL. So we only need to check one. + */ if (!adapter->tx_pool) return; @@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->tso_pool); adapter->tso_pool = NULL; adapter->num_active_tx_pools = 0; + adapter->prev_tx_pool_size = 0; } static int init_one_tx_pool(struct net_device *netdev, struct ibmvnic_tx_pool *tx_pool, - int num_entries, int buf_size) + int pool_size, int buf_size) { - struct ibmvnic_adapter *adapter = netdev_priv(netdev); int i; - tx_pool->tx_buff = kcalloc(num_entries, + tx_pool->tx_buff = kcalloc(pool_size, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) return -1; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - num_entries * buf_size)) - return -1; - - tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL); - if (!tx_pool->free_map) + tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) { + kfree(tx_pool->tx_buff); + tx_pool->tx_buff = NULL; return -1; + } - for (i = 0; i < num_entries; i++) + for (i = 0; i < pool_size; i++) tx_pool->free_map[i] = i; tx_pool->consumer_index = 0; tx_pool->producer_index = 0; - tx_pool->num_buffers = num_entries; + tx_pool->num_buffers = pool_size; tx_pool->buf_size = buf_size; return 0; } +/** + * reuse_tx_pools() - Check if the existing tx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing tx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and mtu) have not changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the tx pools can be reused, false otherwise. + */ +static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_mtu, new_mtu; + + if (!adapter->tx_pool) + return false; + + old_num_pools = adapter->num_active_tx_pools; + new_num_pools = adapter->num_active_tx_scrqs; + old_pool_size = adapter->prev_tx_pool_size; + new_pool_size = adapter->req_tx_entries_per_subcrq; + old_mtu = adapter->prev_mtu; + new_mtu = adapter->req_mtu; + + /* Require MTU to be exactly same to reuse pools for now */ + if (old_mtu != new_mtu) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_tx_queues || + old_num_pools > adapter->max_tx_queues || + old_pool_size < adapter->min_tx_entries_per_subcrq || + old_pool_size > adapter->max_tx_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_tx_pools(): Initialize the set of transmit pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of transmit pools in the ibmvnic adapter associated + * with the net_device @netdev. 
If possible, reuse the existing tx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_tx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int tx_subcrqs; + struct device *dev = &adapter->vdev->dev; + int num_pools; + u64 pool_size; /* # of buffers in pool */ u64 buff_size; - int i, rc; + int i, j, rc; + + num_pools = adapter->req_tx_queues; + + /* We must notify the VIOS about the LTB on all resets - but we only + * need to alloc/populate pools if either the number of buffers or + * size of each buffer in the pool has changed. + */ + if (reuse_tx_pools(adapter)) { + netdev_dbg(netdev, "Reusing tx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_tx_pools(adapter); + + pool_size = adapter->req_tx_entries_per_subcrq; + num_pools = adapter->num_active_tx_scrqs; - tx_subcrqs = adapter->num_active_tx_scrqs; - adapter->tx_pool = kcalloc(tx_subcrqs, + adapter->tx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) return -1; - adapter->tso_pool = kcalloc(tx_subcrqs, + adapter->tso_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + /* To simplify release_tx_pools() ensure that ->tx_pool and + * ->tso_pool are either both NULL or both non-NULL. + */ if (!adapter->tso_pool) { kfree(adapter->tx_pool); adapter->tx_pool = NULL; return -1; } - adapter->num_active_tx_pools = tx_subcrqs; + /* Set num_active_tx_pools early. If we fail below after partial + * allocation, release_tx_pools() will know how many to look for. + */ + adapter->num_active_tx_pools = num_pools; + + buff_size = adapter->req_mtu + VLAN_HLEN; + buff_size = ALIGN(buff_size, L1_CACHE_BYTES); + + for (i = 0; i < num_pools; i++) { + dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", + i, adapter->req_tx_entries_per_subcrq, buff_size); - for (i = 0; i < tx_subcrqs; i++) { - buff_size = adapter->req_mtu + VLAN_HLEN; - buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], - adapter->req_tx_entries_per_subcrq, - buff_size); - if (rc) { - release_tx_pools(adapter); - return rc; - } + pool_size, buff_size); + if (rc) + goto out_release; rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], IBMVNIC_TSO_BUFS, IBMVNIC_TSO_BUF_SZ); - if (rc) { - release_tx_pools(adapter); - return rc; - } + if (rc) + goto out_release; + } + + adapter->prev_tx_pool_size = pool_size; + adapter->prev_mtu = adapter->req_mtu; + +update_ltb: + /* NOTE: All tx_pools have the same number of buffers (which is + * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS + * buffers (see calls init_one_tx_pool() for these). + * For consistency, we use tx_pool->num_buffers and + * tso_pool->num_buffers below. 
+ */ + rc = -1; + for (i = 0; i < num_pools; i++) { + struct ibmvnic_tx_pool *tso_pool; + struct ibmvnic_tx_pool *tx_pool; + u32 ltb_size; + + tx_pool = &adapter->tx_pool[i]; + ltb_size = tx_pool->num_buffers * tx_pool->buf_size; + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", + i, tx_pool->long_term_buff.buff, + tx_pool->num_buffers, tx_pool->buf_size); + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + + for (j = 0; j < tx_pool->num_buffers; j++) + tx_pool->free_map[j] = j; + + tso_pool = &adapter->tso_pool[i]; + ltb_size = tso_pool->num_buffers * tso_pool->buf_size; + if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", + i, tso_pool->long_term_buff.buff, + tso_pool->num_buffers, tso_pool->buf_size); + + tso_pool->consumer_index = 0; + tso_pool->producer_index = 0; + + for (j = 0; j < tso_pool->num_buffers; j++) + tso_pool->free_map[j] = j; } return 0; +out_release: + release_tx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. + */ + return rc; } static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) @@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter) { release_vpd_data(adapter); - release_tx_pools(adapter); - release_rx_pools(adapter); - release_napi(adapter); release_login_buffer(adapter); release_login_rsp_buffer(adapter); @@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter) return rc; } - adapter->map_id = 1; - rc = init_napi(adapter); if (rc) return rc; @@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev) if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); goto out; } } @@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev) ibmvnic_napi_disable(adapter); ibmvnic_disable_irqs(adapter); - - clean_rx_pools(adapter); - clean_tx_pools(adapter); } static int __ibmvnic_close(struct net_device *netdev) @@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev) rc = __ibmvnic_close(netdev); ibmvnic_cleanup(netdev); + clean_rx_pools(adapter); + clean_tx_pools(adapter); return rc; } @@ -1724,8 +1914,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ind_bufp = &tx_scrq->ind_buf; if (test_bit(0, &adapter->resetting)) { - if (!netif_subqueue_stopped(netdev, skb)) - netif_stop_subqueue(netdev, queue_num); dev_kfree_skb_any(skb); tx_send_failed++; @@ -2036,9 +2224,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { + struct net_device *netdev = adapter->netdev; u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; - struct net_device *netdev = adapter->netdev; int rc; netdev_dbg(adapter->netdev, @@ -2188,8 +2376,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, !adapter->rx_pool || !adapter->tso_pool || !adapter->tx_pool) { - release_rx_pools(adapter); - release_tx_pools(adapter); release_napi(adapter); release_vpd_data(adapter); @@ -2198,16 +2384,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, goto out; } else { - rc = reset_tx_pools(adapter); + rc = 
init_tx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + netdev_dbg(netdev, + "init tx pools failed (%d)\n", rc); goto out; } - rc = reset_rx_pools(adapter); + rc = init_rx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + netdev_dbg(netdev, + "init rx pools failed (%d)\n", rc); goto out; } @@ -2567,7 +2755,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, if (adapter->state == VNIC_PROBING) { netdev_warn(netdev, "Adapter reset during probe\n"); - adapter->init_done_rc = EAGAIN; + adapter->init_done_rc = -EAGAIN; ret = EAGAIN; goto err; } @@ -4576,8 +4764,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq, /* crq->change_mac_addr.mac_addr is the requested one * crq->change_mac_addr_rsp.mac_addr is the returned valid one. */ - ether_addr_copy(netdev->dev_addr, - &crq->change_mac_addr_rsp.mac_addr[0]); + eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); ether_addr_copy(adapter->mac_addr, &crq->change_mac_addr_rsp.mac_addr[0]); out: @@ -4778,9 +4965,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); return; } - netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", - crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, - crq->query_map_rsp.free_pages); + netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", + crq->query_map_rsp.page_size, + __be32_to_cpu(crq->query_map_rsp.tot_pages), + __be32_to_cpu(crq->query_map_rsp.free_pages)); } static void handle_query_cap_rsp(union ibmvnic_crq *crq, @@ -5069,11 +5257,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, */ adapter->login_pending = false; - if (!completion_done(&adapter->init_done)) { - complete(&adapter->init_done); - adapter->init_done_rc = -EIO; - } - if (adapter->state == VNIC_DOWN) rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); else @@ -5094,6 +5277,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, rc); adapter->failover_pending = false; } + + if (!completion_done(&adapter->init_done)) { + complete(&adapter->init_done); + if (!adapter->init_done_rc) + adapter->init_done_rc = -EAGAIN; + } + break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); @@ -5414,6 +5604,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter) crq->cur = 0; spin_lock_init(&crq->lock); + /* process any CRQs that were queued before we enabled interrupts */ + tasklet_schedule(&adapter->tasklet); + return retrc; req_irq_failed: @@ -5527,9 +5720,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; adapter->login_pending = false; + memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); + /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ + bitmap_set(adapter->map_ids, 0, 1); ether_addr_copy(adapter->mac_addr, mac_addr_p); - ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + eth_hw_addr_set(netdev, adapter->mac_addr); netdev->irq = dev->irq; netdev->netdev_ops = &ibmvnic_netdev_ops; netdev->ethtool_ops = &ibmvnic_ethtool_ops; @@ -5547,6 +5743,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->reset_done); init_completion(&adapter->stats_done); clear_bit(0, &adapter->resetting); + adapter->prev_rx_buf_sz = 0; + adapter->prev_mtu = 0; init_success = false; do { @@ -5558,7 +5756,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const 
struct vio_device_id *id) } rc = ibmvnic_reset_init(adapter, false); - } while (rc == EAGAIN); + } while (rc == -EAGAIN); /* We are ignoring the error from ibmvnic_reset_init() assuming that the * partner is not ready. CRQ is not active. When the partner becomes @@ -5647,6 +5845,8 @@ static void ibmvnic_remove(struct vio_dev *dev) unregister_netdevice(netdev); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); release_sub_crqs(adapter, 1); release_crq_queue(adapter); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22df602323bc02..b8e42f67d897e1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -827,7 +827,7 @@ struct ibmvnic_rx_buff { struct ibmvnic_rx_pool { struct ibmvnic_rx_buff *rx_buff; - int size; + int size; /* # of buffers in the pool */ int index; int buff_size; atomic_t available; @@ -967,6 +967,7 @@ struct ibmvnic_adapter { u64 min_mtu; u64 max_mtu; u64 req_mtu; + u64 prev_mtu; u64 max_multicast_filters; u64 vlan_header_insertion; u64 rx_vlan_header_insertion; @@ -979,13 +980,18 @@ struct ibmvnic_adapter { u64 opt_tx_entries_per_subcrq; u64 opt_rxba_entries_per_subcrq; __be64 tx_rx_desc_req; - u8 map_id; +#define MAX_MAP_ID 255 + DECLARE_BITMAP(map_ids, MAX_MAP_ID); u32 num_active_rx_scrqs; u32 num_active_rx_pools; u32 num_active_rx_napi; u32 num_active_tx_scrqs; u32 num_active_tx_pools; + + u32 prev_rx_pool_size; + u32 prev_tx_pool_size; u32 cur_rx_buf_sz; + u32 prev_rx_buf_sz; struct tasklet_struct tasklet; enum vnic_state state; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index ed8ea63bb17241..0b274d8fa45b68 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -313,6 +313,20 @@ config ICE To compile this driver as a module, choose M here. The module will be called ice. +config ICE_SWITCHDEV + bool "Switchdev Support" + default y + depends on ICE && NET_SWITCHDEV + help + Switchdev support provides internal SRIOV packet steering and switching. + + To enable it on running kernel use devlink tool: + #devlink dev eswitch set pci/0000:XX:XX.X mode switchdev + + Say Y here if you want to use Switchdev in the driver. + + If unsure, say N. 
+ config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 09ae1939e6db4c..5039a25369517b 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); e100_exec_cb(nic, NULL, e100_setup_iaaddr); return 0; @@ -2921,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e100_phy_init(nic); - memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); if (!is_valid_ether_addr(netdev->dev_addr)) { if (!eeprom_bad_csum_allow) { netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index bed4f040face41..669060a2e6aa6c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e_err(probe, "EEPROM Read Error\n"); } /* don't block initialization here due to bad MAC address */ - memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) e_err(probe, "Invalid MAC Address\n"); @@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (hw->mac_type == e1000_82542_rev2_0) e1000_enter_82542_rst(adapter); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); e1000_rar_set(hw, hw->mac_addr, 0); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 3178efd980066c..c3def0ee77884b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "hw.h" diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ebcb2a30add095..44e2dc8328a222 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2550,7 +2550,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter) /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ if (adapter->link_speed != SPEED_1000) { - current_itr = 0; new_itr = 4000; goto set_itr_now; } @@ -4787,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); @@ -7590,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error while reading MAC address\n"); - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", diff --git 
a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 2fb52bd6fc0e15..2cca9e84e31e1b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p) } if (!err) { - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); ether_addr_copy(hw->mac.addr, addr->sa_data); dev->addr_assign_type &= ~NET_ADDR_RANDOM; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index adfa2768f024dc..b473cb7d7c5751 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) if (is_valid_ether_addr(hw->mac.perm_addr)) { ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + eth_hw_addr_set(netdev, hw->mac.perm_addr); netdev->addr_assign_type &= ~NET_ADDR_RANDOM; } @@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev->addr_assign_type |= NET_ADDR_RANDOM; } - ether_addr_copy(netdev->dev_addr, hw->mac.addr); + eth_hw_addr_set(netdev, hw->mac.addr); ether_addr_copy(netdev->perm_addr, hw->mac.addr); if (!is_valid_ether_addr(netdev->perm_addr)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 39fb3d57c0574e..3d528fba754b4c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) return !!ch->fwd; } -static inline u8 *i40e_channel_mac(struct i40e_channel *ch) +static inline const u8 *i40e_channel_mac(struct i40e_channel *ch) { if (i40e_is_channel_macvlan(ch)) return ch->fwd->netdev->dev_addr; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e04b540cedc85c..ba862131b9bdf2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) */ spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -13425,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index e7e778ca074c0f..ea06e957393e62 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) { u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; - struct xdp_buff **bi, *xdp; + struct xdp_buff **xdp; + u32 nb_buffs, i; dma_addr_t dma; - bool ok = true; rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = i40e_rx_bi(rx_ring, ntu); - do { - xdp = 
xsk_buff_alloc(rx_ring->xsk_pool); - if (!xdp) { - ok = false; - goto no_buffers; - } - *bi = xdp; - dma = xsk_buff_xdp_get_dma(xdp); + xdp = i40e_rx_bi(rx_ring, ntu); + + nb_buffs = min_t(u16, count, rx_ring->count - ntu); + nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); + if (!nb_buffs) + return false; + + i = nb_buffs; + while (i--) { + dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->read.hdr_addr = 0; rx_desc++; - bi++; - ntu++; - - if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = i40e_rx_bi(rx_ring, 0); - ntu = 0; - } - } while (--count); + xdp++; + } -no_buffers: - if (rx_ring->next_to_use != ntu) { - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.qword1.status_error_len = 0; - i40e_release_rx_desc(rx_ring, ntu); + ntu += nb_buffs; + if (ntu == rx_ring->count) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + xdp = i40e_rx_bi(rx_ring, 0); + ntu = 0; } - return ok; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + i40e_release_rx_desc(rx_ring, ntu); + + return count == nb_buffs; } /** @@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) break; bi = *i40e_rx_bi(rx_ring, next_to_clean); - bi->data_end = bi->data + size; + xsk_buff_set_size(bi, size); xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); xdp_res = i40e_run_xdp_zc(rx_ring, bi); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 68c80f04113c8d..e6e7c1da47fbe5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -177,6 +177,7 @@ enum iavf_state_t { __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ __IAVF_INIT_SW, /* got resources, setting up structs */ + __IAVF_INIT_FAILED, /* init failed, restarting procedure */ __IAVF_RESETTING, /* in reset */ __IAVF_COMM_FAILED, /* communication with PF failed */ /* Below here, watchdog is running */ @@ -225,7 +226,6 @@ struct iavf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work client_task; - struct delayed_work init_task; wait_queue_head_t down_waitqueue; struct iavf_q_vector *q_vectors; struct list_head vlan_filter_list; @@ -312,6 +312,7 @@ struct iavf_adapter { struct iavf_hw hw; /* defined in iavf_type.h */ enum iavf_state_t state; + enum iavf_state_t last_state; unsigned long crit_section; struct delayed_work watchdog_task; @@ -393,6 +394,51 @@ struct iavf_device { extern char iavf_driver_name[]; extern struct workqueue_struct *iavf_wq; +static inline const char *iavf_state_str(enum iavf_state_t state) +{ + switch (state) { + case __IAVF_STARTUP: + return "__IAVF_STARTUP"; + case __IAVF_REMOVE: + return "__IAVF_REMOVE"; + case __IAVF_INIT_VERSION_CHECK: + return "__IAVF_INIT_VERSION_CHECK"; + case __IAVF_INIT_GET_RESOURCES: + return "__IAVF_INIT_GET_RESOURCES"; + case __IAVF_INIT_SW: + return "__IAVF_INIT_SW"; + case __IAVF_INIT_FAILED: + return "__IAVF_INIT_FAILED"; + case __IAVF_RESETTING: + return "__IAVF_RESETTING"; + case __IAVF_COMM_FAILED: + return "__IAVF_COMM_FAILED"; + case __IAVF_DOWN: + return "__IAVF_DOWN"; + case __IAVF_DOWN_PENDING: + return "__IAVF_DOWN_PENDING"; + case __IAVF_TESTING: + return "__IAVF_TESTING"; + case __IAVF_RUNNING: + return "__IAVF_RUNNING"; + default: + return "__IAVF_UNKNOWN_STATE"; + } +} + +static inline void iavf_change_state(struct 
iavf_adapter *adapter, + enum iavf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } + dev_dbg(&adapter->pdev->dev, + "state transition from:%s to:%s\n", + iavf_state_str(adapter->last_state), + iavf_state_str(adapter->state)); +} + int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index cada4e0e40b48e..847d67e32a5401 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -14,7 +14,7 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); -static int iavf_init_get_resources(struct iavf_adapter *adapter); +static void iavf_init_get_resources(struct iavf_adapter *adapter); static int iavf_check_reset_complete(struct iavf_hw *hw); char iavf_driver_name[] = "iavf"; @@ -51,6 +51,15 @@ MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; struct workqueue_struct *iavf_wq; +/** + * iavf_pdev_to_adapter - go from pci_dev to adapter + * @pdev: pci_dev pointer + */ +static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + /** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -960,7 +969,7 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -1688,9 +1697,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) * * Function process __IAVF_STARTUP driver state. * When success the state is changed to __IAVF_INIT_VERSION_CHECK - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_startup(struct iavf_adapter *adapter) +static void iavf_startup(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; @@ -1729,9 +1738,10 @@ static int iavf_startup(struct iavf_adapter *adapter) iavf_shutdown_adminq(hw); goto err; } - adapter->state = __IAVF_INIT_VERSION_CHECK; + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1740,9 +1750,9 @@ static int iavf_startup(struct iavf_adapter *adapter) * * Function process __IAVF_INIT_VERSION_CHECK driver state. 
* When success the state is changed to __IAVF_INIT_GET_RESOURCES - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_version_check(struct iavf_adapter *adapter) +static void iavf_init_version_check(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; @@ -1753,7 +1763,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); goto err; } @@ -1776,10 +1786,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) err); goto err; } - adapter->state = __IAVF_INIT_GET_RESOURCES; - + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); + return; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1789,9 +1799,9 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) * Function process __IAVF_INIT_GET_RESOURCES driver state and * finishes driver initialization procedure. * When success the state is changed to __IAVF_DOWN - * when fails it returns -EAGAIN + * when fails the state is changed to __IAVF_INIT_FAILED **/ -static int iavf_init_get_resources(struct iavf_adapter *adapter) +static void iavf_init_get_resources(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -1819,7 +1829,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) */ iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return 0; + return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); @@ -1847,7 +1857,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } @@ -1893,7 +1903,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); rtnl_unlock(); @@ -1911,7 +1921,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) else iavf_init_rss(adapter); - return err; + return; err_mem: iavf_free_rss(adapter); err_register: @@ -1922,7 +1932,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) kfree(adapter->vf_res); adapter->vf_res = NULL; err: - return err; + iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** @@ -1941,9 +1951,50 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); + + if (adapter->flags & IAVF_FLAG_RESET_NEEDED && + adapter->state != __IAVF_RESETTING) { + iavf_change_state(adapter, __IAVF_RESETTING); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + } switch (adapter->state) { + case __IAVF_STARTUP: + iavf_startup(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case 
__IAVF_INIT_VERSION_CHECK: + iavf_init_version_check(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(30)); + return; + case __IAVF_INIT_GET_RESOURCES: + iavf_init_get_resources(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_FAILED: + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { + dev_err(&adapter->pdev->dev, + "Failed to communicate with PF; waiting before retry\n"); + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, (5 * HZ)); + return; + } + /* Try again from failed step*/ + iavf_change_state(adapter, adapter->last_state); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + return; case __IAVF_COMM_FAILED: reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; @@ -1952,23 +2003,19 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - queue_delayed_work(iavf_wq, &adapter->init_task, 10); - mutex_unlock(&adapter->crit_lock); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and + /* When init task contacts the PF and * gets everything set up again, it'll restart the * watchdog for us. Down, boy. Sit. Stay. Woof. */ - return; + iavf_change_state(adapter, __IAVF_STARTUP); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); - goto watchdog_done; + return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); @@ -1991,38 +2038,40 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: mutex_unlock(&adapter->crit_lock); return; default: - goto restart_watchdog; + return; } - /* check for hw reset */ + /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); queue_work(iavf_wq, &adapter->reset_task); - goto watchdog_done; + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, HZ * 2); + return; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); -watchdog_done: - if (adapter->state == __IAVF_RUNNING || - adapter->state == __IAVF_COMM_FAILED) - iavf_detect_recover_hung(&adapter->vsi); mutex_unlock(&adapter->crit_lock); restart_watchdog: + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); - queue_work(iavf_wq, &adapter->adminq_task); } static void iavf_disable_vf(struct iavf_adapter *adapter) @@ -2081,7 +2130,7 @@ 
static void iavf_disable_vf(struct iavf_adapter *adapter) adapter->netdev->flags &= ~IFF_UP; mutex_unlock(&adapter->crit_lock); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -2191,7 +2240,7 @@ static void iavf_reset_task(struct work_struct *work) } iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2291,11 +2340,14 @@ static void iavf_reset_task(struct work_struct *work) iavf_configure(adapter); + /* iavf_up_complete() will switch device back + * to __IAVF_RUNNING + */ iavf_up_complete(adapter); iavf_irq_enable(adapter, true); } else { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } mutex_unlock(&adapter->client_lock); @@ -2305,6 +2357,8 @@ static void iavf_reset_task(struct work_struct *work) reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); + if (running) + iavf_change_state(adapter, __IAVF_RUNNING); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -3226,6 +3280,13 @@ static int iavf_open(struct net_device *netdev) goto err_unlock; } + if (adapter->state == __IAVF_RUNNING && + !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { + dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); + err = 0; + goto err_unlock; + } + /* allocate transmit descriptors */ err = iavf_setup_all_tx_resources(adapter); if (err) @@ -3297,7 +3358,7 @@ static int iavf_close(struct net_device *netdev) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); @@ -3630,72 +3691,14 @@ int iavf_process_config(struct iavf_adapter *adapter) return 0; } -/** - * iavf_init_task - worker thread to perform delayed initialization - * @work: pointer to work_struct containing our data - * - * This task completes the work that was begun in probe. Due to the nature - * of VF-PF communications, we may need to wait tens of milliseconds to get - * responses back from the PF. Rather than busy-wait in probe and bog down the - * whole system, we'll do it in a task so we can sleep. - * This task only runs during driver init. Once we've established - * communications with the PF driver and set up our netdev, the watchdog - * takes over. 
- **/ -static void iavf_init_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - init_task.work); - struct iavf_hw *hw = &adapter->hw; - - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) { - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); - return; - } - switch (adapter->state) { - case __IAVF_STARTUP: - if (iavf_startup(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_VERSION_CHECK: - if (iavf_init_version_check(adapter) < 0) - goto init_failed; - break; - case __IAVF_INIT_GET_RESOURCES: - if (iavf_init_get_resources(adapter) < 0) - goto init_failed; - goto out; - default: - goto init_failed; - } - - queue_delayed_work(iavf_wq, &adapter->init_task, - msecs_to_jiffies(30)); - goto out; -init_failed: - if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { - dev_err(&adapter->pdev->dev, - "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - goto out; - } - queue_delayed_work(iavf_wq, &adapter->init_task, HZ); -out: - mutex_unlock(&adapter->crit_lock); -} - /** * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ static void iavf_shutdown(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); @@ -3705,7 +3708,7 @@ static void iavf_shutdown(struct pci_dev *pdev) if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; mutex_unlock(&adapter->crit_lock); @@ -3778,7 +3781,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. 
*/ pci_save_state(pdev); @@ -3822,8 +3825,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); - queue_delayed_work(iavf_wq, &adapter->init_task, + queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ @@ -3880,10 +3882,11 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) static int __maybe_unused iavf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter; u32 err; + adapter = iavf_pdev_to_adapter(pdev); + pci_set_master(pdev); rtnl_lock(); @@ -3902,7 +3905,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d) queue_work(iavf_wq, &adapter->reset_task); - netif_device_attach(netdev); + netif_device_attach(adapter->netdev); return err; } @@ -3918,8 +3921,9 @@ static int __maybe_unused iavf_resume(struct device *dev_d) **/ static void iavf_remove(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); + enum iavf_state_t prev_state = adapter->last_state; + struct net_device *netdev = adapter->netdev; struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_adv_rss *rss, *rsstmp; @@ -3929,8 +3933,8 @@ static void iavf_remove(struct pci_dev *pdev) int err; /* Indicate we are in remove and not to run reset_task */ mutex_lock(&adapter->remove_lock); - cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); cancel_delayed_work_sync(&adapter->client_task); if (adapter->netdev_registered) { unregister_netdev(netdev); @@ -3954,13 +3958,25 @@ static void iavf_remove(struct pci_dev *pdev) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_misc_irq_disable(adapter); iavf_free_misc_irq(adapter); + + /* In case we enter iavf_remove from erroneous state, free traffic irqs + * here, so as to not cause a kernel crash, when calling + * iavf_reset_interrupt_capability. 
+ */ + if ((adapter->last_state == __IAVF_RESETTING && + prev_state != __IAVF_DOWN) || + (adapter->last_state == __IAVF_RUNNING && + !(netdev->flags & IFF_UP))) + iavf_free_traffic_irqs(adapter); + iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 3c735968e1b852..8c3f0f77cb5742 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!v_retval) iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = @@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { /* refresh current mac address if changed */ - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } @@ -1735,7 +1735,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 4f538cdf42c1cf..c36faa7d14717a 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -26,10 +26,13 @@ ice-y := ice_main.o \ ice_devlink.o \ ice_fw_update.o \ ice_lag.o \ - ice_ethtool.o + ice_ethtool.o \ + ice_repr.o \ + ice_tc_lib.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o +ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 3c4f08d20414e0..bf4ecd9a517c8d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,10 +34,15 @@ #include #include #include +#include #include #include #include #include +#include +#include +#include +#include #include #include #include @@ -55,6 +60,7 @@ #include "ice_dcb.h" #include "ice_switch.h" #include "ice_common.h" +#include "ice_flow.h" #include "ice_sched.h" #include "ice_idc_int.h" #include "ice_virtchnl_pf.h" @@ -63,6 +69,8 @@ #include "ice_fdir.h" #include "ice_xsk.h" #include "ice_arfs.h" +#include "ice_repr.h" +#include "ice_eswitch.h" #include "ice_lag.h" #define ICE_BAR0 0 @@ -84,6 +92,7 @@ #define ICE_FDIR_MSIX 2 #define ICE_RDMA_NUM_AEQ_MSIX 4 #define ICE_MIN_RDMA_MSIX 2 +#define ICE_ESWITCH_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -101,6 +110,10 @@ #define ICE_INVAL_VFID 256 #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ + +#define ICE_CHNL_START_TC 1 +#define ICE_CHNL_MAX_TC 16 + #define ICE_MAX_RESET_WAIT 20 #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX 
+ 1) * 4) @@ -118,14 +131,24 @@ #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i])) +/* Minimum BW limit is 500 Kbps for any scheduler node */ +#define ICE_MIN_BW_LIMIT 500 +/* User can specify BW in either Kbit/Mbit/Gbit and OS converts it in bytes. + * use it to convert user specified BW limit into Kbps + */ +#define ICE_BW_KBPS_DIVISOR 125 + /* Macro for each VSI in a PF */ #define ice_for_each_vsi(pf, i) \ for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) -/* Macros for each Tx/Rx ring in a VSI */ +/* Macros for each Tx/Xdp/Rx ring in a VSI */ #define ice_for_each_txq(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_txq; (i)++) +#define ice_for_each_xdp_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++) + #define ice_for_each_rxq(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) @@ -139,6 +162,9 @@ #define ice_for_each_q_vector(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++) +#define ice_for_each_chnl_tc(i) \ + for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++) + #define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX) @@ -158,6 +184,29 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) +enum ice_feature { + ICE_F_DSCP, + ICE_F_SMA_CTRL, + ICE_F_MAX +}; + +DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key); + +struct ice_channel { + struct list_head list; + u8 type; + u16 sw_id; + u16 base_q; + u16 num_rxq; + u16 num_txq; + u16 vsi_num; + u8 ena_tc; + struct ice_aqc_vsi_props info; + u64 max_tx_rate; + u64 min_tx_rate; + struct ice_vsi *ch_vsi; +}; + struct ice_txq_meta { u32 q_teid; /* Tx-scheduler element identifier */ u16 q_id; /* Entry in VSI's txq_map bitmap */ @@ -175,7 +224,7 @@ struct ice_tc_info { struct ice_tc_cfg { u8 numtc; /* Total number of enabled TCs */ - u8 ena_tc; /* Tx map */ + u16 ena_tc; /* Tx map */ struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS]; }; @@ -266,8 +315,8 @@ struct ice_vsi { struct ice_sw *vsw; /* switch this VSI is on */ struct ice_pf *back; /* back pointer to PF */ struct ice_port_info *port_info; /* back pointer to port_info */ - struct ice_ring **rx_rings; /* Rx ring array */ - struct ice_ring **tx_rings; /* Tx ring array */ + struct ice_rx_ring **rx_rings; /* Rx ring array */ + struct ice_tx_ring **tx_rings; /* Tx ring array */ struct ice_q_vector **q_vectors; /* q_vector array */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -306,10 +355,6 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; - /* devlink port data */ - struct devlink_port devlink_port; - bool devlink_port_registered; - u16 max_frame; u16 rx_buf_len; @@ -344,11 +389,42 @@ struct ice_vsi { u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; - struct ice_ring **xdp_rings; /* XDP ring array */ + struct ice_tx_ring **xdp_rings; /* XDP ring array */ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct net_device **target_netdevs; + + struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */ + + /* Channel Specific Fields */ + struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC]; + u16 cnt_q_avail; + u16 next_base_q; /* next queue to be used for channel setup */ + struct list_head ch_list; + u16 num_chnl_rxq; + u16 num_chnl_txq; + u16 ch_rss_size; + u16 num_chnl_fltr; + /* 
store away rss size info before configuring ADQ channels so that, + * it can be used after tc-qdisc delete, to get back RSS setting as + * they were before + */ + u16 orig_rss_size; + /* this keeps tracks of all enabled TC with and without DCB + * and inclusive of ADQ, vsi->mqprio_opt keeps track of queue + * information + */ + u8 all_numtc; + u16 all_enatc; + + /* store away TC info, to be used for rebuild logic */ + u8 old_numtc; + u16 old_ena_tc; + + struct ice_channel *ch; + /* setup back reference, to which aggregator node this VSI * corresponds to */ @@ -377,6 +453,8 @@ struct ice_q_vector { cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; + struct ice_channel *ch; + char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ @@ -395,11 +473,14 @@ enum ice_pf_flags { ICE_FLAG_PTP, /* PTP is enabled by software */ ICE_FLAG_AUX_ENA, ICE_FLAG_ADV_FEATURES, + ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */ + ICE_FLAG_CLS_FLOWER, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, ICE_FLAG_NO_MEDIA, ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_MOD_POWER_UNSUPPORTED, + ICE_FLAG_PHY_FW_LOAD_FAILED, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, @@ -408,6 +489,12 @@ enum ice_pf_flags { ICE_PF_FLAGS_NBITS /* must be last */ }; +struct ice_switchdev_info { + struct ice_vsi *control_vsi; + struct ice_vsi *uplink_vsi; + bool is_running; +}; + struct ice_agg_node { u32 agg_id; #define ICE_MAX_VSIS_IN_AGG_NODE 64 @@ -421,6 +508,9 @@ struct ice_pf { struct devlink_region *nvm_region; struct devlink_region *devcaps_region; + /* devlink port data */ + struct devlink_port devlink_port; + /* OS reserved IRQ details */ struct msix_entry *msix_entries; struct ice_res_tracker *irq_tracker; @@ -434,6 +524,7 @@ struct ice_pf { struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + u16 eswitch_mode; /* current mode of eswitch */ /* Virtchnl/SR-IOV config info */ struct ice_vf *vf; u16 num_alloc_vfs; /* actual number of VFs allocated */ @@ -443,6 +534,7 @@ struct ice_pf { /* used to ratelimit the MDD event logging */ unsigned long last_printed_mdd_jiffies; DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ @@ -495,12 +587,19 @@ struct ice_pf { struct auxiliary_device *adev; int aux_idx; u32 sw_int_count; + /* count of tc_flower filters specific to channel (aka where filter + * action is "hw_tc ") + */ + u16 num_dmac_chnl_fltrs; + struct hlist_head tc_flower_fltr_list; __le64 nvm_phy_type_lo; /* NVM PHY type low */ __le64 nvm_phy_type_hi; /* NVM PHY type high */ struct ice_link_default_override_tlv link_dflt_override; struct ice_lag *lag; /* Link Aggregation information */ + struct ice_switchdev_info switchdev; + #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 @@ -512,8 +611,27 @@ struct ice_pf { struct ice_netdev_priv { struct ice_vsi *vsi; + struct ice_repr *repr; + /* indirect block callbacks on registered higher level devices + * (e.g. 
tunnel devices) + * + * tc_indr_block_cb_priv_list is used to look up indirect callback + * private data + */ + struct list_head tc_indr_block_priv_list; }; +/** + * ice_vector_ch_enabled + * @qv: pointer to q_vector, can be NULL + * + * This function returns true if vector is channel enabled otherwise false + */ +static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) +{ + return !!qv->ch; /* Enable it to run with TC */ +} + /** * ice_irq_dynamic_ena - Enable default interrupt generation settings * @hw: pointer to HW struct @@ -556,25 +674,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) return !!vsi->xdp_prog; } -static inline void ice_set_ring_xdp(struct ice_ring *ring) +static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) { ring->flags |= ICE_TX_FLAGS_RING_XDP; } /** * ice_xsk_pool - get XSK buffer pool bound to a ring - * @ring: ring to use + * @ring: Rx ring to use * * Returns a pointer to xdp_umem structure if there is a buffer pool present, * NULL otherwise. */ -static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring) +static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) { struct ice_vsi *vsi = ring->vsi; u16 qid = ring->q_index; - if (ice_ring_is_xdp(ring)) - qid -= vsi->num_xdp_txq; + if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(vsi->netdev, qid); +} + +/** + * ice_tx_xsk_pool - get XSK buffer pool bound to a ring + * @ring: Tx ring to use + * + * Returns a pointer to xdp_umem structure if there is a buffer pool present, + * NULL otherwise. Tx equivalent of ice_xsk_pool. + */ +static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + u16 qid; + + qid = ring->q_index - vsi->num_xdp_txq; if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) return NULL; @@ -596,6 +731,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) return NULL; } +/** + * ice_get_netdev_priv_vsi - return VSI associated with netdev priv. + * @np: private netdev structure + */ +static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np) +{ + /* In case of port representor return source port VSI. */ + if (np->repr) + return np->repr->src_vsi; + else + return np->vsi; +} + /** * ice_get_ctrl_vsi - Get the control VSI * @pf: PF instance @@ -609,6 +757,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) return pf->vsi[pf->ctrl_vsi_idx]; } +/** + * ice_is_switchdev_running - check if switchdev is configured + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV + * and switchdev is configured, false otherwise. 
+ */ +static inline bool ice_is_switchdev_running(struct ice_pf *pf) +{ + return pf->switchdev.is_running; +} + /** * ice_set_sriov_cap - enable SRIOV in PF flags * @pf: PF struct @@ -633,11 +793,37 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf) ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) #define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx) +/** + * ice_is_adq_active - any active ADQs + * @pf: pointer to PF + * + * This function returns true if there are any ADQs configured (which is + * determined by looking at VSI type (which should be VSI_PF), numtc, and + * TC_MQPRIO flag) otherwise return false + */ +static inline bool ice_is_adq_active(struct ice_pf *pf) +{ + struct ice_vsi *vsi; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return false; + + /* is ADQ configured */ + if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + return true; + + return false; +} + bool netif_is_ice(struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); int ice_vsi_open_ctrl(struct ice_vsi *vsi); +int ice_vsi_open(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); @@ -648,6 +834,7 @@ int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); +int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); int ice_destroy_xdp_rings(struct ice_vsi *vsi); int diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 21b4c7cd6f0502..4eef3488d86f8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem { */ #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 +#define ICE_AQC_RES_TYPE_RECIPE 0x05 #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 @@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 +#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) @@ -474,6 +476,53 @@ struct ice_aqc_vsi_props { #define ICE_MAX_NUM_RECIPES 64 +/* Add/Get Recipe (indirect 0x0290/0x0292) */ +struct ice_aqc_add_get_recipe { + __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */ + __le16 return_index; /* Input, used for Get cmd only */ + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_recipe_content { + u8 rid; +#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7) +#define ICE_AQ_SW_ID_LKUP_IDX 0 + u8 lkup_indx[5]; +#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7) +#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF + __le16 mask[5]; + u8 result_indx; +#define ICE_AQ_RECIPE_RESULT_DATA_S 0 +#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S) +#define ICE_AQ_RECIPE_RESULT_EN BIT(7) + u8 rsvd0[3]; + u8 act_ctrl_join_priority; + u8 act_ctrl_fwd_priority; + u8 
act_ctrl; +#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2) + u8 rsvd1; + __le32 dflt_act; +}; + +struct ice_aqc_recipe_data_elem { + u8 recipe_indx; + u8 resp_bits; + u8 rsvd0[2]; + u8 recipe_bitmap[8]; + u8 rsvd1[4]; + struct ice_aqc_recipe_content content; + u8 rsvd2[20]; +}; + +/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */ +struct ice_aqc_recipe_to_profile { + __le16 profile_id; + u8 rsvd[6]; + DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES); +}; + /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem { } __packed pdata; }; +/* Query PFC Mode (direct 0x0302) + * Set PFC Mode (direct 0x0303) + */ +struct ice_aqc_set_query_pfc_mode { + u8 pfc_mode; +/* For Query Command response, reserved in all other cases */ +#define ICE_AQC_PFC_VLAN_BASED_PFC 1 +#define ICE_AQC_PFC_DSCP_BASED_PFC 2 + u8 rsvd[15]; +}; /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; @@ -1126,6 +1185,7 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) u8 link_cfg_err; #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5) +#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) #define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7) u8 link_info; #define ICE_AQ_LINK_UP BIT(0) /* Link Status */ @@ -1209,6 +1269,7 @@ struct ice_aqc_set_event_mask { #define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7) #define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) #define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) +#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12) u8 reserved1[6]; }; @@ -1220,7 +1281,7 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; -struct ice_aqc_link_topo_addr { +struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) @@ -1246,6 +1307,10 @@ struct ice_aqc_link_topo_addr { #define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 u8 index; +}; + +struct ice_aqc_link_topo_addr { + struct ice_aqc_link_topo_params topo_params; __le16 handle; #define ICE_AQC_LINK_TOPO_HANDLE_S 0 #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) @@ -1268,6 +1333,7 @@ struct ice_aqc_link_topo_addr { struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 u8 rsvd[9]; }; @@ -1281,6 +1347,16 @@ struct ice_aqc_set_port_id_led { u8 rsvd[13]; }; +/* Set/Get GPIO (direct, 0x06EC/0x06ED) */ +struct ice_aqc_gpio { + __le16 gpio_ctrl_handle; +#define ICE_AQC_GPIO_HANDLE_S 0 +#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S) + u8 gpio_num; + u8 gpio_val; + u8 rsvd[12]; +}; + /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ice_aqc_sff_eeprom { u8 lport_num; @@ -1922,10 +1998,13 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_sw_rules sw_rules; + struct ice_aqc_add_get_recipe add_get_recipe; + struct ice_aqc_recipe_to_profile recipe_to_profile; struct ice_aqc_get_topo get_topo; struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; @@ -1936,6 +2015,7 @@ struct ice_aq_desc { struct ice_aqc_nvm_pkg_data pkg_data; struct ice_aqc_nvm_pass_comp_tbl 
pass_comp_tbl; struct ice_aqc_pf_vf_msg virt; + struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; struct ice_aqc_lldp_stop lldp_stop; @@ -2033,6 +2113,12 @@ enum ice_adminq_opc { ice_aqc_opc_update_vsi = 0x0211, ice_aqc_opc_free_vsi = 0x0213, + /* recipe commands */ + ice_aqc_opc_add_recipe = 0x0290, + ice_aqc_opc_recipe_to_profile = 0x0291, + ice_aqc_opc_get_recipe = 0x0292, + ice_aqc_opc_get_recipe_to_profile = 0x0293, + /* switch rules population commands */ ice_aqc_opc_add_sw_rules = 0x02A0, ice_aqc_opc_update_sw_rules = 0x02A1, @@ -2040,6 +2126,10 @@ enum ice_adminq_opc { ice_aqc_opc_clear_pf_cfg = 0x02A4, + /* DCB commands */ + ice_aqc_opc_query_pfc_mode = 0x0302, + ice_aqc_opc_set_pfc_mode = 0x0303, + /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, @@ -2064,6 +2154,8 @@ enum ice_adminq_opc { ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_set_port_id_led = 0x06E9, + ice_aqc_opc_set_gpio = 0x06EC, + ice_aqc_opc_get_gpio = 0x06ED, ice_aqc_opc_sff_eeprom = 0x06EE, /* NVM commands */ diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 88d98c9e5f914d..5daade32ea6258 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi) if (!vsi || vsi->type != ICE_VSI_PF) return; - arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, + arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), GFP_KERNEL); if (!arfs_fltr_list) return; @@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) return -EINVAL; base_idx = vsi->base_vector; - for (i = 0; i < vsi->num_q_vectors; i++) + ice_for_each_q_vector(vsi, i) if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, pf->msix_entries[base_idx + i].vector)) { ice_free_cpu_rx_rmap(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index c36057efc7ae3f..fa6cd63cbf1f09 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->rx.itr_setting = ICE_DFLT_RX_ITR; q_vector->tx.itr_mode = ITR_DYNAMIC; q_vector->rx.itr_mode = ITR_DYNAMIC; + q_vector->tx.type = ICE_TX_CONTAINER; + q_vector->rx.type = ICE_RX_CONTAINER; if (vsi->type == ICE_VSI_VF) goto out; @@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) { struct ice_q_vector *q_vector; struct ice_pf *pf = vsi->back; - struct ice_ring *ring; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; struct device *dev; dev = ice_pf_to_dev(pf); @@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_ring(ring, q_vector->tx) - ring->q_vector = NULL; - ice_for_each_ring(ring, q_vector->rx) - ring->q_vector = NULL; + ice_for_each_tx_ring(tx_ring, q_vector->tx) + tx_ring->q_vector = NULL; + ice_for_each_rx_ring(rx_ring, q_vector->rx) + rx_ring->q_vector = NULL; /* only VSI with an associated netdev is set up with NAPI */ if (vsi->netdev) @@ -201,15 +204,18 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) } /** - * ice_calc_q_handle - calculate the queue handle + * ice_calc_txq_handle - calculate the queue handle * @vsi: VSI that ring belongs to * @ring: 
ring to get the absolute queue index * @tc: traffic class number */ -static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) +static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc) { WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n"); + if (ring->ch) + return ring->q_index - ring->ch->base_q; + /* The idea behind the calculation is that we subtract the queue * offset of the TC that the ring belongs to from its absolute queue * index and as a result we get the queue's index within the TC. @@ -217,6 +223,30 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset; } +/** + * ice_eswitch_calc_txq_handle + * @ring: pointer to the ring whose unique index is needed + * + * To work correctly with many netdevs, ring->q_index of Tx rings on the + * switchdev VSI can repeat. Hardware ring setup requires a unique q_index. + * Calculate it here by finding the index of this ring in vsi->tx_rings. + * + * Return ICE_INVAL_Q_INDEX when the index isn't found. This should never + * happen, because the VSI is taken from ring->vsi, so the ring has to be + * present in this VSI. + */ +static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + int i; + + ice_for_each_txq(vsi, i) { + if (vsi->tx_rings[i] == ring) + return i; + } + + return ICE_INVAL_Q_INDEX; +} + /** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure @@ -224,7 +254,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) * This enables/disables XPS for a given Tx descriptor ring * based on the TCs enabled for the VSI that ring belongs to. */ -static void ice_cfg_xps_tx_ring(struct ice_ring *ring) +static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring) { if (!ring->q_vector || !ring->netdev) return; @@ -246,7 +276,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring) * Configure the Tx descriptor ring in TLAN context.
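The ice_calc_txq_handle() change above reduces an absolute Tx queue index to a handle relative to the start of the ring's TC, or of its ADQ channel when ring->ch is set. A tiny worked example of that arithmetic, with hypothetical offsets:

#include <assert.h>
#include <stdint.h>

/* Worked example of the handle math in ice_calc_txq_handle(): the
 * software queue handle is the ring's index relative to the start of
 * its TC (or of its ADQ channel). All values below are made up.
 */
int main(void)
{
	uint16_t q_index = 20;		/* absolute Tx queue index */
	uint16_t tc1_qoffset = 16;	/* TC1 starts at queue 16 */
	uint16_t ch_base_q = 18;	/* an ADQ channel starting at queue 18 */

	assert(q_index - tc1_qoffset == 4);	/* 5th queue of TC1 */
	assert(q_index - ch_base_q == 2);	/* 3rd queue of the channel */
	return 0;
}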
*/ static void -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) { struct ice_vsi *vsi = ring->vsi; struct ice_hw *hw = &vsi->back->hw; @@ -258,7 +288,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) /* Transmit Queue Length */ tlan_ctx->qlen = ring->count; - ice_set_cgd_num(tlan_ctx, ring); + ice_set_cgd_num(tlan_ctx, ring->dcb_tc); /* PF number */ tlan_ctx->pf_num = hw->pf_id; @@ -273,19 +303,28 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) case ICE_VSI_LB: case ICE_VSI_CTRL: case ICE_VSI_PF: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + if (ring->ch) + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; + else + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; case ICE_VSI_VF: /* Firmware expects vmvf_num to be absolute VF ID */ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; + case ICE_VSI_SWITCHDEV_CTRL: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; + break; default: return; } /* make sure the context is associated with the right VSI */ - tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); + if (ring->ch) + tlan_ctx->src_vsi = ring->ch->vsi_num; + else + tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); /* Restrict Tx timestamps to the PF VSI */ switch (vsi->type) { @@ -312,7 +351,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) * * Returns the offset value for ring into the data buffer. */ -static unsigned int ice_rx_offset(struct ice_ring *rx_ring) +static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) { if (ice_ring_uses_build_skb(rx_ring)) return ICE_SKB_PAD; @@ -328,7 +367,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring) * * Configure the Rx descriptor ring in RLAN context. */ -static int ice_setup_rx_ctx(struct ice_ring *ring) +static int ice_setup_rx_ctx(struct ice_rx_ring *ring) { int chain_len = ICE_MAX_CHAINED_RX_BUFS; struct ice_vsi *vsi = ring->vsi; @@ -439,7 +478,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) * * Return 0 on success and a negative value on error. 
*/ -int ice_vsi_cfg_rxq(struct ice_ring *ring) +int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); u16 num_bufs = ICE_DESC_UNUSED(ring); @@ -660,16 +699,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); q_vector->num_ring_tx = tx_rings_per_v; - q_vector->tx.ring = NULL; + q_vector->tx.tx_ring = NULL; q_vector->tx.itr_idx = ICE_TX_ITR; q_base = vsi->num_txq - tx_rings_rem; for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id]; tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; + tx_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = tx_ring; } tx_rings_rem -= tx_rings_per_v; @@ -677,16 +716,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); q_vector->num_ring_rx = rx_rings_per_v; - q_vector->rx.ring = NULL; + q_vector->rx.rx_ring = NULL; q_vector->rx.itr_idx = ICE_RX_ITR; q_base = vsi->num_rxq - rx_rings_rem; for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id]; rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; + rx_ring->next = q_vector->rx.rx_ring; + q_vector->rx.rx_ring = rx_ring; } rx_rings_rem -= rx_rings_per_v; } @@ -711,12 +750,13 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi) * @qg_buf: queue group buffer */ int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_aqc_add_tx_qgrp *qg_buf) { u8 buf_len = struct_size(qg_buf, txqs, 1); struct ice_tlan_ctx tlan_ctx = { 0 }; struct ice_aqc_add_txqs_perq *txq; + struct ice_channel *ch = ring->ch; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; @@ -746,10 +786,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ring->q_handle = ice_eswitch_calc_txq_handle(ring); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, - 1, qg_buf, buf_len, NULL); + if (ring->q_handle == ICE_INVAL_Q_INDEX) + return -ENODEV; + } else { + ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); + } + + if (ch) + status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, + ring->q_handle, 1, qg_buf, buf_len, + NULL); + else + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, + ring->q_handle, 1, qg_buf, buf_len, + NULL); if (status) { dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", ice_stat_str(status)); @@ -870,7 +923,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) */ int ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring *ring, + u16 rel_vmvf_num, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta) { struct ice_pf *pf = vsi->back; @@ -927,9 +980,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, * are needed for stopping Tx queue */ void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, +ice_fill_txq_meta(struct ice_vsi *vsi, struct 
ice_tx_ring *ring, struct ice_txq_meta *txq_meta) { + struct ice_channel *ch = ring->ch; u8 tc; if (IS_ENABLED(CONFIG_DCB)) @@ -940,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, txq_meta->q_id = ring->reg_idx; txq_meta->q_teid = ring->txq_teid; txq_meta->q_handle = ring->q_handle; - txq_meta->vsi_idx = vsi->idx; - txq_meta->tc = tc; + if (ch) { + txq_meta->vsi_idx = ch->ch_vsi->idx; + txq_meta->tc = 0; + } else { + txq_meta->vsi_idx = vsi->idx; + txq_meta->tc = tc; + } } diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h index 20e1c29aa68a01..b67dca417acb23 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.h +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -6,7 +6,7 @@ #include "ice.h" -int ice_vsi_cfg_rxq(struct ice_ring *ring); +int ice_vsi_cfg_rxq(struct ice_rx_ring *ring); int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg); int ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait); @@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi); void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); void ice_vsi_free_q_vectors(struct ice_vsi *vsi); int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_aqc_add_tx_qgrp *qg_buf); void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); void @@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); int ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring *ring, + u16 rel_vmvf_num, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); #endif /* _ICE_BASE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index df5ad4de1f00e4..b3066d0fea8ba8 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -71,6 +71,27 @@ bool ice_is_e810(struct ice_hw *hw) return hw->mac_type == ICE_MAC_E810; } +/** + * ice_is_e810t + * @hw: pointer to the hardware structure + * + * returns true if the device is E810T based, false if not. 
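The ice_is_e810t() helper documented here recognizes the E810-T board purely from the PCI device ID and subsystem device ID (ICE_DEV_ID_E810C_SFP plus the ICE_SUBDEV_ID_E810T/E810T2 values added to ice_devids.h later in this series). A stand-alone sketch of the same match that can be fed IDs taken from lspci output; the helper name below is illustrative, not part of the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ICE_DEV_ID_E810C_SFP	0x1593
#define ICE_SUBDEV_ID_E810T	0x000E
#define ICE_SUBDEV_ID_E810T2	0x000F

/* Same shape as ice_is_e810t(): match on the device ID first, then on
 * the subsystem device ID that distinguishes the E810-T boards.
 */
static bool is_e810t(uint16_t device_id, uint16_t subsystem_device_id)
{
	switch (device_id) {
	case ICE_DEV_ID_E810C_SFP:
		return subsystem_device_id == ICE_SUBDEV_ID_E810T ||
		       subsystem_device_id == ICE_SUBDEV_ID_E810T2;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", is_e810t(0x1593, 0x000E));	/* 1: E810-T */
	printf("%d\n", is_e810t(0x1593, 0x0001));	/* 0: other E810-C SFP board */
	return 0;
}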
+ */ +bool ice_is_e810t(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T || + hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + return true; + break; + default: + break; + } + + return false; +} + /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure @@ -242,11 +263,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << - ICE_AQC_LINK_TOPO_NODE_CTX_S); + cmd->addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << + ICE_AQC_LINK_TOPO_NODE_CTX_S); /* set node type */ - cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + cmd->addr.topo_params.node_type_ctx |= + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } @@ -570,6 +593,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) return ICE_ERR_NO_MEMORY; INIT_LIST_HEAD(&sw->vsi_list_map_head); + sw->prof_res_bm_init = 0; status = ice_init_def_sw_recp(hw); if (status) { @@ -596,17 +620,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) list_del(&v_pos_map->list_entry); devm_kfree(ice_hw_to_dev(hw), v_pos_map); } - recps = hw->switch_info->recp_list; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { - struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + recps = sw->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct ice_recp_grp_entry *rg_entry, *tmprg_entry; recps[i].root_rid = i; - mutex_destroy(&recps[i].filt_rule_lock); - list_for_each_entry_safe(lst_itr, tmp_entry, - &recps[i].filt_rules, list_entry) { - list_del(&lst_itr->list_entry); - devm_kfree(ice_hw_to_dev(hw), lst_itr); + list_for_each_entry_safe(rg_entry, tmprg_entry, + &recps[i].rg_list, l_entry) { + list_del(&rg_entry->l_entry); + devm_kfree(ice_hw_to_dev(hw), rg_entry); + } + + if (recps[i].adv_rule) { + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, + list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } + } else { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, + list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } } + if (recps[i].root_buf) + devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); } ice_rm_all_sw_replay_rule_info(hw); devm_kfree(ice_hw_to_dev(hw), sw->recp_list); @@ -4768,6 +4817,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, return 0; } +/** + * ice_aq_set_gpio + * @hw: pointer to the hw struct + * @gpio_ctrl_handle: GPIO controller node handle + * @pin_idx: IO Number of the GPIO that needs to be set + * @value: SW provide IO value to set in the LSB + * @cd: pointer to command details structure or NULL + * + * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology + */ +int +ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, + struct ice_sq_cd *cd) +{ + struct ice_aqc_gpio *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); + cmd = 
&desc.params.read_write_gpio; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + cmd->gpio_val = value ? 1 : 0; + + return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); +} + +/** + * ice_aq_get_gpio + * @hw: pointer to the hw struct + * @gpio_ctrl_handle: GPIO controller node handle + * @pin_idx: IO Number of the GPIO that needs to be set + * @value: IO value read + * @cd: pointer to command details structure or NULL + * + * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of + * the topology + */ +int +ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, + bool *value, struct ice_sq_cd *cd) +{ + struct ice_aqc_gpio *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); + cmd = &desc.params.read_write_gpio; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (status) + return ice_status_to_errno(status); + + *value = !!cmd->gpio_val; + return 0; +} + /** * ice_fw_supports_link_override * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index fb16070f02e242..65c1b3244264db 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +bool ice_is_e810t(struct ice_hw *hw); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, int ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 *value, struct ice_sq_cd *cd); +int +ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, + struct ice_sq_cd *cd); +int +ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, + bool *value, struct ice_sq_cd *cd); enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 849fcf60547908..241427cd9bc02e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. 
*/ #include "ice_common.h" +#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -735,6 +736,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd); } +/** + * ice_aq_set_pfc_mode - Set PFC mode + * @hw: pointer to the HW struct + * @pfc_mode: value of PFC mode to set + * @cd: pointer to command details structure or NULL + * + * This AQ call configures the PFC mode to DSCP-based PFC mode or + * VLAN-based PFC (0x0303) + */ +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_query_pfc_mode *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) + return -EINVAL; + + cmd = &desc.params.set_query_pfc_mode; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode); + + cmd->pfc_mode = pfc_mode; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (status) + return ice_status_to_errno(status); + + /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is + * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has + * been executed, check if cmd->pfc_mode is what was requested. If not, + * return an error. + */ + if (cmd->pfc_mode != pfc_mode) + return -EOPNOTSUPP; + + return 0; +} + /** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct @@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, } /** - * ice_add_dcb_tlv - Add all IEEE TLVs + * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of data to convert to TLV + */ +static void +ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_UP_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_DSCP2UP); + tlv->ouisubtype = htonl(ouisubtype); + + /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ + for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { + /* IPv4 mapping */ + buf[i] = dcbcfg->dscp_map[i]; + /* IPv6 mapping */ + buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i]; + } + + /* byte 64 - IPv4 untagged traffic */ + buf[i] = 0; + + /* byte 144 - IPv6 untagged traffic */ + buf[i + ICE_DSCP_IPV6_OFFSET] = 0; +} + +#define ICE_BYTES_PER_TC 8 +/** + * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV + * @tlv: location to build the TLV data + */ +static void +ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_ENF_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_ENFORCE); + tlv->ouisubtype = htonl(ouisubtype); + + /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */ + memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC)); +} + +/** + * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of the data to convert to TLV + */ +static void +ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u8 offset = 0; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_TC_BW_TLV_LEN); + tlv->typelen 
= htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_TCBW); + tlv->ouisubtype = htonl(ouisubtype); + + /* First Octect after subtype + * ---------------------------- + * | RSV | CBS | RSV | Max TCs | + * | 1b | 1b | 3b | 3b | + * ---------------------------- + */ + etscfg = &dcbcfg->etscfg; + buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; + + /* bytes 1 - 4 reserved */ + offset = 5; + + /* TC BW table + * bytes 0 - 7 for TC 0 - 7 + * + * TSA Assignment table + * bytes 8 - 15 for TC 0 - 7 + */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + buf[offset] = etscfg->tcbwtable[i]; + buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i]; + offset++; + } +} + +/** + * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store which holds the PFC CFG data + */ +static void +ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_PFC); + tlv->ouisubtype = htonl(ouisubtype); + + buf[0] = dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcena & 0xF; +} + +/** + * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs * @tlv: Fill TLV data in IEEE format * @dcbcfg: Local store which holds the DCB Config * @tlvid: Type of IEEE TLV @@ -1218,21 +1391,41 @@ static void ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, u16 tlvid) { - switch (tlvid) { - case ICE_IEEE_TLV_ID_ETS_CFG: - ice_add_ieee_ets_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_ETS_REC: - ice_add_ieee_etsrec_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_PFC_CFG: - ice_add_ieee_pfc_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_APP_PRI: - ice_add_ieee_app_pri_tlv(tlv, dcbcfg); - break; - default: - break; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) { + switch (tlvid) { + case ICE_IEEE_TLV_ID_ETS_CFG: + ice_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_ETS_REC: + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_PFC_CFG: + ice_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_APP_PRI: + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } + } else { + /* pfc_mode == ICE_QOS_MODE_DSCP */ + switch (tlvid) { + case ICE_TLV_ID_DSCP_UP: + ice_add_dscp_up_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_ENF: + ice_add_dscp_enf_tlv(tlv); + break; + case ICE_TLV_ID_DSCP_TC_BW: + ice_add_dscp_tc_bw_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_TO_PFC: + ice_add_dscp_pfc_tlv(tlv, dcbcfg); + break; + default: + break; + } } } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index d7e5e6178a21c1..9b6f87a889a633 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -22,6 +22,14 @@ #define ICE_CEE_DCBX_OUI 0x001B21 #define ICE_CEE_DCBX_TYPE 2 + +#define ICE_DSCP_OUI 0xFFFFFF +#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41 +#define ICE_DSCP_SUBTYPE_ENFORCE 0x42 +#define ICE_DSCP_SUBTYPE_TCBW 0x43 +#define ICE_DSCP_SUBTYPE_PFC 0x44 +#define ICE_DSCP_IPV6_OFFSET 80 + #define ICE_CEE_SUBTYPE_PG_CFG 2 #define ICE_CEE_SUBTYPE_PFC_CFG 3 #define ICE_CEE_SUBTYPE_APP_PRI 4 @@ -78,11 +86,20 @@ #define ICE_IEEE_TLV_ID_APP_PRI 6 #define ICE_TLV_ID_END_OF_LLDPPDU 7 #define ICE_TLV_ID_START 
ICE_IEEE_TLV_ID_ETS_CFG +#define ICE_TLV_ID_DSCP_UP 3 +#define ICE_TLV_ID_DSCP_ENF 4 +#define ICE_TLV_ID_DSCP_TC_BW 5 +#define ICE_TLV_ID_DSCP_TO_PFC 6 #define ICE_IEEE_ETS_TLV_LEN 25 #define ICE_IEEE_PFC_TLV_LEN 6 #define ICE_IEEE_APP_TLV_LEN 11 +#define ICE_DSCP_UP_TLV_LEN 148 +#define ICE_DSCP_ENF_TLV_LEN 132 +#define ICE_DSCP_TC_BW_TLV_LEN 25 +#define ICE_DSCP_PFC_TLV_LEN 6 + /* IEEE 802.1AB LLDP Organization specific TLV */ struct ice_lldp_org_tlv { __be16 typelen; @@ -120,6 +137,7 @@ struct ice_cee_app_prio { u8 prio_map; } __packed; +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 926cf748c5ecd1..a72e18320a227a 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -4,53 +4,11 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -/** - * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration - * @vsi: the VSI being configured - * @ena_tc: TC map to be enabled - */ -void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) -{ - struct net_device *netdev = vsi->netdev; - struct ice_pf *pf = vsi->back; - struct ice_dcbx_cfg *dcbcfg; - u8 netdev_tc; - int i; - - if (!netdev) - return; - - if (!ena_tc) { - netdev_reset_tc(netdev); - return; - } - - if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) - return; - - dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - - ice_for_each_traffic_class(i) - if (vsi->tc_cfg.ena_tc & BIT(i)) - netdev_set_tc_queue(netdev, - vsi->tc_cfg.tc_info[i].netdev_tc, - vsi->tc_cfg.tc_info[i].qcount_tx, - vsi->tc_cfg.tc_info[i].qoffset); - - for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - u8 ets_tc = dcbcfg->etscfg.prio_table[i]; - - /* Get the mapped netdev TC# for the UP */ - netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; - netdev_set_prio_tc_map(netdev, i, netdev_tc); - } -} - /** * ice_dcb_get_ena_tc - return bitmap of enabled TCs * @dcbcfg: DCB config to evaluate for enabled TCs */ -u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) +static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) { u8 i, num_tc, ena_tc = 1; @@ -178,6 +136,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) return ret; } +/** + * ice_get_first_droptc - returns number of first droptc + * @vsi: used to find the first droptc + * + * This function returns the value of first_droptc. + * When DCB is enabled, first droptc information is derived from enabled_tc + * and PFC enabled bits. 
otherwise this function returns 0 as there is one + * TC without DCB (tc0) + */ +static u8 ice_get_first_droptc(struct ice_vsi *vsi) +{ + struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; + struct device *dev = ice_pf_to_dev(vsi->back); + u8 num_tc, ena_tc_map, pfc_ena_map; + u8 i; + + num_tc = ice_dcb_get_num_tc(cfg); + + /* get bitmap of enabled TCs */ + ena_tc_map = ice_dcb_get_ena_tc(cfg); + + /* get bitmap of PFC enabled TCs */ + pfc_ena_map = cfg->pfc.pfcena; + + /* get first TC that is not PFC enabled */ + for (i = 0; i < num_tc; i++) { + if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) { + dev_dbg(dev, "first drop tc = %d\n", i); + return i; + } + } + + dev_dbg(dev, "first drop tc = 0\n"); + return 0; +} + +/** + * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration + * @vsi: pointer to the VSI instance + */ +void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) +{ + struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; + + switch (vsi->type) { + case ICE_VSI_PF: + vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); + vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); + break; + case ICE_VSI_CHNL: + vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi)); + vsi->tc_cfg.numtc = 1; + break; + case ICE_VSI_CTRL: + case ICE_VSI_LB: + default: + vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; + vsi->tc_cfg.numtc = 1; + } +} + /** * ice_dcb_get_tc - Get the TC associated with the queue * @vsi: ptr to the VSI @@ -194,17 +213,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index) */ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { - struct ice_ring *tx_ring, *rx_ring; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; u16 qoffset, qcount; int i, n; if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { /* Reset the TC information */ - for (i = 0; i < vsi->num_txq; i++) { + ice_for_each_txq(vsi, i) { tx_ring = vsi->tx_rings[i]; tx_ring->dcb_tc = 0; } - for (i = 0; i < vsi->num_rxq; i++) { + ice_for_each_rxq(vsi, i) { rx_ring = vsi->rx_rings[i]; rx_ring->dcb_tc = 0; } @@ -217,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) qoffset = vsi->tc_cfg.tc_info[n].qoffset; qcount = vsi->tc_cfg.tc_info[n].qcount_tx; - for (i = qoffset; i < (qoffset + qcount); i++) { - tx_ring = vsi->tx_rings[i]; - rx_ring = vsi->rx_rings[i]; - tx_ring->dcb_tc = n; - rx_ring->dcb_tc = n; + for (i = qoffset; i < (qoffset + qcount); i++) + vsi->tx_rings[i]->dcb_tc = n; + + qcount = vsi->tc_cfg.tc_info[n].qcount_rx; + for (i = qoffset; i < (qoffset + qcount); i++) + vsi->rx_rings[i]->dcb_tc = n; + } + /* applicable only if "all_enatc" is set, which will be set from + * setup_tc method as part of configuring channels + */ + if (vsi->all_enatc) { + u8 first_droptc = ice_get_first_droptc(vsi); + + /* When DCB is configured, TC for ADQ queues (which are really + * PF queues) should be the first drop TC of the main VSI + */ + ice_for_each_chnl_tc(n) { + if (!(vsi->all_enatc & BIT(n))) + break; + + qoffset = vsi->mqprio_qopt.qopt.offset[n]; + qcount = vsi->mqprio_qopt.qopt.count[n]; + for (i = qoffset; i < (qoffset + qcount); i++) { + vsi->tx_rings[i]->dcb_tc = first_droptc; + vsi->rx_rings[i]->dcb_tc = first_droptc; + } + } + } +} + +/** + * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig + * @pf: pointer to the PF instance + * @ena: true to enable VSIs, false to disable + * @locked: true if caller holds RTNL lock, false otherwise + * + * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV + * and CHNL need to be brought down. 
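The ice_get_first_droptc() helper above picks the lowest-numbered enabled TC that does not have PFC enabled on it. A stand-alone sketch of that bit test, with made-up bitmaps:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the selection logic in ice_get_first_droptc(): the first
 * "drop" TC is the lowest-numbered enabled TC without PFC (lossless
 * behavior) enabled on it.
 */
static uint8_t first_droptc(uint8_t ena_tc_map, uint8_t pfc_ena_map, uint8_t num_tc)
{
	uint8_t i;

	for (i = 0; i < num_tc; i++)
		if ((ena_tc_map & (1u << i)) && !(pfc_ena_map & (1u << i)))
			return i;

	return 0;	/* fall back to TC0, as the driver does */
}

int main(void)
{
	/* hypothetical config: TCs 0-3 enabled, PFC on TC0 and TC1 only */
	uint8_t ena = 0x0F, pfc = 0x03;

	printf("first drop TC = %u\n", first_droptc(ena, pfc, 4)); /* prints 2 */
	return 0;
}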
Following completion of DCB configuration + * the VSIs that were downed need to be brought up again. This helper function + * does both. + */ +static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked) +{ + int i; + + ice_for_each_vsi(pf, i) { + struct ice_vsi *vsi = pf->vsi[i]; + + if (!vsi) + continue; + + switch (vsi->type) { + case ICE_VSI_CHNL: + case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_PF: + if (ena) + ice_ena_vsi(vsi, locked); + else + ice_dis_vsi(vsi, locked); + break; + default: + continue; } } } @@ -330,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) */ if (!locked) rtnl_lock(); - ice_dis_vsi(pf_vsi, true); + + /* disable VSIs affected by DCB changes */ + ice_dcb_ena_dis_vsi(pf, false, true); memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg)); memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec)); @@ -358,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) ice_pf_dcb_recfg(pf); out: - ice_ena_vsi(pf_vsi, true); + /* enable previously downed VSIs */ + ice_dcb_ena_dis_vsi(pf, true, true); if (!locked) rtnl_unlock(); free_cfg: @@ -544,7 +624,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) * @ets_willing: configure ETS willing * @locked: was this function called with RTNL held */ -static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *dcbcfg; @@ -673,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) tc_map = ICE_DFLT_TRAFFIC_CLASS; ice_dcb_noncontig_cfg(pf); } + } else if (vsi->type == ICE_VSI_CHNL) { + tc_map = BIT(ice_get_first_droptc(vsi)); } else { tc_map = ICE_DFLT_TRAFFIC_CLASS; } @@ -683,6 +765,12 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) vsi->idx); continue; } + /* no need to proceed with remaining cfg if it is CHNL + * or switchdev VSI + */ + if (vsi->type == ICE_VSI_CHNL || + vsi->type == ICE_VSI_SWITCHDEV_CTRL) + continue; ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF) @@ -726,6 +814,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (err) + dev_info(dev, "Failed to set VLAN PFC mode\n"); + err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { dev_err(dev, "Failed to set local DCB config %d\n", @@ -814,7 +907,7 @@ void ice_update_dcb_stats(struct ice_pf *pf) * tag will already be configured with the correct ID and priority bits */ void -ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, +ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) { struct sk_buff *skb = first->skb; @@ -851,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_dcbx_cfg tmp_dcbx_cfg; bool need_reconfig = false; struct ice_port_info *pi; - struct ice_vsi *pf_vsi; u8 mib_type; int ret; @@ -927,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } - pf_vsi = ice_get_main_vsi(pf); - if (!pf_vsi) { - dev_dbg(dev, "PF VSI doesn't exist\n"); - goto out; - } - rtnl_lock(); - ice_dis_vsi(pf_vsi, true); + /* disable VSIs affected by DCB changes */ + ice_dcb_ena_dis_vsi(pf, false, true); ret = ice_query_port_ets(pf->hw.port_info, 
&buf, sizeof(buf), NULL); if (ret) { @@ -945,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* changes in configuration update VSI */ ice_pf_dcb_recfg(pf); - ice_ena_vsi(pf_vsi, true); + /* enable previously downed VSIs */ + ice_dcb_ena_dis_vsi(pf, true, true); unlock_rtnl: rtnl_unlock(); out: diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 261b6e2ed7bc24..4c421c842a13fc 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -15,7 +15,7 @@ #define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */ void ice_dcb_rebuild(struct ice_pf *pf); -u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi); bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue); @@ -28,13 +28,11 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); int ice_init_pf_dcb(struct ice_pf *pf, bool locked); void ice_update_dcb_stats(struct ice_pf *pf); void -ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, +ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first); void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); -void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); - /** * ice_find_q_in_range * @low: start of queue range for a TC i.e. offset of TC @@ -49,9 +47,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q) } static inline void -ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) +ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { - tlan_ctx->cgd_num = ring->dcb_tc; + tlan_ctx->cgd_num = dcb_tc; } static inline bool ice_is_dcb_active(struct ice_pf *pf) @@ -59,9 +57,21 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf) return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || test_bit(ICE_FLAG_DCB_ENA, pf->flags)); } + +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode; +} + #else static inline void ice_dcb_rebuild(struct ice_pf *pf) { } +static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) +{ + vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; + vsi->tc_cfg.numtc = 1; +} + static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) { return ICE_DFLT_TRAFFIC_CLASS; @@ -95,7 +105,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf, } static inline int -ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, +ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring, struct ice_tx_buf __always_unused *first) { return 0; @@ -113,12 +123,16 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, return false; } +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return 0; +} + static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } static inline void ice_update_dcb_stats(struct ice_pf *pf) { } static inline void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { } -static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { } -static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { } +static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 
dcb_tc) { } #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 4180f1f35fb89c..7fdeb411b6df44 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *new_cfg; int bwcfg = 0, bwrec = 0; - int err, i, max_tc = 0; + int err, i; if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i]; bwcfg += ets->tc_tx_bw[i]; new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i]; - new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; + if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + /* in DSCP mode up->tc mapping cannot change */ + new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; + new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; + } new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i]; bwrec += ets->tc_reco_bw[i]; new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i]; - new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; } if (ice_dcb_bwchk(pf, new_cfg)) { @@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) goto ets_out; } - max_tc = pf->hw.func_caps.common_cap.maxtc; - - new_cfg->etscfg.maxtcs = max_tc; - - if (!bwcfg) - new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) + if (mode & DCB_CAP_DCBX_VER_CEE) { + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - else + } else { qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; + } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; @@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, return false; } +#define ICE_BYTES_PER_DSCP_VAL 8 + /** * ice_dcbnl_setapp - set local IEEE App config * @netdev: relevant netdev struct @@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcb_app_priority_table new_app; struct ice_dcbx_cfg *old_cfg, *new_cfg; + u8 max_tc; int ret; - if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || - !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + /* ONLY DSCP APP TLVs have operational significance */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) return -EINVAL; - mutex_lock(&pf->tc_mutex); + /* only allow APP TLVs in SW Mode */ + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n"); + return -EINVAL; + } - new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; - old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (!ice_is_feature_supported(pf, ICE_F_DSCP)) + return -EOPNOTSUPP; - if (old_cfg->numapps == ICE_DCBX_MAX_APPS) { - ret = -EINVAL; - goto setapp_out; + if (app->protocol >= ICE_DSCP_NUM_VAL) { + 
netdev_err(netdev, "DSCP value 0x%04X out of range\n", + app->protocol); + return -EINVAL; + } + + max_tc = pf->hw.func_caps.common_cap.maxtc; + if (app->priority >= max_tc) { + netdev_err(netdev, "TC %d out of range, max TC %d\n", + app->priority, max_tc); + return -EINVAL; } + /* grab TC mutex */ + mutex_lock(&pf->tc_mutex); + + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + ret = dcb_ieee_setapp(netdev, app); if (ret) goto setapp_out; + if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) { + netdev_err(netdev, "DSCP value 0x%04X already user mapped\n", + app->protocol); + ret = dcb_ieee_delapp(netdev, app); + if (ret) + netdev_err(netdev, "Failed to delete re-mapping TLV\n"); + ret = -EINVAL; + goto setapp_out; + } + new_app.selector = app->selector; new_app.prot_id = app->protocol; new_app.priority = app->priority; - if (ice_dcbnl_find_app(old_cfg, &new_app)) { - ret = 0; - goto setapp_out; - } + /* If port is not in DSCP mode, need to set */ + if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + int i, j; + + /* set DSCP mode */ + ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, + NULL); + if (ret) { + netdev_err(netdev, "Failed to set DSCP PFC mode %d\n", + ret); + goto setapp_out; + } + netdev_info(netdev, "Switched QoS to L3 DSCP mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_DSCP; + + /* set default DSCP QoS values */ + new_cfg->etscfg.willing = 0; + new_cfg->pfc.pfccap = max_tc; + new_cfg->pfc.willing = 0; + + for (i = 0; i < max_tc; i++) + for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) { + int dscp, offset; + + dscp = (i * max_tc) + j; + offset = max_tc * ICE_BYTES_PER_DSCP_VAL; + + new_cfg->dscp_map[dscp] = i; + /* if less that 8 TCs supported */ + if (max_tc < ICE_MAX_TRAFFIC_CLASS) + new_cfg->dscp_map[dscp + offset] = i; + } + + new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[0] = 0; + + for (i = 1; i < max_tc; i++) { + new_cfg->etscfg.tcbwtable[i] = 0; + new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[i] = i; + } + } /* end of switching to DSCP mode */ + + /* apply new mapping for this DSCP value */ + new_cfg->dscp_map[app->protocol] = app->priority; new_cfg->app[new_cfg->numapps++] = new_app; + ret = ice_pf_dcb_cfg(pf, new_cfg, true); /* return of zero indicates new cfg applied */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); - if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + else + ret = ICE_DCB_NO_HW_CHG; setapp_out: mutex_unlock(&pf->tc_mutex); @@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) unsigned int i, j; int ret = 0; - if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n"); return -EINVAL; + } mutex_lock(&pf->tc_mutex); old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - if (old_cfg->numapps <= 1) - goto delapp_out; - ret = dcb_ieee_delapp(netdev, app); if (ret) goto delapp_out; new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; - for (i = 1; i < new_cfg->numapps; i++) { + for (i = 0; i < new_cfg->numapps; i++) { if (app->selector == new_cfg->app[i].selector && app->protocol == new_cfg->app[i].prot_id && app->priority == new_cfg->app[i].priority) { @@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) new_cfg->numapps--; for (j = i; j < 
new_cfg->numapps; j++) { - new_cfg->app[i].selector = old_cfg->app[j + 1].selector; - new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id; - new_cfg->app[i].priority = old_cfg->app[j + 1].priority; + new_cfg->app[j].selector = old_cfg->app[j + 1].selector; + new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[j].priority = old_cfg->app[j + 1].priority; } - ret = ice_pf_dcb_cfg(pf, new_cfg, true); - /* return of zero indicates new cfg applied */ + /* if not a DSCP APP TLV or DSCP is not supported, we are done */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + !ice_is_feature_supported(pf, ICE_F_DSCP)) { + ret = ICE_DCB_HW_CHG; + goto delapp_out; + } + + /* if DSCP TLV, then need to address change in mapping */ + clear_bit(app->protocol, new_cfg->dscp_mapped); + /* remap this DSCP value to default value */ + new_cfg->dscp_map[app->protocol] = app->protocol % + ICE_BYTES_PER_DSCP_VAL; + + /* if the last DSCP mapping just got deleted, need to switch + * to L2 VLAN QoS mode + */ + if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && + new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { + ret = ice_aq_set_pfc_mode(&pf->hw, + ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (ret) { + netdev_info(netdev, "Failed to set VLAN PFC mode %d\n", + ret); + goto delapp_out; + } + netdev_info(netdev, "Switched QoS to L2 VLAN mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_VLAN; + + ret = ice_dcb_sw_dflt_cfg(pf, true, true); + } else { + ret = ice_pf_dcb_cfg(pf, new_cfg, true); + } + + /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied + * and reset needs to be performed + */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); + + /* if the change was not siginificant enough to actually call + * the reconfiguration flow, we still need to tell caller that + * their request was successfully handled + */ if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + ret = ICE_DCB_HW_CHG; delapp_out: mutex_unlock(&pf->tc_mutex); diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index ef4392e6e24448..61dd2f18dee8ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -21,6 +21,8 @@ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 +#define ICE_SUBDEV_ID_E810T 0x000E +#define ICE_SUBDEV_ID_E810T2 0x000F /* Intel(R) Ethernet Controller E810-XXV for backplane */ #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 /* Intel(R) Ethernet Controller E810-XXV for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index da7288bdc9a3fe..b9bd9f9472f6cc 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" +#include "ice_eswitch.h" #include "ice_fw_update.h" /* context for devlink info version reporting */ @@ -22,7 +23,7 @@ struct ice_info_ctx { * * If a version does not exist, for example when attempting to get the * inactive version of flash when there is no pending update, the function - * should leave the buffer in the ctx structure empty and return 0. + * should leave the buffer in the ctx structure empty. 
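Per the comment above, the reworked devlink info getters signal "no such version" simply by leaving ctx->buf empty rather than returning an error. A stand-alone sketch of that convention, using a made-up version string and a hypothetical getter name:

#include <stdio.h>
#include <string.h>

struct info_ctx {
	char buf[32];
	int pending_update;	/* stand-in for dev_caps nvm_update_pending_* */
};

/* Getter in the post-patch style: void return; writes nothing when the
 * requested version does not exist.
 */
static void get_pending_fw_ver(struct info_ctx *ctx)
{
	if (ctx->pending_update)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", 3u, 20u, 5u);
	/* else: leave ctx->buf empty and let the caller skip this entry */
}

int main(void)
{
	struct info_ctx ctx = { .pending_update = 0 };

	get_pending_fw_ver(&ctx);
	if (ctx.buf[0] == '\0')
		printf("no pending version, entry skipped\n");
	else
		printf("fw.pending: %s\n", ctx.buf);
	return 0;
}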
*/ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn); } -static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; enum ice_status status; @@ -45,149 +46,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) /* We failed to locate the PBA, so just skip this entry */ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", ice_stat_str(status)); - - return 0; } -static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, - hw->fw_patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch); } -static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver, hw->api_min_ver, hw->api_patch); - - return 0; } -static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build); - - return 0; } -static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &pf->hw.flash.orom; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + orom->major, orom->build, orom->patch); } -static int -ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &ctx->pending_orom; if (ctx->dev_caps.common_cap.nvm_update_pending_orom) snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; } -static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; } -static int -ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", + nvm->major, nvm->minor); } -static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 0; } -static int -ice_info_pending_eetrack(struct ice_pf 
__always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 0; } -static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name); - - return 0; } -static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update, - pkg->draft); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", + pkg->major, pkg->minor, pkg->update, pkg->draft); } -static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) { snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id); - - return 0; } -static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; /* The netlist version fields are BCD formatted */ - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", + netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } -static int -ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; @@ -195,21 +174,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int -ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } @@ -239,8 +215,8 @@ enum ice_version_type { static const struct ice_devlink_version { enum ice_version_type type; const char *key; - int (*getter)(struct 
ice_pf *pf, struct ice_info_ctx *ctx); - int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); } ice_devlink_versions[] = { fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), @@ -352,24 +328,15 @@ static int ice_devlink_info_get(struct devlink *devlink, memset(ctx->buf, 0, sizeof(ctx->buf)); - err = ice_devlink_versions[i].getter(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } + ice_devlink_versions[i].getter(pf, ctx); /* If the default getter doesn't report a version, use the * fallback function. This is primarily useful in the case of * "stored" versions that want to report the same value as the * running version in the normal case of no pending update. */ - if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) { - err = ice_devlink_versions[i].fallback(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } - } + if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) + ice_devlink_versions[i].fallback(pf, ctx); /* Do not report missing versions */ if (ctx->buf[0] == '\0') @@ -457,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink, static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .eswitch_mode_get = ice_eswitch_mode_get, + .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, .flash_update = ice_devlink_flash_update, }; @@ -483,10 +452,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev) return NULL; /* Add an action to teardown the devlink when unwinding the driver */ - if (devm_add_action(dev, ice_devlink_free, devlink)) { - devlink_free(devlink); + if (devm_add_action_or_reset(dev, ice_devlink_free, devlink)) return NULL; - } return devlink_priv(devlink); } @@ -499,15 +466,58 @@ struct ice_pf *ice_allocate_pf(struct device *dev) * * Return: zero on success or an error code on failure. */ -int ice_devlink_register(struct ice_pf *pf) +void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct device *dev = ice_pf_to_dev(pf); + + devlink_register(devlink); +} + +/** + * ice_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup + * + * Releases resources used by devlink and cleans up associated memory. + */ +void ice_devlink_unregister(struct ice_pf *pf) +{ + devlink_unregister(priv_to_devlink(pf)); +} + +/** + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for + * + * Create and register a devlink_port for this PF. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_devlink_create_pf_port(struct ice_pf *pf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; int err; - err = devlink_register(devlink); + dev = ice_pf_to_dev(pf); + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.bus.func; + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "devlink registration failed: %d\n", err); + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); return err; } @@ -515,71 +525,75 @@ int ice_devlink_register(struct ice_pf *pf) } /** - * ice_devlink_unregister - Unregister devlink resources for this PF. - * @pf: the PF structure to cleanup + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup * - * Releases resources used by devlink and cleans up associated memory. + * Unregisters the devlink_port structure associated with this PF. */ -void ice_devlink_unregister(struct ice_pf *pf) +void ice_devlink_destroy_pf_port(struct ice_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); + struct devlink_port *devlink_port; + + devlink_port = &pf->devlink_port; + + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); } /** - * ice_devlink_create_port - Create a devlink port for this VSI - * @vsi: the VSI to create a port for + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for * - * Create and register a devlink_port for this VSI. + * Create and register a devlink_port for this VF. * * Return: zero on success or an error code on failure. */ -int ice_devlink_create_port(struct ice_vsi *vsi) +int ice_devlink_create_vf_port(struct ice_vf *vf) { struct devlink_port_attrs attrs = {}; - struct ice_port_info *pi; + struct devlink_port *devlink_port; struct devlink *devlink; + struct ice_vsi *vsi; struct device *dev; struct ice_pf *pf; int err; - /* Currently we only create devlink_port instances for PF VSIs */ - if (vsi->type != ICE_VSI_PF) - return -EINVAL; + pf = vf->pf; + dev = ice_pf_to_dev(pf); + vsi = ice_get_vf_vsi(vf); + devlink_port = &vf->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.bus.func; + attrs.pci_vf.vf = vf->vf_id; - pf = vsi->back; + devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); - dev = ice_pf_to_dev(pf); - pi = pf->hw.port_info; - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pi->lport; - devlink_port_attrs_set(&vsi->devlink_port, &attrs); - err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx); + err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "devlink_port_register failed: %d\n", err); + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); return err; } - vsi->devlink_port_registered = true; - return 0; } /** - * ice_devlink_destroy_port - Destroy the devlink_port for this VSI - * @vsi: the VSI to cleanup + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup * - * Unregisters the devlink_port structure associated with this VSI. + * Unregisters the devlink_port structure associated with this VF. 
*/ -void ice_devlink_destroy_port(struct ice_vsi *vsi) +void ice_devlink_destroy_vf_port(struct ice_vf *vf) { - if (!vsi->devlink_port_registered) - return; + struct devlink_port *devlink_port; - devlink_port_type_clear(&vsi->devlink_port); - devlink_port_unregister(&vsi->devlink_port); + devlink_port = &vf->devlink_port; - vsi->devlink_port_registered = false; + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index e07e74426bde85..b7f9551e4fc441 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -6,10 +6,12 @@ struct ice_pf *ice_allocate_pf(struct device *dev); -int ice_devlink_register(struct ice_pf *pf); +void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); -int ice_devlink_create_port(struct ice_vsi *vsi); -void ice_devlink_destroy_port(struct ice_vsi *vsi); +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); void ice_devlink_init_regions(struct ice_pf *pf); void ice_devlink_destroy_regions(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c new file mode 100644 index 00000000000000..d1d7389b0bffc5 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_eswitch.h" +#include "ice_fltr.h" +#include "ice_repr.h" +#include "ice_devlink.h" +#include "ice_tc_lib.h" + +/** + * ice_eswitch_setup_env - configure switchdev HW filters + * @pf: pointer to PF struct + * + * This function adds HW filters configuration specific for switchdev + * mode. 
+ */ +static int ice_eswitch_setup_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct net_device *uplink_netdev = uplink_vsi->netdev; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_port_info *pi = pf->hw.port_info; + bool rule_added = false; + + ice_vsi_manage_vlan_stripping(ctrl_vsi, false); + + ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + + netif_addr_lock_bh(uplink_netdev); + __dev_uc_unsync(uplink_netdev, NULL); + __dev_mc_unsync(uplink_netdev, NULL); + netif_addr_unlock_bh(uplink_netdev); + + if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI)) + goto err_def_rx; + + if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) { + if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi)) + goto err_def_rx; + rule_added = true; + } + + if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX)) + goto err_def_tx; + + if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_uplink; + + if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_control; + + if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id, + ICE_FLTR_TX, + ICE_SINGLE_ACT_LB_ENABLE)) + goto err_update_action; + + return 0; + +err_update_action: + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); +err_override_control: + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); +err_override_uplink: + ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); +err_def_tx: + if (rule_added) + ice_clear_dflt_vsi(uplink_vsi->vsw); +err_def_rx: + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); + return -ENODEV; +} + +/** + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI + * @pf: pointer to PF struct + * + * In switchdev mode the number of allocated Tx rings is equal to the number of Rx rings. + * + * This function fills the q_vector structures associated with each representor and + * moves each ring pair to the port representor netdevs. Each port representor + * gets one dedicated Tx/Rx ring pair, so the number of ring pairs is equal to the + * number of VFs. + */ +static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +{ + struct ice_vsi *vsi = pf->switchdev.control_vsi; + int q_id; + + ice_for_each_txq(vsi, q_id) { + struct ice_repr *repr = pf->vf[q_id].repr; + struct ice_q_vector *q_vector = repr->q_vector; + struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id]; + struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id]; + + q_vector->vsi = vsi; + q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; + + q_vector->num_ring_tx = 1; + q_vector->tx.tx_ring = tx_ring; + tx_ring->q_vector = q_vector; + tx_ring->next = NULL; + tx_ring->netdev = repr->netdev; + /* In switchdev mode, from the OS stack perspective, there is only + * one queue for a given netdev, so it needs to be indexed as 0.
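/* [Editor's note, not part of the patch] Illustration of the one-to-one
 * layout that ice_eswitch_remap_rings_to_vectors() establishes, assuming
 * VF index == control-VSI queue index as in the loop above:
 *
 *   pf->vf[i]  <->  ctrl VSI tx_rings[i]/rx_rings[i]  <->  pf->vf[i].repr->netdev
 *
 * so a control-VSI ring index directly identifies the representor behind it.
 * The helper below is purely illustrative.
 */
static struct net_device *
example_repr_netdev_for_ctrl_ring(struct ice_pf *pf, int q_id)
{
	/* q_id is a control-VSI queue index, which equals the VF index here */
	return pf->vf[q_id].repr->netdev;
}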
+ */ + tx_ring->q_index = 0; + + q_vector->num_ring_rx = 1; + q_vector->rx.rx_ring = rx_ring; + rx_ring->q_vector = q_vector; + rx_ring->next = NULL; + rx_ring->netdev = repr->netdev; + } +} + +/** + * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * @pf: pointer to PF struct + */ +static int ice_eswitch_setup_reprs(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int max_vsi_num = 0; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf->repr->dst) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + goto err; + } + + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + goto err; + } + + if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + goto err; + } + + if (max_vsi_num < vsi->vsi_num) + max_vsi_num = vsi->vsi_num; + + netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + + netif_keep_dst(vf->repr->netdev); + } + + kfree(ctrl_vsi->target_netdevs); + + ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1, + sizeof(*ctrl_vsi->target_netdevs), + GFP_KERNEL); + if (!ctrl_vsi->target_netdevs) + goto err; + + ice_for_each_vf(pf, i) { + struct ice_repr *repr = pf->vf[i].repr; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; + + ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; + + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + } + + return -ENODEV; +} + +/** + * ice_eswitch_release_reprs - clear PR VSIs configuration + * @pf: pointer to PF struct + * @ctrl_vsi: pointer to switchdev control VSI + */ +static void +ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +{ + int i; + + kfree(ctrl_vsi->target_netdevs); + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + + netif_napi_del(&vf->repr->q_vector->napi); + } +} + +/** + * ice_eswitch_update_repr - reconfigure VF port representor + * @vsi: VF VSI for which port representor is configured + */ +void ice_eswitch_update_repr(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_repr *repr; + struct ice_vf *vf; + int ret; + + if (!ice_is_switchdev_running(pf)) + return; + + vf = &pf->vf[vsi->vf_id]; + repr = vf->repr; + repr->src_vsi = vsi; + repr->dst->u.port_info.port_id = vsi->vsi_num; + + ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (ret) { + ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); + dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); + } +} + +/** + * ice_eswitch_port_start_xmit - callback for packets transmit + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_repr *repr; + struct ice_vsi *vsi; + + np = netdev_priv(netdev); + vsi = np->vsi; + + if (ice_is_reset_in_progress(vsi->back->state)) + return NETDEV_TX_BUSY; + + repr = ice_netdev_to_repr(netdev); + skb_dst_drop(skb); + dst_hold((struct dst_entry *)repr->dst); + skb_dst_set(skb, (struct dst_entry *)repr->dst); + skb->queue_mapping = repr->vf->vf_id; + + return ice_start_xmit(skb, netdev); +} + +/** + * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * @skb: pointer to send buffer + * @off: pointer to offload struct + */ +void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) +{ + struct metadata_dst *dst = skb_metadata_dst(skb); + u64 cd_cmd, dst_vsi; + + if (!dst) { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; + off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); + } else { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; + dst_vsi = ((u64)dst->u.port_info.port_id << + ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; + } +} + +/** + * ice_eswitch_release_env - clear switchdev HW filters + * @pf: pointer to PF struct + * + * This function removes HW filters configuration specific for switchdev + * mode and restores default legacy mode settings. 
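/* [Editor's note, not part of the patch] Restating how
 * ice_eswitch_set_target_vsi() above composes the Tx context descriptor
 * quad-word for a representor transmit; the helper name is illustrative only.
 */
static u64 example_switchdev_cd_qw1(u16 target_vsi_num)
{
	u64 cd_cmd = (u64)ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
	u64 dst_vsi = ((u64)target_vsi_num << ICE_TXD_CTX_QW1_VSI_S) &
		      ICE_TXD_CTX_QW1_VSI_M;

	/* descriptor type + "switch to VSI" command + target VSI number */
	return cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
}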
+ */ +static void ice_eswitch_release_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); + ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); + ice_clear_dflt_vsi(uplink_vsi->vsw); + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); +} + +/** + * ice_eswitch_vsi_setup - configure switchdev control VSI + * @pf: pointer to PF structure + * @pi: pointer to port_info structure + */ +static struct ice_vsi * +ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL); +} + +/** + * ice_eswitch_napi_del - remove NAPI handle for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_del(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + netif_napi_del(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_enable - enable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_enable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_enable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_disable - disable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_disable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_disable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI + * @vsi: VSI to setup rxdid on + * @rxdid: flex descriptor id + */ +static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid) +{ + struct ice_hw *hw = &vsi->back->hw; + int i; + + ice_for_each_rxq(vsi, i) { + struct ice_rx_ring *ring = vsi->rx_rings[i]; + u16 pf_q = vsi->rxq_map[ring->q_index]; + + ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); + } +} + +/** + * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode + * @pf: pointer to PF structure + */ +static int ice_eswitch_enable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi; + + pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->switchdev.control_vsi) + return -ENODEV; + + ctrl_vsi = pf->switchdev.control_vsi; + pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); + if (!pf->switchdev.uplink_vsi) + goto err_vsi; + + if (ice_eswitch_setup_env(pf)) + goto err_vsi; + + if (ice_repr_add_for_all_vfs(pf)) + goto err_repr_add; + + if (ice_eswitch_setup_reprs(pf)) + goto err_setup_reprs; + + ice_eswitch_remap_rings_to_vectors(pf); + + if (ice_vsi_open(ctrl_vsi)) + goto err_setup_reprs; + + ice_eswitch_napi_enable(pf); + + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + + return 0; + +err_setup_reprs: + ice_repr_rem_from_all_vfs(pf); +err_repr_add: + ice_eswitch_release_env(pf); +err_vsi: + ice_vsi_release(ctrl_vsi); + return -ENODEV; +} + +/** + * ice_eswitch_disable_switchdev - disable switchdev resources + * @pf: pointer to PF structure + */ +static void ice_eswitch_disable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_eswitch_napi_disable(pf); + ice_eswitch_release_env(pf); + ice_eswitch_release_reprs(pf, ctrl_vsi); + ice_vsi_release(ctrl_vsi); + ice_repr_rem_from_all_vfs(pf); +} + +/** + * ice_eswitch_mode_set - set new eswitch 
mode + * @devlink: pointer to devlink structure + * @mode: eswitch mode to switch to + * @extack: pointer to extack structure + */ +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (pf->eswitch_mode == mode) + return 0; + + if (pf->num_alloc_vfs) { + dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); + NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created"); + return -EOPNOTSUPP; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + { + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); + break; + } + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode"); + return -EINVAL; + } + + pf->eswitch_mode = mode; + return 0; +} + +/** + * ice_eswitch_get_target_netdev - return port representor netdev + * @rx_ring: pointer to Rx ring + * @rx_desc: pointer to Rx descriptor + * + * When working in switchdev mode context (when control VSI is used), this + * function returns netdev of appropriate port representor. For non-switchdev + * context, regular netdev associated with Rx ring is returned. + */ +struct net_device * +ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_vsi *vsi = rx_ring->vsi; + struct ice_vsi *control_vsi; + u16 target_vsi_id; + + control_vsi = vsi->back->switchdev.control_vsi; + if (vsi != control_vsi) + return rx_ring->netdev; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + target_vsi_id = le16_to_cpu(desc->src_vsi); + + return vsi->target_netdevs[target_vsi_id]; +} + +/** + * ice_eswitch_mode_get - get current eswitch mode + * @devlink: pointer to devlink structure + * @mode: output parameter for current eswitch mode + */ +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct ice_pf *pf = devlink_priv(devlink); + + *mode = pf->eswitch_mode; + return 0; +} + +/** + * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV, + * false otherwise. 
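/* [Editor's note, not part of the patch] A sketch of how a receive path can
 * consume ice_eswitch_get_target_netdev(): on the switchdev control VSI the
 * flex descriptor's src_vsi field selects the right port representor netdev,
 * while on any other VSI the ring's own netdev comes back unchanged. The
 * helper below is illustrative only.
 */
static void
example_finish_rx_skb(struct ice_rx_ring *rx_ring,
		      union ice_32b_rx_flex_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct net_device *dev;

	dev = ice_eswitch_get_target_netdev(rx_ring, rx_desc);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
}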
+ */ +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; +} + +/** + * ice_eswitch_release - cleanup eswitch + * @pf: pointer to PF structure + */ +void ice_eswitch_release(struct ice_pf *pf) +{ + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return; + + ice_eswitch_disable_switchdev(pf); + pf->switchdev.is_running = false; +} + +/** + * ice_eswitch_configure - configure eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_configure(struct ice_pf *pf) +{ + int status; + + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) + return 0; + + status = ice_eswitch_enable_switchdev(pf); + if (status) + return status; + + pf->switchdev.is_running = true; + return 0; +} + +/** + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_start_tx_queues(repr); + } +} + +/** + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors + * @pf: pointer to PF structure + */ +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_stop_tx_queues(repr); + } +} + +/** + * ice_eswitch_rebuild - rebuild eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_rebuild(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int status; + + ice_eswitch_napi_disable(pf); + ice_eswitch_napi_del(pf); + + status = ice_eswitch_setup_env(pf); + if (status) + return status; + + status = ice_eswitch_setup_reprs(pf); + if (status) + return status; + + ice_eswitch_remap_rings_to_vectors(pf); + + ice_replay_tc_fltrs(pf); + + status = ice_vsi_open(ctrl_vsi); + if (status) + return status; + + ice_eswitch_napi_enable(pf); + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + ice_eswitch_start_all_tx_queues(pf); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h new file mode 100644 index 00000000000000..364cd2a79c378a --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. 
*/ + +#ifndef _ICE_ESWITCH_H_ +#define _ICE_ESWITCH_H_ + +#include + +#ifdef CONFIG_ICE_SWITCHDEV +void ice_eswitch_release(struct ice_pf *pf); +int ice_eswitch_configure(struct ice_pf *pf); +int ice_eswitch_rebuild(struct ice_pf *pf); + +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); + +void ice_eswitch_update_repr(struct ice_vsi *vsi); + +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); + +struct net_device * +ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); + +void ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off); +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +#else /* CONFIG_ICE_SWITCHDEV */ +static inline void ice_eswitch_release(struct ice_pf *pf) { } + +static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } + +static inline void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) { } + +static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } + +static inline int ice_eswitch_configure(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_rebuild(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + return DEVLINK_ESWITCH_MODE_LEGACY; +} + +static inline int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return false; +} + +static inline struct net_device * +ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} + +static inline netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + return NETDEV_TX_BUSY; +} +#endif /* CONFIG_ICE_SWITCHDEV */ +#endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c451cf401e635b..572519e402f461 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) static void -ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct ice_orom_info *orom; @@ -190,9 +189,19 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, nvm->eetrack, orom->major, orom->build, orom->patch); +} + +static void +ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + __ice_get_drvinfo(netdev, drvinfo, np->vsi); strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } @@ -584,7 +593,7 @@ 
static bool ice_lbtest_check_frame(u8 *frame) * * Function sends loopback packets on a test Tx ring. */ -static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size) +static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size) { struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -637,7 +646,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size) * Function receives loopback packets and verify their correctness. * Returns number of received valid frames. */ -static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) +static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) { struct ice_rx_buf *rx_buf; int valid_frames, i; @@ -676,9 +685,10 @@ static u64 ice_loopback_test(struct net_device *netdev) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *orig_vsi = np->vsi, *test_vsi; struct ice_pf *pf = orig_vsi->back; - struct ice_ring *tx_ring, *rx_ring; u8 broadcast[ETH_ALEN], ret = 0; int num_frames, valid_frames; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; struct device *dev; u8 *tx_frame; int i; @@ -866,10 +876,10 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, netdev_info(netdev, "testing finished\n"); } -static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +static void +__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; unsigned int i; u8 *p = data; @@ -879,6 +889,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ethtool_sprintf(&p, ice_gstrings_vsi_stats[i].stat_string); + if (ice_is_port_repr_netdev(netdev)) + return; + ice_for_each_alloc_txq(vsi, i) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -917,6 +930,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_strings(netdev, stringset, data, np->vsi); +} + static int ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { @@ -1215,6 +1235,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) enum ice_status status; bool dcbx_agent_status; + if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n"); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. */ @@ -1312,13 +1339,13 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) } static void -ice_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats __always_unused *stats, u64 *data) +__ice_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - struct ice_ring *ring; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; unsigned int j; int i = 0; char *p; @@ -1332,14 +1359,17 @@ ice_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + if (ice_is_port_repr_netdev(netdev)) + return; + /* populate per queue stats */ rcu_read_lock(); ice_for_each_alloc_txq(vsi, j) { - ring = READ_ONCE(vsi->tx_rings[j]); - if (ring) { - data[i++] = ring->stats.pkts; - data[i++] = ring->stats.bytes; + tx_ring = READ_ONCE(vsi->tx_rings[j]); + if (tx_ring) { + data[i++] = tx_ring->stats.pkts; + data[i++] = tx_ring->stats.bytes; } else { data[i++] = 0; data[i++] = 0; @@ -1347,10 +1377,10 @@ ice_get_ethtool_stats(struct net_device *netdev, } ice_for_each_alloc_rxq(vsi, j) { - ring = READ_ONCE(vsi->rx_rings[j]); - if (ring) { - data[i++] = ring->stats.pkts; - data[i++] = ring->stats.bytes; + rx_ring = READ_ONCE(vsi->rx_rings[j]); + if (rx_ring) { + data[i++] = rx_ring->stats.pkts; + data[i++] = rx_ring->stats.bytes; } else { data[i++] = 0; data[i++] = 0; @@ -1379,6 +1409,15 @@ ice_get_ethtool_stats(struct net_device *netdev, } } +static void +ice_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, u64 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_ethtool_stats(netdev, stats, data, np->vsi); +} + #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \ ICE_PHY_TYPE_LOW_100M_SGMII) @@ -2667,9 +2706,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) static int ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { - struct ice_ring *tx_rings = NULL, *rx_rings = NULL; struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_ring *xdp_rings = NULL; + struct ice_tx_ring *xdp_rings = NULL; + struct ice_tx_ring *tx_rings = NULL; + struct ice_rx_ring *rx_rings = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; int i, timeout = 50, err = 0; @@ -2718,12 +2758,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - for (i = 0; i < vsi->alloc_txq; i++) + ice_for_each_alloc_txq(vsi, i) vsi->tx_rings[i]->count = new_tx_cnt; - for (i = 0; i < vsi->alloc_rxq; i++) + ice_for_each_alloc_rxq(vsi, i) vsi->rx_rings[i]->count = new_rx_cnt; if (ice_is_xdp_ena_vsi(vsi)) - for (i = 0; i < vsi->num_xdp_txq; i++) + ice_for_each_xdp_txq(vsi, i) vsi->xdp_rings[i]->count = new_tx_cnt; vsi->num_tx_desc = (u16)new_tx_cnt; vsi->num_rx_desc = (u16)new_rx_cnt; @@ -2772,7 +2812,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) goto free_tx; } - for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_for_each_xdp_txq(vsi, i) { /* clone ring and setup updated count */ xdp_rings[i] = *vsi->xdp_rings[i]; xdp_rings[i].count = new_tx_cnt; @@ -2866,7 +2906,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) } if (xdp_rings) { - for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_for_each_xdp_txq(vsi, i) { ice_free_tx_ring(vsi->xdp_rings[i]); *vsi->xdp_rings[i] = xdp_rings[i]; } @@ -3155,6 +3195,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return -EIO; } + if (ice_is_adq_active(pf)) { + netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n"); + return -EOPNOTSUPP; + } + if (key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = @@ -3255,7 +3300,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.ring && q_vector->tx.ring) + if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) combined++; 
} @@ -3365,6 +3410,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U)) return -EINVAL; + if (ice_is_adq_active(pf)) { + netdev_err(dev, "Cannot set channels with ADQ configured.\n"); + return -EOPNOTSUPP; + } + if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) { netdev_err(dev, "Cannot set channels when Flow Director filters are active\n"); return -EOPNOTSUPP; @@ -3466,15 +3516,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } -enum ice_container_type { - ICE_RX_CONTAINER, - ICE_TX_CONTAINER, -}; - /** * ice_get_rc_coalesce - get ITR values for specific ring container * @ec: ethtool structure to fill with driver's coalesce settings - * @c_type: container type, Rx or Tx * @rc: ring container that the ITR values will come from * * Query the device for ice_ring_container specific ITR values. This is @@ -3484,24 +3528,23 @@ enum ice_container_type { * Returns 0 on success, negative otherwise. */ static int -ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, - struct ice_ring_container *rc) +ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc) { - if (!rc->ring) + if (!rc->rx_ring) return -EINVAL; - switch (c_type) { + switch (rc->type) { case ICE_RX_CONTAINER: ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc); ec->rx_coalesce_usecs = rc->itr_setting; - ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl; + ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl; break; case ICE_TX_CONTAINER: ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc); ec->tx_coalesce_usecs = rc->itr_setting; break; default: - dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type); + dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type); return -EINVAL; } @@ -3522,18 +3565,18 @@ static int ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num) { if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { - if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER, + if (ice_get_rc_coalesce(ec, &vsi->rx_rings[q_num]->q_vector->rx)) return -EINVAL; - if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER, + if (ice_get_rc_coalesce(ec, &vsi->tx_rings[q_num]->q_vector->tx)) return -EINVAL; } else if (q_num < vsi->num_rxq) { - if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER, + if (ice_get_rc_coalesce(ec, &vsi->rx_rings[q_num]->q_vector->rx)) return -EINVAL; } else if (q_num < vsi->num_txq) { - if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER, + if (ice_get_rc_coalesce(ec, &vsi->tx_rings[q_num]->q_vector->tx)) return -EINVAL; } else { @@ -3585,7 +3628,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * ice_set_rc_coalesce - set ITR values for specific ring container - * @c_type: container type, Rx or Tx * @ec: ethtool structure from user to update ITR settings * @rc: ring container that the ITR values will come from * @vsi: VSI associated to the ring container @@ -3597,19 +3639,22 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, * Returns 0 on success, negative otherwise. */ static int -ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, +ice_set_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc, struct ice_vsi *vsi) { - const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx"; + const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? 
"rx" : "tx"; u32 use_adaptive_coalesce, coalesce_usecs; struct ice_pf *pf = vsi->back; u16 itr_setting; - if (!rc->ring) + if (!rc->rx_ring) return -EINVAL; - switch (c_type) { + switch (rc->type) { case ICE_RX_CONTAINER: + { + struct ice_q_vector *q_vector = rc->rx_ring->q_vector; + if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || (ec->rx_coalesce_usecs_high && ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { @@ -3618,22 +3663,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, ICE_MAX_INTRL); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl && + if (ec->rx_coalesce_usecs_high != q_vector->intrl && (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) { netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n", c_type_str); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { - rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; - ice_write_intrl(rc->ring->q_vector, - ec->rx_coalesce_usecs_high); - } + if (ec->rx_coalesce_usecs_high != q_vector->intrl) + q_vector->intrl = ec->rx_coalesce_usecs_high; use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; coalesce_usecs = ec->rx_coalesce_usecs; break; + } case ICE_TX_CONTAINER: use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; coalesce_usecs = ec->tx_coalesce_usecs; @@ -3641,7 +3684,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, break; default: dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n", - c_type); + rc->type); return -EINVAL; } @@ -3690,22 +3733,22 @@ static int ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num) { if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { - if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, + if (ice_set_rc_coalesce(ec, &vsi->rx_rings[q_num]->q_vector->rx, vsi)) return -EINVAL; - if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, + if (ice_set_rc_coalesce(ec, &vsi->tx_rings[q_num]->q_vector->tx, vsi)) return -EINVAL; } else if (q_num < vsi->num_rxq) { - if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, + if (ice_set_rc_coalesce(ec, &vsi->rx_rings[q_num]->q_vector->rx, vsi)) return -EINVAL; } else if (q_num < vsi->num_txq) { - if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, + if (ice_set_rc_coalesce(ec, &vsi->tx_rings[q_num]->q_vector->tx, vsi)) return -EINVAL; @@ -3778,6 +3821,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, v_idx)) return -EINVAL; + + ice_set_q_vector_intrl(vsi->q_vectors[v_idx]); } goto set_complete; } @@ -3785,6 +3830,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, q_num)) return -EINVAL; + ice_set_q_vector_intrl(vsi->q_vectors[q_num]); + set_complete: return 0; } @@ -3804,6 +3851,54 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, return __ice_set_coalesce(netdev, ec, q_num); } +static void +ice_repr_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); +} + +static void +ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + /* for port representors only ETH_SS_STATS is supported */ + if (ice_check_vf_ready_for_cfg(repr->vf) || + stringset != 
ETH_SS_STATS) + return; + + __ice_get_strings(netdev, stringset, data, repr->src_vsi); +} + +static void +ice_repr_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi); +} + +static int ice_repr_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ICE_VSI_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + #define ICE_I2C_EEPROM_DEV_ADDR 0xA0 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2 #define ICE_MODULE_TYPE_SFP 0x03 @@ -4055,6 +4150,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; } +static const struct ethtool_ops ice_ethtool_repr_ops = { + .get_drvinfo = ice_repr_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = ice_repr_get_strings, + .get_ethtool_stats = ice_repr_get_ethtool_stats, + .get_sset_count = ice_repr_get_sset_count, +}; + +/** + * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_repr_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_repr_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 16de603b280c6e..38960bcc384c03 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -706,7 +706,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow) if (!seg) return -ENOMEM; - tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX, GFP_KERNEL); if (!tun_seg) { devm_kfree(dev, seg); @@ -1068,7 +1068,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, if (!seg) return -ENOMEM; - tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX, + tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX, GFP_KERNEL); if (!tun_seg) { devm_kfree(dev, seg); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 59ef68f072c0ac..cbd8424631e32a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); if (frag) - loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF; break; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index d2d40e18ae8aab..da4163856f4c27 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -48,7 +48,7 @@ * requests that the packet not be fragmented. MF indicates that a packet has * been fragmented. 
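/* [Editor's note, not part of the patch] Why the rename below makes sense:
 * the IPv4 flags live in the top bits of the 16-bit flags/fragment-offset
 * word (0x4000 = DF, 0x2000 = MF). The flow director training packet patches
 * the high byte of that word (byte 20 of the frame: 14-byte Ethernet header
 * plus IPv4 header offset 6), where the flags appear as 0x40 and 0x20, so the
 * 0x20 constant actually marks "more fragments", not "don't fragment".
 */
#define EXAMPLE_IPV4_FLAG_BYTE_DF	0x40	/* 0x4000 >> 8, illustrative */
#define EXAMPLE_IPV4_FLAG_BYTE_MF	0x20	/* 0x2000 >> 8, illustrative */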
 */ -#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 +#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 1ac96dc66d0db8..23cfcceb1536dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) * * This function will request ownership of the change lock. */ -static enum ice_status +enum ice_status ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, @@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) * * This function will release the change lock using the proper Admin Command. */ -static void ice_release_change_lock(struct ice_hw *hw) +void ice_release_change_lock(struct ice_hw *hw) { ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); } @@ -1329,6 +1329,86 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, return status; } +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates the offset field. "offset" is an index into the field vector table. + */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= le16_to_cpu(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = le16_to_cpu(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profiles + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for the used profiles + * and store the index number in struct ice_switch_info *switch_info + * in HW for later use. + */ +static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + + do { + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in a profile that is not used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors.
+ */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return 0; +} + /** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure @@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", status); @@ -1484,6 +1565,195 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) return bld; } +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + */ +static enum ice_prof_type +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) +{ + u16 i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return ICE_PROF_NON_TUN; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + unsigned long *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + if (req_profs == ICE_PROF_ALL) { + bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); + return; + } + + memset(&state, 0, sizeof(state)); + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv); + + if (req_profs & prof_type) + set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @prot_ids: field vector to search for with a given protocol ID + * @ids_cnt: lookup/protocol count + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
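/* [Editor's note, not part of the patch] Hypothetical caller of
 * ice_get_sw_fv_list() showing the ownership rule from the NOTE above:
 * every list entry must be freed by the caller. The protocol IDs and the
 * profile bitmap passed in are placeholders.
 */
static void example_use_sw_fv_list(struct ice_hw *hw, unsigned long *prof_bm)
{
	struct ice_sw_fv_list_entry *fvl, *tmp;
	u8 prot_ids[] = { (u8)ICE_PROT_UDP_OF };	/* placeholder lookup */
	LIST_HEAD(fv_list);

	if (ice_get_sw_fv_list(hw, prot_ids, ARRAY_SIZE(prot_ids),
			       prof_bm, &fv_list))
		return;

	list_for_each_entry(fvl, &fv_list, list_entry)
		ice_debug(hw, ICE_DBG_INIT, "matched profile %u\n",
			  fvl->profile_id);

	/* the caller owns the entries: delete and free each one when done */
	list_for_each_entry_safe(fvl, tmp, &fv_list, list_entry) {
		list_del(&fvl->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvl);
	}
}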
+ */ +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + unsigned long *bm, struct list_head *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!ids_cnt || !hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + do { + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!test_bit((u16)offset, bm)) + continue; + + for (i = 0; i < ids_cnt; i++) { + int j; + + /* This code assumes that if a switch field vector line + * has a matching protocol, then this line will contain + * the entries necessary to represent every field in + * that protocol header. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == prot_ids[i]) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == ids_cnt) { + fvl = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fvl), GFP_KERNEL); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + list_add(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (list_empty(fv_list)) + return ICE_ERR_CFG; + return 0; + +err: + list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + list_del(&fvl->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvl); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + bitmap_zero(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. 
+ */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + set_bit(i, hw->switch_info->prof_res_bm[off]); + } while (fv); +} + /** * ice_pkg_buf_free * @hw: pointer to the HW structure @@ -1863,6 +2133,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, return 0; } +/** + * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index + * @hw: pointer to the hardware structure + * @blk: hardware block + * @prof: profile ID + * @fv_idx: field vector word index + * @prot: variable to receive the protocol ID + * @off: variable to receive the protocol offset + */ +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off) +{ + struct ice_fv_word *fv_ext; + + if (prof >= hw->blk[blk].es.count) + return ICE_ERR_PARAM; + + if (fv_idx >= hw->blk[blk].es.fvw) + return ICE_ERR_PARAM; + + fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); + + *prot = fv_ext[fv_idx].prot_id; + *off = fv_ext[fv_idx].off; + + return 0; +} + /* PTG Management */ /** diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 8a58e79729b983..344c2637facda3 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,6 +18,20 @@ #define ICE_PKG_CNT 4 +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off); +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, + unsigned long *bm); +void +ice_init_prof_result_bm(struct ice_hw *hw); +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + unsigned long *bm, struct list_head *fv_list); bool ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 7d8b517a63c9c2..0f572a36d0217a 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -13,6 +13,8 @@ struct ice_fv_word { u8 resvrd; } __packed; +#define ICE_MAX_NUM_PROFILES 256 + #define ICE_MAX_FV_WORDS 48 struct ice_fv { struct ice_fv_word ew[ICE_MAX_FV_WORDS]; @@ -279,6 +281,12 @@ struct ice_sw_fv_section { struct ice_fv fv[]; }; +struct ice_sw_fv_list_entry { + struct list_head list_entry; + u32 profile_id; + struct ice_fv *fv_ptr; +}; + /* The BOOST TCAM stores the match packet header in reverse order, meaning * the fields are reversed; in addition, this means that the normally big endian * fields of the packet are now little endian. 
@@ -365,6 +373,7 @@ struct ice_pkg_enum { enum ice_tunnel_type { TNL_VXLAN = 0, TNL_GENEVE, + TNL_GRETAP, __TNL_TYPE_CNT, TNL_LAST = 0xFF, TNL_ALL = 0xFF, @@ -603,4 +612,12 @@ struct ice_chs_chg { }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT + +enum ice_prof_type { + ICE_PROF_NON_TUN = 0x1, + ICE_PROF_TUN_UDP = 0x2, + ICE_PROF_TUN_GRE = 0x4, + ICE_PROF_TUN_ALL = 0x6, + ICE_PROF_ALL = 0xFF, +}; #endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index 2418d4fff037f2..c2e78eaf4ccbec 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); } + +/** + * ice_fltr_update_rule_flags - update lan_en/lb_en flags + * @hw: pointer to hw + * @rule_id: id of rule being updated + * @recipe_id: recipe id of rule + * @act: current action field + * @type: Rx or Tx + * @src: source VSI + * @new_flags: combinations of lb_en and lan_en + */ +static enum ice_status +ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, + u32 act, u16 type, u16 src, u32 new_flags) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status err; + u32 flags_mask; + + s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; + act &= ~flags_mask; + act |= (flags_mask & new_flags); + + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id); + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + if (type & ICE_FLTR_RX) { + s_rule->pdata.lkup_tx_rx.src = + cpu_to_le16(hw->port_info->lport); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + + } else { + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + } + + err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + + kfree(s_rule); + return err; +} + +/** + * ice_fltr_build_action - build action for rule + * @vsi_id: ID of the VSI which is used to build the action + */ +static u32 ice_fltr_build_action(u16 vsi_id) +{ + return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) | + ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; +} + +/** + * ice_fltr_update_flags_dflt_rule - update flags on default rule + * @vsi: pointer to VSI + * @rule_id: id of rule + * @direction: Tx or Rx + * @new_flags: flags to update + * + * Function updates flags on default rule with ICE_SW_LKUP_DFLT. + * + * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and + * ICE_SINGLE_ACT_LAN_ENABLE. 
+ */ +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags) +{ + u32 action = ice_fltr_build_action(vsi->vsi_num); + struct ice_hw *hw = &vsi->back->hw; + + return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action, + direction, vsi->vsi_num, new_flags); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 361cb4da9b43b6..8eec4febead1e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -36,4 +36,7 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 76021d977b6099..a490824856420d 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -182,6 +182,7 @@ #define GLINT_DYN_CTL_INTERVAL_S 5 #define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) #define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) +#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) #define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 80736e0ec0dca0..d981dc6f232355 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic { } flex_ts; }; +/* Rx Flex Descriptor NIC Profile + * RxDID Profile ID 6 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Source VSI + * Flex-field 4: reserved, VLAN ID taken from L2Tag + */ +struct ice_32b_rx_flex_desc_nic_2 { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flow_id; + __le16 src_vsi; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed @@ -529,6 +569,9 @@ struct ice_tx_ctx_desc { #define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_QW1_VSI_S 50 +#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) + enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, ICE_TX_CTX_DESC_TSYN = 0x02, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index b718e196af2a4a..40562600a8cf2b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -22,8 +22,12 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_VF"; case ICE_VSI_CTRL: return "ICE_VSI_CTRL"; + case ICE_VSI_CHNL: + return "ICE_VSI_CHNL"; case ICE_VSI_LB: return "ICE_VSI_LB"; + case ICE_VSI_SWITCHDEV_CTRL: + return "ICE_VSI_SWITCHDEV_CTRL"; 
default: return "unknown"; } @@ -44,12 +48,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) int ret = 0; u16 i; - for (i = 0; i < vsi->num_rxq; i++) + ice_for_each_rxq(vsi, i) ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); ice_flush(&vsi->back->hw); - for (i = 0; i < vsi->num_rxq; i++) { + ice_for_each_rxq(vsi, i) { ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); if (ret) break; @@ -71,6 +75,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) struct device *dev; dev = ice_pf_to_dev(pf); + if (vsi->type == ICE_VSI_CHNL) + return 0; /* allocate memory for both Tx and Rx ring pointers */ vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, @@ -132,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -200,6 +207,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; + case ICE_VSI_SWITCHDEV_CTRL: + /* The number of queues for ctrl VSI is equal to number of VFs. + * Each ring is associated to the corresponding VF_PR netdev. + */ + vsi->alloc_txq = pf->num_alloc_vfs; + vsi->alloc_rxq = pf->num_alloc_vfs; + vsi->num_q_vectors = 1; + break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; if (vf->num_req_qs) @@ -218,6 +233,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = 1; vsi->num_q_vectors = 1; break; + case ICE_VSI_CHNL: + vsi->alloc_txq = 0; + vsi->alloc_rxq = 0; + break; case ICE_VSI_LB: vsi->alloc_txq = 1; vsi->alloc_rxq = 1; @@ -263,7 +282,7 @@ static int ice_get_free_slot(void *array, int size, int curr) * ice_vsi_delete - delete a VSI from the switch * @vsi: pointer to VSI being removed */ -static void ice_vsi_delete(struct ice_vsi *vsi) +void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; @@ -334,7 +353,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) * * Returns 0 on success, negative on failure */ -static int ice_vsi_clear(struct ice_vsi *vsi) +int ice_vsi_clear(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; struct device *dev; @@ -379,12 +398,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - if (!q_vector->tx.ring) + if (!q_vector->tx.tx_ring) return IRQ_HANDLED; #define FDIR_RX_DESC_CLEAN_BUDGET 64 - ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET); - ice_clean_ctrl_tx_irq(q_vector->tx.ring); + ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); + ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); return IRQ_HANDLED; } @@ -398,7 +417,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - if (!q_vector->tx.ring && !q_vector->rx.ring) + if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; q_vector->total_events++; @@ -408,16 +427,33 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + struct ice_pf *pf = q_vector->vsi->back; + int i; + + if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) + return IRQ_HANDLED; + + ice_for_each_vf(pf, i) + napi_schedule(&pf->vf[i].repr->q_vector->napi); + + return 
IRQ_HANDLED; +} + /** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure * @vsi_type: type of VSI + * @ch: ptr to channel * @vf_id: ID of the VF being configured * * returns a pointer to a VSI on success, NULL on failure. */ static struct ice_vsi * -ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) +ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, + struct ice_channel *ch, u16 vf_id) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; @@ -444,10 +480,17 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) if (vsi_type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); - else + else if (vsi_type != ICE_VSI_CHNL) ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); switch (vsi->type) { + case ICE_VSI_SWITCHDEV_CTRL: + if (ice_vsi_alloc_arrays(vsi)) + goto err_rings; + + /* Setup eswitch MSIX irq handler for VSI */ + vsi->irq_handler = ice_eswitch_msix_clean_rings; + break; case ICE_VSI_PF: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; @@ -466,6 +509,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) if (ice_vsi_alloc_arrays(vsi)) goto err_rings; break; + case ICE_VSI_CHNL: + if (!ch) + goto err_rings; + vsi->num_rxq = ch->num_rxq; + vsi->num_txq = ch->num_txq; + vsi->next_base_q = ch->base_q; + break; case ICE_VSI_LB: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; @@ -582,6 +632,9 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) }; int ret; + if (vsi->type == ICE_VSI_CHNL) + return 0; + ret = __ice_vsi_get_qs(&tx_qs_cfg); if (ret) return ret; @@ -606,12 +659,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi) mutex_lock(&pf->avail_q_mutex); - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_alloc_txq(vsi, i) { clear_bit(vsi->txq_map[i], pf->avail_txqs); vsi->txq_map[i] = ICE_INVAL_Q_INDEX; } - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_alloc_rxq(vsi, i) { clear_bit(vsi->rxq_map[i], pf->avail_rxqs); vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; } @@ -700,12 +753,23 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) cap = &pf->hw.func_caps.common_cap; switch (vsi->type) { + case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ vsi->rss_table_size = (u16)cap->rss_table_size; + if (vsi->type == ICE_VSI_CHNL) + vsi->rss_size = min_t(u16, vsi->num_rxq, + BIT(cap->rss_table_entry_width)); + else + vsi->rss_size = min_t(u16, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + break; + case ICE_VSI_SWITCHDEV_CTRL: + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; vsi->rss_size = min_t(u16, num_online_cpus(), BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. 
@@ -775,21 +839,13 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; - bool ena_tc0 = false; u8 netdev_tc = 0; int i; - /* at least TC0 should be enabled by default */ - if (vsi->tc_cfg.numtc) { - if (!(vsi->tc_cfg.ena_tc & BIT(0))) - ena_tc0 = true; - } else { - ena_tc0 = true; - } - - if (ena_tc0) { - vsi->tc_cfg.numtc++; - vsi->tc_cfg.ena_tc |= 1; + if (!vsi->tc_cfg.numtc) { + /* at least TC0 should be enabled by default */ + vsi->tc_cfg.numtc = 1; + vsi->tc_cfg.ena_tc = 1; } num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); @@ -931,6 +987,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); switch (vsi->type) { + case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; @@ -953,6 +1010,28 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) ICE_AQ_VSI_Q_OPT_RSS_HASH_M); } +static void +ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +{ + struct ice_pf *pf = vsi->back; + u16 qcount, qmap; + u8 offset = 0; + int pow; + + qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); + + pow = order_base_2(qcount); + qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & + ICE_AQ_VSI_TC_Q_NUM_M); + + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); + ctxt->info.q_mapping[1] = cpu_to_le16(qcount); +} + /** * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured @@ -980,6 +1059,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; + case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_CHNL: + ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; + break; case ICE_VSI_VF: ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ @@ -990,6 +1073,21 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) goto out; } + /* Handle VLAN pruning for channel VSI if main VSI has VLAN + * prune enabled + */ + if (vsi->type == ICE_VSI_CHNL) { + struct ice_vsi *main_vsi; + + main_vsi = ice_get_main_vsi(pf); + if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi)) + ctxt->info.sw_flags2 |= + ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + else + ctxt->info.sw_flags2 &= + ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + ice_set_dflt_vsi_ctx(ctxt); if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) ice_set_fd_vsi_ctx(ctxt, vsi); @@ -1010,13 +1108,17 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) } ctxt->info.sw_id = vsi->port_info->sw_id; - ice_vsi_setup_q_map(vsi, ctxt); - if (!init_vsi) /* means VSI being updated */ - /* must to indicate which section of VSI context are - * being modified - */ - ctxt->info.valid_sections |= - cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); + if (vsi->type == ICE_VSI_CHNL) { + ice_chnl_vsi_setup_q_map(vsi, ctxt); + } else { + ice_vsi_setup_q_map(vsi, ctxt); + if (!init_vsi) /* means VSI being updated */ + /* must to indicate which section of VSI context are + * being modified + */ + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); + } /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off * respectively @@ -1195,6 +1297,8 @@ static int 
ice_vsi_setup_vector_base(struct ice_vsi *vsi) /* SRIOV doesn't grab irq_tracker entries for each VSI */ if (vsi->type == ICE_VSI_VF) return 0; + if (vsi->type == ICE_VSI_CHNL) + return 0; if (vsi->base_vector) { dev_dbg(dev, "VSI %d has non-zero base vector %d\n", @@ -1249,14 +1353,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) struct ice_q_vector *q_vector = vsi->q_vectors[i]; if (q_vector) { - q_vector->tx.ring = NULL; - q_vector->rx.ring = NULL; + q_vector->tx.tx_ring = NULL; + q_vector->rx.rx_ring = NULL; } } } if (vsi->tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_alloc_txq(vsi, i) { if (vsi->tx_rings[i]) { kfree_rcu(vsi->tx_rings[i], rcu); WRITE_ONCE(vsi->tx_rings[i], NULL); @@ -1264,7 +1368,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) } } if (vsi->rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_alloc_rxq(vsi, i) { if (vsi->rx_rings[i]) { kfree_rcu(vsi->rx_rings[i], rcu); WRITE_ONCE(vsi->rx_rings[i], NULL); @@ -1285,8 +1389,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); /* Allocate Tx rings */ - for (i = 0; i < vsi->alloc_txq; i++) { - struct ice_ring *ring; + ice_for_each_alloc_txq(vsi, i) { + struct ice_tx_ring *ring; /* allocate with kzalloc(), free with kfree_rcu() */ ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -1296,7 +1400,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->q_index = i; ring->reg_idx = vsi->txq_map[i]; - ring->ring_active = false; ring->vsi = vsi; ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; @@ -1305,8 +1408,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) } /* Allocate Rx rings */ - for (i = 0; i < vsi->alloc_rxq; i++) { - struct ice_ring *ring; + ice_for_each_alloc_rxq(vsi, i) { + struct ice_rx_ring *ring; /* allocate with kzalloc(), free with kfree_rcu() */ ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -1315,7 +1418,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->q_index = i; ring->reg_idx = vsi->rxq_map[i]; - ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = dev; @@ -1363,7 +1465,7 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured */ -static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) +int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct device *dev; @@ -1371,7 +1473,25 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) int err; dev = ice_pf_to_dev(pf); - vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); + if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && + (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { + vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); + } else { + vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); + + /* If orig_rss_size is valid and it is less than determined + * main VSI's rss_size, update main VSI's rss_size to be + * orig_rss_size so that when tc-qdisc is deleted, main VSI + * RSS table gets programmed to be correct (whatever it was + * to begin with (prior to setup-tc for ADQ config) + */ + if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && + vsi->orig_rss_size <= vsi->num_rxq) { + vsi->rss_size = vsi->orig_rss_size; + /* now orig_rss_size is used, reset it to zero */ + vsi->orig_rss_size = 0; + } + } lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) @@ -1710,7 +1830,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) return 
ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); } -int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx) +int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) { struct ice_aqc_add_tx_qgrp *qg_buf; int err; @@ -1766,7 +1886,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) * Configure the Tx VSI for operation. */ static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count) +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) { struct ice_aqc_add_tx_qgrp *qg_buf; u16 q_idx = 0; @@ -1817,8 +1937,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) if (ret) return ret; - for (i = 0; i < vsi->num_xdp_txq; i++) - vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]); + ice_for_each_xdp_txq(vsi, i) + vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]); return ret; } @@ -1853,6 +1973,24 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); } +static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc) +{ + switch (rc->type) { + case ICE_RX_CONTAINER: + if (rc->rx_ring) + return rc->rx_ring->q_vector; + break; + case ICE_TX_CONTAINER: + if (rc->tx_ring) + return rc->tx_ring->q_vector; + break; + default: + break; + } + + return NULL; +} + /** * __ice_write_itr - write throttle rate to register * @q_vector: pointer to interrupt data structure @@ -1877,14 +2015,38 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr) { struct ice_q_vector *q_vector; - if (!rc->ring) + q_vector = ice_pull_qvec_from_rc(rc); + if (!q_vector) return; - q_vector = rc->ring->q_vector; - __ice_write_itr(q_vector, rc, itr); } +/** + * ice_set_q_vector_intrl - set up interrupt rate limiting + * @q_vector: the vector to be configured + * + * Interrupt rate limiting is local to the vector, not per-queue so we must + * detect if either ring container has dynamic moderation enabled to decide + * what to set the interrupt rate limit to via INTRL settings. In the case that + * dynamic moderation is disabled on both, write the value with the cached + * setting to make sure INTRL register matches the user visible value. + */ +void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) +{ + if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { + /* in the case of dynamic enabled, cap each vector to no more + * than (4 us) 250,000 ints/sec, which allows low latency + * but still less than 500,000 interrupts per second, which + * reduces CPU a bit in the case of the lowest latency + * setting. The 4 here is a value in microseconds. 
+ */ + ice_write_intrl(q_vector, 4); + } else { + ice_write_intrl(q_vector, q_vector->intrl); + } +} + /** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured @@ -1899,7 +2061,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) u16 txq = 0, rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++) { + ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; u16 reg_idx = q_vector->reg_idx; @@ -2057,7 +2219,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) */ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings, u16 count) + u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) { u16 q_idx; @@ -2122,11 +2284,10 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on * @ena: set to true to enable VLAN pruning and false to disable it - * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode * * returns 0 if VSI is updated, negative otherwise */ -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) { struct ice_vsi_ctx *ctxt; struct ice_pf *pf; @@ -2154,9 +2315,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) else ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; - if (!vlan_promisc) - ctxt->info.valid_sections = - cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); if (status) { @@ -2179,10 +2338,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) { - struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; + if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { + vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; + vsi->tc_cfg.numtc = 1; + return; + } - vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); - vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); + /* set VSI TC information based on DCB config */ + ice_vsi_set_dcb_tc_cfg(vsi); } /** @@ -2295,8 +2458,10 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_CTRL: + case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2393,6 +2558,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) * @vf_id: defines VF ID to which this VSI connects. This field is meant to be * used only for ICE_VSI_VF VSI type. For other VSI types, should * fill-in ICE_INVAL_VFID as input. + * @ch: ptr to channel * * This allocates the sw VSI structure and its queue resources. 
* @@ -2401,7 +2567,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) */ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id) + enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); @@ -2409,10 +2575,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi *vsi; int ret, i; - if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) - vsi = ice_vsi_alloc(pf, vsi_type, vf_id); + if (vsi_type == ICE_VSI_CHNL) + vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID); + else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) + vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id); else - vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID); + vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID); if (!vsi) { dev_err(dev, "could not allocate VSI\n"); @@ -2429,10 +2597,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_alloc_fd_res(vsi); - if (ice_vsi_get_qs(vsi)) { - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", - vsi->idx); - goto unroll_vsi_alloc; + if (vsi_type != ICE_VSI_CHNL) { + if (ice_vsi_get_qs(vsi)) { + dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", + vsi->idx); + goto unroll_vsi_alloc; + } } /* set RSS capabilities */ @@ -2448,6 +2618,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, switch (vsi->type) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2490,6 +2661,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } ice_init_arfs(vsi); break; + case ICE_VSI_CHNL: + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_rss_flow_fld(vsi); + } + break; case ICE_VSI_VF: /* VF driver will take care of creating netdev for this type and * map queues to vectors through Virtchnl, PF driver only @@ -2528,9 +2705,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->alloc_txq; + ice_for_each_traffic_class(i) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) + continue; + + if (vsi->type == ICE_VSI_CHNL) { + if (!vsi->alloc_txq && vsi->num_txq) + max_txqs[i] = vsi->num_txq; + else + max_txqs[i] = pf->num_lan_tx; + } else { + max_txqs[i] = vsi->alloc_txq; + } + } + dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { @@ -2591,7 +2780,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) u32 rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++) { + ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; ice_write_intrl(q_vector, 0); @@ -2757,7 +2946,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL || + vsi->type == ICE_VSI_SWITCHDEV_CTRL) { ice_vsi_close(vsi); } } @@ -2860,7 +3050,8 @@ int ice_vsi_release(struct ice_vsi *vsi) clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } - ice_devlink_destroy_port(vsi); + if (vsi->type == ICE_VSI_PF) + ice_devlink_destroy_pf_port(pf); if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); @@ -3041,7 +3232,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, } vsi->q_vectors[i]->intrl = coalesce[i].intrl; - 
ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl); + ice_set_q_vector_intrl(vsi->q_vectors[i]); } /* the number of queue vectors increased so write whatever is in @@ -3059,7 +3250,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, ice_write_itr(rc, rc->itr_setting); vsi->q_vectors[i]->intrl = coalesce[0].intrl; - ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl); + ice_set_q_vector_intrl(vsi->q_vectors[i]); } } @@ -3144,6 +3335,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) switch (vtype) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -3163,7 +3355,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) ice_vsi_map_rings_to_vectors(vsi); if (ice_is_xdp_ena_vsi(vsi)) { - vsi->num_xdp_txq = vsi->alloc_rxq; + ret = ice_vsi_determine_xdp_res(vsi); + if (ret) + goto err_vectors; ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); if (ret) goto err_vectors; @@ -3190,6 +3384,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) if (ret) goto err_vectors; + break; + case ICE_VSI_CHNL: + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_rss_flow_fld(vsi); + } break; default: break; @@ -3197,14 +3397,30 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) { - max_txqs[i] = vsi->alloc_txq; + /* configure VSI nodes based on number of queues and TC's. + * ADQ creates VSIs for each TC/Channel but doesn't + * allocate queues instead it reconfigures the PF queues + * as per the TC command. So max_txqs should point to the + * PF Tx queues. + */ + if (vtype == ICE_VSI_CHNL) + max_txqs[i] = pf->num_lan_tx; + else + max_txqs[i] = vsi->alloc_txq; if (ice_is_xdp_ena_vsi(vsi)) max_txqs[i] += vsi->num_xdp_txq; } - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + /* If MQPRIO is set, means channel code path, hence for main + * VSI's, use TC as 1 + */ + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + else + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); + if (status) { dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", vsi->vsi_num, ice_stat_str(status)); @@ -3276,7 +3492,6 @@ int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) return 0; } -#ifdef CONFIG_DCB /** * ice_vsi_update_q_map - update our copy of the VSI info with new queue map * @vsi: VSI being configured @@ -3291,6 +3506,146 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) sizeof(vsi->info.tc_mapping)); } +/** + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @ena_tc: TC map to be enabled + */ +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) +{ + struct net_device *netdev = vsi->netdev; + struct ice_pf *pf = vsi->back; + int numtc = vsi->tc_cfg.numtc; + struct ice_dcbx_cfg *dcbcfg; + u8 netdev_tc; + int i; + + if (!netdev) + return; + + /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */ + if (vsi->type == ICE_VSI_CHNL) + return; + + if (!ena_tc) { + netdev_reset_tc(netdev); + return; + } + + if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) + numtc = vsi->all_numtc; + + if (netdev_set_num_tc(netdev, numtc)) + return; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + + ice_for_each_traffic_class(i) + if 
(vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + /* setup TC queue map for CHNL TCs */ + ice_for_each_chnl_tc(i) { + if (!(vsi->all_enatc & BIT(i))) + break; + if (!vsi->mqprio_qopt.qopt.count[i]) + break; + netdev_set_tc_queue(netdev, i, + vsi->mqprio_qopt.qopt.count[i], + vsi->mqprio_qopt.qopt.offset[i]); + } + + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + return; + + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; + + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); + } +} + +/** + * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config + * @vsi: the VSI being configured, + * @ctxt: VSI context structure + * @ena_tc: number of traffic classes to enable + * + * Prepares VSI tc_config to have queue configurations based on MQPRIO options. + */ +static void +ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, + u8 ena_tc) +{ + u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; + u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; + int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; + u8 netdev_tc = 0; + int i; + + vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; + + pow = order_base_2(tc0_qcount); + qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); + + ice_for_each_traffic_class(i) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) { + /* TC is not enabled */ + vsi->tc_cfg.tc_info[i].qoffset = 0; + vsi->tc_cfg.tc_info[i].qcount_rx = 1; + vsi->tc_cfg.tc_info[i].qcount_tx = 1; + vsi->tc_cfg.tc_info[i].netdev_tc = 0; + ctxt->info.tc_mapping[i] = 0; + continue; + } + + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount_rx = vsi->mqprio_qopt.qopt.count[i]; + qcount_tx = vsi->mqprio_qopt.qopt.count[i]; + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; + vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; + vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; + } + + if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { + ice_for_each_chnl_tc(i) { + if (!(vsi->all_enatc & BIT(i))) + continue; + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount_rx = vsi->mqprio_qopt.qopt.count[i]; + qcount_tx = vsi->mqprio_qopt.qopt.count[i]; + } + } + + /* Set actual Tx/Rx queue pairs */ + vsi->num_txq = offset + qcount_tx; + vsi->num_rxq = offset + qcount_rx; + + /* Setup queue TC[0].qmap for given VSI context */ + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); + ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); + + /* Find queue count available for channel VSIs and starting offset + * for channel VSIs + */ + if (tc0_qcount && tc0_qcount < vsi->num_rxq) { + vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; + vsi->next_base_q = tc0_qcount; + } + dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); + dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); + dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", + vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); +} + /** * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map * @vsi: VSI to be configured @@ -3309,6 +3664,9 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) u8 num_tc = 0; dev = ice_pf_to_dev(pf); + if (vsi->tc_cfg.ena_tc == 
ena_tc && + vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) + return ret; ice_for_each_traffic_class(i) { /* build bitmap of enabled TCs */ @@ -3316,6 +3674,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) num_tc++; /* populate max_txqs per TC */ max_txqs[i] = vsi->alloc_txq; + /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are + * zero for CHNL VSI, hence use num_txq instead as max_txqs + */ + if (vsi->type == ICE_VSI_CHNL && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + max_txqs[i] = vsi->num_txq; } vsi->tc_cfg.ena_tc = ena_tc; @@ -3328,7 +3692,11 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ctx->vf_num = 0; ctx->info = vsi->info; - ice_vsi_setup_q_map(vsi, ctx); + if (vsi->type == ICE_VSI_PF && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); + else + ice_vsi_setup_q_map(vsi, ctx); /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); @@ -3339,8 +3707,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) goto out; } - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); + if (vsi->type == ICE_VSI_PF && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, + max_txqs); + else + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); if (status) { dev_err(dev, "VSI %d failed TC config, error %s\n", @@ -3356,20 +3729,19 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) kfree(ctx); return ret; } -#endif /* CONFIG_DCB */ /** * ice_update_ring_stats - Update ring statistics - * @ring: ring to update + * @stats: stats to be updated * @pkts: number of processed packets * @bytes: number of processed bytes * * This function assumes that caller has acquired a u64_stats_sync lock. */ -static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) +static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes) { - ring->stats.bytes += bytes; - ring->stats.pkts += pkts; + stats->bytes += bytes; + stats->pkts += pkts; } /** @@ -3378,10 +3750,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes) * @pkts: number of processed packets * @bytes: number of processed bytes */ -void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) +void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&tx_ring->syncp); - ice_update_ring_stats(tx_ring, pkts, bytes); + ice_update_ring_stats(&tx_ring->stats, pkts, bytes); u64_stats_update_end(&tx_ring->syncp); } @@ -3391,10 +3763,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) * @pkts: number of processed packets * @bytes: number of processed bytes */ -void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) +void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) { u64_stats_update_begin(&rx_ring->syncp); - ice_update_ring_stats(rx_ring, pkts, bytes); + ice_update_ring_stats(&rx_ring->stats, pkts, bytes); u64_stats_update_end(&rx_ring->syncp); } @@ -3546,6 +3918,180 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) return 0; } +/** + * ice_get_link_speed_mbps - get link speed in Mbps + * @vsi: the VSI whose link speed is being queried + * + * Return current VSI link speed and 0 if the speed is unknown. 
+ */ +int ice_get_link_speed_mbps(struct ice_vsi *vsi) +{ + switch (vsi->port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + return SPEED_100000; + case ICE_AQ_LINK_SPEED_50GB: + return SPEED_50000; + case ICE_AQ_LINK_SPEED_40GB: + return SPEED_40000; + case ICE_AQ_LINK_SPEED_25GB: + return SPEED_25000; + case ICE_AQ_LINK_SPEED_20GB: + return SPEED_20000; + case ICE_AQ_LINK_SPEED_10GB: + return SPEED_10000; + case ICE_AQ_LINK_SPEED_5GB: + return SPEED_5000; + case ICE_AQ_LINK_SPEED_2500MB: + return SPEED_2500; + case ICE_AQ_LINK_SPEED_1000MB: + return SPEED_1000; + case ICE_AQ_LINK_SPEED_100MB: + return SPEED_100; + case ICE_AQ_LINK_SPEED_10MB: + return SPEED_10; + case ICE_AQ_LINK_SPEED_UNKNOWN: + default: + return 0; + } +} + +/** + * ice_get_link_speed_kbps - get link speed in Kbps + * @vsi: the VSI whose link speed is being queried + * + * Return current VSI link speed and 0 if the speed is unknown. + */ +int ice_get_link_speed_kbps(struct ice_vsi *vsi) +{ + int speed_mbps; + + speed_mbps = ice_get_link_speed_mbps(vsi); + + return speed_mbps * 1000; +} + +/** + * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate + * @vsi: VSI to be configured + * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit + * + * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit + * profile, otherwise a non-zero value will force a minimum BW limit for the VSI + * on TC 0. + */ +int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) +{ + struct ice_pf *pf = vsi->back; + enum ice_status status; + struct device *dev; + int speed; + + dev = ice_pf_to_dev(pf); + if (!vsi->port_info) { + dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", + vsi->idx, vsi->type); + return -EINVAL; + } + + speed = ice_get_link_speed_kbps(vsi); + if (min_tx_rate > (u64)speed) { + dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", + min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, + speed); + return -EINVAL; + } + + /* Configure min BW for VSI limit */ + if (min_tx_rate) { + status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, + ICE_MIN_BW, min_tx_rate); + if (status) { + dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", + min_tx_rate, ice_vsi_type_str(vsi->type), + vsi->idx); + return -EIO; + } + + dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", + min_tx_rate, ice_vsi_type_str(vsi->type)); + } else { + status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, + vsi->idx, 0, + ICE_MIN_BW); + if (status) { + dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + return -EIO; + } + + dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + } + + return 0; +} + +/** + * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate + * @vsi: VSI to be configured + * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit + * + * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit + * profile, otherwise a non-zero value will force a maximum BW limit for the VSI + * on TC 0. 
+ */ +int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) +{ + struct ice_pf *pf = vsi->back; + enum ice_status status; + struct device *dev; + int speed; + + dev = ice_pf_to_dev(pf); + if (!vsi->port_info) { + dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", + vsi->idx, vsi->type); + return -EINVAL; + } + + speed = ice_get_link_speed_kbps(vsi); + if (max_tx_rate > (u64)speed) { + dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", + max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, + speed); + return -EINVAL; + } + + /* Configure max BW for VSI limit */ + if (max_tx_rate) { + status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, + ICE_MAX_BW, max_tx_rate); + if (status) { + dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", + max_tx_rate, ice_vsi_type_str(vsi->type), + vsi->idx); + return -EIO; + } + + dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", + max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); + } else { + status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, + vsi->idx, 0, + ICE_MAX_BW); + if (status) { + dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + return -EIO; + } + + dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + } + + return 0; +} + /** * ice_set_link - turn on/off physical link * @vsi: VSI to modify physical link on @@ -3582,3 +4128,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) return 0; } + +/** + * ice_is_feature_supported + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to be checked + * + * returns true if feature is supported, false otherwise + */ +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return false; + + return test_bit(f, pf->features); +} + +/** + * ice_set_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to set + */ +static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + set_bit(f, pf->features); +} + +/** + * ice_clear_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to clear + */ +void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + clear_bit(f, pf->features); +} + +/** + * ice_init_feature_support + * @pf: pointer to the struct ice_pf instance + * + * called during init to setup supported feature + */ +void ice_init_feature_support(struct ice_pf *pf) +{ + switch (pf->hw.device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + ice_set_feature_support(pf, ICE_F_DSCP); + if (ice_is_e810t(&pf->hw)) + ice_set_feature_support(pf, ICE_F_SMA_CTRL); + break; + default: + break; + } +} + +/** + * ice_vsi_update_security - update security block in VSI + * @vsi: pointer to VSI structure + * @fill: function pointer to fill ctx + */ +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) +{ + struct ice_vsi_ctx ctx = { 0 }; + + ctx.info = vsi->info; + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + fill(&ctx); + + if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) + return -ENODEV; + + vsi->info = ctx.info; + return 0; +} + +/** + * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx + * @ctx: pointer to VSI 
ctx structure + */ +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | + (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_set_allow_override - allow destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} + +/** + * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index d5a28bf0fc2ccc..6c803407b67d3a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi); int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx); -int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx); +int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); @@ -45,19 +45,24 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi); -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); int ice_set_link(struct ice_vsi *vsi, bool ena); -#ifdef CONFIG_DCB +void ice_vsi_delete(struct ice_vsi *vsi); +int ice_vsi_clear(struct ice_vsi *vsi); + int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); -#endif /* CONFIG_DCB */ + +int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi); + +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, u16 vf_id); + enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch); void ice_napi_del(struct ice_vsi *vsi); @@ -93,9 +98,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); -void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); +void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); -void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); +void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); @@ -103,6 +108,7 @@ int ice_status_to_errno(enum ice_status err); void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); +void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); enum ice_status ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); @@ -116,4 +122,22 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_clear_dflt_vsi(struct ice_sw 
*sw); +int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate); +int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate); +int ice_get_link_speed_kbps(struct ice_vsi *vsi); +int ice_get_link_speed_mbps(struct ice_vsi *vsi); +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)); + +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); + +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); +void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); +void ice_init_feature_support(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 06fa93e597fbc6..f099797f35e375 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -19,6 +19,8 @@ */ #define CREATE_TRACE_POINTS #include "ice_trace.h" +#include "ice_eswitch.h" +#include "ice_tc_lib.h" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" static const char ice_driver_string[] = DRV_SUMMARY; @@ -42,16 +44,26 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ static DEFINE_IDA(ice_aux_ida); +DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key); +EXPORT_SYMBOL(ice_xdp_locking_key); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static int ice_vsi_open(struct ice_vsi *vsi); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_vsi_release_all(struct ice_pf *pf); +static int ice_rebuild_channels(struct ice_pf *pf); +static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr); + +static int +ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, + void *cb_priv, enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)); + bool netif_is_ice(struct net_device *dev) { return dev && (dev->netdev_ops == &ice_netdev_ops); @@ -61,7 +73,7 @@ bool netif_is_ice(struct net_device *dev) * ice_get_tx_pending - returns number of Tx descriptors not processed * @ring: the ring of descriptors */ -static u16 ice_get_tx_pending(struct ice_ring *ring) +static u16 ice_get_tx_pending(struct ice_tx_ring *ring) { u16 head, tail; @@ -100,10 +112,15 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) hw = &vsi->back->hw; - for (i = 0; i < vsi->num_txq; i++) { - struct ice_ring *tx_ring = vsi->tx_rings[i]; + ice_for_each_txq(vsi, i) { + struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; + + if (!tx_ring) + continue; + if (ice_ring_ch_enabled(tx_ring)) + continue; - if (tx_ring && tx_ring->desc) { + if (tx_ring->desc) { /* If packet counter has not changed the queue is * likely stalled, so force an interrupt for this * queue. 
@@ -379,7 +396,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } - ice_cfg_vlan_pruning(vsi, false, false); + ice_cfg_vlan_pruning(vsi, false); } } else { /* Clear Rx filter to remove traffic from wire */ @@ -393,7 +410,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) goto out_promisc; } if (vsi->num_vlan > 1) - ice_cfg_vlan_pruning(vsi, true, false); + ice_cfg_vlan_pruning(vsi, true); } } } @@ -455,17 +472,21 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) } /** - * ice_prepare_for_reset - prep for the core to reset + * ice_prepare_for_reset - prep for reset * @pf: board private structure + * @reset_type: reset type requested * * Inform or close all dependent features in prep for reset. */ static void -ice_prepare_for_reset(struct ice_pf *pf) +ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { struct ice_hw *hw = &pf->hw; + struct ice_vsi *vsi; unsigned int i; + dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); + /* already prepared for reset */ if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; @@ -480,6 +501,38 @@ ice_prepare_for_reset(struct ice_pf *pf) ice_for_each_vf(pf, i) ice_set_vf_state_qs_dis(&pf->vf[i]); + /* release ADQ specific HW and SW resources */ + vsi = ice_get_main_vsi(pf); + if (!vsi) + goto skip; + + /* to be on safe side, reset orig_rss_size so that normal flow + * of deciding rss_size can take precedence + */ + vsi->orig_rss_size = 0; + + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { + if (reset_type == ICE_RESET_PFR) { + vsi->old_ena_tc = vsi->all_enatc; + vsi->old_numtc = vsi->all_numtc; + } else { + ice_remove_q_channels(vsi, true); + + /* for other reset type, do not support channel rebuild + * hence reset needed info + */ + vsi->old_ena_tc = 0; + vsi->all_enatc = 0; + vsi->old_numtc = 0; + vsi->all_numtc = 0; + vsi->req_txq = 0; + vsi->req_rxq = 0; + clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); + memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); + } + } +skip: + /* clear SW filtering DB */ ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ @@ -499,8 +552,7 @@ ice_prepare_for_reset(struct ice_pf *pf) /** * ice_do_reset - Initiate one of many types of resets * @pf: board private structure - * @reset_type: reset type requested - * before this function was called. + * @reset_type: reset type requested before this function was called. */ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { @@ -509,7 +561,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); - ice_prepare_for_reset(pf); + ice_prepare_for_reset(pf, reset_type); /* trigger the reset */ if (ice_reset(hw, reset_type)) { @@ -567,7 +619,7 @@ static void ice_reset_subtask(struct ice_pf *pf) /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) return; - ice_prepare_for_reset(pf); + ice_prepare_for_reset(pf, reset_type); /* make sure we are ready to rebuild */ if (ice_check_reset(&pf->hw)) { @@ -624,7 +676,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); break; case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: - netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) + netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n"); + else + netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); break; default: break; @@ -881,6 +936,29 @@ static void ice_set_dflt_mib(struct ice_pf *pf) kfree(lldpmib); } +/** + * ice_check_phy_fw_load - check if PHY FW load failed + * @pf: pointer to PF struct + * @link_cfg_err: bitmap from the link info structure + * + * check if external PHY FW load failed and print an error message if it did + */ +static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) +{ + if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) { + clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); + return; + } + + if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) + return; + + if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) { + dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n"); + set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); + } +} + /** * ice_check_module_power * @pf: pointer to PF struct @@ -913,6 +991,20 @@ static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) } } +/** + * ice_check_link_cfg_err - check if link configuration failed + * @pf: pointer to the PF struct + * @link_cfg_err: bitmap from the link info structure + * + * print if any link configuration failure happens due to the value in the + * link_cfg_err parameter in the link info structure + */ +static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) +{ + ice_check_module_power(pf, link_cfg_err); + ice_check_phy_fw_load(pf, link_cfg_err); +} + /** * ice_link_event - process the link event * @pf: PF that the link event is associated with @@ -948,7 +1040,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, pi->lport, ice_stat_str(status), ice_aq_str(pi->hw->adminq.sq_last_status)); - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); /* Check if the link state is up after updating link info, and treat * this event as an UP event since the link is actually UP now. 
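ice_check_phy_fw_load() in the hunk above latches the failure so the message is printed once when the bit first appears in link_cfg_err and is re-armed only after the bit clears. A standalone sketch of that report-once pattern follows; the bit value, flag variable, and message text are stand-ins invented for the example, not the driver's definitions.

/* Illustrative sketch only; names and values are placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define PHY_LOAD_FAILURE	0x01	/* stand-in for the AQ link_cfg_err bit */

static bool phy_fw_load_failed;		/* stand-in for the sticky PF flag */

static void check_phy_fw_load(unsigned int link_cfg_err)
{
	if (!(link_cfg_err & PHY_LOAD_FAILURE)) {
		phy_fw_load_failed = false;	/* condition cleared, re-arm */
		return;
	}
	if (phy_fw_load_failed)
		return;				/* already reported once */

	printf("external PHY FW load failed, update the NVM\n");
	phy_fw_load_failed = true;
}

int main(void)
{
	check_phy_fw_load(PHY_LOAD_FAILURE);	/* prints the message */
	check_phy_fw_load(PHY_LOAD_FAILURE);	/* suppressed */
	check_phy_fw_load(0);			/* clears and re-arms */
	check_phy_fw_load(PHY_LOAD_FAILURE);	/* prints again */
	return 0;
}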
@@ -1026,7 +1118,8 @@ static int ice_init_link_events(struct ice_port_info *pi) u16 mask; mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | - ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); + ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL | + ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL)); if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", @@ -1965,7 +2058,8 @@ static int ice_configure_phy(struct ice_vsi *vsi) ice_print_topo_conflict(vsi); - if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && + phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) return -EPERM; if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) @@ -2096,7 +2190,7 @@ static void ice_check_media_subtask(struct ice_pf *pf) if (err) return; - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) @@ -2302,14 +2396,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) irq_num = pf->msix_entries[base + vector].vector; - if (q_vector->tx.ring && q_vector->rx.ring) { + if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); tx_int_idx++; - } else if (q_vector->rx.ring) { + } else if (q_vector->rx.rx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "rx", rx_int_idx++); - } else if (q_vector->tx.ring) { + } else if (q_vector->tx.tx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "tx", tx_int_idx++); } else { @@ -2367,11 +2461,12 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); - int i; + struct ice_tx_desc *tx_desc; + int i, j; - for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_for_each_xdp_txq(vsi, i) { u16 xdp_q_idx = vsi->alloc_txq + i; - struct ice_ring *xdp_ring; + struct ice_tx_ring *xdp_ring; xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); @@ -2380,16 +2475,29 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) xdp_ring->q_index = xdp_q_idx; xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; - xdp_ring->ring_active = false; xdp_ring->vsi = vsi; xdp_ring->netdev = NULL; + xdp_ring->next_dd = ICE_TX_THRESH - 1; + xdp_ring->next_rs = ICE_TX_THRESH - 1; xdp_ring->dev = dev; xdp_ring->count = vsi->num_tx_desc; WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); if (ice_setup_tx_ring(xdp_ring)) goto free_xdp_rings; ice_set_ring_xdp(xdp_ring); - xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); + xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring); + spin_lock_init(&xdp_ring->tx_lock); + for (j = 0; j < xdp_ring->count; j++) { + tx_desc = ICE_TX_DESC(xdp_ring, j); + tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); + } + } + + ice_for_each_rxq(vsi, i) { + if (static_key_enabled(&ice_xdp_locking_key)) + vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; + else + vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i]; } return 0; @@ -2455,6 +2563,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) if (__ice_vsi_get_qs(&xdp_qs_cfg)) goto err_map_xdp; + if (static_key_enabled(&ice_xdp_locking_key)) + netdev_warn(vsi->netdev, + "Could not 
allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); + if (ice_xdp_alloc_setup_rings(vsi)) goto clear_xdp_rings; @@ -2468,11 +2580,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) q_base = vsi->num_xdp_txq - xdp_rings_rem; for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { - struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; xdp_ring->q_vector = q_vector; - xdp_ring->next = q_vector->tx.ring; - q_vector->tx.ring = xdp_ring; + xdp_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = xdp_ring; } xdp_rings_rem -= xdp_rings_per_v; } @@ -2501,7 +2613,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) return 0; clear_xdp_rings: - for (i = 0; i < vsi->num_xdp_txq; i++) + ice_for_each_xdp_txq(vsi, i) if (vsi->xdp_rings[i]) { kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; @@ -2509,7 +2621,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) err_map_xdp: mutex_lock(&pf->avail_q_mutex); - for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_for_each_xdp_txq(vsi, i) { clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; } @@ -2542,25 +2654,25 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, v_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - struct ice_ring *ring; + struct ice_tx_ring *ring; - ice_for_each_ring(ring, q_vector->tx) + ice_for_each_tx_ring(ring, q_vector->tx) if (!ring->tx_buf || !ice_ring_is_xdp(ring)) break; /* restore the value of last node prior to XDP setup */ - q_vector->tx.ring = ring; + q_vector->tx.tx_ring = ring; } free_qmap: mutex_lock(&pf->avail_q_mutex); - for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_for_each_xdp_txq(vsi, i) { clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; } mutex_unlock(&pf->avail_q_mutex); - for (i = 0; i < vsi->num_xdp_txq; i++) + ice_for_each_xdp_txq(vsi, i) if (vsi->xdp_rings[i]) { if (vsi->xdp_rings[i]->desc) ice_free_tx_ring(vsi->xdp_rings[i]); @@ -2571,6 +2683,9 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); vsi->xdp_rings = NULL; + if (static_key_enabled(&ice_xdp_locking_key)) + static_branch_dec(&ice_xdp_locking_key); + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) return 0; @@ -2598,13 +2713,36 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) int i; ice_for_each_rxq(vsi, i) { - struct ice_ring *rx_ring = vsi->rx_rings[i]; + struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; if (rx_ring->xsk_pool) napi_schedule(&rx_ring->q_vector->napi); } } +/** + * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have + * @vsi: VSI to determine the count of XDP Tx qs + * + * returns 0 if Tx qs count is higher than at least half of CPU count, + * -ENOMEM otherwise + */ +int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) +{ + u16 avail = ice_get_avail_txq_count(vsi->back); + u16 cpus = num_possible_cpus(); + + if (avail < cpus / 2) + return -ENOMEM; + + vsi->num_xdp_txq = min_t(u16, avail, cpus); + + if (vsi->num_xdp_txq < cpus) + static_branch_inc(&ice_xdp_locking_key); + + return 0; +} + /** * ice_xdp_setup_prog - Add or remove XDP eBPF program * @vsi: VSI to setup XDP for @@ -2634,10 +2772,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } if (!ice_is_xdp_ena_vsi(vsi) && prog) { - vsi->num_xdp_txq = 
vsi->alloc_rxq; - xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); + xdp_ring_err = ice_vsi_determine_xdp_res(vsi); + if (xdp_ring_err) { + NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); + } else { + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); + } } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_ring_err = ice_destroy_xdp_rings(vsi); if (xdp_ring_err) @@ -3103,6 +3245,9 @@ static void ice_set_netdev_features(struct net_device *netdev) /* enable features */ netdev->features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_HW_TC; + /* encap and VLAN devices inherit default, csumo and tso features */ netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; @@ -3139,7 +3284,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF) { SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } @@ -3182,7 +3327,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) static struct ice_vsi * ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL); +} + +static struct ice_vsi * +ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + struct ice_channel *ch) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch); } /** @@ -3196,7 +3348,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) static struct ice_vsi * ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL); } /** @@ -3210,7 +3362,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi * ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); + return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL); } /** @@ -3235,7 +3387,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, /* Enable VLAN pruning when a VLAN other than 0 is added */ if (!ice_vsi_is_vlan_pruning_ena(vsi)) { - ret = ice_cfg_vlan_pruning(vsi, true, false); + ret = ice_cfg_vlan_pruning(vsi, true); if (ret) return ret; } @@ -3279,12 +3431,69 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, /* Disable pruning when VLAN 0 is the only VLAN rule */ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) - ret = ice_cfg_vlan_pruning(vsi, false, false); + ret = ice_cfg_vlan_pruning(vsi, false); set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); return ret; } +/** + * ice_rep_indr_tc_block_unbind + * @cb_priv: indirection block private data + */ +static void ice_rep_indr_tc_block_unbind(void *cb_priv) +{ + struct ice_indr_block_priv *indr_priv = cb_priv; + + list_del(&indr_priv->list); + kfree(indr_priv); +} + +/** + * ice_tc_indir_block_unregister - Unregister TC indirect block notifications + * @vsi: VSI struct which has the netdev + */ +static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) +{ + struct ice_netdev_priv *np = netdev_priv(vsi->netdev); + + 
flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, + ice_rep_indr_tc_block_unbind); +} + +/** + * ice_tc_indir_block_remove - clean indirect TC block notifications + * @pf: PF structure + */ +static void ice_tc_indir_block_remove(struct ice_pf *pf) +{ + struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); + + if (!pf_vsi) + return; + + ice_tc_indir_block_unregister(pf_vsi); +} + +/** + * ice_tc_indir_block_register - Register TC indirect block notifications + * @vsi: VSI struct which has the netdev + * + * Returns 0 on success, negative value on failure + */ +static int ice_tc_indir_block_register(struct ice_vsi *vsi) +{ + struct ice_netdev_priv *np; + + if (!vsi || !vsi->netdev) + return -EINVAL; + + np = netdev_priv(vsi->netdev); + + INIT_LIST_HEAD(&np->tc_indr_block_priv_list); + return flow_indr_dev_register(ice_indr_setup_tc_cb, np); +} + /** * ice_setup_pf_sw - Setup the HW switch on startup or after reset * @pf: board private structure @@ -3293,6 +3502,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, */ static int ice_setup_pf_sw(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi; int status = 0; @@ -3303,6 +3513,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf) if (!vsi) return -ENOMEM; + /* init channel list */ + INIT_LIST_HEAD(&vsi->ch_list); + status = ice_cfg_netdev(vsi); if (status) { status = -ENODEV; @@ -3311,6 +3524,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf) /* netdev has to be configured before setting frame size */ ice_vsi_cfg_frame_size(vsi); + /* init indirect block notifications */ + status = ice_tc_indir_block_register(vsi); + if (status) { + dev_err(dev, "Failed to register netdev notifier\n"); + goto unroll_cfg_netdev; + } + /* Setup DCB netlink interface */ ice_dcbnl_setup(vsi); @@ -3322,7 +3542,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf) status = ice_set_cpu_rx_rmap(vsi); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", + dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", vsi->vsi_num, status); status = -EINVAL; goto unroll_napi_add; @@ -3335,8 +3555,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf) free_cpu_rx_map: ice_free_cpu_rx_rmap(vsi); - unroll_napi_add: + ice_tc_indir_block_unregister(vsi); +unroll_cfg_netdev: if (vsi) { ice_napi_del(vsi); if (vsi->netdev) { @@ -3538,6 +3759,13 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_left -= needed; } + /* reserve for switchdev */ + needed = ICE_ESWITCH_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + v_budget += needed; + v_left -= needed; + /* total used for non-traffic vectors */ v_other = v_budget; @@ -4170,11 +4398,11 @@ static int ice_register_netdev(struct ice_pf *pf) set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); - err = ice_devlink_create_port(vsi); + err = ice_devlink_create_pf_port(pf); if (err) goto err_devlink_create; - devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); + devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); return 0; err_devlink_create: @@ -4261,12 +4489,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); - err = ice_devlink_register(pf); - if (err) { - dev_err(dev, "ice_devlink_register failed: %d\n", err); - goto err_exit_unroll; - } - #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) hw->debug_mask = debug; @@ -4279,6 +4501,8 @@ ice_probe(struct pci_dev *pdev, const struct 
pci_device_id __always_unused *ent) goto err_exit_unroll; } + ice_init_feature_support(pf); + ice_request_fw(pf); /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be @@ -4414,7 +4638,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_init_link_dflt_override(pf->hw.port_info); - ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); + ice_check_link_cfg_err(pf, + pf->hw.port_info->phy.link_info.link_cfg_err); /* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & @@ -4500,6 +4725,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) dev_warn(dev, "RDMA is not supported on this device\n"); } + ice_devlink_register(pf); return 0; err_init_aux_unroll: @@ -4523,7 +4749,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_devlink_destroy_regions(pf); ice_deinit_hw(hw); err_exit_unroll: - ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return err; @@ -4600,15 +4825,15 @@ static void ice_remove(struct pci_dev *pdev) struct ice_pf *pf = pci_get_drvdata(pdev); int i; - if (!pf) - return; - + ice_devlink_unregister(pf); for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) break; msleep(100); } + ice_tc_indir_block_remove(pf); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); @@ -4640,7 +4865,6 @@ static void ice_remove(struct pci_dev *pdev) ice_deinit_pf(pf); ice_devlink_destroy_regions(pf); ice_deinit_hw(&pf->hw); - ice_devlink_unregister(pf); /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild @@ -4902,7 +5126,7 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { set_bit(ICE_PFR_REQ, pf->state); - ice_prepare_for_reset(pf); + ice_prepare_for_reset(pf, ICE_RESET_PFR); } } @@ -4994,7 +5218,7 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev) if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { set_bit(ICE_PFR_REQ, pf->state); - ice_prepare_for_reset(pf); + ice_prepare_for_reset(pf, ICE_RESET_PFR); } } } @@ -5150,10 +5374,16 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EBUSY; } + if (ice_chnl_dmac_fltr_cnt(pf)) { + netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", + mac); + return -EAGAIN; + } + netif_addr_lock_bh(netdev); ether_addr_copy(old_mac, netdev->dev_addr); /* change the netdev's MAC address */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); + eth_hw_addr_set(netdev, mac); netif_addr_unlock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ @@ -5181,7 +5411,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) netdev_err(netdev, "can't set MAC %pM. 
filter update failed\n", mac); netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, old_mac); + eth_hw_addr_set(netdev, old_mac); netif_addr_unlock_bh(netdev); return err; } @@ -5386,10 +5616,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - ret = ice_cfg_vlan_pruning(vsi, true, false); + ret = ice_cfg_vlan_pruning(vsi, true); else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - ret = ice_cfg_vlan_pruning(vsi, false, false); + ret = ice_cfg_vlan_pruning(vsi, false); if ((features & NETIF_F_NTUPLE) && !(netdev->features & NETIF_F_NTUPLE)) { @@ -5401,6 +5631,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) ice_clear_arfs(vsi); } + /* don't turn off hw_tc_offload when ADQ is already enabled */ + if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { + dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); + return -EACCES; + } + + if ((features & NETIF_F_HW_TC) && + !(netdev->features & NETIF_F_HW_TC)) + set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + else + clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + return ret; } @@ -5450,77 +5692,59 @@ int ice_vsi_cfg(struct ice_vsi *vsi) } /* THEORY OF MODERATION: - * The below code creates custom DIM profiles for use by this driver, because - * the ice driver hardware works differently than the hardware that DIMLIB was + * The ice driver hardware works differently than the hardware that DIMLIB was * originally made for. ice hardware doesn't have packet count limits that * can trigger an interrupt, but it *does* have interrupt rate limit support, - * and this code adds that capability to be used by the driver when it's using - * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver - * for how to "respond" to traffic and interrupts, so this driver uses a - * slightly different set of moderation parameters to get best performance. + * which is hard-coded to a limit of 250,000 ints/second. + * If not using dynamic moderation, the INTRL value can be modified + * by ethtool rx-usecs-high. */ struct ice_dim { /* the throttle rate for interrupts, basically worst case delay before * an initial interrupt fires, value is stored in microseconds. */ u16 itr; - /* the rate limit for interrupts, which can cap a delay from a small - * ITR at a certain amount of interrupts per second. f.e. a 2us ITR - * could yield as much as 500,000 interrupts per second, but with a - * 10us rate limit, it limits to 100,000 interrupts per second. Value - * is stored in microseconds. - */ - u16 intrl; }; /* Make a different profile for Rx that doesn't allow quite so aggressive - * moderation at the high end (it maxes out at 128us or about 8k interrupts a - * second. The INTRL/rate parameters here are only useful to cap small ITR - * values, which is why for larger ITR's - like 128, which can only generate - * 8k interrupts per second, there is no point to rate limit and the values - * are set to zero. The rate limit values do affect latency, and so must - * be reasonably small so to not impact latency sensitive tests. + * moderation at the high end (it maxes out at 126us or about 8k interrupts a + * second. 
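 * As a rule of thumb the resulting interrupt rate is roughly 1,000,000 / ITR
 * in microseconds: the 2 usec entry works out to ~500,000 ints/s (held to
 * 250,000 by the INTRL cap) and the 126 usec entry to ~7,936 ints/s, which is
 * where the per-entry annotations in the tables below come from.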
*/ static const struct ice_dim rx_profile[] = { - {2, 10}, - {8, 16}, - {32, 0}, - {96, 0}, - {128, 0} + {2}, /* 500,000 ints/s, capped at 250K by INTRL */ + {8}, /* 125,000 ints/s */ + {16}, /* 62,500 ints/s */ + {62}, /* 16,129 ints/s */ + {126} /* 7,936 ints/s */ }; /* The transmit profile, which has the same sorts of values * as the previous struct */ static const struct ice_dim tx_profile[] = { - {2, 10}, - {8, 16}, - {64, 0}, - {128, 0}, - {256, 0} + {2}, /* 500,000 ints/s, capped at 250K by INTRL */ + {8}, /* 125,000 ints/s */ + {40}, /* 16,125 ints/s */ + {128}, /* 7,812 ints/s */ + {256} /* 3,906 ints/s */ }; static void ice_tx_dim_work(struct work_struct *work) { struct ice_ring_container *rc; - struct ice_q_vector *q_vector; struct dim *dim; - u16 itr, intrl; + u16 itr; dim = container_of(work, struct dim, work); - rc = container_of(dim, struct ice_ring_container, dim); - q_vector = container_of(rc, struct ice_q_vector, tx); + rc = (struct ice_ring_container *)dim->priv; - if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) - dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; + WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); /* look up the values in our local table */ itr = tx_profile[dim->profile_ix].itr; - intrl = tx_profile[dim->profile_ix].intrl; - ice_trace(tx_dim_work, q_vector, dim); + ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); ice_write_itr(rc, itr); - ice_write_intrl(q_vector, intrl); dim->state = DIM_START_MEASURE; } @@ -5528,28 +5752,65 @@ static void ice_tx_dim_work(struct work_struct *work) static void ice_rx_dim_work(struct work_struct *work) { struct ice_ring_container *rc; - struct ice_q_vector *q_vector; struct dim *dim; - u16 itr, intrl; + u16 itr; dim = container_of(work, struct dim, work); - rc = container_of(dim, struct ice_ring_container, dim); - q_vector = container_of(rc, struct ice_q_vector, rx); + rc = (struct ice_ring_container *)dim->priv; - if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) - dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; + WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); /* look up the values in our local table */ itr = rx_profile[dim->profile_ix].itr; - intrl = rx_profile[dim->profile_ix].intrl; - ice_trace(rx_dim_work, q_vector, dim); + ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); ice_write_itr(rc, itr); - ice_write_intrl(q_vector, intrl); dim->state = DIM_START_MEASURE; } +#define ICE_DIM_DEFAULT_PROFILE_IX 1 + +/** + * ice_init_moderation - set up interrupt moderation + * @q_vector: the vector containing rings to be configured + * + * Set up interrupt moderation registers, with the intent to do the right thing + * when called from reset or from probe, and whether or not dynamic moderation + * is enabled or not. Take special care to write all the registers in both + * dynamic moderation mode or not in order to make sure hardware is in a known + * state. + */ +static void ice_init_moderation(struct ice_q_vector *q_vector) +{ + struct ice_ring_container *rc; + bool tx_dynamic, rx_dynamic; + + rc = &q_vector->tx; + INIT_WORK(&rc->dim.work, ice_tx_dim_work); + rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; + rc->dim.priv = rc; + tx_dynamic = ITR_IS_DYNAMIC(rc); + + /* set the initial TX ITR to match the above */ + ice_write_itr(rc, tx_dynamic ? 
+ tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); + + rc = &q_vector->rx; + INIT_WORK(&rc->dim.work, ice_rx_dim_work); + rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; + rc->dim.priv = rc; + rx_dynamic = ITR_IS_DYNAMIC(rc); + + /* set the initial RX ITR to match the above */ + ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : + rc->itr_setting); + + ice_set_q_vector_intrl(q_vector); +} + /** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured @@ -5564,13 +5825,9 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); - q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; - - INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); - q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + ice_init_moderation(q_vector); - if (q_vector->rx.ring || q_vector->tx.ring) + if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) napi_enable(&q_vector->napi); } } @@ -5630,7 +5887,8 @@ int ice_up(struct ice_vsi *vsi) /** * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring - * @ring: Tx or Rx ring to read stats from + * @syncp: pointer to u64_stats_sync + * @stats: stats that pkts and bytes count will be taken from * @pkts: packets stats counter * @bytes: bytes stats counter * @@ -5638,19 +5896,16 @@ int ice_up(struct ice_vsi *vsi) * that needs to be performed to read u64 values in 32 bit machine. */ static void -ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) +ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats, + u64 *pkts, u64 *bytes) { unsigned int start; - *pkts = 0; - *bytes = 0; - if (!ring) - return; do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - *pkts = ring->stats.pkts; - *bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + start = u64_stats_fetch_begin_irq(syncp); + *pkts = stats.pkts; + *bytes = stats.bytes; + } while (u64_stats_fetch_retry_irq(syncp, start)); } /** @@ -5660,18 +5915,19 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) { struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { - struct ice_ring *ring; - u64 pkts, bytes; + struct ice_tx_ring *ring; + u64 pkts = 0, bytes = 0; ring = READ_ONCE(rings[i]); - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + if (ring) + ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); vsi_stats->tx_packets += pkts; vsi_stats->tx_bytes += bytes; vsi->tx_restart += ring->tx_stats.restart_q; @@ -5710,9 +5966,9 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { - struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]); + struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); vsi_stats->rx_packets += pkts; vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; @@ -5976,7 +6232,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) 
ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.ring || q_vector->tx.ring) + if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) napi_disable(&q_vector->napi); cancel_work_sync(&q_vector->tx.dim.work); @@ -5995,9 +6251,11 @@ int ice_down(struct ice_vsi *vsi) /* Caller of this function is expected to set the * vsi->state ICE_DOWN bit */ - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); + } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -6059,12 +6317,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { - struct ice_ring *ring = vsi->tx_rings[i]; + struct ice_tx_ring *ring = vsi->tx_rings[i]; if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_tx_ring(ring); if (err) break; @@ -6090,12 +6349,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } ice_for_each_rxq(vsi, i) { - struct ice_ring *ring = vsi->rx_rings[i]; + struct ice_rx_ring *ring = vsi->rx_rings[i]; if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_rx_ring(ring); if (err) break; @@ -6168,7 +6428,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi) * * Returns 0 on success, negative value on error */ -static int ice_vsi_open(struct ice_vsi *vsi) +int ice_vsi_open(struct ice_vsi *vsi) { char int_name[ICE_INT_NAME_STR_LEN]; struct ice_pf *pf = vsi->back; @@ -6193,14 +6453,16 @@ static int ice_vsi_open(struct ice_vsi *vsi) if (err) goto err_setup_rx; - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); - if (err) - goto err_set_qs; + if (vsi->type == ICE_VSI_PF) { + /* Notify the stack of the actual queue counts. 
*/ + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); + if (err) + goto err_set_qs; - err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); - if (err) - goto err_set_qs; + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); + if (err) + goto err_set_qs; + } err = ice_up_complete(vsi); if (err) @@ -6235,6 +6497,9 @@ static void ice_vsi_release_all(struct ice_pf *pf) if (!pf->vsi[i]) continue; + if (pf->vsi[i]->type == ICE_VSI_CHNL) + continue; + err = ice_vsi_release(pf->vsi[i]); if (err) dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", @@ -6439,6 +6704,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + if (err) { + dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + goto err_vsi_rebuild; + } + + if (reset_type == ICE_RESET_PFR) { + err = ice_rebuild_channels(pf); + if (err) { + dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", + err); + goto err_vsi_rebuild; + } + } + /* If Flow Director is active */ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); @@ -6985,7 +7265,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_ring *tx_ring = NULL; + struct ice_tx_ring *tx_ring = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; u32 i; @@ -7003,7 +7283,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) } /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_txq; i++) + ice_for_each_txq(vsi, i) if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) if (txqueue == vsi->tx_rings[i]->q_index) { tx_ring = vsi->tx_rings[i]; @@ -7059,6 +7339,1050 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) pf->tx_timeout_recovery_level++; } +/** + * ice_setup_tc_cls_flower - flower classifier offloads + * @np: net device to configure + * @filter_dev: device on which filter is added + * @cls_flower: offload data + */ +static int +ice_setup_tc_cls_flower(struct ice_netdev_priv *np, + struct net_device *filter_dev, + struct flow_cls_offload *cls_flower) +{ + struct ice_vsi *vsi = np->vsi; + + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return ice_add_cls_flower(filter_dev, vsi, cls_flower); + case FLOW_CLS_DESTROY: + return ice_del_cls_flower(vsi, cls_flower); + default: + return -EINVAL; + } +} + +/** + * ice_setup_tc_block_cb - callback handler registered for TC block + * @type: TC SETUP type + * @type_data: TC flower offload data that contains user input + * @cb_priv: netdev private data + */ +static int +ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct ice_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, np->vsi->netdev, + type_data); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_validate_mqprio_qopt - Validate TCF input parameters + * @vsi: Pointer to VSI + * @mqprio_qopt: input parameters for mqprio queue configuration + * + * This function validates MQPRIO params, such as qcount (power of 2 wherever + * needed), and make sure user doesn't specify qcount and BW rate limit + * for TCs, which are more than "num_tc" + */ +static 
int +ice_validate_mqprio_qopt(struct ice_vsi *vsi, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u64 sum_max_rate = 0, sum_min_rate = 0; + int non_power_of_2_qcount = 0; + struct ice_pf *pf = vsi->back; + int max_rss_q_cnt = 0; + struct device *dev; + int i, speed; + u8 num_tc; + + if (vsi->type != ICE_VSI_PF) + return -EINVAL; + + if (mqprio_qopt->qopt.offset[0] != 0 || + mqprio_qopt->qopt.num_tc < 1 || + mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) + return -EINVAL; + + dev = ice_pf_to_dev(pf); + vsi->ch_rss_size = 0; + num_tc = mqprio_qopt->qopt.num_tc; + + for (i = 0; num_tc; i++) { + int qcount = mqprio_qopt->qopt.count[i]; + u64 max_rate, min_rate, rem; + + if (!qcount) + return -EINVAL; + + if (is_power_of_2(qcount)) { + if (non_power_of_2_qcount && + qcount > non_power_of_2_qcount) { + dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", + qcount, non_power_of_2_qcount); + return -EINVAL; + } + if (qcount > max_rss_q_cnt) + max_rss_q_cnt = qcount; + } else { + if (non_power_of_2_qcount && + qcount != non_power_of_2_qcount) { + dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", + qcount, non_power_of_2_qcount); + return -EINVAL; + } + if (qcount < max_rss_q_cnt) { + dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", + qcount, max_rss_q_cnt); + return -EINVAL; + } + max_rss_q_cnt = qcount; + non_power_of_2_qcount = qcount; + } + + /* TC command takes input in K/N/Gbps or K/M/Gbit etc but + * converts the bandwidth rate limit into Bytes/s when + * passing it down to the driver. So convert input bandwidth + * from Bytes/s to Kbps + */ + max_rate = mqprio_qopt->max_rate[i]; + max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); + sum_max_rate += max_rate; + + /* min_rate is minimum guaranteed rate and it can't be zero */ + min_rate = mqprio_qopt->min_rate[i]; + min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); + sum_min_rate += min_rate; + + if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { + dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, + min_rate, ICE_MIN_BW_LIMIT); + return -EINVAL; + } + + iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); + if (rem) { + dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", + i, ICE_MIN_BW_LIMIT); + return -EINVAL; + } + + iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); + if (rem) { + dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", + i, ICE_MIN_BW_LIMIT); + return -EINVAL; + } + + /* min_rate can't be more than max_rate, except when max_rate + * is zero (implies max_rate sought is max line rate). In such + * a case min_rate can be more than max. 
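 * For scale: a 1 Gbps cap arrives from the stack as 125,000,000 Bytes/s and,
 * after the Bytes/s to Kbps conversion above, is summed and checked against
 * the link speed as 1,000,000 Kbps.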
+ */ + if (max_rate && min_rate > max_rate) { + dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", + min_rate, max_rate); + return -EINVAL; + } + + if (i >= mqprio_qopt->qopt.num_tc - 1) + break; + if (mqprio_qopt->qopt.offset[i + 1] != + (mqprio_qopt->qopt.offset[i] + qcount)) + return -EINVAL; + } + if (vsi->num_rxq < + (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) + return -EINVAL; + if (vsi->num_txq < + (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) + return -EINVAL; + + speed = ice_get_link_speed_kbps(vsi); + if (sum_max_rate && sum_max_rate > (u64)speed) { + dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n", + sum_max_rate, speed); + return -EINVAL; + } + if (sum_min_rate && sum_min_rate > (u64)speed) { + dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", + sum_min_rate, speed); + return -EINVAL; + } + + /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ + vsi->ch_rss_size = max_rss_q_cnt; + + return 0; +} + +/** + * ice_add_channel - add a channel by adding VSI + * @pf: ptr to PF device + * @sw_id: underlying HW switching element ID + * @ch: ptr to channel structure + * + * Add a channel (VSI) using add_vsi and queue_map + */ +static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *vsi; + + if (ch->type != ICE_VSI_CHNL) { + dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); + return -EINVAL; + } + + vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); + if (!vsi || vsi->type != ICE_VSI_CHNL) { + dev_err(dev, "create chnl VSI failure\n"); + return -EINVAL; + } + + ch->sw_id = sw_id; + ch->vsi_num = vsi->vsi_num; + ch->info.mapping_flags = vsi->info.mapping_flags; + ch->ch_vsi = vsi; + /* set the back pointer of channel for newly created VSI */ + vsi->ch = ch; + + memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, + sizeof(vsi->info.q_mapping)); + memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + + return 0; +} + +/** + * ice_chnl_cfg_res + * @vsi: the VSI being setup + * @ch: ptr to channel structure + * + * Configure channel specific resources such as rings, vector. 
+ */ +static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) +{ + int i; + + for (i = 0; i < ch->num_txq; i++) { + struct ice_q_vector *tx_q_vector, *rx_q_vector; + struct ice_ring_container *rc; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; + + tx_ring = vsi->tx_rings[ch->base_q + i]; + rx_ring = vsi->rx_rings[ch->base_q + i]; + if (!tx_ring || !rx_ring) + continue; + + /* setup ring being channel enabled */ + tx_ring->ch = ch; + rx_ring->ch = ch; + + /* following code block sets up vector specific attributes */ + tx_q_vector = tx_ring->q_vector; + rx_q_vector = rx_ring->q_vector; + if (!tx_q_vector && !rx_q_vector) + continue; + + if (tx_q_vector) { + tx_q_vector->ch = ch; + /* setup Tx and Rx ITR setting if DIM is off */ + rc = &tx_q_vector->tx; + if (!ITR_IS_DYNAMIC(rc)) + ice_write_itr(rc, rc->itr_setting); + } + if (rx_q_vector) { + rx_q_vector->ch = ch; + /* setup Tx and Rx ITR setting if DIM is off */ + rc = &rx_q_vector->rx; + if (!ITR_IS_DYNAMIC(rc)) + ice_write_itr(rc, rc->itr_setting); + } + } + + /* it is safe to assume that, if channel has non-zero num_t[r]xq, then + * GLINT_ITR register would have written to perform in-context + * update, hence perform flush + */ + if (ch->num_txq || ch->num_rxq) + ice_flush(&vsi->back->hw); +} + +/** + * ice_cfg_chnl_all_res - configure channel resources + * @vsi: pte to main_vsi + * @ch: ptr to channel structure + * + * This function configures channel specific resources such as flow-director + * counter index, and other resources such as queues, vectors, ITR settings + */ +static void +ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) +{ + /* configure channel (aka ADQ) resources such as queues, vectors, + * ITR settings for channel specific vectors and anything else + */ + ice_chnl_cfg_res(vsi, ch); +} + +/** + * ice_setup_hw_channel - setup new channel + * @pf: ptr to PF device + * @vsi: the VSI being setup + * @ch: ptr to channel structure + * @sw_id: underlying HW switching element ID + * @type: type of channel to be created (VMDq2/VF) + * + * Setup new channel (VSI) based on specified type (VMDq2/VF) + * and configures Tx rings accordingly + */ +static int +ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, + struct ice_channel *ch, u16 sw_id, u8 type) +{ + struct device *dev = ice_pf_to_dev(pf); + int ret; + + ch->base_q = vsi->next_base_q; + ch->type = type; + + ret = ice_add_channel(pf, sw_id, ch); + if (ret) { + dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); + return ret; + } + + /* configure/setup ADQ specific resources */ + ice_cfg_chnl_all_res(vsi, ch); + + /* make sure to update the next_base_q so that subsequent channel's + * (aka ADQ) VSI queue map is correct + */ + vsi->next_base_q = vsi->next_base_q + ch->num_rxq; + dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, + ch->num_rxq); + + return 0; +} + +/** + * ice_setup_channel - setup new channel using uplink element + * @pf: ptr to PF device + * @vsi: the VSI being setup + * @ch: ptr to channel structure + * + * Setup new channel (VSI) based on specified type (VMDq2/VF) + * and uplink switching element + */ +static bool +ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, + struct ice_channel *ch) +{ + struct device *dev = ice_pf_to_dev(pf); + u16 sw_id; + int ret; + + if (vsi->type != ICE_VSI_PF) { + dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); + return false; + } + + sw_id = pf->first_sw->sw_id; + + /* create channel (VSI) */ + ret = 
ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); + if (ret) { + dev_err(dev, "failed to setup hw_channel\n"); + return false; + } + dev_dbg(dev, "successfully created channel()\n"); + + return ch->ch_vsi ? true : false; +} + +/** + * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate + * @vsi: VSI to be configured + * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit + * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit + */ +static int +ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) +{ + int err; + + err = ice_set_min_bw_limit(vsi, min_tx_rate); + if (err) + return err; + + return ice_set_max_bw_limit(vsi, max_tx_rate); +} + +/** + * ice_create_q_channel - function to create channel + * @vsi: VSI to be configured + * @ch: ptr to channel (it contains channel specific params) + * + * This function creates channel (VSI) using num_queues specified by user, + * reconfigs RSS if needed. + */ +static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + + if (!ch) + return -EINVAL; + + dev = ice_pf_to_dev(pf); + if (!ch->num_txq || !ch->num_rxq) { + dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); + return -EINVAL; + } + + if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { + dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", + vsi->cnt_q_avail, ch->num_txq); + return -EINVAL; + } + + if (!ice_setup_channel(pf, vsi, ch)) { + dev_info(dev, "Failed to setup channel\n"); + return -EINVAL; + } + /* configure BW rate limit */ + if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { + int ret; + + ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, + ch->min_tx_rate); + if (ret) + dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", + ch->max_tx_rate, ch->ch_vsi->vsi_num); + else + dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", + ch->max_tx_rate, ch->ch_vsi->vsi_num); + } + + vsi->cnt_q_avail -= ch->num_txq; + + return 0; +} + +/** + * ice_rem_all_chnl_fltrs - removes all channel filters + * @pf: ptr to PF, TC-flower based filter are tracked at PF level + * + * Remove all advanced switch filters only if they are channel specific + * tc-flower based filter + */ +static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) +{ + struct ice_tc_flower_fltr *fltr; + struct hlist_node *node; + + /* to remove all channel filters, iterate an ordered list of filters */ + hlist_for_each_entry_safe(fltr, node, + &pf->tc_flower_fltr_list, + tc_flower_node) { + struct ice_rule_query_data rule; + int status; + + /* for now process only channel specific filters */ + if (!ice_is_chnl_fltr(fltr)) + continue; + + rule.rid = fltr->rid; + rule.rule_id = fltr->rule_id; + rule.vsi_handle = fltr->dest_id; + status = ice_rem_adv_rule_by_id(&pf->hw, &rule); + if (status) { + if (status == -ENOENT) + dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", + rule.rule_id); + else + dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", + status); + } else if (fltr->dest_vsi) { + /* update advanced switch filter count */ + if (fltr->dest_vsi->type == ICE_VSI_CHNL) { + u32 flags = fltr->flags; + + fltr->dest_vsi->num_chnl_fltr--; + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_ENC_DST_MAC)) + pf->num_dmac_chnl_fltrs--; + } + } + + hlist_del(&fltr->tc_flower_node); + kfree(fltr); + } +} + +/** + * ice_remove_q_channels - Remove queue channels for the TCs + * 
@vsi: VSI to be configured + * @rem_fltr: delete advanced switch filter or not + * + * Remove queue channels for the TCs + */ +static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) +{ + struct ice_channel *ch, *ch_tmp; + struct ice_pf *pf = vsi->back; + int i; + + /* remove all tc-flower based filter if they are channel filters only */ + if (rem_fltr) + ice_rem_all_chnl_fltrs(pf); + + /* perform cleanup for channels if they exist */ + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + struct ice_vsi *ch_vsi; + + list_del(&ch->list); + ch_vsi = ch->ch_vsi; + if (!ch_vsi) { + kfree(ch); + continue; + } + + /* Reset queue contexts */ + for (i = 0; i < ch->num_rxq; i++) { + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; + + tx_ring = vsi->tx_rings[ch->base_q + i]; + rx_ring = vsi->rx_rings[ch->base_q + i]; + if (tx_ring) { + tx_ring->ch = NULL; + if (tx_ring->q_vector) + tx_ring->q_vector->ch = NULL; + } + if (rx_ring) { + rx_ring->ch = NULL; + if (rx_ring->q_vector) + rx_ring->q_vector->ch = NULL; + } + } + + /* clear the VSI from scheduler tree */ + ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); + + /* Delete VSI from FW */ + ice_vsi_delete(ch->ch_vsi); + + /* Delete VSI from PF and HW VSI arrays */ + ice_vsi_clear(ch->ch_vsi); + + /* free the channel */ + kfree(ch); + } + + /* clear the channel VSI map which is stored in main VSI */ + ice_for_each_chnl_tc(i) + vsi->tc_map_vsi[i] = NULL; + + /* reset main VSI's all TC information */ + vsi->all_enatc = 0; + vsi->all_numtc = 0; +} + +/** + * ice_rebuild_channels - rebuild channel + * @pf: ptr to PF + * + * Recreate channel VSIs and replay filters + */ +static int ice_rebuild_channels(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *main_vsi; + bool rem_adv_fltr = true; + struct ice_channel *ch; + struct ice_vsi *vsi; + int tc_idx = 1; + int i, err; + + main_vsi = ice_get_main_vsi(pf); + if (!main_vsi) + return 0; + + if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || + main_vsi->old_numtc == 1) + return 0; /* nothing to be done */ + + /* reconfigure main VSI based on old value of TC and cached values + * for MQPRIO opts + */ + err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); + if (err) { + dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", + main_vsi->old_ena_tc, main_vsi->vsi_num); + return err; + } + + /* rebuild ADQ VSIs */ + ice_for_each_vsi(pf, i) { + enum ice_vsi_type type; + + vsi = pf->vsi[i]; + if (!vsi || vsi->type != ICE_VSI_CHNL) + continue; + + type = vsi->type; + + /* rebuild ADQ VSI */ + err = ice_vsi_rebuild(vsi, true); + if (err) { + dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", + ice_vsi_type_str(type), vsi->idx, err); + goto cleanup; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + + /* replay filters for the VSI */ + err = ice_replay_vsi(&pf->hw, vsi->idx); + if (err) { + dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", + ice_vsi_type_str(type), err, vsi->idx); + rem_adv_fltr = false; + goto cleanup; + } + dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", + ice_vsi_type_str(type), vsi->idx); + + /* store ADQ VSI at correct TC index in main VSI's + * map of TC to VSI + */ + main_vsi->tc_map_vsi[tc_idx++] = vsi; + } + + /* ADQ VSI(s) has been rebuilt successfully, so setup + * channel for main VSI's Tx and Rx rings + */ + 
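	/* each channel below re-attaches its slice of the main VSI's rings and
	 * q_vectors via ice_cfg_chnl_all_res() and then replays any max/min Tx
	 * rate limits cached on the channel (already converted to Kbps when the
	 * channel was created)
	 */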
list_for_each_entry(ch, &main_vsi->ch_list, list) { + struct ice_vsi *ch_vsi; + + ch_vsi = ch->ch_vsi; + if (!ch_vsi) + continue; + + /* reconfig channel resources */ + ice_cfg_chnl_all_res(main_vsi, ch); + + /* replay BW rate limit if it is non-zero */ + if (!ch->max_tx_rate && !ch->min_tx_rate) + continue; + + err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, + ch->min_tx_rate); + if (err) + dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", + err, ch->max_tx_rate, ch->min_tx_rate, + ch_vsi->vsi_num); + else + dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", + ch->max_tx_rate, ch->min_tx_rate, + ch_vsi->vsi_num); + } + + /* reconfig RSS for main VSI */ + if (main_vsi->ch_rss_size) + ice_vsi_cfg_rss_lut_key(main_vsi); + + return 0; + +cleanup: + ice_remove_q_channels(main_vsi, rem_adv_fltr); + return err; +} + +/** + * ice_create_q_channels - Add queue channel for the given TCs + * @vsi: VSI to be configured + * + * Configures queue channel mapping to the given TCs + */ +static int ice_create_q_channels(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_channel *ch; + int ret = 0, i; + + ice_for_each_chnl_tc(i) { + if (!(vsi->all_enatc & BIT(i))) + continue; + + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) { + ret = -ENOMEM; + goto err_free; + } + INIT_LIST_HEAD(&ch->list); + ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; + ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; + ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; + ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; + ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; + + /* convert to Kbits/s */ + if (ch->max_tx_rate) + ch->max_tx_rate = div_u64(ch->max_tx_rate, + ICE_BW_KBPS_DIVISOR); + if (ch->min_tx_rate) + ch->min_tx_rate = div_u64(ch->min_tx_rate, + ICE_BW_KBPS_DIVISOR); + + ret = ice_create_q_channel(vsi, ch); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "failed creating channel TC:%d\n", i); + kfree(ch); + goto err_free; + } + list_add_tail(&ch->list, &vsi->ch_list); + vsi->tc_map_vsi[i] = ch->ch_vsi; + dev_dbg(ice_pf_to_dev(pf), + "successfully created channel: VSI %pK\n", ch->ch_vsi); + } + return 0; + +err_free: + ice_remove_q_channels(vsi, false); + + return ret; +} + +/** + * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes + * @netdev: net device to configure + * @type_data: TC offload data + */ +static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 mode, ena_tc_qdisc = 0; + int cur_txq, cur_rxq; + u8 hw = 0, num_tcf; + struct device *dev; + int ret, i; + + dev = ice_pf_to_dev(pf); + num_tcf = mqprio_qopt->qopt.num_tc; + hw = mqprio_qopt->qopt.hw; + mode = mqprio_qopt->mode; + if (!hw) { + clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); + vsi->ch_rss_size = 0; + memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); + goto config_tcf; + } + + /* Generate queue region map for number of TCF requested */ + for (i = 0; i < num_tcf; i++) + ena_tc_qdisc |= BIT(i); + + switch (mode) { + case TC_MQPRIO_MODE_CHANNEL: + + ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); + if (ret) { + netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", + ret); + return ret; + } + memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); + set_bit(ICE_FLAG_TC_MQPRIO, 
pf->flags); + /* don't assume state of hw_tc_offload during driver load + * and set the flag for TC flower filter if hw_tc_offload + * already ON + */ + if (vsi->netdev->features & NETIF_F_HW_TC) + set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + break; + default: + return -EINVAL; + } + +config_tcf: + + /* Requesting same TCF configuration as already enabled */ + if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && + mode != TC_MQPRIO_MODE_CHANNEL) + return 0; + + /* Pause VSI queues */ + ice_dis_vsi(vsi, true); + + if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + ice_remove_q_channels(vsi, true); + + if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { + vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), + num_online_cpus()); + vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), + num_online_cpus()); + } else { + /* logic to rebuild VSI, same like ethtool -L */ + u16 offset = 0, qcount_tx = 0, qcount_rx = 0; + + for (i = 0; i < num_tcf; i++) { + if (!(ena_tc_qdisc & BIT(i))) + continue; + + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount_rx = vsi->mqprio_qopt.qopt.count[i]; + qcount_tx = vsi->mqprio_qopt.qopt.count[i]; + } + vsi->req_txq = offset + qcount_tx; + vsi->req_rxq = offset + qcount_rx; + + /* store away original rss_size info, so that it gets reused + * form ice_vsi_rebuild during tc-qdisc delete stage - to + * determine, what should be the rss_sizefor main VSI + */ + vsi->orig_rss_size = vsi->rss_size; + } + + /* save current values of Tx and Rx queues before calling VSI rebuild + * for fallback option + */ + cur_txq = vsi->num_txq; + cur_rxq = vsi->num_rxq; + + /* proceed with rebuild main VSI using correct number of queues */ + ret = ice_vsi_rebuild(vsi, false); + if (ret) { + /* fallback to current number of queues */ + dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); + vsi->req_txq = cur_txq; + vsi->req_rxq = cur_rxq; + clear_bit(ICE_RESET_FAILED, pf->state); + if (ice_vsi_rebuild(vsi, false)) { + dev_err(dev, "Rebuild of main VSI failed again\n"); + return ret; + } + } + + vsi->all_numtc = num_tcf; + vsi->all_enatc = ena_tc_qdisc; + ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); + if (ret) { + netdev_err(netdev, "failed configuring TC for VSI id=%d\n", + vsi->vsi_num); + goto exit; + } + + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; + + /* set TC0 rate limit if specified */ + if (max_tx_rate || min_tx_rate) { + /* convert to Kbits/s */ + if (max_tx_rate) + max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); + if (min_tx_rate) + min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); + + ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); + if (!ret) { + dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", + max_tx_rate, min_tx_rate, vsi->vsi_num); + } else { + dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", + max_tx_rate, min_tx_rate, vsi->vsi_num); + goto exit; + } + } + ret = ice_create_q_channels(vsi); + if (ret) { + netdev_err(netdev, "failed configuring queue channels\n"); + goto exit; + } else { + netdev_dbg(netdev, "successfully configured channels\n"); + } + } + + if (vsi->ch_rss_size) + ice_vsi_cfg_rss_lut_key(vsi); + +exit: + /* if error, reset the all_numtc and all_enatc */ + if (ret) { + vsi->all_numtc = 0; + vsi->all_enatc = 0; + } + /* resume VSI */ + ice_ena_vsi(vsi, true); + + return ret; +} + +static LIST_HEAD(ice_block_cb_list); + +static int +ice_setup_tc(struct net_device *netdev, 
enum tc_setup_type type, + void *type_data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + int err; + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &ice_block_cb_list, + ice_setup_tc_block_cb, + np, np, true); + case TC_SETUP_QDISC_MQPRIO: + /* setup traffic classifier for receive side */ + mutex_lock(&pf->tc_mutex); + err = ice_setup_tc_mqprio_qdisc(netdev, type_data); + mutex_unlock(&pf->tc_mutex); + return err; + default: + return -EOPNOTSUPP; + } + return -EOPNOTSUPP; +} + +static struct ice_indr_block_priv * +ice_indr_block_priv_lookup(struct ice_netdev_priv *np, + struct net_device *netdev) +{ + struct ice_indr_block_priv *cb_priv; + + list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { + if (!cb_priv->netdev) + return NULL; + if (cb_priv->netdev == netdev) + return cb_priv; + } + return NULL; +} + +static int +ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, + void *indr_priv) +{ + struct ice_indr_block_priv *priv = indr_priv; + struct ice_netdev_priv *np = priv->np; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, priv->netdev, + (struct flow_cls_offload *) + type_data); + default: + return -EOPNOTSUPP; + } +} + +static int +ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, + struct ice_netdev_priv *np, + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +{ + struct ice_indr_block_priv *indr_priv; + struct flow_block_cb *block_cb; + + if (!ice_is_tunnel_supported(netdev) && + !(is_vlan_dev(netdev) && + vlan_dev_real_dev(netdev) == np->vsi->netdev)) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case FLOW_BLOCK_BIND: + indr_priv = ice_indr_block_priv_lookup(np, netdev); + if (indr_priv) + return -EEXIST; + + indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); + if (!indr_priv) + return -ENOMEM; + + indr_priv->netdev = netdev; + indr_priv->np = np; + list_add(&indr_priv->list, &np->tc_indr_block_priv_list); + + block_cb = + flow_indr_block_cb_alloc(ice_indr_setup_block_cb, + indr_priv, indr_priv, + ice_rep_indr_tc_block_unbind, + f, netdev, sch, data, np, + cleanup); + + if (IS_ERR(block_cb)) { + list_del(&indr_priv->list); + kfree(indr_priv); + return PTR_ERR(block_cb); + } + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &ice_block_cb_list); + break; + case FLOW_BLOCK_UNBIND: + indr_priv = ice_indr_block_priv_lookup(np, netdev); + if (!indr_priv) + return -ENOENT; + + block_cb = flow_block_cb_lookup(f->block, + ice_indr_setup_block_cb, + indr_priv); + if (!block_cb) + return -ENOENT; + + flow_indr_block_cb_remove(block_cb, f); + + list_del(&block_cb->driver_list); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int +ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, + void *cb_priv, enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +{ + switch (type) { + case TC_SETUP_BLOCK: + return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, + data, cleanup); + + default: + return -EOPNOTSUPP; + } +} + /** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure @@ -7117,7 +8441,7 @@ int ice_open_internal(struct net_device *netdev) return -EIO; } - ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + 
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { @@ -7245,6 +8569,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, .ndo_start_xmit = ice_start_xmit, + .ndo_select_queue = ice_select_queue, .ndo_features_check = ice_features_check, .ndo_set_rx_mode = ice_set_rx_mode, .ndo_set_mac_address = ice_set_mac_address, @@ -7260,8 +8585,10 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_set_vf_vlan = ice_set_vf_port_vlan, .ndo_set_vf_link_state = ice_set_vf_link_state, .ndo_get_vf_stats = ice_get_vf_stats, + .ndo_set_vf_rate = ice_set_vf_bw, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_setup_tc = ice_setup_tc, .ndo_set_features = ice_set_features, .ndo_bridge_getlink = ice_bridge_getlink, .ndo_bridge_setlink = ice_bridge_setlink, diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 199aa5b715402a..dc1b0e9e6df5fd 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -3,6 +3,56 @@ #ifndef _ICE_PROTOCOL_TYPE_H_ #define _ICE_PROTOCOL_TYPE_H_ +#define ICE_IPV6_ADDR_LENGTH 16 + +/* Each recipe can match up to 5 different fields. Fields to match can be meta- + * data, values extracted from packet headers, or results from other recipes. + * One of the 5 fields is reserved for matching the switch ID. So, up to 4 + * recipes can provide intermediate results to another one through chaining, + * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + */ +#define ICE_NUM_WORDS_RECIPE 4 + +/* Max recipes that can be chained */ +#define ICE_MAX_CHAIN_RECIPE 5 + +/* 1 word reserved for switch ID from allowed 5 words. + * So a recipe can have max 4 words. And you can chain 5 such recipes + * together. So maximum words that can be programmed for look up is 5 * 4. 
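+ * In other words, ICE_MAX_CHAIN_WORDS = ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE = 20.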
+ */ +#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) + +/* Field vector index corresponding to chaining */ +#define ICE_CHAIN_FV_INDEX_START 47 + +enum ice_protocol_type { + ICE_MAC_OFOS = 0, + ICE_MAC_IL, + ICE_ETYPE_OL, + ICE_VLAN_OFOS, + ICE_IPV4_OFOS, + ICE_IPV4_IL, + ICE_IPV6_OFOS, + ICE_IPV6_IL, + ICE_TCP_IL, + ICE_UDP_OF, + ICE_UDP_ILOS, + ICE_VXLAN, + ICE_GENEVE, + ICE_NVGRE, + ICE_VXLAN_GPE, + ICE_SCTP_IL, + ICE_PROTOCOL_LAST +}; + +enum ice_sw_tunnel_type { + ICE_NON_TUN = 0, + ICE_SW_TUN_VXLAN, + ICE_SW_TUN_GENEVE, + ICE_SW_TUN_NVGRE, + ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ +}; + /* Decoders for ice_prot_id: * - F: First * - I: Inner @@ -35,4 +85,158 @@ enum ice_prot_id { ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; + +#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */ + +#define ICE_MAC_OFOS_HW 1 +#define ICE_MAC_IL_HW 4 +#define ICE_ETYPE_OL_HW 9 +#define ICE_VLAN_OF_HW 16 +#define ICE_VLAN_OL_HW 17 +#define ICE_IPV4_OFOS_HW 32 +#define ICE_IPV4_IL_HW 33 +#define ICE_IPV6_OFOS_HW 40 +#define ICE_IPV6_IL_HW 41 +#define ICE_TCP_IL_HW 49 +#define ICE_UDP_ILOS_HW 53 +#define ICE_GRE_OF_HW 64 + +#define ICE_UDP_OF_HW 52 /* UDP Tunnels */ +#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */ + +#define ICE_MDID_SIZE 2 +#define ICE_TUN_FLAG_MDID 21 +#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) +#define ICE_TUN_FLAG_MASK 0xFF + +#define ICE_TUN_FLAG_FV_IND 2 + +/* Mapping of software defined protocol ID to hardware defined protocol ID */ +struct ice_protocol_entry { + enum ice_protocol_type type; + u8 protocol_id; +}; + +struct ice_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; +}; + +struct ice_ethtype_hdr { + __be16 ethtype_id; +}; + +struct ice_ether_vlan_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 vlan_id; +}; + +struct ice_vlan_hdr { + __be16 type; + __be16 vlan; +}; + +struct ice_ipv4_hdr { + u8 version; + u8 tos; + __be16 total_length; + __be16 id; + __be16 frag_off; + u8 time_to_live; + u8 protocol; + __be16 check; + __be32 src_addr; + __be32 dst_addr; +}; + +struct ice_ipv6_hdr { + __be32 be_ver_tc_flow; + __be16 payload_len; + u8 next_hdr; + u8 hop_limit; + u8 src_addr[ICE_IPV6_ADDR_LENGTH]; + u8 dst_addr[ICE_IPV6_ADDR_LENGTH]; +}; + +struct ice_sctp_hdr { + __be16 src_port; + __be16 dst_port; + __be32 verification_tag; + __be32 check; +}; + +struct ice_l4_hdr { + __be16 src_port; + __be16 dst_port; + __be16 len; + __be16 check; +}; + +struct ice_udp_tnl_hdr { + __be16 field; + __be16 proto_type; + __be32 vni; /* only use lower 24-bits */ +}; + +struct ice_nvgre_hdr { + __be16 flags; + __be16 protocol; + __be32 tni_flow; +}; + +union ice_prot_hdr { + struct ice_ether_hdr eth_hdr; + struct ice_ethtype_hdr ethertype; + struct ice_vlan_hdr vlan_hdr; + struct ice_ipv4_hdr ipv4_hdr; + struct ice_ipv6_hdr ipv6_hdr; + struct ice_l4_hdr l4_hdr; + struct ice_sctp_hdr sctp_hdr; + struct ice_udp_tnl_hdr tnl_hdr; + struct ice_nvgre_hdr nvgre_hdr; +}; + +/* This is mapping table entry that maps every word within a given protocol + * structure to the real byte offset as per the specification of that + * protocol header. + * for e.g. 
dst address is 3 words in ethertype header and corresponding bytes + * are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 + */ +struct ice_prot_ext_tbl_entry { + enum ice_protocol_type prot_type; + /* Byte offset into header of given protocol type */ + u8 offs[sizeof(union ice_prot_hdr)]; +}; + +/* Extractions to be looked up for a given recipe */ +struct ice_prot_lkup_ext { + u16 prot_type; + u8 n_val_words; + /* create a buffer to hold max words per recipe */ + u16 field_off[ICE_MAX_CHAIN_WORDS]; + u16 field_mask[ICE_MAX_CHAIN_WORDS]; + + struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; + + /* Indicate field offsets that have field vector indices assigned */ + DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS); +}; + +struct ice_pref_recipe_group { + u8 n_val_pairs; /* Number of valid pairs */ + struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; + u16 mask[ICE_NUM_WORDS_RECIPE]; +}; + +struct ice_recp_grp_entry { + struct list_head l_entry; + +#define ICE_INVAL_CHAIN_IND 0xFF + u16 rid; + u8 chain_idx; + u16 fv_idx[ICE_NUM_WORDS_RECIPE]; + u16 fv_mask[ICE_NUM_WORDS_RECIPE]; + struct ice_pref_recipe_group r_group; +}; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index d1ef3d48a4b036..bf7247c6f58e25 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -6,6 +6,252 @@ #define E810_OUT_PROP_DELAY_NS 1 +static const struct ptp_pin_desc ice_pin_desc_e810t[] = { + /* name idx func chan */ + { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, + { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, + { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, + { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, + { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, +}; + +/** + * ice_get_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Read the configuration of the SMA control logic and put it into the + * ptp_pin_desc structure + */ +static int +ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) +{ + u8 data, i; + int status; + + /* Read initial pin state */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* initialize with defaults */ + for (i = 0; i < NUM_PTP_PINS_E810T; i++) { + snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; + } + + /* Parse SMA1/UFL1 */ + switch (data & ICE_SMA1_MASK_E810T) { + case ICE_SMA1_MASK_E810T: + default: + ptp_pins[SMA1].func = PTP_PF_NONE; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_DIR_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_PEROUT; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_TX_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case 0: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_PEROUT; + break; + } + + /* Parse SMA2/UFL2 */ + switch (data & ICE_SMA2_MASK_E810T) { + case ICE_SMA2_MASK_E810T: + default: + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_EXTTS; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_NONE; + 
break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + case ICE_SMA2_DIR_EN_E810T: + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + } + + return 0; +} + +/** + * ice_ptp_set_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Set the configuration of the SMA control logic based on the configuration in + * num_pins parameter + */ +static int +ice_ptp_set_sma_config_e810t(struct ice_hw *hw, + const struct ptp_pin_desc *ptp_pins) +{ + int status; + u8 data; + + /* SMA1 and UFL1 cannot be set to TX at the same time */ + if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_PEROUT) + return -EINVAL; + + /* SMA2 and UFL2 cannot be set to RX at the same time */ + if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_EXTTS) + return -EINVAL; + + /* Read initial pin state value */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* Set the right sate based on the desired configuration */ + data &= ~ICE_SMA1_MASK_E810T; + if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); + data |= ICE_SMA1_MASK_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX"); + data |= ICE_SMA1_TX_EN_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + /* U.FL 1 TX will always enable SMA 1 RX */ + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 TX"); + data |= ICE_SMA1_DIR_EN_E810T; + } + + data &= ~ICE_SMA2_MASK_E810T; + if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); + data |= ICE_SMA2_MASK_E810T; + } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 RX"); + data |= (ICE_SMA2_TX_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "UFL2 RX"); + data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX"); + data |= (ICE_SMA2_DIR_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); + data |= ICE_SMA2_DIR_EN_E810T; + } + + return ice_write_sma_ctrl_e810t(hw, data); +} + +/** + * ice_ptp_set_sma_e810t + * @info: the driver's PTP info structure + * @pin: pin index in kernel structure + * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) + * + * Set the configuration of a single SMA pin + */ +static int +ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func) +{ + struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_hw *hw = 
&pf->hw; + int err; + + if (pin < SMA1 || func > PTP_PF_PEROUT) + return -EOPNOTSUPP; + + err = ice_get_sma_config_e810t(hw, ptp_pins); + if (err) + return err; + + /* Disable the same function on the other pin sharing the channel */ + if (pin == SMA1 && ptp_pins[UFL1].func == func) + ptp_pins[UFL1].func = PTP_PF_NONE; + if (pin == UFL1 && ptp_pins[SMA1].func == func) + ptp_pins[SMA1].func = PTP_PF_NONE; + + if (pin == SMA2 && ptp_pins[UFL2].func == func) + ptp_pins[UFL2].func = PTP_PF_NONE; + if (pin == UFL2 && ptp_pins[SMA2].func == func) + ptp_pins[SMA2].func = PTP_PF_NONE; + + /* Set up new pin function in the temp table */ + ptp_pins[pin].func = func; + + return ice_ptp_set_sma_config_e810t(hw, ptp_pins); +} + +/** + * ice_verify_pin_e810t + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Verify if pin supports requested pin function. If the Check pins consistency. + * Reconfigure the SMA logic attached to the given pin to enable its + * desired functionality + */ +static int +ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* Don't allow channel reassignment */ + if (chan != ice_pin_desc_e810t[pin].chan) + return -EOPNOTSUPP; + + /* Check if functions are properly assigned */ + switch (func) { + case PTP_PF_NONE: + break; + case PTP_PF_EXTTS: + if (pin == UFL1) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin == UFL2 || pin == GNSS) + return -EOPNOTSUPP; + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + + return ice_ptp_set_sma_e810t(info, pin, func); +} + /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; + bool sma_pres = false; unsigned int chan; u32 gpio_pin; int err; + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + sma_pres = true; + switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (chan == PPS_CLK_GEN_CHAN) + if (sma_pres) { + if (chan == ice_pin_desc_e810t[SMA1].chan) + clk_cfg.gpio_pin = GPIO_20; + else if (chan == ice_pin_desc_e810t[SMA2].chan) + clk_cfg.gpio_pin = GPIO_22; + else + return -1; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; + } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; - else + } else { clk_cfg.gpio_pin = chan; + } clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); @@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - gpio_pin = chan; + if (sma_pres) { + if (chan < ice_pin_desc_e810t[SMA2].chan) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else { + gpio_pin = chan; + } err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, rq->extts.flags); @@ -1012,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) * The timestamp is in ns, so we must convert the result first. 
*/ void -ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, +ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { u32 ts_high; @@ -1037,14 +1312,94 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, } } +/** + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. + */ +static void +ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; +} + +/** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. + */ +static void +ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } + + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) + ice_ptp_disable_sma_pins_e810t(pf, info); +} + +/** + * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + /* Check if SMA controller is in the netlist */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) && + !ice_is_pca9575_present(&pf->hw)) + ice_clear_feature_support(pf, ICE_F_SMA_CTRL); + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810_NO_SMA; + info->n_per_out = N_PER_OUT_E810T_NO_SMA; + return; + } + + info->n_per_out = N_PER_OUT_E810T; + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_PTP_PINS_E810T; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); +} + /** * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs * @info: PTP clock capabilities */ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; - info->n_ext_ts = E810_N_EXT_TS; + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; } /** @@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) { info->enable = ice_ptp_gpio_enable_e810; - ice_ptp_setup_pins_e810(info); + if (ice_is_e810t(&pf->hw)) + ice_ptp_setup_pins_e810t(pf, info); + else + ice_ptp_setup_pins_e810(info); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index e1c787bd5b967b..f71ad317d6c8f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -9,12 +9,21 @@ #include "ice_ptp_hw.h" -enum ice_ptp_pin { +enum ice_ptp_pin_e810 { GPIO_20 = 0, GPIO_21, GPIO_22, GPIO_23, - NUM_ICE_PTP_PIN + NUM_PTP_PIN_E810 +}; + +enum 
ice_ptp_pin_e810t { + GNSS = 0, + SMA1, + UFL1, + SMA2, + UFL2, + NUM_PTP_PINS_E810T }; struct ice_perout_channel { @@ -155,8 +164,11 @@ struct ice_ptp { #define PPS_CLK_SRC_CHAN 2 #define PPS_PIN_INDEX 5 #define TIME_SYNC_PIN_INDEX 4 -#define E810_N_EXT_TS 3 -#define E810_N_PER_OUT 4 +#define N_EXT_TS_E810 3 +#define N_PER_OUT_E810 4 +#define N_PER_OUT_E810T 3 +#define N_PER_OUT_E810T_NO_SMA 2 +#define N_EXT_TS_E810_NO_SMA 2 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) struct ice_pf; @@ -168,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); void ice_ptp_process_ts(struct ice_pf *pf); void -ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, +ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); @@ -196,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) static inline void ice_ptp_process_ts(struct ice_pf *pf) { } static inline void -ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, +ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 3eca0e4eab0bee..29f947c0cd2e64 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { return ice_clear_phy_tstamp_e810(hw, block, idx); } + +/* E810T SMA functions + * + * The following functions operate specifically on E810T hardware and are used + * to access the extended GPIOs available. + */ + +/** + * ice_get_pca9575_handle + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found - the value will be cached in the hw structure and following calls + * will return cached value + */ +static int +ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int status; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + + /* If handle was not detected read it from the netlist */ + cmd = &desc.params.get_link_topo; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + /* Set node type to GPIO controller */ + cmd->addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. 
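+	 * The PCA9575 sits at a different topology index on the SFP and QSFP
+	 * flavors of E810-C; the lookup is not supported on other device IDs.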
*/ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -EOPNOTSUPP; + + cmd->addr.topo_params.index = idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) + return -EOPNOTSUPP; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -EOPNOTSUPP; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: pointer to data to be read from the GPIO controller + * + * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the + * PCA9575 expander, so only bits 3-7 in data are valid. + */ +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + *data = 0; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + &pin, NULL); + if (status) + break; + *data |= (u8)(!pin) << i; + } + + return status; +} + +/** + * ice_write_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: data to be written to the GPIO controller + * + * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 + * of the PCA9575 expander, so only bits 3-7 in data are valid. + */ +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + pin = !(data & (1 << i)); + status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + pin, NULL); + if (status) + break; + } + + return status; +} + +/** + * ice_is_pca9575_present + * @hw: pointer to the hw struct + * + * Check if the SW IO expander is present in the netlist + */ +bool ice_is_pca9575_present(struct ice_hw *hw) +{ + u16 handle = 0; + int status; + + if (!ice_is_e810t(hw)) + return false; + + status = ice_get_pca9575_handle(hw, &handle); + + return !status && handle; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 55a414e87018a8..b2984b5c22c113 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); +bool ice_is_pca9575_present(struct ice_hw *hw); #define PFTSYN_SEM_BYTES 4 @@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw); #define LOW_TX_MEMORY_BANK_START 0x03090000 #define HIGH_TX_MEMORY_BANK_START 0x03090004 +/* E810T SMA controller pin control */ +#define ICE_SMA1_DIR_EN_E810T BIT(4) +#define ICE_SMA1_TX_EN_E810T BIT(5) +#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3) +#define ICE_SMA2_DIR_EN_E810T BIT(6) +#define ICE_SMA2_TX_EN_E810T BIT(7) + +#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \ + ICE_SMA1_TX_EN_E810T) +#define ICE_SMA2_MASK_E810T 
(ICE_SMA2_UFL2_RX_DIS_E810T | \ + ICE_SMA2_DIR_EN_E810T | \ + ICE_SMA2_TX_EN_E810T) +#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \ + ICE_SMA2_MASK_E810T) + +#define ICE_SMA_MIN_BIT_E810T 3 +#define ICE_SMA_MAX_BIT_E810T 7 +#define ICE_PCA9575_P1_OFFSET 8 + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c new file mode 100644 index 00000000000000..af8e6ef5f57190 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_eswitch.h" +#include "ice_devlink.h" +#include "ice_virtchnl_pf.h" +#include "ice_tc_lib.h" + +/** + * ice_repr_get_sw_port_id - get port ID associated with representor + * @repr: pointer to port representor + */ +static int ice_repr_get_sw_port_id(struct ice_repr *repr) +{ + return repr->vf->pf->hw.port_info->lport; +} + +/** + * ice_repr_get_phys_port_name - get phys port name + * @netdev: pointer to port representor netdev + * @buf: write here port name + * @len: max length of buf + */ +static int +ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; + int res; + + /* Devlink port is registered and devlink core is taking care of name formatting. */ + if (repr->vf->devlink_port.devlink) + return -EOPNOTSUPP; + + res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), + repr->vf->vf_id); + if (res <= 0) + return -EOPNOTSUPP; + return 0; +} + +/** + * ice_repr_get_stats64 - get VF stats for VFPR use + * @netdev: pointer to port representor netdev + * @stats: pointer to struct where stats can be stored + */ +static void +ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_eth_stats *eth_stats; + struct ice_vsi *vsi; + + if (ice_is_vf_disabled(np->repr->vf)) + return; + vsi = np->repr->src_vsi; + + ice_update_vsi_stats(vsi); + eth_stats = &vsi->eth_stats; + + stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast + + eth_stats->tx_multicast; + stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast + + eth_stats->rx_multicast; + stats->tx_bytes = eth_stats->tx_bytes; + stats->rx_bytes = eth_stats->rx_bytes; + stats->multicast = eth_stats->rx_multicast; + stats->tx_errors = eth_stats->tx_errors; + stats->tx_dropped = eth_stats->tx_discards; + stats->rx_dropped = eth_stats->rx_discards; +} + +/** + * ice_netdev_to_repr - Get port representor for given netdevice + * @netdev: pointer to port representor netdev + */ +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return np->repr; +} + +/** + * ice_repr_open - Enable port representor's network interface + * @netdev: network interface device structure + * + * The open entry point is called when a port representor's network + * interface is made active by the system (IFF_UP). Corresponding + * VF is notified about link status change. 
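+ * The VF link state is forced up, and the representor's carrier and Tx
+ * queues are enabled.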
+ * + * Returns 0 on success + */ +static int ice_repr_open(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = true; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + +/** + * ice_repr_stop - Disable port representor's network interface + * @netdev: network interface device structure + * + * The stop entry point is called when a port representor's network + * interface is de-activated by the system. Corresponding + * VF is notified about link status change. + * + * Returns 0 on success + */ +static int ice_repr_stop(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = false; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static struct devlink_port * +ice_repr_get_devlink_port(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + return &repr->vf->devlink_port; +} + +static int +ice_repr_setup_tc_cls_flower(struct ice_repr *repr, + struct flow_cls_offload *flower) +{ + switch (flower->command) { + case FLOW_CLS_REPLACE: + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); + case FLOW_CLS_DESTROY: + return ice_del_cls_flower(repr->src_vsi, flower); + default: + return -EINVAL; + } +} + +static int +ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data; + struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_repr_setup_tc_cls_flower(np->repr, flower); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(ice_repr_block_cb_list); + +static int +ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple((struct flow_block_offload *) + type_data, + &ice_repr_block_cb_list, + ice_repr_setup_tc_block_cb, + np, np, true); + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops ice_repr_netdev_ops = { + .ndo_get_phys_port_name = ice_repr_get_phys_port_name, + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_open, + .ndo_stop = ice_repr_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + .ndo_get_devlink_port = ice_repr_get_devlink_port, + .ndo_setup_tc = ice_repr_setup_tc, +}; + +/** + * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev + * @netdev: pointer to netdev + */ +bool ice_is_port_repr_netdev(struct net_device *netdev) +{ + return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); +} + +/** + * ice_repr_reg_netdev - register port representor netdev + * @netdev: pointer to port representor netdev + */ +static int +ice_repr_reg_netdev(struct net_device *netdev) +{ + eth_hw_addr_random(netdev); + netdev->netdev_ops = &ice_repr_netdev_ops; + ice_set_ethtool_repr_ops(netdev); + + netdev->hw_features |= NETIF_F_HW_TC; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return register_netdev(netdev); +} + +/** + * ice_repr_add - add representor for VF + * @vf: pointer to VF structure + */ +static int ice_repr_add(struct ice_vf *vf) +{ + struct 
ice_q_vector *q_vector; + struct ice_netdev_priv *np; + struct ice_repr *repr; + int err; + + repr = kzalloc(sizeof(*repr), GFP_KERNEL); + if (!repr) + return -ENOMEM; + + repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); + if (!repr->netdev) { + err = -ENOMEM; + goto err_alloc; + } + + repr->src_vsi = ice_get_vf_vsi(vf); + repr->vf = vf; + vf->repr = repr; + np = netdev_priv(repr->netdev); + np->repr = repr; + + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) { + err = -ENOMEM; + goto err_alloc_q_vector; + } + repr->q_vector = q_vector; + + err = ice_devlink_create_vf_port(vf); + if (err) + goto err_devlink; + + repr->netdev->min_mtu = ETH_MIN_MTU; + repr->netdev->max_mtu = ICE_MAX_MTU; + + err = ice_repr_reg_netdev(repr->netdev); + if (err) + goto err_netdev; + + devlink_port_type_eth_set(&vf->devlink_port, repr->netdev); + + return 0; + +err_netdev: + ice_devlink_destroy_vf_port(vf); +err_devlink: + kfree(repr->q_vector); + vf->repr->q_vector = NULL; +err_alloc_q_vector: + free_netdev(repr->netdev); + repr->netdev = NULL; +err_alloc: + kfree(repr); + vf->repr = NULL; + return err; +} + +/** + * ice_repr_rem - remove representor from VF + * @vf: pointer to VF structure + */ +static void ice_repr_rem(struct ice_vf *vf) +{ + ice_devlink_destroy_vf_port(vf); + kfree(vf->repr->q_vector); + vf->repr->q_vector = NULL; + unregister_netdev(vf->repr->netdev); + free_netdev(vf->repr->netdev); + vf->repr->netdev = NULL; + kfree(vf->repr); + vf->repr = NULL; +} + +/** + * ice_repr_add_for_all_vfs - add port representor for all VFs + * @pf: pointer to PF structure + */ +int ice_repr_add_for_all_vfs(struct ice_pf *pf) +{ + int err; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + err = ice_repr_add(vf); + if (err) + goto err; + + ice_vc_change_ops_to_repr(&vf->vc_ops); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } + + return err; +} + +/** + * ice_repr_rem_from_all_vfs - remove port representor for all VFs + * @pf: pointer to PF structure + */ +void ice_repr_rem_from_all_vfs(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } +} + +/** + * ice_repr_start_tx_queues - start Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_start_tx_queues(struct ice_repr *repr) +{ + netif_carrier_on(repr->netdev); + netif_tx_start_all_queues(repr->netdev); +} + +/** + * ice_repr_stop_tx_queues - stop Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_stop_tx_queues(struct ice_repr *repr) +{ + netif_carrier_off(repr->netdev); + netif_tx_stop_all_queues(repr->netdev); +} + +/** + * ice_repr_set_traffic_vsi - set traffic VSI for port representor + * @repr: repr on with VSI will be set + * @vsi: pointer to VSI that will be used by port representor to pass traffic + */ +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) +{ + struct ice_netdev_priv *np = netdev_priv(repr->netdev); + + np->vsi = vsi; +} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h new file mode 100644 index 00000000000000..806de22933c6ca --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. 
*/ + +#ifndef _ICE_REPR_H_ +#define _ICE_REPR_H_ + +#include +#include "ice.h" + +struct ice_repr { + struct ice_vsi *src_vsi; + struct ice_vf *vf; + struct ice_q_vector *q_vector; + struct net_device *netdev; + struct metadata_dst *dst; +}; + +int ice_repr_add_for_all_vfs(struct ice_pf *pf); +void ice_repr_rem_from_all_vfs(struct ice_pf *pf); + +void ice_repr_start_tx_queues(struct ice_repr *repr); +void ice_repr_stop_tx_queues(struct ice_repr *repr); + +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); + +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +bool ice_is_port_repr_netdev(struct net_device *netdev); +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2d9b10277186b6..ce3c7bded4cbee 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -3011,6 +3011,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) } } +/** + * ice_sched_save_vsi_bw - save VSI node's BW information + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save BW information of VSI type node for post replay use. + */ +static int +ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + struct ice_vsi_ctx *vsi_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return -EINVAL; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return -EINVAL; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + default: + return -EINVAL; + } + return 0; +} + /** * ice_sched_calc_wakeup - calculate RL profile wakeup parameter * @hw: pointer to the HW struct @@ -3783,6 +3820,153 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, ICE_SCHED_DFLT_BW); } +/** + * ice_sched_get_node_by_id_type - get node from ID type + * @pi: port information structure + * @id: identifier + * @agg_type: type of aggregator + * @tc: traffic class + * + * This function returns node identified by ID of type aggregator, and + * based on traffic class (TC). This function needs to be called with + * the scheduler lock held. 
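+ * Returns NULL if no node matching the given ID and type is found.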
+ */ +static struct ice_sched_node * +ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc) +{ + struct ice_sched_node *node = NULL; + + switch (agg_type) { + case ICE_AGG_TYPE_VSI: { + struct ice_vsi_ctx *vsi_ctx; + u16 vsi_handle = (u16)id; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + break; + /* Get sched_vsi_info */ + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + break; + node = vsi_ctx->sched.vsi_node[tc]; + break; + } + + case ICE_AGG_TYPE_AGG: { + struct ice_sched_node *tc_node; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (tc_node) + node = ice_sched_get_agg_node(pi, tc_node, id); + break; + } + + default: + break; + } + + return node; +} + +/** + * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC + * @pi: port information structure + * @id: ID (software VSI handle or AGG ID) + * @agg_type: aggregator type (VSI or AGG type node) + * @tc: traffic class + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function sets BW limit of VSI or Aggregator scheduling node + * based on TC information from passed in argument BW. + */ +static enum ice_status +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *node; + + if (!pi) + return status; + + if (rl_type == ICE_UNKNOWN_BW) + return status; + + mutex_lock(&pi->sched_lock); + node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); + if (!node) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); + goto exit_set_node_bw_lmt_per_tc; + } + if (bw == ICE_SCHED_DFLT_BW) + status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); + +exit_set_node_bw_lmt_per_tc: + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: traffic class + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function configures BW limit of VSI scheduling node based on TC + * information. + */ +enum ice_status +ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + int status; + + status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, + ICE_AGG_TYPE_VSI, + tc, rl_type, bw); + if (!status) { + mutex_lock(&pi->sched_lock); + status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); + mutex_unlock(&pi->sched_lock); + } + return status; +} + +/** + * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: traffic class + * @rl_type: min or max + * + * This function configures default BW limit of VSI scheduling node based on TC + * information. 
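+ * The saved per-TC BW value is also reset to ICE_SCHED_DFLT_BW so that a
+ * later replay restores the default limit.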
+ */ +enum ice_status +ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type) +{ + int status; + + status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, + ICE_AGG_TYPE_VSI, + tc, rl_type, + ICE_SCHED_DFLT_BW); + if (!status) { + mutex_lock(&pi->sched_lock); + status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, + ICE_SCHED_DFLT_BW); + mutex_unlock(&pi->sched_lock); + } + return status; +} + /** * ice_cfg_rl_burst_size - Set burst size value * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index fdf7a5882f076a..6bddcbecaf5e2e 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -58,6 +58,8 @@ struct ice_sched_agg_info { DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); u32 agg_id; enum ice_agg_type agg_type; + /* bw_t_info saves aggregator BW information */ + struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS]; /* save aggregator TC bitmap */ DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS); }; @@ -104,6 +106,12 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_status ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type); +enum ice_status +ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type); enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 3b6c1420aa7bea..793f4a9fc2cdb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -8,6 +8,7 @@ #define ICE_ETH_ETHTYPE_OFFSET 12 #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF +#define ICE_IPV6_ETHER_ID 0x86DD /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem * struct to configure any switch filter rules. 
@@ -29,6 +30,476 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; +struct ice_dummy_pkt_offsets { + enum ice_protocol_type type; + u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ +}; + +static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_IPV4_IL, 56 }, + { ICE_TCP_IL, 76 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_IPV4_IL, 56 }, + { ICE_UDP_ILOS, 76 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */ + 0x00, 0x08, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_IPV4_IL, 64 }, + { ICE_TCP_IL, 84 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x46, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */ + 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_IPV4_IL, 64 }, + { ICE_UDP_ILOS, 84 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x3a, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */ + 0x00, 0x08, 0x00, 0x00, +}; + +/* offset info for MAC + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + UDP */ +static const u8 dummy_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_UDP_ILOS, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:UDP dummy packet */ +static const u8 dummy_vlan_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_TCP_IL, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + TCP */ +static const u8 dummy_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 
0x00, 0x00, /* ICE_TCP_IL 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_TCP_IL, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:TCP dummy packet */ +static const u8 dummy_vlan_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_TCP_IL, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + TCP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_TCP_IL, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + TCP dummy packet */ +static const u8 dummy_vlan_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* IPv6 + UDP dummy packet */ +static const u8 dummy_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* 
ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */ + 0x00, 0x10, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_UDP_ILOS, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + UDP dummy packet */ +static const u8 dummy_vlan_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */ + + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ (DUMMY_ETH_HDR_LEN * \ @@ -42,6 +513,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) +/* this is a recipe to profile association bitmap */ +static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES], + ICE_MAX_NUM_PROFILES); + +/* this is a profile to recipe association bitmap */ +static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES], + ICE_MAX_NUM_RECIPES); + /** * ice_init_def_sw_recp - initialize the recipe book keeping tables * @hw: pointer to the HW struct @@ -59,10 +538,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) if (!recps) return ICE_ERR_NO_MEMORY; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); + INIT_LIST_HEAD(&recps[i].rg_list); mutex_init(&recps[i].filt_rule_lock); } @@ -518,7 +998,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -static enum ice_status +enum ice_status ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { @@ -543,6 +1023,360 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, return status; } +/** + * ice_aq_add_recipe - add switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: number of switch recipes in the list + * @cd: pointer to command details structure or NULL + * + * Add(0x0290) + */ +static enum ice_status +ice_aq_add_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 num_recipes, struct ice_sq_cd *cd) +{ + 
struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + u16 buf_size; + + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe); + + cmd->num_sub_recipes = cpu_to_le16(num_recipes); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + buf_size = num_recipes * sizeof(*s_recipe_list); + + return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); +} + +/** + * ice_aq_get_recipe - get switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: pointer to the number of recipes (input and output) + * @recipe_root: root recipe number of recipe(s) to retrieve + * @cd: pointer to command details structure or NULL + * + * Get(0x0292) + * + * On input, *num_recipes should equal the number of entries in s_recipe_list. + * On output, *num_recipes will equal the number of entries returned in + * s_recipe_list. + * + * The caller must supply enough space in s_recipe_list to hold all possible + * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. + */ +static enum ice_status +ice_aq_get_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 buf_size; + + if (*num_recipes != ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); + + cmd->return_index = cpu_to_le16(recipe_root); + cmd->num_sub_recipes = 0; + + buf_size = *num_recipes * sizeof(*s_recipe_list); + + status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); + *num_recipes = le16_to_cpu(cmd->num_sub_recipes); + + return status; +} + +/** + * ice_aq_map_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Recipe to profile association (0x0291) + */ +static enum ice_status +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile); + cmd->profile_id = cpu_to_le16(profile_id); + /* Set the recipe ID bit in the bitmask to let the device know which + * profile we are associating the recipe to + */ + memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Associate profile ID with given recipe (0x0293) + */ +static enum ice_status +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); + cmd->profile_id = cpu_to_le16(profile_id); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if 
(!status) + memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc)); + + return status; +} + +/** + * ice_alloc_recipe - add recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID returned as response to AQ call + */ +static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + enum ice_status status; + u16 buf_len; + + buf_len = struct_size(sw_buf, elem, 1); + sw_buf = kzalloc(buf_len, GFP_KERNEL); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + sw_buf->num_elems = cpu_to_le16(1); + sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << + ICE_AQC_RES_TYPE_S) | + ICE_AQC_RES_TYPE_FLAG_SHARED); + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + kfree(sw_buf); + + return status; +} + +/** + * ice_get_recp_to_prof_map - updates recipe to profile mapping + * @hw: pointer to hardware structure + * + * This function is used to populate recipe_to_profile matrix where index to + * this array is the recipe ID and the element is the mapping of which profiles + * is this recipe mapped to. + */ +static void ice_get_recp_to_prof_map(struct ice_hw *hw) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 i; + + for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) { + u16 j; + + bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); + bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES); + if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + continue; + bitmap_copy(profile_to_recipe[i], r_bitmap, + ICE_MAX_NUM_RECIPES); + for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES) + set_bit(i, recipe_to_profile[j]); + } +} + +/** + * ice_collect_result_idx - copy result index values + * @buf: buffer that contains the result index + * @recp: the recipe struct to copy data into + */ +static void +ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, + struct ice_sw_recipe *recp) +{ + if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, + recp->res_idxs); +} + +/** + * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries + * @hw: pointer to hardware structure + * @recps: struct that we need to populate + * @rid: recipe ID that we are populating + * @refresh_required: true if we should get recipe to profile mapping from FW + * + * This function is used to populate all the necessary entries into our + * bookkeeping so that we have a current list of all the recipes that are + * programmed in the firmware. + */ +static enum ice_status +ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, + bool *refresh_required) +{ + DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + u16 num_recps = ICE_MAX_NUM_RECIPES; + struct ice_prot_lkup_ext *lkup_exts; + enum ice_status status; + u8 fv_word_idx = 0; + u16 sub_recps; + + bitmap_zero(result_bm, ICE_MAX_FV_WORDS); + + /* we need a buffer big enough to accommodate all the recipes */ + tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp[0].recipe_indx = rid; + status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); + /* non-zero status meaning recipe doesn't exist */ + if (status) + goto err_unroll; + + /* Get recipe to profile map so that we can get the fv from lkups that + * we read for a recipe from FW. 
Since we want to minimize the number of + * times we make this FW call, just make one call and cache the copy + * until a new recipe is added. This operation is only required the + * first time to get the changes from FW. Then to search existing + * entries we don't need to update the cache again until another recipe + * gets added. + */ + if (*refresh_required) { + ice_get_recp_to_prof_map(hw); + *refresh_required = false; + } + + /* Start populating all the entries for recps[rid] based on lkups from + * firmware. Note that we are only creating the root recipe in our + * database. + */ + lkup_exts = &recps[rid].lkup_exts; + + for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { + struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; + struct ice_recp_grp_entry *rg_entry; + u8 i, prof, idx, prot = 0; + bool is_root; + u16 off = 0; + + rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), + GFP_KERNEL); + if (!rg_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + + idx = root_bufs.recipe_indx; + is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; + + /* Mark all result indices in this chain */ + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, + result_bm); + + /* get the first profile that is associated with rid */ + prof = find_first_bit(recipe_to_profile[idx], + ICE_MAX_NUM_PROFILES); + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; + + rg_entry->fv_idx[i] = lkup_indx; + rg_entry->fv_mask[i] = + le16_to_cpu(root_bufs.content.mask[i + 1]); + + /* If the recipe is a chained recipe then all its + * child recipe's result will have a result index. + * To fill fv_words we should not use those result + * index, we only need the protocol ids and offsets. + * We will skip all the fv_idx which stores result + * index in them. We also need to skip any fv_idx which + * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a + * valid offset value. 
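+ * The remaining indexes are real extraction sequence entries; their + * protocol ID and offset are resolved below via ice_find_prot_off() and + * copied into lkup_exts->fv_words.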
+ */ + if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || + rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || + rg_entry->fv_idx[i] == 0) + continue; + + ice_find_prot_off(hw, ICE_BLK_SW, prof, + rg_entry->fv_idx[i], &prot, &off); + lkup_exts->fv_words[fv_word_idx].prot_id = prot; + lkup_exts->fv_words[fv_word_idx].off = off; + lkup_exts->field_mask[fv_word_idx] = + rg_entry->fv_mask[i]; + fv_word_idx++; + } + /* populate rg_list with the data from the child entry of this + * recipe + */ + list_add(&rg_entry->l_entry, &recps[rid].rg_list); + + /* Propagate some data to the recipe database */ + recps[idx].is_root = !!is_root; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { + recps[idx].chain_idx = root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + set_bit(recps[idx].chain_idx, recps[idx].res_idxs); + } else { + recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + } + + if (!is_root) + continue; + + /* Only do the following for root recipes entries */ + memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, + sizeof(recps[idx].r_bitmap)); + recps[idx].root_rid = root_bufs.content.rid & + ~ICE_AQ_RECIPE_ID_IS_ROOT; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + } + + /* Complete initialization of the root recipe entry */ + lkup_exts->n_val_words = fv_word_idx; + recps[rid].big_recp = (num_recps > 1); + recps[rid].n_grp_count = (u8)num_recps; + recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, + recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), + GFP_KERNEL); + if (!recps[rid].root_buf) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + + /* Copy result indexes */ + bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); + recps[rid].recp_created = true; + +err_unroll: + kfree(tmp); + return status; +} + /* ice_init_port_info - Initialize port_info with switch configuration data * @pi: pointer to port_info * @vsi_port_num: VSI number or port number @@ -1626,6 +2460,125 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, return status; } +/** + * ice_mac_fltr_exist - does this MAC filter exist for given VSI + * @hw: pointer to the hardware structure + * @mac: MAC address to be checked (for MAC filter) + * @vsi_handle: check MAC filter for this VSI + */ +bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle) +{ + struct ice_fltr_mgmt_list_entry *entry; + struct list_head *rule_head; + struct ice_switch_info *sw; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + u16 hw_vsi_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return false; + + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + sw = hw->switch_info; + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; + if (!rule_head) + return false; + + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; + mutex_lock(rule_lock); + list_for_each_entry(entry, rule_head, list_entry) { + struct ice_fltr_info *f_info = &entry->fltr_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; + + if (is_zero_ether_addr(mac_addr)) + continue; + + if (f_info->flag != ICE_FLTR_TX || + f_info->src_id != ICE_SRC_ID_VSI || + f_info->lkup_type != ICE_SW_LKUP_MAC || + f_info->fltr_act != ICE_FWD_TO_VSI || + hw_vsi_id != f_info->fwd_id.hw_vsi_id) + continue; + + if (ether_addr_equal(mac, mac_addr)) { + mutex_unlock(rule_lock); + return true; + } + } + mutex_unlock(rule_lock); + return false; +} + +/** + * 
ice_vlan_fltr_exist - does this VLAN filter exist for given VSI + * @hw: pointer to the hardware structure + * @vlan_id: VLAN ID + * @vsi_handle: check VLAN filter for this VSI + */ +bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) +{ + struct ice_fltr_mgmt_list_entry *entry; + struct list_head *rule_head; + struct ice_switch_info *sw; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + u16 hw_vsi_id; + + if (vlan_id > ICE_MAX_VLAN_ID) + return false; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return false; + + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + sw = hw->switch_info; + rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; + if (!rule_head) + return false; + + rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + mutex_lock(rule_lock); + list_for_each_entry(entry, rule_head, list_entry) { + struct ice_fltr_info *f_info = &entry->fltr_info; + u16 entry_vlan_id = f_info->l_data.vlan.vlan_id; + struct ice_vsi_list_map_info *map_info; + + if (entry_vlan_id > ICE_MAX_VLAN_ID) + continue; + + if (f_info->flag != ICE_FLTR_TX || + f_info->src_id != ICE_SRC_ID_VSI || + f_info->lkup_type != ICE_SW_LKUP_VLAN) + continue; + + /* The only allowed filter actions are FWD_TO_VSI/_VSI_LIST */ + if (f_info->fltr_act != ICE_FWD_TO_VSI && + f_info->fltr_act != ICE_FWD_TO_VSI_LIST) + continue; + + if (f_info->fltr_act == ICE_FWD_TO_VSI) { + if (hw_vsi_id != f_info->fwd_id.hw_vsi_id) + continue; + } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { + /* If filter_action is FWD_TO_VSI_LIST, make sure + * that VSI being checked is part of VSI list + */ + if (entry->vsi_count == 1 && + entry->vsi_list_info) { + map_info = entry->vsi_list_info; + if (!test_bit(vsi_handle, map_info->vsi_map)) + continue; + } + } + + if (vlan_id == entry_vlan_id) { + mutex_unlock(rule_lock); + return true; + } + } + mutex_unlock(rule_lock); + + return false; +} + /** * ice_add_mac - Add a MAC address based filter rule * @hw: pointer to the hardware structure @@ -2036,6 +2989,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) } } +/** + * ice_rem_adv_rule_info + * @hw: pointer to the hardware structure + * @rule_head: pointer to the switch list structure that we want to delete + */ +static void +ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) +{ + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + if (list_empty(rule_head)) + return; + + list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } +} + /** * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @hw: pointer to the hardware structure @@ -2773,43 +3747,1658 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, return status; } +/* This is a mapping table that maps every word within a given protocol + * structure to its real byte offset as per the specification of that + * protocol header. + * For example, the dst address in the Ethernet header is 3 words whose + * byte offsets are 0, 2 and 4, and the src address follows at 6, 8 and 10. + * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should + * have a matching entry describing its field. This needs to be updated if a + * new structure is added to that union.
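+ * The offsets listed in each entry below are byte offsets of successive + * 16-bit words within that protocol header; ice_fill_valid_words() uses + * them to turn a caller's mask into protocol/offset pairs.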
+ */ +static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_ETYPE_OL, { 0 } }, + { ICE_VLAN_OFOS, { 2, 0 } }, + { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_TCP_IL, { 0, 2 } }, + { ICE_UDP_OF, { 0, 2 } }, + { ICE_UDP_ILOS, { 0, 2 } }, + { ICE_VXLAN, { 8, 10, 12, 14 } }, + { ICE_GENEVE, { 8, 10, 12, 14 } }, + { ICE_NVGRE, { 0, 2, 4, 6 } }, +}; + +static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, + { ICE_MAC_IL, ICE_MAC_IL_HW }, + { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, + { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, + { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, + { ICE_IPV4_IL, ICE_IPV4_IL_HW }, + { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW }, + { ICE_IPV6_IL, ICE_IPV6_IL_HW }, + { ICE_TCP_IL, ICE_TCP_IL_HW }, + { ICE_UDP_OF, ICE_UDP_OF_HW }, + { ICE_UDP_ILOS, ICE_UDP_ILOS_HW }, + { ICE_VXLAN, ICE_UDP_OF_HW }, + { ICE_GENEVE, ICE_UDP_OF_HW }, + { ICE_NVGRE, ICE_GRE_OF_HW }, +}; + /** - * ice_replay_vsi_fltr - Replay filters for requested VSI + * ice_find_recp - find a recipe * @hw: pointer to the hardware structure - * @vsi_handle: driver VSI handle - * @recp_id: Recipe ID for which rules need to be replayed - * @list_head: list for which filters need to be replayed + * @lkup_exts: extension sequence to match * - * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. - * It is required to pass valid VSI handle. + * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static enum ice_status -ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, - struct list_head *list_head) +static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) { - struct ice_fltr_mgmt_list_entry *itr; - enum ice_status status = 0; - u16 hw_vsi_id; - - if (list_empty(list_head)) - return status; - hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + bool refresh_required = true; + struct ice_sw_recipe *recp; + u8 i; - list_for_each_entry(itr, list_head, list_entry) { - struct ice_fltr_list_entry f_entry; + /* Walk through existing recipes to find a match */ + recp = hw->switch_info->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + /* If recipe was not created for this ID, in SW bookkeeping, + * check if FW has an entry for this recipe. If the FW has an + * entry update it in our SW bookkeeping and continue with the + * matching. 
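+ * The recipe-to-profile map itself is refreshed at most once per + * ice_find_recp() call, via the refresh_required flag passed down to + * ice_get_recp_frm_fw().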
+ */ + if (!recp[i].recp_created) + if (ice_get_recp_frm_fw(hw, + hw->switch_info->recp_list, i, + &refresh_required)) + continue; - f_entry.fltr_info = itr->fltr_info; - if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && - itr->fltr_info.vsi_handle == vsi_handle) { - /* update the src in case it is VSI num */ - if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) - f_entry.fltr_info.src = hw_vsi_id; - status = ice_add_rule_internal(hw, recp_id, &f_entry); - if (status) - goto end; + /* Skip inverse action recipes */ + if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & + ICE_AQ_RECIPE_ACT_INV_ACT) continue; - } - if (!itr->vsi_list_info || + + /* if number of words we are looking for match */ + if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { + struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; + struct ice_fv_word *be = lkup_exts->fv_words; + u16 *cr = recp[i].lkup_exts.field_mask; + u16 *de = lkup_exts->field_mask; + bool found = true; + u8 pe, qr; + + /* ar, cr, and qr are related to the recipe words, while + * be, de, and pe are related to the lookup words + */ + for (pe = 0; pe < lkup_exts->n_val_words; pe++) { + for (qr = 0; qr < recp[i].lkup_exts.n_val_words; + qr++) { + if (ar[qr].off == be[pe].off && + ar[qr].prot_id == be[pe].prot_id && + cr[qr] == de[pe]) + /* Found the "pe"th word in the + * given recipe + */ + break; + } + /* After walking through all the words in the + * "i"th recipe if "p"th word was not found then + * this recipe is not what we are looking for. + * So break out from this loop and try the next + * recipe + */ + if (qr >= recp[i].lkup_exts.n_val_words) { + found = false; + break; + } + } + /* If for "i"th recipe the found was never set to false + * then it means we found our match + */ + if (found) + return i; /* Return the recipe ID */ + } + } + return ICE_MAX_NUM_RECIPES; +} + +/** + * ice_prot_type_to_id - get protocol ID from protocol type + * @type: protocol type + * @id: pointer to variable that will receive the ID + * + * Returns true if found, false otherwise + */ +static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id) +{ + u8 i; + + for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++) + if (ice_prot_id_tbl[i].type == type) { + *id = ice_prot_id_tbl[i].protocol_id; + return true; + } + return false; +} + +/** + * ice_fill_valid_words - count valid words + * @rule: advanced rule with lookup information + * @lkup_exts: byte offset extractions of the words that are valid + * + * calculate valid words in a lookup rule using mask value + */ +static u8 +ice_fill_valid_words(struct ice_adv_lkup_elem *rule, + struct ice_prot_lkup_ext *lkup_exts) +{ + u8 j, word, prot_id, ret_val; + + if (!ice_prot_type_to_id(rule->type, &prot_id)) + return 0; + + word = lkup_exts->n_val_words; + + for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++) + if (((u16 *)&rule->m_u)[j] && + rule->type < ARRAY_SIZE(ice_prot_ext)) { + /* No more space to accommodate */ + if (word >= ICE_MAX_CHAIN_WORDS) + return 0; + lkup_exts->fv_words[word].off = + ice_prot_ext[rule->type].offs[j]; + lkup_exts->fv_words[word].prot_id = + ice_prot_id_tbl[rule->type].protocol_id; + lkup_exts->field_mask[word] = + be16_to_cpu(((__force __be16 *)&rule->m_u)[j]); + word++; + } + + ret_val = word - lkup_exts->n_val_words; + lkup_exts->n_val_words = word; + + return ret_val; +} + +/** + * ice_create_first_fit_recp_def - Create a recipe grouping + * @hw: pointer to the hardware structure + * @lkup_exts: an array of protocol header extractions + * @rg_list: pointer to a 
list that stores new recipe groups + * @recp_cnt: pointer to a variable that stores returned number of recipe groups + * + * Using first fit algorithm, take all the words that are still not done + * and start grouping them in 4-word groups. Each group makes up one + * recipe. + */ +static enum ice_status +ice_create_first_fit_recp_def(struct ice_hw *hw, + struct ice_prot_lkup_ext *lkup_exts, + struct list_head *rg_list, + u8 *recp_cnt) +{ + struct ice_pref_recipe_group *grp = NULL; + u8 j; + + *recp_cnt = 0; + + /* Walk through every word in the rule to check if it is not done. If so + * then this word needs to be part of a new recipe. + */ + for (j = 0; j < lkup_exts->n_val_words; j++) + if (!test_bit(j, lkup_exts->done)) { + if (!grp || + grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { + struct ice_recp_grp_entry *entry; + + entry = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*entry), + GFP_KERNEL); + if (!entry) + return ICE_ERR_NO_MEMORY; + list_add(&entry->l_entry, rg_list); + grp = &entry->r_group; + (*recp_cnt)++; + } + + grp->pairs[grp->n_val_pairs].prot_id = + lkup_exts->fv_words[j].prot_id; + grp->pairs[grp->n_val_pairs].off = + lkup_exts->fv_words[j].off; + grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; + grp->n_val_pairs++; + } + + return 0; +} + +/** + * ice_fill_fv_word_index - fill in the field vector indices for a recipe group + * @hw: pointer to the hardware structure + * @fv_list: field vector with the extraction sequence information + * @rg_list: recipe groupings with protocol-offset pairs + * + * Helper function to fill in the field vector indices for protocol-offset + * pairs. These indexes are then ultimately programmed into a recipe. + */ +static enum ice_status +ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, + struct list_head *rg_list) +{ + struct ice_sw_fv_list_entry *fv; + struct ice_recp_grp_entry *rg; + struct ice_fv_word *fv_ext; + + if (list_empty(fv_list)) + return 0; + + fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + list_entry); + fv_ext = fv->fv_ptr->ew; + + list_for_each_entry(rg, rg_list, l_entry) { + u8 i; + + for (i = 0; i < rg->r_group.n_val_pairs; i++) { + struct ice_fv_word *pr; + bool found = false; + u16 mask; + u8 j; + + pr = &rg->r_group.pairs[i]; + mask = rg->r_group.mask[i]; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv_ext[j].prot_id == pr->prot_id && + fv_ext[j].off == pr->off) { + found = true; + + /* Store index of field vector */ + rg->fv_idx[i] = j; + rg->fv_mask[i] = mask; + break; + } + + /* Protocol/offset could not be found, caller gave an + * invalid pair + */ + if (!found) + return ICE_ERR_PARAM; + } + } + + return 0; +} + +/** + * ice_find_free_recp_res_idx - find free result indexes for recipe + * @hw: pointer to hardware structure + * @profiles: bitmap of profiles that will be associated with the new recipe + * @free_idx: pointer to variable to receive the free index bitmap + * + * The algorithm used here is: + * 1. When creating a new recipe, create a set P which contains all + * Profiles that will be associated with our new recipe + * + * 2. For each Profile p in set P: + * a. Add all recipes associated with Profile p into set R + * b. Optional : PossibleIndexes &= profile[p].possibleIndexes + * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] + * i. Or just assume they all have the same possible indexes: + * 44, 45, 46, 47 + * i.e., PossibleIndexes = 0x0000F00000000000 + * + * 3. For each Recipe r in set R: + * a. 
UsedIndexes |= (bitwise or ) recipe[r].res_indexes + * b. FreeIndexes = UsedIndexes ^ PossibleIndexes + * + * FreeIndexes will contain the bits indicating the indexes free for use, + * then the code needs to update the recipe[r].used_result_idx_bits to + * indicate which indexes were selected for use by this recipe. + */ +static u16 +ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, + unsigned long *free_idx) +{ + DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS); + DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES); + DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS); + u16 bit; + + bitmap_zero(possible_idx, ICE_MAX_FV_WORDS); + bitmap_zero(recipes, ICE_MAX_NUM_RECIPES); + bitmap_zero(used_idx, ICE_MAX_FV_WORDS); + bitmap_zero(free_idx, ICE_MAX_FV_WORDS); + + bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS); + + /* For each profile we are going to associate the recipe with, add the + * recipes that are associated with that profile. This will give us + * the set of recipes that our recipe may collide with. Also, determine + * what possible result indexes are usable given this set of profiles. + */ + for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) { + bitmap_or(recipes, recipes, profile_to_recipe[bit], + ICE_MAX_NUM_RECIPES); + bitmap_and(possible_idx, possible_idx, + hw->switch_info->prof_res_bm[bit], + ICE_MAX_FV_WORDS); + } + + /* For each recipe that our new recipe may collide with, determine + * which indexes have been used. + */ + for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES) + bitmap_or(used_idx, used_idx, + hw->switch_info->recp_list[bit].res_idxs, + ICE_MAX_FV_WORDS); + + bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ + return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS); +} + +/** + * ice_add_sw_recipe - function to call AQ calls to create switch recipe + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @profiles: bitmap of profiles that will be associated. + */ +static enum ice_status +ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + unsigned long *profiles) +{ + DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + struct ice_aqc_recipe_data_elem *buf; + struct ice_recp_grp_entry *entry; + enum ice_status status; + u16 free_res_idx; + u16 recipe_count; + u8 chain_idx; + u8 recps = 0; + + /* When more than one recipe are required, another recipe is needed to + * chain them together. Matching a tunnel metadata ID takes up one of + * the match fields in the chaining recipe reducing the number of + * chained recipes by one. 
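+ * To account for the extra chaining recipe, n_grp_count is incremented + * below whenever more than one recipe group is needed.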
+ */ + /* check number of free result indices */ + bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); + + ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", + free_res_idx, rm->n_grp_count); + + if (rm->n_grp_count > 1) { + if (rm->n_grp_count > free_res_idx) + return ICE_ERR_MAX_LIMIT; + + rm->n_grp_count++; + } + + if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) + return ICE_ERR_MAX_LIMIT; + + tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), + GFP_KERNEL); + if (!buf) { + status = ICE_ERR_NO_MEMORY; + goto err_mem; + } + + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); + recipe_count = ICE_MAX_NUM_RECIPES; + status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, + NULL); + if (status || recipe_count == 0) + goto err_unroll; + + /* Allocate the recipe resources, and configure them according to the + * match fields from protocol headers and extracted field vectors. + */ + chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + list_for_each_entry(entry, &rm->rg_list, l_entry) { + u8 i; + + status = ice_alloc_recipe(hw, &entry->rid); + if (status) + goto err_unroll; + + /* Clear the result index of the located recipe, as this will be + * updated, if needed, later in the recipe creation process. + */ + tmp[0].content.result_indx = 0; + + buf[recps] = tmp[0]; + buf[recps].recipe_indx = (u8)entry->rid; + /* if the recipe is a non-root recipe RID should be programmed + * as 0 for the rules to be applied correctly. + */ + buf[recps].content.rid = 0; + memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx)); + + /* All recipes use look-up index 0 to match switch ID. */ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask + * to be 0 + */ + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = 0x80; + buf[recps].content.mask[i] = 0; + } + + for (i = 0; i < entry->r_group.n_val_pairs; i++) { + buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; + buf[recps].content.mask[i + 1] = + cpu_to_le16(entry->fv_mask[i]); + } + + if (rm->n_grp_count > 1) { + /* Checks to see if there really is a valid result index + * that can be used. 
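+ * chain_idx walks the free result index bitmap computed by + * ice_find_free_recp_res_idx(); once it reaches ICE_MAX_FV_WORDS there are + * no free result slots left.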
+ */ + if (chain_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); + status = ICE_ERR_MAX_LIMIT; + goto err_unroll; + } + + entry->chain_idx = chain_idx; + buf[recps].content.result_indx = + ICE_AQ_RECIPE_RESULT_EN | + ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & + ICE_AQ_RECIPE_RESULT_DATA_M); + clear_bit(chain_idx, result_idx_bm); + chain_idx = find_first_bit(result_idx_bm, + ICE_MAX_FV_WORDS); + } + + /* fill recipe dependencies */ + bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, + ICE_MAX_NUM_RECIPES); + set_bit(buf[recps].recipe_indx, + (unsigned long *)buf[recps].recipe_bitmap); + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + recps++; + } + + if (rm->n_grp_count == 1) { + rm->root_rid = buf[0].recipe_indx; + set_bit(buf[0].recipe_indx, rm->r_bitmap); + buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; + if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { + memcpy(buf[0].recipe_bitmap, rm->r_bitmap, + sizeof(buf[0].recipe_bitmap)); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + /* Applicable only for a ROOT recipe: set the fwd_priority of + * the recipe being created if the user specified one. An + * advanced switch filter that results in a new extraction + * sequence usually ends up creating a new recipe of type ROOT, + * and recipes are associated with profiles. A switch rule that + * refers to the newly created recipe needs either a 'fwd' or a + * 'join' priority, otherwise switch rule evaluation will not + * happen correctly. In other words, if a switch rule is to be + * evaluated on a priority basis, the recipe needs a priority; + * otherwise it is evaluated last. + */ + buf[0].content.act_ctrl_fwd_priority = rm->priority; + } else { + struct ice_recp_grp_entry *last_chain_entry; + u16 rid, i; + + /* Allocate the last recipe that will chain the outcomes of the + * other recipes together + */ + status = ice_alloc_recipe(hw, &rid); + if (status) + goto err_unroll; + + buf[recps].recipe_indx = (u8)rid; + buf[recps].content.rid = (u8)rid; + buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; + /* the new entry created should also be part of rg_list to + * make sure we have a complete recipe + */ + last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*last_chain_entry), + GFP_KERNEL); + if (!last_chain_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + last_chain_entry->rid = rid; + memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx)); + /* All recipes use look-up index 0 to match switch ID. 
*/ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = + ICE_AQ_RECIPE_LKUP_IGNORE; + buf[recps].content.mask[i] = 0; + } + + i = 1; + /* update r_bitmap with the recp that is used for chaining */ + set_bit(rid, rm->r_bitmap); + /* this is the recipe that chains all the other recipes so it + * should not have a chaining ID to indicate the same + */ + last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; + list_for_each_entry(entry, &rm->rg_list, l_entry) { + last_chain_entry->fv_idx[i] = entry->chain_idx; + buf[recps].content.lkup_indx[i] = entry->chain_idx; + buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); + set_bit(entry->rid, rm->r_bitmap); + } + list_add(&last_chain_entry->l_entry, &rm->rg_list); + if (sizeof(buf[recps].recipe_bitmap) >= + sizeof(rm->r_bitmap)) { + memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, + sizeof(buf[recps].recipe_bitmap)); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + + recps++; + rm->root_rid = (u8)rid; + } + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); + ice_release_change_lock(hw); + if (status) + goto err_unroll; + + /* Every recipe that just got created add it to the recipe + * book keeping list + */ + list_for_each_entry(entry, &rm->rg_list, l_entry) { + struct ice_switch_info *sw = hw->switch_info; + bool is_root, idx_found = false; + struct ice_sw_recipe *recp; + u16 idx, buf_idx = 0; + + /* find buffer index for copying some data */ + for (idx = 0; idx < rm->n_grp_count; idx++) + if (buf[idx].recipe_indx == entry->rid) { + buf_idx = idx; + idx_found = true; + } + + if (!idx_found) { + status = ICE_ERR_OUT_OF_RANGE; + goto err_unroll; + } + + recp = &sw->recp_list[entry->rid]; + is_root = (rm->root_rid == entry->rid); + recp->is_root = is_root; + + recp->root_rid = entry->rid; + recp->big_recp = (is_root && rm->n_grp_count > 1); + + memcpy(&recp->ext_words, entry->r_group.pairs, + entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); + + memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, + sizeof(recp->r_bitmap)); + + /* Copy non-result fv index values and masks to recipe. This + * call will also update the result recipe bitmask. 
+ */ + ice_collect_result_idx(&buf[buf_idx], recp); + + /* for non-root recipes, also copy to the root, this allows + * easier matching of a complete chained recipe + */ + if (!is_root) + ice_collect_result_idx(&buf[buf_idx], + &sw->recp_list[rm->root_rid]); + + recp->n_ext_words = entry->r_group.n_val_pairs; + recp->chain_idx = entry->chain_idx; + recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; + recp->n_grp_count = rm->n_grp_count; + recp->tun_type = rm->tun_type; + recp->recp_created = true; + } + rm->root_buf = buf; + kfree(tmp); + return status; + +err_unroll: +err_mem: + kfree(tmp); + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_create_recipe_group - creates recipe group + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @lkup_exts: lookup elements + */ +static enum ice_status +ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, + struct ice_prot_lkup_ext *lkup_exts) +{ + enum ice_status status; + u8 recp_count = 0; + + rm->n_grp_count = 0; + + /* Create recipes for words that are marked not done by packing them + * as best fit. + */ + status = ice_create_first_fit_recp_def(hw, lkup_exts, + &rm->rg_list, &recp_count); + if (!status) { + rm->n_grp_count += recp_count; + rm->n_ext_words = lkup_exts->n_val_words; + memcpy(&rm->ext_words, lkup_exts->fv_words, + sizeof(rm->ext_words)); + memcpy(rm->word_masks, lkup_exts->field_mask, + sizeof(rm->word_masks)); + } + + return status; +} + +/** + * ice_get_fv - get field vectors/extraction sequences for spec. lookup types + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @bm: bitmap of field vectors to consider + * @fv_list: pointer to a list that holds the returned field vectors + */ +static enum ice_status +ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + unsigned long *bm, struct list_head *fv_list) +{ + enum ice_status status; + u8 *prot_ids; + u16 i; + + prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); + if (!prot_ids) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < lkups_cnt; i++) + if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { + status = ICE_ERR_CFG; + goto free_mem; + } + + /* Find field vectors that include all specified protocol types */ + status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); + +free_mem: + kfree(prot_ids); + return status; +} + +/** + * ice_tun_type_match_word - determine if tun type needs a match mask + * @tun_type: tunnel type + * @mask: mask to be used for the tunnel + */ +static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) +{ + switch (tun_type) { + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_NVGRE: + *mask = ICE_TUN_FLAG_MASK; + return true; + + default: + *mask = 0; + return false; + } +} + +/** + * ice_add_special_words - Add words that are not protocols, such as metadata + * @rinfo: other information regarding the rule e.g. priority and action info + * @lkup_exts: lookup word structure + */ +static enum ice_status +ice_add_special_words(struct ice_adv_rule_info *rinfo, + struct ice_prot_lkup_ext *lkup_exts) +{ + u16 mask; + + /* If this is a tunneled packet, then add recipe index to match the + * tunnel bit in the packet metadata flags. 
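+ * The metadata word is treated like any other lookup word, so it + * consumes one of the ICE_MAX_CHAIN_WORDS slots in lkup_exts.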
+ */ + if (ice_tun_type_match_word(rinfo->tun_type, &mask)) { + if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { + u8 word = lkup_exts->n_val_words++; + + lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; + lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; + lkup_exts->field_mask[word] = mask; + } else { + return ICE_ERR_MAX_LIMIT; + } + } + + return 0; +} + +/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule + * @hw: pointer to hardware structure + * @rinfo: other information regarding the rule e.g. priority and action info + * @bm: pointer to memory for returning the bitmap of field vectors + */ +static void +ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, + unsigned long *bm) +{ + enum ice_prof_type prof_type; + + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + + switch (rinfo->tun_type) { + case ICE_NON_TUN: + prof_type = ICE_PROF_NON_TUN; + break; + case ICE_ALL_TUNNELS: + prof_type = ICE_PROF_TUN_ALL; + break; + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + prof_type = ICE_PROF_TUN_UDP; + break; + case ICE_SW_TUN_NVGRE: + prof_type = ICE_PROF_TUN_GRE; + break; + default: + prof_type = ICE_PROF_ALL; + break; + } + + ice_get_sw_fv_bitmap(hw, prof_type, bm); +} + +/** + * ice_add_adv_recipe - Add an advanced recipe that is not part of the default + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @rinfo: other information regarding the rule e.g. priority and action info + * @rid: return the recipe ID of the recipe created + */ +static enum ice_status +ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) +{ + DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); + DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); + struct ice_prot_lkup_ext *lkup_exts; + struct ice_recp_grp_entry *r_entry; + struct ice_sw_fv_list_entry *fvit; + struct ice_recp_grp_entry *r_tmp; + struct ice_sw_fv_list_entry *tmp; + enum ice_status status = 0; + struct ice_sw_recipe *rm; + u8 i; + + if (!lkups_cnt) + return ICE_ERR_PARAM; + + lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); + if (!lkup_exts) + return ICE_ERR_NO_MEMORY; + + /* Determine the number of words to be matched and if it exceeds a + * recipe's restrictions + */ + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + + count = ice_fill_valid_words(&lkups[i], lkup_exts); + if (!count) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + } + + rm = kzalloc(sizeof(*rm), GFP_KERNEL); + if (!rm) { + status = ICE_ERR_NO_MEMORY; + goto err_free_lkup_exts; + } + + /* Get field vectors that contain fields extracted from all the protocol + * headers being programmed. + */ + INIT_LIST_HEAD(&rm->fv_list); + INIT_LIST_HEAD(&rm->rg_list); + + /* Get bitmap of field vectors (profiles) that are compatible with the + * rule request; only these will be searched in the subsequent call to + * ice_get_fv. 
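+ * Profiles outside this bitmap do not match the requested tunnel type and + * are skipped by the field vector search.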
+ */ + ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); + + status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); + if (status) + goto err_unroll; + + /* Create any special protocol/offset pairs, such as looking at tunnel + * bits by extracting metadata + */ + status = ice_add_special_words(rinfo, lkup_exts); + if (status) + goto err_free_lkup_exts; + + /* Group match words into recipes using preferred recipe grouping + * criteria. + */ + status = ice_create_recipe_group(hw, rm, lkup_exts); + if (status) + goto err_unroll; + + /* set the recipe priority if specified */ + rm->priority = (u8)rinfo->priority; + + /* Find offsets from the field vector. Pick the first one for all the + * recipes. + */ + status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + if (status) + goto err_unroll; + + /* get bitmap of all profiles the recipe will be associated with */ + bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); + list_for_each_entry(fvit, &rm->fv_list, list_entry) { + ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); + set_bit((u16)fvit->profile_id, profiles); + } + + /* Look for a recipe which matches our requested fv / mask list */ + *rid = ice_find_recp(hw, lkup_exts); + if (*rid < ICE_MAX_NUM_RECIPES) + /* Success if found a recipe that match the existing criteria */ + goto err_unroll; + + /* Recipe we need does not exist, add a recipe */ + status = ice_add_sw_recipe(hw, rm, profiles); + if (status) + goto err_unroll; + + /* Associate all the recipes created with all the profiles in the + * common field vector. + */ + list_for_each_entry(fvit, &rm->fv_list, list_entry) { + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 j; + + status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, NULL); + if (status) + goto err_unroll; + + bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, + ICE_MAX_NUM_RECIPES); + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, + NULL); + ice_release_change_lock(hw); + + if (status) + goto err_unroll; + + /* Update profile to recipe bitmap array */ + bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, + ICE_MAX_NUM_RECIPES); + + /* Update recipe to profile bitmap array */ + for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) + set_bit((u16)fvit->profile_id, recipe_to_profile[j]); + } + + *rid = rm->root_rid; + memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, + sizeof(*lkup_exts)); +err_unroll: + list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { + list_del(&r_entry->l_entry); + devm_kfree(ice_hw_to_dev(hw), r_entry); + } + + list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { + list_del(&fvit->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvit); + } + + if (rm->root_buf) + devm_kfree(ice_hw_to_dev(hw), rm->root_buf); + + kfree(rm); + +err_free_lkup_exts: + kfree(lkup_exts); + + return status; +} + +/** + * ice_find_dummy_packet - find dummy packet + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @tun_type: tunnel type + * @pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: pointer to receive the pointer to the offsets for the packet + */ +static void +ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + enum ice_sw_tunnel_type tun_type, + const u8 **pkt, u16 *pkt_len, 
+ const struct ice_dummy_pkt_offsets **offsets) +{ + bool tcp = false, udp = false, ipv6 = false, vlan = false; + u16 i; + + for (i = 0; i < lkups_cnt; i++) { + if (lkups[i].type == ICE_UDP_ILOS) + udp = true; + else if (lkups[i].type == ICE_TCP_IL) + tcp = true; + else if (lkups[i].type == ICE_IPV6_OFOS) + ipv6 = true; + else if (lkups[i].type == ICE_VLAN_OFOS) + vlan = true; + else if (lkups[i].type == ICE_ETYPE_OL && + lkups[i].h_u.ethertype.ethtype_id == + cpu_to_be16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + cpu_to_be16(0xFFFF)) + ipv6 = true; + } + + if (tun_type == ICE_SW_TUN_NVGRE) { + if (tcp) { + *pkt = dummy_gre_tcp_packet; + *pkt_len = sizeof(dummy_gre_tcp_packet); + *offsets = dummy_gre_tcp_packet_offsets; + return; + } + + *pkt = dummy_gre_udp_packet; + *pkt_len = sizeof(dummy_gre_udp_packet); + *offsets = dummy_gre_udp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_VXLAN || + tun_type == ICE_SW_TUN_GENEVE) { + if (tcp) { + *pkt = dummy_udp_tun_tcp_packet; + *pkt_len = sizeof(dummy_udp_tun_tcp_packet); + *offsets = dummy_udp_tun_tcp_packet_offsets; + return; + } + + *pkt = dummy_udp_tun_udp_packet; + *pkt_len = sizeof(dummy_udp_tun_udp_packet); + *offsets = dummy_udp_tun_udp_packet_offsets; + return; + } + + if (udp && !ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_packet; + *pkt_len = sizeof(dummy_vlan_udp_packet); + *offsets = dummy_vlan_udp_packet_offsets; + return; + } + *pkt = dummy_udp_packet; + *pkt_len = sizeof(dummy_udp_packet); + *offsets = dummy_udp_packet_offsets; + return; + } else if (udp && ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); + *offsets = dummy_vlan_udp_ipv6_packet_offsets; + return; + } + *pkt = dummy_udp_ipv6_packet; + *pkt_len = sizeof(dummy_udp_ipv6_packet); + *offsets = dummy_udp_ipv6_packet_offsets; + return; + } else if ((tcp && ipv6) || ipv6) { + if (vlan) { + *pkt = dummy_vlan_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); + *offsets = dummy_vlan_tcp_ipv6_packet_offsets; + return; + } + *pkt = dummy_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_tcp_ipv6_packet); + *offsets = dummy_tcp_ipv6_packet_offsets; + return; + } + + if (vlan) { + *pkt = dummy_vlan_tcp_packet; + *pkt_len = sizeof(dummy_vlan_tcp_packet); + *offsets = dummy_vlan_tcp_packet_offsets; + } else { + *pkt = dummy_tcp_packet; + *pkt_len = sizeof(dummy_tcp_packet); + *offsets = dummy_tcp_packet_offsets; + } +} + +/** + * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @s_rule: stores rule information from the match criteria + * @dummy_pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: offset info for the dummy packet + */ +static enum ice_status +ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + struct ice_aqc_sw_rules_elem *s_rule, + const u8 *dummy_pkt, u16 pkt_len, + const struct ice_dummy_pkt_offsets *offsets) +{ + u8 *pkt; + u16 i; + + /* Start with a packet with a pre-defined/dummy content. Then, fill + * in the header values to be looked up or matched. 
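+ * Only the bits set in each lookup mask are written, so the rest of the + * dummy packet content is preserved.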
+ */ + pkt = s_rule->pdata.lkup_tx_rx.hdr; + + memcpy(pkt, dummy_pkt, pkt_len); + + for (i = 0; i < lkups_cnt; i++) { + enum ice_protocol_type type; + u16 offset = 0, len = 0, j; + bool found = false; + + /* find the start of this layer; it should be found since this + * was already checked when search for the dummy packet + */ + type = lkups[i].type; + for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { + if (type == offsets[j].type) { + offset = offsets[j].offset; + found = true; + break; + } + } + /* this should never happen in a correct calling sequence */ + if (!found) + return ICE_ERR_PARAM; + + switch (lkups[i].type) { + case ICE_MAC_OFOS: + case ICE_MAC_IL: + len = sizeof(struct ice_ether_hdr); + break; + case ICE_ETYPE_OL: + len = sizeof(struct ice_ethtype_hdr); + break; + case ICE_VLAN_OFOS: + len = sizeof(struct ice_vlan_hdr); + break; + case ICE_IPV4_OFOS: + case ICE_IPV4_IL: + len = sizeof(struct ice_ipv4_hdr); + break; + case ICE_IPV6_OFOS: + case ICE_IPV6_IL: + len = sizeof(struct ice_ipv6_hdr); + break; + case ICE_TCP_IL: + case ICE_UDP_OF: + case ICE_UDP_ILOS: + len = sizeof(struct ice_l4_hdr); + break; + case ICE_SCTP_IL: + len = sizeof(struct ice_sctp_hdr); + break; + case ICE_NVGRE: + len = sizeof(struct ice_nvgre_hdr); + break; + case ICE_VXLAN: + case ICE_GENEVE: + len = sizeof(struct ice_udp_tnl_hdr); + break; + default: + return ICE_ERR_PARAM; + } + + /* the length should be a word multiple */ + if (len % ICE_BYTES_PER_WORD) + return ICE_ERR_CFG; + + /* We have the offset to the header start, the length, the + * caller's header values and mask. Use this information to + * copy the data into the dummy packet appropriately based on + * the mask. Note that we need to only write the bits as + * indicated by the mask to make sure we don't improperly write + * over any significant packet data. + */ + for (j = 0; j < len / sizeof(u16); j++) + if (((u16 *)&lkups[i].m_u)[j]) + ((u16 *)(pkt + offset))[j] = + (((u16 *)(pkt + offset))[j] & + ~((u16 *)&lkups[i].m_u)[j]) | + (((u16 *)&lkups[i].h_u)[j] & + ((u16 *)&lkups[i].m_u)[j]); + } + + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); + + return 0; +} + +/** + * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port + * @hw: pointer to the hardware structure + * @tun_type: tunnel type + * @pkt: dummy packet to fill in + * @offsets: offset info for the dummy packet + */ +static enum ice_status +ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, + u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) +{ + u16 open_port, i; + + switch (tun_type) { + case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_GENEVE: + if (!ice_get_open_tunnel_port(hw, &open_port)) + return ICE_ERR_CFG; + break; + + default: + /* Nothing needs to be done for this tunnel type */ + return 0; + } + + /* Find the outer UDP protocol header and insert the port number */ + for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) { + if (offsets[i].type == ICE_UDP_OF) { + struct ice_l4_hdr *hdr; + u16 offset; + + offset = offsets[i].offset; + hdr = (struct ice_l4_hdr *)&pkt[offset]; + hdr->dst_port = cpu_to_be16(open_port); + + return 0; + } + } + + return ICE_ERR_CFG; +} + +/** + * ice_find_adv_rule_entry - Search a rule entry + * @hw: pointer to the hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @recp_id: recipe ID for which we are finding the rule + * @rinfo: other information regarding the rule e.g. 
priority and action info + * + * Helper function to search for a given advanced rule entry. + * Returns a pointer to the entry storing the rule if found. + */ +static struct ice_adv_fltr_mgmt_list_entry * +ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, u16 recp_id, + struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct ice_switch_info *sw = hw->switch_info; + int i; + + list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules, + list_entry) { + bool lkups_matched = true; + + if (lkups_cnt != list_itr->lkups_cnt) + continue; + for (i = 0; i < list_itr->lkups_cnt; i++) + if (memcmp(&list_itr->lkups[i], &lkups[i], + sizeof(*lkups))) { + lkups_matched = false; + break; + } + if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag && + rinfo->tun_type == list_itr->rule_info.tun_type && + lkups_matched) + return list_itr; + } + return NULL; +} + +/** + * ice_adv_add_update_vsi_list - add or update the VSI list for an advanced rule + * @hw: pointer to the hardware structure + * @m_entry: pointer to current adv filter management list entry + * @cur_fltr: filter information from the bookkeeping entry + * @new_fltr: filter information with the new VSI to be added + * + * Call AQ command to add or update previously created VSI list with new VSI. + * + * Helper function to do the bookkeeping associated with adding filter + * information. The algorithm is described below: + * When a VSI needs to subscribe to a given advanced filter + * if only one VSI has been added till now + * Allocate a new VSI list and add two VSIs + * to this list using switch rule command + * Update the previously created switch rule with the + * newly created VSI list ID + * if a VSI list was previously created + * Add the new VSI to the previously created VSI list set + * using the update switch rule command + */ +static enum ice_status +ice_adv_add_update_vsi_list(struct ice_hw *hw, + struct ice_adv_fltr_mgmt_list_entry *m_entry, + struct ice_adv_rule_info *cur_fltr, + struct ice_adv_rule_info *new_fltr) +{ + enum ice_status status; + u16 vsi_list_id = 0; + + if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || + cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) + return ICE_ERR_NOT_IMPL; + + if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && + (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) + return ICE_ERR_NOT_IMPL; + + if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { + /* Only one entry existed in the mapping and it was not already + * a part of a VSI list. So, create a VSI list with the old and + * new VSIs.
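 + * The existing switch rule is then updated to forward to the newly created VSI list.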
+ */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ + if (cur_fltr->sw_act.fwd_id.hw_vsi_id == + new_fltr->sw_act.fwd_id.hw_vsi_id) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; + vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, + ICE_SW_LKUP_LAST); + if (status) + return status; + + memset(&tmp_fltr, 0, sizeof(tmp_fltr)); + tmp_fltr.flag = m_entry->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; + + /* Update the previous switch rule of "forward to VSI" to + * "fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + return status; + + cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; + cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; + m_entry->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + } else { + u16 vsi_handle = new_fltr->sw_act.vsi_handle; + + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + + /* A rule already exists with the new VSI being added */ + if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) + return 0; + + /* Update the previously created VSI list set with + * the new VSI ID passed in + */ + vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; + + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, + ice_aqc_opc_update_sw_rules, + ICE_SW_LKUP_LAST); + /* update VSI list mapping info with new VSI ID */ + if (!status) + set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); + } + if (!status) + m_entry->vsi_count++; + return status; +} + +/** + * ice_add_adv_rule - helper function to create an advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that need to be looked up. All words + * together make one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: other information related to the rule that needs to be programmed + * @added_entry: returns the recipe_id, rule_id and vsi_handle; should be + * ignored in case of error + * + * This function can program only 1 rule at a time. The lkups array is used to + * describe all the words that form the "lookup" portion of the recipe. + * These words can span multiple protocols. Callers to this function need to + * pass in a list of protocol headers with lookup information, along with a mask + * that determines which words are valid in the given protocol header. + * rinfo describes other information related to this rule, such as forwarding + * IDs and rule priority.
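 + * If a rule with the same match criteria and recipe already exists, the VSI is subscribed to that rule's VSI list instead of programming a duplicate rule.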
+ */ +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; + u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; + const struct ice_dummy_pkt_offsets *pkt_offsets; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct list_head *rule_head; + struct ice_switch_info *sw; + enum ice_status status; + const u8 *pkt = NULL; + u16 word_cnt; + u32 act = 0; + u8 q_rgn; + + /* Initialize profile to result index bitmap */ + if (!hw->switch_info->prof_res_bm_init) { + hw->switch_info->prof_res_bm_init = 1; + ice_init_prof_result_bm(hw); + } + + if (!lkups_cnt) + return ICE_ERR_PARAM; + + /* get # of words we need to match */ + word_cnt = 0; + for (i = 0; i < lkups_cnt; i++) { + u16 j, *ptr; + + ptr = (u16 *)&lkups[i].m_u; + for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) + if (ptr[j] != 0) + word_cnt++; + } + + if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) + return ICE_ERR_PARAM; + + /* make sure that we can locate a dummy packet */ + ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, + &pkt_offsets); + if (!pkt) { + status = ICE_ERR_PARAM; + goto err_ice_add_adv_rule; + } + + if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || + rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || + rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || + rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) + return ICE_ERR_CFG; + + vsi_handle = rinfo->sw_act.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + rinfo->sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (rinfo->sw_act.flag & ICE_FLTR_TX) + rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle); + + status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid); + if (status) + return status; + m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + if (m_entry) { + /* A rule with the same match criteria already exists, so only + * subscribe the new VSI to it: if the rule already forwards to + * a VSI list, add the new VSI ID to that list; otherwise create + * a VSI list holding the existing and the new VSI IDs and update + * the rule to forward to that list. vsi_count is incremented on + * success. + */ + status = ice_adv_add_update_vsi_list(hw, m_entry, + &m_entry->rule_info, + rinfo); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = m_entry->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } + return status; + } + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + if (!rinfo->flags_info.act_valid) { + act |= ICE_SINGLE_ACT_LAN_ENABLE; + act |= ICE_SINGLE_ACT_LB_ENABLE; + } else { + act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | + ICE_SINGLE_ACT_LB_ENABLE); + } + + switch (rinfo->sw_act.fltr_act) { + case ICE_FWD_TO_VSI: + act |= (rinfo->sw_act.fwd_id.hw_vsi_id << + ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_Q: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + break; + case ICE_FWD_TO_QGRP: + q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0; + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & + ICE_SINGLE_ACT_Q_REGION_M; + break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; + default: + status = ICE_ERR_CFG; + goto err_ice_add_adv_rule; + } + + /* set the rule LOOKUP type based on caller specified 'Rx' + * instead of hardcoding it to be either LOOKUP_TX/RX + * + * for 'Rx' set the source to be the port number + * for 'Tx' set the source to be the source HW VSI number (determined + * by caller) + */ + if (rinfo->rx) { + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->pdata.lkup_tx_rx.src = + cpu_to_le16(hw->port_info->lport); + } else { + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src); + } + + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, + pkt_len, pkt_offsets); + if (status) + goto err_ice_add_adv_rule; + + if (rinfo->tun_type != ICE_NON_TUN) { + status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, + s_rule->pdata.lkup_tx_rx.hdr, + pkt_offsets); + if (status) + goto err_ice_add_adv_rule; + } + + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, + NULL); + if (status) + goto err_ice_add_adv_rule; + adv_fltr = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(struct ice_adv_fltr_mgmt_list_entry), + GFP_KERNEL); + if (!adv_fltr) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, + lkups_cnt * sizeof(*lkups), GFP_KERNEL); + if (!adv_fltr->lkups) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups_cnt = lkups_cnt; + adv_fltr->rule_info = *rinfo; + adv_fltr->rule_info.fltr_rule_id = + le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + sw = hw->switch_info; + sw->recp_list[rid].adv_rule = true; + rule_head = &sw->recp_list[rid].filt_rules; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + adv_fltr->vsi_count = 1; + + /* Add rule entry to book keeping list */ + list_add(&adv_fltr->list_entry, rule_head); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } +err_ice_add_adv_rule: + if (status && adv_fltr) { + devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups); + devm_kfree(ice_hw_to_dev(hw), adv_fltr); + } + + kfree(s_rule); + + return status; +} + +/** + * ice_replay_vsi_fltr - Replay filters for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @recp_id: Recipe ID for which rules need to be replayed + * @list_head: list for which filters need to be replayed + * + * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. + * It is required to pass valid VSI handle. 
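 + * Each stored filter that references this VSI is re-added individually using the filter info saved in the bookkeeping list.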
+ */ +static enum ice_status +ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, + struct list_head *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + enum ice_status status = 0; + u16 hw_vsi_id; + + if (list_empty(list_head)) + return status; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + list_for_each_entry(itr, list_head, list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && + itr->fltr_info.vsi_handle == vsi_handle) { + /* update the src in case it is VSI num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + continue; + } + if (!itr->vsi_list_info || !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) continue; /* Clearing it so that the logic can add it back */ @@ -2830,6 +5419,236 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, return status; } +/** + * ice_adv_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_adv_fltr_mgmt_list_entry *fm_list) +{ + struct ice_vsi_list_map_info *vsi_list_info; + enum ice_sw_lkup_type lkup_type; + enum ice_status status; + u16 vsi_list_id; + + if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = ICE_SW_LKUP_LAST; + vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + vsi_list_info = fm_list->vsi_list_info; + if (fm_list->vsi_count == 1) { + struct ice_fltr_info tmp_fltr; + u16 rem_vsi_handle; + + rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + memset(&tmp_fltr, 0, sizeof(tmp_fltr)); + tmp_fltr.flag = fm_list->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id; + fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + fm_list->rule_info.sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle; + + /* Update the previous switch rule of "MAC forward to VSI" to + * "MAC fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) { + ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + tmp_fltr.fwd_id.hw_vsi_id, status); + return status; + } + fm_list->vsi_list_info->ref_cnt--; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, 
vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_rem_adv_rule - removes existing advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that needs to be looked up. All words + * together makes one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: Its the pointer to the rule information for the rule + * + * This function can be used to remove 1 rule at a time. The lkups is + * used to describe all the words that forms the "lookup" portion of the + * rule. These words can span multiple protocols. Callers to this function + * need to pass in a list of protocol headers with lookup information along + * and mask that determines which words are valid from the given protocol + * header. rinfo describes other information related to this rule such as + * forwarding IDs, priority of this rule, etc. + */ +static enum ice_status +ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_elem; + struct ice_prot_lkup_ext lkup_exts; + enum ice_status status = 0; + bool remove_rule = false; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + u16 i, rid, vsi_handle; + + memset(&lkup_exts, 0, sizeof(lkup_exts)); + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) + return ICE_ERR_CFG; + + count = ice_fill_valid_words(&lkups[i], &lkup_exts); + if (!count) + return ICE_ERR_CFG; + } + + /* Create any special protocol/offset pairs, such as looking at tunnel + * bits by extracting metadata + */ + status = ice_add_special_words(rinfo, &lkup_exts); + if (status) + return status; + + rid = ice_find_recp(hw, &lkup_exts); + /* If did not find a recipe that match the existing criteria */ + if (rid == ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; + list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + /* the rule is already removed */ + if (!list_elem) + return 0; + mutex_lock(rule_lock); + if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (list_elem->vsi_count > 1) { + remove_rule = false; + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + } else { + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) { + mutex_unlock(rule_lock); + return status; + } + if (list_elem->vsi_count == 0) + remove_rule = true; + } + mutex_unlock(rule_lock); + if (remove_rule) { + struct ice_aqc_sw_rules_elem *s_rule; + u16 rule_buf_sz; + + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + cpu_to_le16(list_elem->rule_info.fltr_rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + struct ice_switch_info *sw = hw->switch_info; + + mutex_lock(rule_lock); + 
list_del(&list_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_elem->lkups); + devm_kfree(ice_hw_to_dev(hw), list_elem); + mutex_unlock(rule_lock); + if (list_empty(&sw->recp_list[rid].filt_rules)) + sw->recp_list[rid].adv_rule = false; + } + kfree(s_rule); + } + return status; +} + +/** + * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID + * @hw: pointer to the hardware structure + * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID + * + * This function is used to remove 1 rule at a time. The removal is based on + * the remove_entry parameter. This function will remove rule for a given + * vsi_handle with a given rule_id which is passed as parameter in remove_entry + */ +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + struct ice_adv_rule_info rinfo; + struct ice_switch_info *sw; + + sw = hw->switch_info; + if (!sw->recp_list[remove_entry->rid].recp_created) + return ICE_ERR_PARAM; + list_head = &sw->recp_list[remove_entry->rid].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (list_itr->rule_info.fltr_rule_id == + remove_entry->rule_id) { + rinfo = list_itr->rule_info; + rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; + return ice_rem_adv_rule(hw, list_itr->lkups, + list_itr->lkups_cnt, &rinfo); + } + } + /* either list is empty or unable to find rule */ + return ICE_ERR_DOES_NOT_EXIST; +} + /** * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists * @hw: pointer to the hardware structure @@ -2868,12 +5687,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) if (!sw) return; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { struct list_head *l_head; l_head = &sw->recp_list[i].filt_replay_rules; - ice_rem_sw_rule_info(hw, l_head); + if (!sw->recp_list[i].adv_rule) + ice_rem_sw_rule_info(hw, l_head); + else + ice_rem_adv_rule_info(hw, l_head); } } } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index c5db8d56133f31..d8a38906f16f3c 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -14,6 +14,9 @@ #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) + /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; @@ -122,30 +125,124 @@ struct ice_fltr_info { u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; +struct ice_adv_lkup_elem { + enum ice_protocol_type type; + union ice_prot_hdr h_u; /* Header values */ + union ice_prot_hdr m_u; /* Mask of header values to match */ +}; + +struct ice_sw_act_ctrl { + /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ + u16 src; + u16 flag; + enum ice_sw_fwd_act_type fltr_act; + /* Depending on filter action */ + union { + /* This is a queue ID in case of ICE_FWD_TO_Q and starting + * queue ID in case of ICE_FWD_TO_QGRP. 
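 + * For VSI, HW VSI, or VSI list forwarding actions the matching *_id member below is used instead.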
+ */ + u16 q_id:11; + u16 vsi_id:10; + u16 hw_vsi_id:10; + u16 vsi_list_id:10; + } fwd_id; + /* software VSI handle */ + u16 vsi_handle; + u8 qgrp_size; +}; + +struct ice_rule_query_data { + /* Recipe ID for which the requested rule was added */ + u16 rid; + /* Rule ID that was added or is supposed to be removed */ + u16 rule_id; + /* vsi_handle for which Rule was added or is supposed to be removed */ + u16 vsi_handle; +}; + +/* This structure allows to pass info about lb_en and lan_en + * flags to ice_add_adv_rule. Values in act would be used + * only if act_valid was set to true, otherwise default + * values would be used. + */ +struct ice_adv_rule_flags_info { + u32 act; + u8 act_valid; /* indicate if flags in act are valid */ +}; + +struct ice_adv_rule_info { + enum ice_sw_tunnel_type tun_type; + struct ice_sw_act_ctrl sw_act; + u32 priority; + u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ + u16 fltr_rule_id; + struct ice_adv_rule_flags_info flags_info; +}; + +/* A collection of one or more four word recipe */ struct ice_sw_recipe { - struct list_head l_entry; + /* For a chained recipe the root recipe is what should be used for + * programming rules + */ + u8 is_root; + u8 root_rid; + u8 recp_created; - /* To protect modification of filt_rule list - * defined below + /* Number of extraction words */ + u8 n_ext_words; + /* Protocol ID and Offset pair (extraction word) to describe the + * recipe */ - struct mutex filt_rule_lock; + struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; + u16 word_masks[ICE_MAX_CHAIN_WORDS]; - /* List of type ice_fltr_mgmt_list_entry */ + /* if this recipe is a collection of other recipe */ + u8 big_recp; + + /* if this recipe is part of another bigger recipe then chain index + * corresponding to this recipe + */ + u8 chain_idx; + + /* if this recipe is a collection of other recipe then count of other + * recipes and recipe IDs of those recipes + */ + u8 n_grp_count; + + /* Bit map specifying the IDs associated with this group of recipe */ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + + enum ice_sw_tunnel_type tun_type; + + /* List of type ice_fltr_mgmt_list_entry or adv_rule */ + u8 adv_rule; struct list_head filt_rules; struct list_head filt_replay_rules; - /* linked list of type recipe_list_entry */ - struct list_head rg_list; - /* linked list of type ice_sw_fv_list_entry*/ + struct mutex filt_rule_lock; /* protect filter rule structure */ + + /* Profiles this recipe should be associated with */ struct list_head fv_list; - struct ice_aqc_recipe_data_elem *r_buf; - u8 recp_count; - u8 root_rid; - u8 num_profs; - u8 *prof_ids; - /* recipe bitmap: what all recipes makes this recipe */ - DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + /* Profiles this recipe is associated with */ + u8 num_profs, *prof_ids; + + /* Bit map for possible result indexes */ + DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS); + + /* This allows user to specify the recipe priority. + * For now, this becomes 'fwd_priority' when recipe + * is created, usually recipes can have 'fwd' and 'join' + * priority. 
+ */ + u8 priority; + + struct list_head rg_list; + + /* AQ buffer associated with this recipe */ + struct ice_aqc_recipe_data_elem *root_buf; + /* This struct saves the fv_words for a given lookup */ + struct ice_prot_lkup_ext lkup_exts; }; /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */ @@ -183,6 +280,16 @@ struct ice_fltr_mgmt_list_entry { u8 counter_index; }; +struct ice_adv_fltr_mgmt_list_entry { + struct list_head list_entry; + + struct ice_adv_lkup_elem *lkups; + struct ice_adv_rule_info rule_info; + u16 lkups_cnt; + struct ice_vsi_list_map_info *vsi_list_info; + u16 vsi_count; +}; + enum ice_promisc_flags { ICE_PROMISC_UCAST_RX = 0x1, ICE_PROMISC_UCAST_TX = 0x2, @@ -218,6 +325,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry); enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); @@ -227,6 +338,8 @@ enum ice_status ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); +bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); +bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); @@ -245,10 +358,19 @@ enum ice_status ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); +enum ice_status +ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry); + enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +enum ice_status +ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c new file mode 100644 index 00000000000000..e5d23feb670177 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_tc_lib.h" +#include "ice_fltr.h" +#include "ice_lib.h" +#include "ice_protocol_type.h" + +/** + * ice_tc_count_lkups - determine lookup count for switch filter + * @flags: TC-flower flags + * @headers: Pointer to TC flower filter header structure + * @fltr: Pointer to outer TC filter structure + * + * Determine lookup count based on TC flower input for switch filter. 
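 + * Each matched field group (tunnel ID, encap L3, encap L4 port, ethertype, MAC, VLAN, L3 addresses, L4 ports) contributes one lookup element; callers use the returned count to size the ice_adv_lkup_elem array.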
+ */ +static int +ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, + struct ice_tc_flower_fltr *fltr) +{ + int lkups_cnt = 0; + + if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) + lkups_cnt++; + + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) + lkups_cnt++; + + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) + lkups_cnt++; + + /* currently inner etype filter isn't supported */ + if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) && + fltr->tunnel_type == TNL_LAST) + lkups_cnt++; + + /* are MAC fields specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC)) + lkups_cnt++; + + /* is VLAN specified? */ + if (flags & ICE_TC_FLWR_FIELD_VLAN) + lkups_cnt++; + + /* are IPv[4|6] fields specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 | + ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6)) + lkups_cnt++; + + /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | + ICE_TC_FLWR_FIELD_SRC_L4_PORT)) + lkups_cnt++; + + return lkups_cnt; +} + +static enum ice_protocol_type ice_proto_type_from_mac(bool inner) +{ + return inner ? ICE_MAC_IL : ICE_MAC_OFOS; +} + +static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner) +{ + return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS; +} + +static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) +{ + return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS; +} + +static enum ice_protocol_type +ice_proto_type_from_l4_port(bool inner, u16 ip_proto) +{ + if (inner) { + switch (ip_proto) { + case IPPROTO_UDP: + return ICE_UDP_ILOS; + } + } else { + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_OF; + } + } + + return 0; +} + +static enum ice_protocol_type +ice_proto_type_from_tunnel(enum ice_tunnel_type type) +{ + switch (type) { + case TNL_VXLAN: + return ICE_VXLAN; + case TNL_GENEVE: + return ICE_GENEVE; + case TNL_GRETAP: + return ICE_NVGRE; + default: + return 0; + } +} + +static enum ice_sw_tunnel_type +ice_sw_type_from_tunnel(enum ice_tunnel_type type) +{ + switch (type) { + case TNL_VXLAN: + return ICE_SW_TUN_VXLAN; + case TNL_GENEVE: + return ICE_SW_TUN_GENEVE; + case TNL_GRETAP: + return ICE_SW_TUN_NVGRE; + default: + return ICE_NON_TUN; + } +} + +static int +ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, + struct ice_adv_lkup_elem *list) +{ + struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers; + int i = 0; + + if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) { + u32 tenant_id; + + list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); + switch (fltr->tunnel_type) { + case TNL_VXLAN: + case TNL_GENEVE: + tenant_id = be32_to_cpu(fltr->tenant_id) << 8; + list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id); + memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4); + i++; + break; + case TNL_GRETAP: + list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id; + memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4); + i++; + break; + default: + break; + } + } + + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { + list[i].type = ice_proto_type_from_ipv4(false); + + if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) { + list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4; + list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4; + } + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) { + list[i].h_u.ipv4_hdr.dst_addr = 
hdr->l3_key.dst_ipv4; + list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4; + } + i++; + } + + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) { + list[i].type = ice_proto_type_from_ipv6(false); + + if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) { + memcpy(&list[i].h_u.ipv6_hdr.src_addr, + &hdr->l3_key.src_ipv6_addr, + sizeof(hdr->l3_key.src_ipv6_addr)); + memcpy(&list[i].m_u.ipv6_hdr.src_addr, + &hdr->l3_mask.src_ipv6_addr, + sizeof(hdr->l3_mask.src_ipv6_addr)); + } + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) { + memcpy(&list[i].h_u.ipv6_hdr.dst_addr, + &hdr->l3_key.dst_ipv6_addr, + sizeof(hdr->l3_key.dst_ipv6_addr)); + memcpy(&list[i].m_u.ipv6_hdr.dst_addr, + &hdr->l3_mask.dst_ipv6_addr, + sizeof(hdr->l3_mask.dst_ipv6_addr)); + } + i++; + } + + if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) { + list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto); + list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; + list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; + i++; + } + + return i; +} + +/** + * ice_tc_fill_rules - fill filter rules based on TC fltr + * @hw: pointer to HW structure + * @flags: tc flower field flags + * @tc_fltr: pointer to TC flower filter + * @list: list of advance rule elements + * @rule_info: pointer to information about rule + * @l4_proto: pointer to information such as L4 proto type + * + * Fill ice_adv_lkup_elem list based on TC flower flags and + * TC flower headers. This list should be used to add + * advance filter in hardware. + */ +static int +ice_tc_fill_rules(struct ice_hw *hw, u32 flags, + struct ice_tc_flower_fltr *tc_fltr, + struct ice_adv_lkup_elem *list, + struct ice_adv_rule_info *rule_info, + u16 *l4_proto) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; + bool inner = false; + int i = 0; + + rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); + if (tc_fltr->tunnel_type != TNL_LAST) { + i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list); + + headers = &tc_fltr->inner_headers; + inner = true; + } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { + list[i].type = ICE_ETYPE_OL; + list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto; + list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto; + i++; + } + + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_SRC_MAC)) { + struct ice_tc_l2_hdr *l2_key, *l2_mask; + + l2_key = &headers->l2_key; + l2_mask = &headers->l2_mask; + + list[i].type = ice_proto_type_from_mac(inner); + if (flags & ICE_TC_FLWR_FIELD_DST_MAC) { + ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, + l2_key->dst_mac); + ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, + l2_mask->dst_mac); + } + if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) { + ether_addr_copy(list[i].h_u.eth_hdr.src_addr, + l2_key->src_mac); + ether_addr_copy(list[i].m_u.eth_hdr.src_addr, + l2_mask->src_mac); + } + i++; + } + + /* copy VLAN info */ + if (flags & ICE_TC_FLWR_FIELD_VLAN) { + list[i].type = ICE_VLAN_OFOS; + list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id; + list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); + i++; + } + + /* copy L3 (IPv[4|6]: src, dest) address */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | + ICE_TC_FLWR_FIELD_SRC_IPV4)) { + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + list[i].type = ice_proto_type_from_ipv4(inner); + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) { + list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4; + list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4; + } + if 
(flags & ICE_TC_FLWR_FIELD_SRC_IPV4) { + list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4; + list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4; + } + i++; + } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 | + ICE_TC_FLWR_FIELD_SRC_IPV6)) { + struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask; + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + list[i].type = ice_proto_type_from_ipv6(inner); + ipv6_hdr = &list[i].h_u.ipv6_hdr; + ipv6_mask = &list[i].m_u.ipv6_hdr; + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + + if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) { + memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr, + sizeof(l3_key->dst_ipv6_addr)); + memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr, + sizeof(l3_mask->dst_ipv6_addr)); + } + if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) { + memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr, + sizeof(l3_key->src_ipv6_addr)); + memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr, + sizeof(l3_mask->src_ipv6_addr)); + } + i++; + } + + /* copy L4 (src, dest) port */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | + ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { + struct ice_tc_l4_hdr *l4_key, *l4_mask; + + list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto); + l4_key = &headers->l4_key; + l4_mask = &headers->l4_mask; + + if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) { + list[i].h_u.l4_hdr.dst_port = l4_key->dst_port; + list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port; + } + if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) { + list[i].h_u.l4_hdr.src_port = l4_key->src_port; + list[i].m_u.l4_hdr.src_port = l4_mask->src_port; + } + i++; + } + + return i; +} + +/** + * ice_tc_tun_get_type - get the tunnel type + * @tunnel_dev: ptr to tunnel device + * + * This function detects appropriate tunnel_type if specified device is + * tunnel device such as VXLAN/Geneve + */ +static int ice_tc_tun_get_type(struct net_device *tunnel_dev) +{ + if (netif_is_vxlan(tunnel_dev)) + return TNL_VXLAN; + if (netif_is_geneve(tunnel_dev)) + return TNL_GENEVE; + if (netif_is_gretap(tunnel_dev) || + netif_is_ip6gretap(tunnel_dev)) + return TNL_GRETAP; + return TNL_LAST; +} + +bool ice_is_tunnel_supported(struct net_device *dev) +{ + return ice_tc_tun_get_type(dev) != TNL_LAST; +} + +static int +ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, + struct flow_action_entry *act) +{ + struct ice_repr *repr; + + switch (act->id) { + case FLOW_ACTION_DROP: + fltr->action.fltr_act = ICE_DROP_PACKET; + break; + + case FLOW_ACTION_REDIRECT: + fltr->action.fltr_act = ICE_FWD_TO_VSI; + + if (ice_is_port_repr_netdev(act->dev)) { + repr = ice_netdev_to_repr(act->dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else if (netif_is_ice(act->dev) || + ice_is_tunnel_supported(act->dev)) { + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + break; + + default: + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int +ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_rule_query_data rule_added; + struct ice_hw *hw = &vsi->back->hw; + struct ice_adv_lkup_elem *list; + u32 flags = fltr->flags; + enum ice_status status; + int lkups_cnt; + int ret = 0; + int i; + + if (!flags || (flags & 
ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)"); + return -EOPNOTSUPP; + } + + lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + return -ENOMEM; + + i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL); + if (i != lkups_cnt) { + ret = -EINVAL; + goto exit; + } + + /* egress traffic is always redirect to uplink */ + if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) + fltr->dest_vsi = vsi->back->switchdev.uplink_vsi; + + rule_info.sw_act.fltr_act = fltr->action.fltr_act; + if (fltr->action.fltr_act != ICE_DROP_PACKET) + rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; + /* For now, making priority to be highest, and it also becomes + * the priority for recipe which will get created as a result of + * new extraction sequence based on input set. + * Priority '7' is max val for switch recipe, higher the number + * results into order of switch rule evaluation. + */ + rule_info.priority = 7; + + if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; + } else { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + rule_info.rx = false; + rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + rule_info.flags_info.act_valid = true; + } + + /* specify the cookie as filter_rule_id */ + rule_info.fltr_rule_id = fltr->cookie; + + status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (status == ICE_ERR_ALREADY_EXISTS) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); + ret = -EINVAL; + goto exit; + } else if (status) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); + ret = -EIO; + goto exit; + } + + /* store the output params, which are needed later for removing + * advanced switch filter + */ + fltr->rid = rule_added.rid; + fltr->rule_id = rule_added.rule_id; + +exit: + kfree(list); + return ret; +} + +/** + * ice_add_tc_flower_adv_fltr - add appropriate filter rules + * @vsi: Pointer to VSI + * @tc_fltr: Pointer to TC flower filter structure + * + * based on filter parameters using Advance recipes supported + * by OS package. 
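 + * Filters that target an ADQ channel are programmed as forward-to-VSI rules against the channel VSI.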
+ */ +static int +ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, + struct ice_tc_flower_fltr *tc_fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; + struct ice_adv_rule_info rule_info = {0}; + struct ice_rule_query_data rule_added; + struct ice_adv_lkup_elem *list; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 flags = tc_fltr->flags; + struct ice_vsi *ch_vsi; + struct device *dev; + u16 lkups_cnt = 0; + u16 l4_proto = 0; + int ret = 0; + u16 i = 0; + + dev = ice_pf_to_dev(pf); + if (ice_is_safe_mode(pf)) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode"); + return -EOPNOTSUPP; + } + + if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)"); + return -EOPNOTSUPP; + } + + /* get the channel (aka ADQ VSI) */ + if (tc_fltr->dest_vsi) + ch_vsi = tc_fltr->dest_vsi; + else + ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class]; + + lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + return -ENOMEM; + + i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto); + if (i != lkups_cnt) { + ret = -EINVAL; + goto exit; + } + + rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act; + if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) { + if (!ch_vsi) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist"); + ret = -EINVAL; + goto exit; + } + + rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; + rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.priority = 7; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; + dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n", + tc_fltr->action.tc_class, + rule_info.sw_act.vsi_handle, lkups_cnt); + } else { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + rule_info.rx = false; + } + + /* specify the cookie as filter_rule_id */ + rule_info.fltr_rule_id = tc_fltr->cookie; + + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter because it already exist"); + ret = -EINVAL; + goto exit; + } else if (ret) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter due to error"); + ret = -EIO; + goto exit; + } + + /* store the output params, which are needed later for removing + * advanced switch filter + */ + tc_fltr->rid = rule_added.rid; + tc_fltr->rule_id = rule_added.rule_id; + if (tc_fltr->action.tc_class > 0 && ch_vsi) { + /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and + * for PF ADQ filter, it is not yet set in tc_fltr, + * hence store the dest_vsi ptr in tc_fltr + */ + if (ch_vsi->type == ICE_VSI_CHNL) + tc_fltr->dest_vsi = ch_vsi; + /* keep track of advanced switch filter for + * destination VSI (channel VSI) + */ + ch_vsi->num_chnl_fltr++; + /* in this case, dest_id is VSI handle (sw handle) */ + tc_fltr->dest_id = rule_added.vsi_handle; + + /* keeps track of channel filters for PF VSI */ + if (vsi->type == ICE_VSI_PF && + (flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_ENC_DST_MAC))) + pf->num_dmac_chnl_fltrs++; + } + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n", + lkups_cnt, flags, + 
tc_fltr->action.tc_class, rule_added.rid, + rule_added.rule_id, rule_added.vsi_handle); +exit: + kfree(list); + return ret; +} + +/** + * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter + * @match: Pointer to flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + * @is_encap: set true for tunnel IPv4 address + */ +static int +ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap) +{ + if (match->key->dst) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4; + else + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4; + headers->l3_key.dst_ipv4 = match->key->dst; + headers->l3_mask.dst_ipv4 = match->mask->dst; + } + if (match->key->src) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4; + else + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4; + headers->l3_key.src_ipv4 = match->key->src; + headers->l3_mask.src_ipv4 = match->mask->src; + } + return 0; +} + +/** + * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter + * @match: Pointer to flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + * @is_encap: set true for tunnel IPv6 address + */ +static int +ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap) +{ + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + /* src and dest IPV6 address should not be LOOPBACK + * (0:0:0:0:0:0:0:1), which can be represented as ::1 + */ + if (ipv6_addr_loopback(&match->key->dst) || + ipv6_addr_loopback(&match->key->src)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK"); + return -EINVAL; + } + /* if src/dest IPv6 address is *,* error */ + if (ipv6_addr_any(&match->mask->dst) && + ipv6_addr_any(&match->mask->src)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any"); + return -EINVAL; + } + if (!ipv6_addr_any(&match->mask->dst)) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6; + else + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6; + } + if (!ipv6_addr_any(&match->mask->src)) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6; + else + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6; + } + + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + + if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_SRC_IPV6)) { + memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr, + sizeof(match->key->src.s6_addr)); + memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr, + sizeof(match->mask->src.s6_addr)); + } + if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 | + ICE_TC_FLWR_FIELD_DEST_IPV6)) { + memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr, + sizeof(match->key->dst.s6_addr)); + memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr, + sizeof(match->mask->dst.s6_addr)); + } + + return 0; +} + +/** + * ice_tc_set_port - Parse ports from TC flower filter + * @match: Flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + * @is_encap: set true for tunnel port + */ +static int +ice_tc_set_port(struct flow_match_ports match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap) +{ + if (match.key->dst) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT; + else + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; + fltr->flags |= 
ICE_TC_FLWR_FIELD_DEST_L4_PORT; + headers->l4_key.dst_port = match.key->dst; + headers->l4_mask.dst_port = match.mask->dst; + } + if (match.key->src) { + if (is_encap) + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT; + else + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + headers->l4_key.src_port = match.key->src; + headers->l4_mask.src_port = match.mask->src; + } + return 0; +} + +static struct net_device * +ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule) +{ + struct flow_action_entry *act; + int i; + + if (ice_is_tunnel_supported(dev)) + return dev; + + flow_action_for_each(i, act, &rule->action) { + if (act->id == FLOW_ACTION_REDIRECT && + ice_is_tunnel_supported(act->dev)) + return act->dev; + } + + return NULL; +} + +static int +ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, + struct ice_tc_flower_fltr *fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct flow_match_control enc_control; + + fltr->tunnel_type = ice_tc_tun_get_type(dev); + headers->l3_key.ip_proto = IPPROTO_UDP; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid enc_keyid; + + flow_rule_match_enc_keyid(rule, &enc_keyid); + + if (!enc_keyid.mask->keyid || + enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32)) + return -EINVAL; + + fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID; + fltr->tenant_id = enc_keyid.key->keyid; + } + + flow_rule_match_enc_control(rule, &enc_control); + + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + if (ice_tc_set_ipv4(&match, fltr, headers, true)) + return -EINVAL; + } else if (enc_control.key->addr_type == + FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + if (ice_tc_set_ipv6(&match, fltr, headers, true)) + return -EINVAL; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_match_ip match; + + flow_rule_match_enc_ip(rule, &match); + headers->l3_key.tos = match.key->tos; + headers->l3_key.ttl = match.key->ttl; + headers->l3_mask.tos = match.mask->tos; + headers->l3_mask.ttl = match.mask->ttl; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_enc_ports(rule, &match); + if (ice_tc_set_port(match, fltr, headers, true)) + return -EINVAL; + } + + return 0; +} + +/** + * ice_parse_cls_flower - Parse TC flower filters provided by kernel + * @vsi: Pointer to the VSI + * @filter_dev: Pointer to device on which filter is being added + * @f: Pointer to struct flow_cls_offload + * @fltr: Pointer to filter structure + */ +static int +ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, + struct flow_cls_offload *f, + struct ice_tc_flower_fltr *fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; + struct flow_dissector *dissector; + struct net_device *tunnel_dev; + + dissector = rule->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + 
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used"); + return -EOPNOTSUPP; + } + + tunnel_dev = ice_get_tunnel_device(filter_dev, rule); + if (tunnel_dev) { + int err; + + filter_dev = tunnel_dev; + + err = ice_parse_tunnel_attr(filter_dev, rule, fltr); + if (err) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes"); + return err; + } + + /* header pointers should point to the inner headers, outer + * header were already set by ice_parse_tunnel_attr + */ + headers = &fltr->inner_headers; + } else if (dissector->used_keys & + (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel"); + return -EOPNOTSUPP; + } else { + fltr->tunnel_type = TNL_LAST; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); + + if (n_proto_key == ETH_P_ALL || n_proto_key == 0) { + n_proto_key = 0; + n_proto_mask = 0; + } else { + fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; + } + + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); + headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); + headers->l3_key.ip_proto = match.key->ip_proto; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.key->dst)) { + ether_addr_copy(headers->l2_key.dst_mac, + match.key->dst); + ether_addr_copy(headers->l2_mask.dst_mac, + match.mask->dst); + fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + } + + if (!is_zero_ether_addr(match.key->src)) { + ether_addr_copy(headers->l2_key.src_mac, + match.key->src); + ether_addr_copy(headers->l2_mask.src_mac, + match.mask->src); + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || + is_vlan_dev(filter_dev)) { + struct flow_dissector_key_vlan mask; + struct flow_dissector_key_vlan key; + struct flow_match_vlan match; + + if (is_vlan_dev(filter_dev)) { + match.key = &key; + match.key->vlan_id = vlan_dev_vlan_id(filter_dev); + match.key->vlan_priority = 0; + match.mask = &mask; + memset(match.mask, 0xff, sizeof(*match.mask)); + match.mask->vlan_priority = 0; + } else { + flow_rule_match_vlan(rule, &match); + } + + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { + fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask"); + return -EINVAL; + } + } + + headers->vlan_hdr.vlan_id = + cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK); + if (match.mask->vlan_priority) + headers->vlan_hdr.vlan_prio = match.key->vlan_priority; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (ice_tc_set_ipv4(&match, fltr, headers, false)) + return -EINVAL; + } + + if 
(addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + if (ice_tc_set_ipv6(&match, fltr, headers, false)) + return -EINVAL; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if (ice_tc_set_port(match, fltr, headers, false)) + return -EINVAL; + switch (headers->l3_key.ip_proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + default: + NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported"); + return -EINVAL; + } + } + return 0; +} + +/** + * ice_add_switch_fltr - Add TC flower filters + * @vsi: Pointer to VSI + * @fltr: Pointer to struct ice_tc_flower_fltr + * + * Add filter in HW switch block + */ +static int +ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + if (fltr->action.fltr_act == ICE_FWD_TO_QGRP) + return -EOPNOTSUPP; + + if (ice_is_eswitch_mode_switchdev(vsi->back)) + return ice_eswitch_add_tc_fltr(vsi, fltr); + + return ice_add_tc_flower_adv_fltr(vsi, fltr); +} + +/** + * ice_handle_tclass_action - Support directing to a traffic class + * @vsi: Pointer to VSI + * @cls_flower: Pointer to TC flower offload structure + * @fltr: Pointer to TC flower filter structure + * + * Support directing traffic to a traffic class + */ +static int +ice_handle_tclass_action(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) +{ + int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); + struct ice_vsi *main_vsi; + + if (tc < 0) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid"); + return -EINVAL; + } + if (!tc) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + + if (!(vsi->all_enatc & BIT(tc))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination"); + return -EINVAL; + } + + /* Redirect to a TC class or Queue Group */ + main_vsi = ice_get_main_vsi(vsi->back); + if (!main_vsi || !main_vsi->netdev) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because of invalid netdevice"); + return -EINVAL; + } + + if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) && + (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_SRC_MAC))) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination"); + return -EOPNOTSUPP; + } + + /* For ADQ, filter must include dest MAC address, otherwise unwanted + * packets with unrelated MAC address get delivered to ADQ VSIs as long + * as remaining filter criteria is satisfied such as dest IP address + * and dest/src L4 port. Following code is trying to handle: + * 1. For non-tunnel, if user specify MAC addresses, use them (means + * this code won't do anything + * 2. For non-tunnel, if user didn't specify MAC address, add implicit + * dest MAC to be lower netdev's active unicast MAC address + */ + if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) { + ether_addr_copy(fltr->outer_headers.l2_key.dst_mac, + main_vsi->netdev->dev_addr); + eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac); + fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + } + + /* validate specified dest MAC address, make sure either it belongs to + * lower netdev or any of MACVLAN. MACVLANs MAC address are added as + * unicast MAC filter destined to main VSI. 
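 + * The check below looks up the destination MAC in the main VSI's legacy MAC filter list and rejects the TC filter if no such entry exists.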
+ */ + if (!ice_mac_fltr_exist(&main_vsi->back->hw, + fltr->outer_headers.l2_key.dst_mac, + main_vsi->idx)) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because legacy MAC filter for specified destination doesn't exist"); + return -EINVAL; + } + + /* Make sure VLAN is already added to main VSI, before allowing ADQ to + * add a VLAN based filter such as MAC + VLAN + L4 port. + */ + if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) { + u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id); + + if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id, + main_vsi->idx)) { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unable to add filter because legacy VLAN filter for specified destination doesn't exist"); + return -EINVAL; + } + } + fltr->action.fltr_act = ICE_FWD_TO_VSI; + fltr->action.tc_class = tc; + + return 0; +} + +/** + * ice_parse_tc_flower_actions - Parse the actions for a TC filter + * @vsi: Pointer to VSI + * @cls_flower: Pointer to TC flower offload structure + * @fltr: Pointer to TC flower filter structure + * + * Parse the actions for a TC filter + */ +static int +ice_parse_tc_flower_actions(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); + struct flow_action *flow_action = &rule->action; + struct flow_action_entry *act; + int i; + + if (cls_flower->classid) + return ice_handle_tclass_action(vsi, cls_flower, fltr); + + if (!flow_action_has_entries(flow_action)) + return -EINVAL; + + flow_action_for_each(i, act, flow_action) { + if (ice_is_eswitch_mode_switchdev(vsi->back)) { + int err = ice_eswitch_tc_parse_action(fltr, act); + + if (err) + return err; + continue; + } + /* Allow only one rule per filter */ + + /* Drop action */ + if (act->id == FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP"); + return -EINVAL; + } + fltr->action.fltr_act = ICE_FWD_TO_VSI; + } + return 0; +} + +/** + * ice_del_tc_fltr - deletes a filter from HW table + * @vsi: Pointer to VSI + * @fltr: Pointer to struct ice_tc_flower_fltr + * + * This function deletes a filter from HW table and manages book-keeping + */ +static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + struct ice_rule_query_data rule_rem; + struct ice_pf *pf = vsi->back; + int err; + + rule_rem.rid = fltr->rid; + rule_rem.rule_id = fltr->rule_id; + rule_rem.vsi_handle = fltr->dest_id; + err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); + if (err) { + if (err == ICE_ERR_DOES_NOT_EXIST) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); + return -ENOENT; + } + NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter"); + return -EIO; + } + + /* update advanced switch filter count for destination + * VSI if filter destination was VSI + */ + if (fltr->dest_vsi) { + if (fltr->dest_vsi->type == ICE_VSI_CHNL) { + fltr->dest_vsi->num_chnl_fltr--; + + /* keeps track of channel filters for PF VSI */ + if (vsi->type == ICE_VSI_PF && + (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_ENC_DST_MAC))) + pf->num_dmac_chnl_fltrs--; + } + } + return 0; +} + +/** + * ice_add_tc_fltr - adds a TC flower filter + * @netdev: Pointer to netdev + * @vsi: Pointer to VSI + * @f: Pointer to flower offload structure + * @__fltr: Pointer to struct ice_tc_flower_fltr + * + * This function parses TC-flower input fields, parses action, + * and adds a filter. 
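[Editor's note] ice_del_tc_fltr() above decrements dest_vsi->num_chnl_fltr and, for dest-MAC based filters on the PF VSI, pf->num_dmac_chnl_fltrs. The matching increments belong to the add path (ice_add_tc_flower_adv_fltr(), not shown in this hunk); the snippet below is only an assumption of how that bookkeeping pairs up, written to make the relationship explicit:

/* Hypothetical mirror of the decrement logic in ice_del_tc_fltr(); the real
 * accounting is performed in the add path elsewhere in this patch.
 */
static void sketch_account_chnl_fltr(struct ice_vsi *src_vsi, struct ice_pf *pf,
				     struct ice_tc_flower_fltr *fltr)
{
	if (fltr->dest_vsi && fltr->dest_vsi->type == ICE_VSI_CHNL) {
		fltr->dest_vsi->num_chnl_fltr++;

		if (src_vsi->type == ICE_VSI_PF &&
		    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
				    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
}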
+ */ +static int +ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *f, + struct ice_tc_flower_fltr **__fltr) +{ + struct ice_tc_flower_fltr *fltr; + int err; + + /* by default, set output to be INVALID */ + *__fltr = NULL; + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + fltr->cookie = f->cookie; + fltr->extack = f->common.extack; + fltr->src_vsi = vsi; + INIT_HLIST_NODE(&fltr->tc_flower_node); + + err = ice_parse_cls_flower(netdev, vsi, f, fltr); + if (err < 0) + goto err; + + err = ice_parse_tc_flower_actions(vsi, f, fltr); + if (err < 0) + goto err; + + err = ice_add_switch_fltr(vsi, fltr); + if (err < 0) + goto err; + + /* return the newly created filter */ + *__fltr = fltr; + + return 0; +err: + kfree(fltr); + return err; +} + +/** + * ice_find_tc_flower_fltr - Find the TC flower filter in the list + * @pf: Pointer to PF + * @cookie: filter specific cookie + */ +static struct ice_tc_flower_fltr * +ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie) +{ + struct ice_tc_flower_fltr *fltr; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) + if (cookie == fltr->cookie) + return fltr; + + return NULL; +} + +/** + * ice_add_cls_flower - add TC flower filters + * @netdev: Pointer to filter device + * @vsi: Pointer to VSI + * @cls_flower: Pointer to flower offload structure + */ +int +ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct net_device *vsi_netdev = vsi->netdev; + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + int err; + + if (ice_is_reset_in_progress(pf->state)) + return -EBUSY; + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + return -EINVAL; + + if (ice_is_port_repr_netdev(netdev)) + vsi_netdev = netdev; + + if (!(vsi_netdev->features & NETIF_F_HW_TC) && + !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) { + /* Based on TC indirect notifications from kernel, all ice + * devices get an instance of rule from higher level device. + * Avoid triggering explicit error in this case. 
+ */ + if (netdev == vsi_netdev) + NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again"); + return -EINVAL; + } + + /* avoid duplicate entries, if exists - return error */ + fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie); + if (fltr) { + NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring"); + return -EEXIST; + } + + /* prep and add TC-flower filter in HW */ + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); + if (err) + return err; + + /* add filter into an ordered list */ + hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list); + return 0; +} + +/** + * ice_del_cls_flower - delete TC flower filters + * @vsi: Pointer to VSI + * @cls_flower: Pointer to struct flow_cls_offload + */ +int +ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower) +{ + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + int err; + + /* find filter */ + fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie); + if (!fltr) { + if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) && + hlist_empty(&pf->tc_flower_fltr_list)) + return 0; + + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it"); + return -EINVAL; + } + + fltr->extack = cls_flower->common.extack; + /* delete filter from HW */ + err = ice_del_tc_fltr(vsi, fltr); + if (err) + return err; + + /* delete filter from an ordered list */ + hlist_del(&fltr->tc_flower_node); + + /* free the filter node */ + kfree(fltr); + + return 0; +} + +/** + * ice_replay_tc_fltrs - replay TC filters + * @pf: pointer to PF struct + */ +void ice_replay_tc_fltrs(struct ice_pf *pf) +{ + struct ice_tc_flower_fltr *fltr; + struct hlist_node *node; + + hlist_for_each_entry_safe(fltr, node, + &pf->tc_flower_fltr_list, + tc_flower_node) { + fltr->extack = NULL; + ice_add_switch_fltr(fltr->src_vsi, fltr); + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h new file mode 100644 index 00000000000000..31904947795948 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. 
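[Editor's note] ice_add_cls_flower() and ice_del_cls_flower() above are the entry points the driver calls from its TC block callback when user space installs or removes a flower rule; that callback is outside this hunk. A hedged illustration of the typical dispatch (the function name below is illustrative, not the driver's actual callback):

/* Illustrative flower command dispatcher; the real callback is registered
 * through the flow_block / ndo_setup_tc plumbing elsewhere in the driver.
 */
static int
sketch_setup_tc_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
			   struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}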
*/ + +#ifndef _ICE_TC_LIB_H_ +#define _ICE_TC_LIB_H_ + +#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0) +#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1) +#define ICE_TC_FLWR_FIELD_VLAN BIT(2) +#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3) +#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4) +#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5) +#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6) +#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7) +#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8) +#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9) +#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10) +#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11) +#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12) +#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13) +#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14) +#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) +#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) +#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) + +#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF + +struct ice_indr_block_priv { + struct net_device *netdev; + struct ice_netdev_priv *np; + struct list_head list; +}; + +struct ice_tc_flower_action { + u32 tc_class; + enum ice_sw_fwd_act_type fltr_act; +}; + +struct ice_tc_vlan_hdr { + __be16 vlan_id; /* Only last 12 bits valid */ + u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */ +}; + +struct ice_tc_l2_hdr { + u8 dst_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + __be16 n_proto; /* Ethernet Protocol */ +}; + +struct ice_tc_l3_hdr { + u8 ip_proto; /* IPPROTO value */ + union { + struct { + struct in_addr dst_ip; + struct in_addr src_ip; + } v4; + struct { + struct in6_addr dst_ip6; + struct in6_addr src_ip6; + } v6; + } ip; +#define dst_ipv6 ip.v6.dst_ip6.s6_addr32 +#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr +#define src_ipv6 ip.v6.src_ip6.s6_addr32 +#define src_ipv6_addr ip.v6.src_ip6.s6_addr +#define dst_ipv4 ip.v4.dst_ip.s_addr +#define src_ipv4 ip.v4.src_ip.s_addr + + u8 tos; + u8 ttl; +}; + +struct ice_tc_l4_hdr { + __be16 dst_port; + __be16 src_port; +}; + +struct ice_tc_flower_lyr_2_4_hdrs { + /* L2 layer fields with their mask */ + struct ice_tc_l2_hdr l2_key; + struct ice_tc_l2_hdr l2_mask; + struct ice_tc_vlan_hdr vlan_hdr; + /* L3 (IPv4[6]) layer fields with their mask */ + struct ice_tc_l3_hdr l3_key; + struct ice_tc_l3_hdr l3_mask; + + /* L4 layer fields with their mask */ + struct ice_tc_l4_hdr l4_key; + struct ice_tc_l4_hdr l4_mask; +}; + +enum ice_eswitch_fltr_direction { + ICE_ESWITCH_FLTR_INGRESS, + ICE_ESWITCH_FLTR_EGRESS, +}; + +struct ice_tc_flower_fltr { + struct hlist_node tc_flower_node; + + /* cookie becomes filter_rule_id if rule is added successfully */ + unsigned long cookie; + + /* add_adv_rule returns information like recipe ID, rule_id. 
Store + * those values since they are needed to remove advanced rule + */ + u16 rid; + u16 rule_id; + /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon + * destination type + */ + u16 dest_id; + /* if dest_id is vsi_idx, then need to store destination VSI ptr */ + struct ice_vsi *dest_vsi; + /* direction of fltr for eswitch use case */ + enum ice_eswitch_fltr_direction direction; + + /* Parsed TC flower configuration params */ + struct ice_tc_flower_lyr_2_4_hdrs outer_headers; + struct ice_tc_flower_lyr_2_4_hdrs inner_headers; + struct ice_vsi *src_vsi; + __be32 tenant_id; + u32 flags; + u8 tunnel_type; + struct ice_tc_flower_action action; + + /* cache ptr which is used wherever needed to communicate netlink + * messages + */ + struct netlink_ext_ack *extack; +}; + +/** + * ice_is_chnl_fltr - is this a valid channel filter + * @f: Pointer to tc-flower filter + * + * Criteria to determine of given filter is valid channel filter + * or not is based on its "destination". If destination is hw_tc (aka tc_class) + * and it is non-zero, then it is valid channel (aka ADQ) filter + */ +static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f) +{ + return !!f->action.tc_class; +} + +/** + * ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count + * @pf: Pointer to PF + */ +static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) +{ + return pf->num_dmac_chnl_fltrs; +} + +int +ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower); +int +ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); +void ice_replay_tc_fltrs(struct ice_pf *pf); +bool ice_is_tunnel_supported(struct net_device *dev); + +#endif /* _ICE_TC_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h index 9bc0b8fdfc77be..cf685247c07ad4 100644 --- a/drivers/net/ethernet/intel/ice/ice_trace.h +++ b/drivers/net/ethernet/intel/ice/ice_trace.h @@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template, TP_ARGS(q_vector, dim), TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector) __field(struct dim *, dim) - __string(devname, q_vector->rx.ring->netdev->name)), + __string(devname, q_vector->rx.rx_ring->netdev->name)), TP_fast_assign(__entry->q_vector = q_vector; __entry->dim = dim; - __assign_str(devname, q_vector->rx.ring->netdev->name);), + __assign_str(devname, q_vector->rx.rx_ring->netdev->name);), TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", __get_str(devname), - __entry->q_vector->rx.ring->q_index, + __entry->q_vector->rx.rx_ring->q_index, __entry->dim->state, __entry->dim->profile_ix, __entry->dim->tune_state, @@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template, TP_ARGS(q_vector, dim), TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector) __field(struct dim *, dim) - __string(devname, q_vector->tx.ring->netdev->name)), + __string(devname, q_vector->tx.tx_ring->netdev->name)), TP_fast_assign(__entry->q_vector = q_vector; __entry->dim = dim; - __assign_str(devname, q_vector->tx.ring->netdev->name);), + __assign_str(devname, q_vector->tx.tx_ring->netdev->name);), TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", __get_str(devname), - __entry->q_vector->tx.ring->q_index, + __entry->q_vector->tx.tx_ring->q_index, __entry->dim->state, __entry->dim->profile_ix, __entry->dim->tune_state, @@ -115,7 +115,7 @@ 
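[Editor's note] The ICE_TC_FLWR_FIELD_* bits above are OR-ed into ice_tc_flower_fltr::flags as the individual flower keys are parsed, and later drive both validation and rule building. As a small illustration (an example of mine, not code from the patch), a filter matching destination MAC, destination IPv4 address and TCP destination port would carry:

/* Example flag combination for a dst MAC + dst IPv4 + dst L4 port filter */
u32 example_flags = ICE_TC_FLWR_FIELD_DST_MAC |
		    ICE_TC_FLWR_FIELD_DEST_IPV4 |
		    ICE_TC_FLWR_FIELD_DEST_L4_PORT;

A tunnel match would additionally set ICE_TC_FLWR_FIELD_TENANT_ID, which ice_handle_tclass_action() above rejects in combination with inner MAC matches.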
DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work, /* Events related to a vsi & ring */ DECLARE_EVENT_CLASS(ice_tx_template, - TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc, + TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc, struct ice_tx_buf *buf), TP_ARGS(ring, desc, buf), @@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template, #define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ DEFINE_EVENT(ice_tx_template, name, \ - TP_PROTO(struct ice_ring *ring, \ + TP_PROTO(struct ice_tx_ring *ring, \ struct ice_tx_desc *desc, \ struct ice_tx_buf *buf), \ TP_ARGS(ring, desc, buf)) @@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap); DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop); DECLARE_EVENT_CLASS(ice_rx_template, - TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc), + TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc), TP_ARGS(ring, desc), @@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template, __entry->ring, __entry->desc) ); DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq, - TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc), + TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc), TP_ARGS(ring, desc) ); DECLARE_EVENT_CLASS(ice_rx_indicate_template, - TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc, + TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template, ); DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate, - TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc, + TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb) ); DECLARE_EVENT_CLASS(ice_xmit_template, - TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), + TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), TP_ARGS(ring, skb), @@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template, #define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ DEFINE_EVENT(ice_xmit_template, name, \ - TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \ + TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \ TP_ARGS(ring, skb)) DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6ee8e0032d52cb..bc3ba19dc88f8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -13,6 +14,7 @@ #include "ice_trace.h" #include "ice_dcb_lib.h" #include "ice_xsk.h" +#include "ice_eswitch.h" #define ICE_RX_HDR_SIZE 256 @@ -32,7 +34,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, struct ice_tx_buf *tx_buf, *first; struct ice_fltr_desc *f_desc; struct ice_tx_desc *tx_desc; - struct ice_ring *tx_ring; + struct ice_tx_ring *tx_ring; struct device *dev; dma_addr_t dma; u32 td_cmd; @@ -106,7 +108,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, * @tx_buf: the buffer to free */ static void -ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) +ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) { if (tx_buf->skb) { if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) @@ -133,7 +135,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) /* tx_buf must be 
completely set up in the transmit path */ } -static struct netdev_queue *txring_txq(const struct ice_ring *ring) +static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->q_index); } @@ -142,8 +144,9 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring) * ice_clean_tx_ring - Free any empty Tx buffers * @tx_ring: ring to be cleaned */ -void ice_clean_tx_ring(struct ice_ring *tx_ring) +void ice_clean_tx_ring(struct ice_tx_ring *tx_ring) { + u32 size; u16 i; if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { @@ -162,8 +165,10 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) tx_skip_free: memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); + size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), + PAGE_SIZE); /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); + memset(tx_ring->desc, 0, size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -181,14 +186,18 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) * * Free all transmit software resources */ -void ice_free_tx_ring(struct ice_ring *tx_ring) +void ice_free_tx_ring(struct ice_tx_ring *tx_ring) { + u32 size; + ice_clean_tx_ring(tx_ring); devm_kfree(tx_ring->dev, tx_ring->tx_buf); tx_ring->tx_buf = NULL; if (tx_ring->desc) { - dmam_free_coherent(tx_ring->dev, tx_ring->size, + size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), + PAGE_SIZE); + dmam_free_coherent(tx_ring->dev, size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } @@ -201,7 +210,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) +static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; unsigned int budget = ICE_DFLT_IRQ_WORK; @@ -238,11 +247,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) total_bytes += tx_buf->bytecount; total_pkts += tx_buf->gso_segs; - if (ice_ring_is_xdp(tx_ring)) - page_frag_free(tx_buf->raw_buf); - else - /* free the skb */ - napi_consume_skb(tx_buf->skb, napi_budget); + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -298,9 +304,6 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); - if (ice_ring_is_xdp(tx_ring)) - return !!budget; - netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); @@ -329,9 +332,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) * * Return 0 on success, negative on error */ -int ice_setup_tx_ring(struct ice_ring *tx_ring) +int ice_setup_tx_ring(struct ice_tx_ring *tx_ring) { struct device *dev = tx_ring->dev; + u32 size; if (!dev) return -ENOMEM; @@ -339,19 +343,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_buf); tx_ring->tx_buf = - devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, + devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, GFP_KERNEL); if (!tx_ring->tx_buf) return -ENOMEM; /* round up to nearest page */ - tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), - PAGE_SIZE); - tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), + 
PAGE_SIZE); + tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", - tx_ring->size); + size); goto err; } @@ -370,9 +374,10 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) * ice_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned */ -void ice_clean_rx_ring(struct ice_ring *rx_ring) +void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) { struct device *dev = rx_ring->dev; + u32 size; u16 i; /* ring already cleared, nothing to do */ @@ -417,7 +422,9 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); + size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); + memset(rx_ring->desc, 0, size); rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; @@ -430,8 +437,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) * * Free all receive software resources */ -void ice_free_rx_ring(struct ice_ring *rx_ring) +void ice_free_rx_ring(struct ice_rx_ring *rx_ring) { + u32 size; + ice_clean_rx_ring(rx_ring); if (rx_ring->vsi->type == ICE_VSI_PF) if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) @@ -441,7 +450,9 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) rx_ring->rx_buf = NULL; if (rx_ring->desc) { - dmam_free_coherent(rx_ring->dev, rx_ring->size, + size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); + dmam_free_coherent(rx_ring->dev, size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } @@ -453,9 +464,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) * * Return 0 on success, negative on error */ -int ice_setup_rx_ring(struct ice_ring *rx_ring) +int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) { struct device *dev = rx_ring->dev; + u32 size; if (!dev) return -ENOMEM; @@ -463,19 +475,19 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_buf); rx_ring->rx_buf = - devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, + devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count, GFP_KERNEL); if (!rx_ring->rx_buf) return -ENOMEM; /* round up to nearest page */ - rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), - PAGE_SIZE); - rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); + rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", - rx_ring->size); + size); goto err; } @@ -499,7 +511,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) } static unsigned int -ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size) +ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size) { unsigned int truesize; @@ -519,15 +531,15 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run + * @xdp_ring: ring to be used for XDP_TX action * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static int -ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog) +ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, 
struct ice_tx_ring *xdp_ring) { - struct ice_ring *xdp_ring; - int err, result; + int err; u32 act; act = bpf_prog_run_xdp(xdp_prog, xdp); @@ -535,11 +547,14 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, case XDP_PASS: return ICE_XDP_PASS; case XDP_TX: - xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; - result = ice_xmit_xdp_buff(xdp, xdp_ring); - if (result == ICE_XDP_CONSUMED) + if (static_branch_unlikely(&ice_xdp_locking_key)) + spin_lock(&xdp_ring->tx_lock); + err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring); + if (static_branch_unlikely(&ice_xdp_locking_key)) + spin_unlock(&xdp_ring->tx_lock); + if (err == ICE_XDP_CONSUMED) goto out_failure; - return result; + return err; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); if (err) @@ -576,7 +591,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct ice_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct ice_vsi *vsi = np->vsi; - struct ice_ring *xdp_ring; + struct ice_tx_ring *xdp_ring; int nxmit = 0, i; if (test_bit(ICE_VSI_DOWN, vsi->state)) @@ -588,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; - xdp_ring = vsi->xdp_rings[queue_index]; + if (static_branch_unlikely(&ice_xdp_locking_key)) { + queue_index %= vsi->num_xdp_txq; + xdp_ring = vsi->xdp_rings[queue_index]; + spin_lock(&xdp_ring->tx_lock); + } else { + xdp_ring = vsi->xdp_rings[queue_index]; + } + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; @@ -602,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (unlikely(flags & XDP_XMIT_FLUSH)) ice_xdp_ring_update_tail(xdp_ring); + if (static_branch_unlikely(&ice_xdp_locking_key)) + spin_unlock(&xdp_ring->tx_lock); + return nxmit; } @@ -614,7 +639,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, * reused. */ static bool -ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) +ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) { struct page *page = bi->page; dma_addr_t dma; @@ -665,7 +690,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) * buffers. Then bump tail at most one time. Grouping like this lets us avoid * multiple tail writes per call. */ -bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) +bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; @@ -794,7 +819,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) * The function will then update the page offset. */ static void -ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, +ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE >= 8192) @@ -820,7 +845,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * Synchronizes page for reuse by the adapter */ static void -ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) +ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) { u16 nta = rx_ring->next_to_alloc; struct ice_rx_buf *new_buf; @@ -851,7 +876,7 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) * for use by the CPU. 
*/ static struct ice_rx_buf * -ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size, +ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, int *rx_buf_pgcnt) { struct ice_rx_buf *rx_buf; @@ -888,7 +913,7 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size, * to set up the skb correctly and avoid any memcpy overhead. */ static struct sk_buff * -ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, +ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, struct xdp_buff *xdp) { u8 metasize = xdp->data - xdp->data_meta; @@ -940,7 +965,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * skb correctly. */ static struct sk_buff * -ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, +ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, struct xdp_buff *xdp) { unsigned int size = xdp->data_end - xdp->data; @@ -1000,7 +1025,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * the associated resources. */ static void -ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, +ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) { u16 ntc = rx_ring->next_to_clean + 1; @@ -1036,7 +1061,7 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * otherwise return true indicating that this is in fact a non-EOP buffer. */ static bool -ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) +ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) @@ -1060,11 +1085,12 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) * * Returns amount of work completed */ -int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) +int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); unsigned int offset = rx_ring->rx_offset; + struct ice_tx_ring *xdp_ring = NULL; unsigned int xdp_res, xdp_xmit = 0; struct sk_buff *skb = rx_ring->skb; struct bpf_prog *xdp_prog = NULL; @@ -1077,6 +1103,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) #endif xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + if (xdp_prog) + xdp_ring = rx_ring->xdp_ring; + /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; @@ -1140,11 +1170,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); #endif - xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (!xdp_prog) goto construct_skb; - xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); + xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); if (!xdp_res) goto construct_skb; if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { @@ -1221,7 +1250,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); if (xdp_prog) - ice_finalize_xdp_rx(rx_ring, xdp_xmit); + ice_finalize_xdp_rx(xdp_ring, xdp_xmit); rx_ring->skb = skb; ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); @@ -1230,6 +1259,41 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) return failure ? 
budget : (int)total_rx_pkts; } +static void __ice_update_sample(struct ice_q_vector *q_vector, + struct ice_ring_container *rc, + struct dim_sample *sample, + bool is_tx) +{ + u64 packets = 0, bytes = 0; + + if (is_tx) { + struct ice_tx_ring *tx_ring; + + ice_for_each_tx_ring(tx_ring, *rc) { + packets += tx_ring->stats.pkts; + bytes += tx_ring->stats.bytes; + } + } else { + struct ice_rx_ring *rx_ring; + + ice_for_each_rx_ring(rx_ring, *rc) { + packets += rx_ring->stats.pkts; + bytes += rx_ring->stats.bytes; + } + } + + dim_update_sample(q_vector->total_events, packets, bytes, sample); + sample->comp_ctr = 0; + + /* if dim settings get stale, like when not updated for 1 + * second or longer, force it to start again. This addresses the + * frequent case of an idle queue being switched to by the + * scheduler. The 1,000 here means 1,000 milliseconds. + */ + if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) + rc->dim.state = DIM_START_MEASURE; +} + /** * ice_net_dim - Update net DIM algorithm * @q_vector: the vector associated with the interrupt @@ -1245,34 +1309,16 @@ static void ice_net_dim(struct ice_q_vector *q_vector) struct ice_ring_container *rx = &q_vector->rx; if (ITR_IS_DYNAMIC(tx)) { - struct dim_sample dim_sample = {}; - u64 packets = 0, bytes = 0; - struct ice_ring *ring; - - ice_for_each_ring(ring, q_vector->tx) { - packets += ring->stats.pkts; - bytes += ring->stats.bytes; - } - - dim_update_sample(q_vector->total_events, packets, bytes, - &dim_sample); + struct dim_sample dim_sample; + __ice_update_sample(q_vector, tx, &dim_sample, true); net_dim(&tx->dim, dim_sample); } if (ITR_IS_DYNAMIC(rx)) { - struct dim_sample dim_sample = {}; - u64 packets = 0, bytes = 0; - struct ice_ring *ring; - - ice_for_each_ring(ring, q_vector->rx) { - packets += ring->stats.pkts; - bytes += ring->stats.bytes; - } - - dim_update_sample(q_vector->total_events, packets, bytes, - &dim_sample); + struct dim_sample dim_sample; + __ice_update_sample(q_vector, rx, &dim_sample, false); net_dim(&rx->dim, dim_sample); } } @@ -1299,15 +1345,14 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) } /** - * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt + * ice_enable_interrupt - re-enable MSI-X interrupt * @q_vector: the vector associated with the interrupt to enable * - * Update the net_dim() algorithm and re-enable the interrupt associated with - * this vector. - * - * If the VSI is down, the interrupt will not be re-enabled. + * If the VSI is down, the interrupt will not be re-enabled. Also, + * when enabling the interrupt always reset the wb_on_itr to false + * and trigger a software interrupt to clean out internal state. */ -static void ice_update_ena_itr(struct ice_q_vector *q_vector) +static void ice_enable_interrupt(struct ice_q_vector *q_vector) { struct ice_vsi *vsi = q_vector->vsi; bool wb_en = q_vector->wb_on_itr; @@ -1316,25 +1361,25 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector) if (test_bit(ICE_DOWN, vsi->state)) return; - /* When exiting WB_ON_ITR, let ITR resume its normal - * interrupts-enabled path. + /* trigger an ITR delayed software interrupt when exiting busy poll, to + * make sure to catch any pending cleanups that might have been missed + * due to interrupt state transition. If busy poll or poll isn't + * enabled, then don't update ITR, and just enable the interrupt. 
*/ - if (wb_en) + if (!wb_en) { + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + } else { q_vector->wb_on_itr = false; - /* This will do nothing if dynamic updates are not enabled. */ - ice_net_dim(q_vector); - - /* net_dim() updates ITR out-of-band using a work item */ - itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); - /* trigger an immediate software interrupt when exiting - * busy poll, to make sure to catch any pending cleanups - * that might have been missed due to interrupt state - * transition. - */ - if (wb_en) { + /* do two things here with a single write. Set up the third ITR + * index to be used for software interrupt moderation, and then + * trigger a software interrupt with a rate limit of 20K on + * software interrupts, this will help avoid high interrupt + * loads due to frequently polling and exiting polling. + */ + itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | - GLINT_DYN_CTL_SW_ITR_INDX_M | + ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; } wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); @@ -1387,18 +1432,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget) { struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; bool clean_complete = true; - struct ice_ring *ring; int budget_per_ring; int work_done = 0; /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - ice_for_each_ring(ring, q_vector->tx) { - bool wd = ring->xsk_pool ? - ice_clean_tx_irq_zc(ring, budget) : - ice_clean_tx_irq(ring, budget); + ice_for_each_tx_ring(tx_ring, q_vector->tx) { + bool wd; + + if (tx_ring->xsk_pool) + wd = ice_clean_tx_irq_zc(tx_ring, budget); + else if (ice_ring_is_xdp(tx_ring)) + wd = true; + else + wd = ice_clean_tx_irq(tx_ring, budget); if (!wd) clean_complete = false; @@ -1419,16 +1470,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget) /* Max of 1 Rx ring in this q_vector so give it the budget */ budget_per_ring = budget; - ice_for_each_ring(ring, q_vector->rx) { + ice_for_each_rx_ring(rx_ring, q_vector->rx) { int cleaned; /* A dedicated path for zero-copy allows making a single * comparison in the irq context instead of many inside the * ice_clean_rx_irq function and makes the codebase cleaner. */ - cleaned = ring->xsk_pool ? - ice_clean_rx_irq_zc(ring, budget_per_ring) : - ice_clean_rx_irq(ring, budget_per_ring); + cleaned = rx_ring->xsk_pool ? 
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : + ice_clean_rx_irq(rx_ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ if (cleaned >= budget_per_ring) @@ -1447,10 +1498,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget) /* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling */ - if (likely(napi_complete_done(napi, work_done))) - ice_update_ena_itr(q_vector); - else + if (likely(napi_complete_done(napi, work_done))) { + ice_net_dim(q_vector); + ice_enable_interrupt(q_vector); + } else { ice_set_wb_on_itr(q_vector); + } return min_t(int, work_done, budget - 1); } @@ -1462,7 +1515,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * * Returns -EBUSY if a stop is needed, else 0 */ -static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) +static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); /* Memory barrier before checking head and tail */ @@ -1485,7 +1538,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) * * Returns 0 if stop is not needed */ -static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) +static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) { if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) return 0; @@ -1504,7 +1557,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) * it and the length into the transmit descriptor. */ static void -ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, +ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, struct ice_tx_offload_params *off) { u64 td_offset, td_tag, td_cmd; @@ -1840,7 +1893,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 
*/ static void -ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) +ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) { struct sk_buff *skb = first->skb; @@ -2146,7 +2199,7 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) * @off: Tx offload parameters */ static void -ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb, +ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, struct ice_tx_buf *first, struct ice_tx_offload_params *off) { s8 idx; @@ -2181,7 +2234,7 @@ ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb, * Returns NETDEV_TX_OK if sent, else an error code */ static netdev_tx_t -ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) +ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) { struct ice_tx_offload_params offload = { 0 }; struct ice_vsi *vsi = tx_ring->vsi; @@ -2245,6 +2298,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); + if (ice_is_switchdev_running(vsi->back)) + ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; @@ -2282,7 +2337,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - struct ice_ring *tx_ring; + struct ice_tx_ring *tx_ring; tx_ring = vsi->tx_rings[skb->queue_mapping]; @@ -2295,11 +2350,44 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) return ice_xmit_frame_ring(skb, tx_ring); } +/** + * ice_get_dscp_up - return the UP/TC value for a SKB + * @dcbcfg: DCB config that contains DSCP to UP/TC mapping + * @skb: SKB to query for info to determine UP/TC + * + * This function is to only be called when the PF is in L3 DSCP PFC mode + */ +static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return dcbcfg->dscp_map[dscp]; +} + +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *dcbcfg; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) + skb->priority = ice_get_dscp_up(dcbcfg, skb); + + return netdev_pick_tx(netdev, skb, sb_dev); +} + /** * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue * @tx_ring: tx_ring to clean */ -void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) +void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) { struct ice_vsi *vsi = tx_ring->vsi; s16 i = tx_ring->next_to_clean; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 1e46e80f3d6f89..c56dd174990316 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -13,6 +13,7 @@ #define ICE_MAX_CHAINED_RX_BUFS 5 #define ICE_MAX_BUF_TXD 8 #define ICE_MIN_TX_LEN 17 +#define ICE_TX_THRESH 32 /* The size limit for a transmit buffer in a descriptor is (16K - 1). 
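[Editor's note] ice_get_dscp_up() above recovers the DSCP codepoint by shifting the IPv4/IPv6 DS field right by two bits (DSCP occupies the upper six bits of the former TOS byte) and uses it to index the DCB dscp_map. A short worked example with a hypothetical mapping:

/* DS field 0xb8 -> DSCP 46 (Expedited Forwarding). With a hypothetical
 * dcbcfg->dscp_map[46] == 5, ice_select_queue() would set skb->priority
 * to 5 before falling back to netdev_pick_tx().
 */
u8 dsfield = 0xb8;
u8 dscp = dsfield >> 2;	/* 46 */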
* In order to align with the read requests we will align the value to @@ -154,7 +155,7 @@ struct ice_tx_buf { struct ice_tx_offload_params { u64 cd_qw1; - struct ice_ring *tx_ring; + struct ice_tx_ring *tx_ring; u32 td_cmd; u32 td_offset; u32 td_l2tag1; @@ -164,17 +165,10 @@ struct ice_tx_offload_params { }; struct ice_rx_buf { - union { - struct { - dma_addr_t dma; - struct page *page; - unsigned int page_offset; - u16 pagecnt_bias; - }; - struct { - struct xdp_buff *xdp; - }; - }; + dma_addr_t dma; + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; }; struct ice_q_stats { @@ -258,9 +252,9 @@ enum ice_dynamic_itr { #define ICE_TX_LEGACY 1 /* descriptor ring, associated with a VSI */ -struct ice_ring { +struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ - struct ice_ring *next; /* pointer to next ring in q_vector */ + struct ice_rx_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ @@ -268,14 +262,13 @@ struct ice_ring { struct ice_q_vector *q_vector; /* Backreference to associated vector */ u8 __iomem *tail; union { - struct ice_tx_buf *tx_buf; struct ice_rx_buf *rx_buf; + struct xdp_buff **xdp_buf; }; /* CL2 - 2nd cacheline starts here */ + struct xdp_rxq_info xdp_rxq; + /* CL3 - 3rd cacheline starts here */ u16 q_index; /* Queue number of ring */ - u16 q_handle; /* Queue handle per TC */ - - u8 ring_active:1; /* is ring online or not */ u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -284,63 +277,104 @@ struct ice_ring { u16 next_to_use; u16 next_to_clean; u16 next_to_alloc; + u16 rx_offset; + u16 rx_buf_len; /* stats structs */ + struct ice_rxq_stats rx_stats; struct ice_q_stats stats; struct u64_stats_sync syncp; - union { - struct ice_txq_stats tx_stats; - struct ice_rxq_stats rx_stats; - }; struct rcu_head rcu; /* to avoid race on free */ - DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ + /* CL4 - 3rd cacheline starts here */ + struct ice_channel *ch; struct bpf_prog *xdp_prog; + struct ice_tx_ring *xdp_ring; struct xsk_buff_pool *xsk_pool; - u16 rx_offset; - /* CL3 - 3rd cacheline starts here */ - struct xdp_rxq_info xdp_rxq; struct sk_buff *skb; - /* CLX - the below items are only accessed infrequently and should be - * in their own cache line if possible - */ -#define ICE_TX_FLAGS_RING_XDP BIT(0) + dma_addr_t dma; /* physical address of ring */ #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) + u64 cached_phctime; + u8 dcb_tc; /* Traffic class of ring */ + u8 ptp_rx; u8 flags; +} ____cacheline_internodealigned_in_smp; + +struct ice_tx_ring { + /* CL1 - 1st cacheline starts here */ + struct ice_tx_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + u8 __iomem *tail; + struct ice_tx_buf *tx_buf; + struct ice_q_vector *q_vector; /* Backreference to associated vector */ + struct net_device *netdev; /* netdev ring maps to */ + struct ice_vsi *vsi; /* Backreference to associated VSI */ + /* CL2 - 2nd cacheline starts here */ dma_addr_t dma; /* physical address of ring */ - unsigned int size; /* length of descriptor ring in bytes */ + struct xsk_buff_pool *xsk_pool; + u16 next_to_use; + u16 next_to_clean; + u16 next_rs; + u16 next_dd; + u16 q_handle; /* Queue handle per TC */ + u16 reg_idx; /* HW register index of the ring */ + u16 count; /* Number of descriptors */ + u16 
q_index; /* Queue number of ring */ + /* stats structs */ + struct ice_q_stats stats; + struct u64_stats_sync syncp; + struct ice_txq_stats tx_stats; + + /* CL3 - 3rd cacheline starts here */ + struct rcu_head rcu; /* to avoid race on free */ + DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ + struct ice_channel *ch; + struct ice_ptp_tx *tx_tstamps; + spinlock_t tx_lock; u32 txq_teid; /* Added Tx queue TEID */ - u16 rx_buf_len; +#define ICE_TX_FLAGS_RING_XDP BIT(0) + u8 flags; u8 dcb_tc; /* Traffic class of ring */ - struct ice_ptp_tx *tx_tstamps; - u64 cached_phctime; - u8 ptp_rx:1; - u8 ptp_tx:1; + u8 ptp_tx; } ____cacheline_internodealigned_in_smp; -static inline bool ice_ring_uses_build_skb(struct ice_ring *ring) +static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) { return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); } -static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring) +static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring) { ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; } -static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring) +static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring) { ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; } -static inline bool ice_ring_is_xdp(struct ice_ring *ring) +static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring) +{ + return !!ring->ch; +} + +static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring) { return !!(ring->flags & ICE_TX_FLAGS_RING_XDP); } +enum ice_container_type { + ICE_RX_CONTAINER, + ICE_TX_CONTAINER, +}; + struct ice_ring_container { /* head of linked-list of rings */ - struct ice_ring *ring; + union { + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + }; struct dim dim; /* data for net_dim algorithm */ u16 itr_idx; /* index in the interrupt vector */ /* this matches the maximum number of ITR bits, but in usec @@ -349,6 +383,7 @@ struct ice_ring_container { u16 itr_setting:13; u16 itr_reserved:2; u16 itr_mode:1; + enum ice_container_type type; }; struct ice_coalesce_stored { @@ -360,10 +395,13 @@ struct ice_coalesce_stored { }; /* iterator for handling rings in ring container */ -#define ice_for_each_ring(pos, head) \ - for (pos = (head).ring; pos; pos = pos->next) +#define ice_for_each_rx_ring(pos, head) \ + for (pos = (head).rx_ring; pos; pos = pos->next) + +#define ice_for_each_tx_ring(pos, head) \ + for (pos = (head).tx_ring; pos; pos = pos->next) -static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) +static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) { #if (PAGE_SIZE < 8192) if (ring->rx_buf_len > (PAGE_SIZE / 2)) @@ -376,18 +414,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) union ice_32b_rx_flex_desc; -bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); +bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); -void ice_clean_tx_ring(struct ice_ring *tx_ring); -void ice_clean_rx_ring(struct ice_ring *rx_ring); -int ice_setup_tx_ring(struct ice_ring *tx_ring); -int ice_setup_rx_ring(struct ice_ring *rx_ring); -void ice_free_tx_ring(struct ice_ring *tx_ring); -void ice_free_rx_ring(struct ice_ring *rx_ring); +u16 +ice_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +void ice_clean_tx_ring(struct ice_tx_ring *tx_ring); +void ice_clean_rx_ring(struct ice_rx_ring *rx_ring); +int ice_setup_tx_ring(struct ice_tx_ring 
*tx_ring); +int ice_setup_rx_ring(struct ice_rx_ring *rx_ring); +void ice_free_tx_ring(struct ice_tx_ring *tx_ring); +void ice_free_rx_ring(struct ice_rx_ring *rx_ring); int ice_napi_poll(struct napi_struct *napi, int budget); int ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, u8 *raw_packet); -int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget); -void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring); +int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget); +void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring); #endif /* _ICE_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 171397dcf00acd..1dd7e84f41f877 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,13 +2,15 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_txrx_lib.h" +#include "ice_eswitch.h" +#include "ice_lib.h" /** * ice_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index */ -void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val) +void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val) { u16 prev_ntu = rx_ring->next_to_use & ~0x7; @@ -66,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) * @rx_ptype: the ptype value from the descriptor */ static void -ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, +ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 rx_ptype) { struct ice_32b_rx_flex_desc_nic *nic_mdid; @@ -93,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * skb->protocol must be set before this function is called */ static void -ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, +ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u16 ptype) { struct ice_rx_ptype_decoded decoded; @@ -178,14 +180,15 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, * other fields within the skb. 
*/ void -ice_process_skb_fields(struct ice_ring *rx_ring, +ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 ptype) { ice_rx_hash(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev + (rx_ring, rx_desc)); ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -203,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring, * gro receive functions (with/without VLAN tag) */ void -ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) @@ -211,19 +214,68 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) napi_gro_receive(&rx_ring->q_vector->napi, skb); } +/** + * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring + * @xdp_ring: XDP ring to clean + */ +static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring) +{ + unsigned int total_bytes = 0, total_pkts = 0; + u16 ntc = xdp_ring->next_to_clean; + struct ice_tx_desc *next_dd_desc; + u16 next_dd = xdp_ring->next_dd; + struct ice_tx_buf *tx_buf; + int i; + + next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd); + if (!(next_dd_desc->cmd_type_offset_bsz & + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) + return; + + for (i = 0; i < ICE_TX_THRESH; i++) { + tx_buf = &xdp_ring->tx_buf[ntc]; + + total_bytes += tx_buf->bytecount; + /* normally tx_buf->gso_segs was taken but at this point + * it's always 1 for us + */ + total_pkts++; + + page_frag_free(tx_buf->raw_buf); + dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + tx_buf->raw_buf = NULL; + + ntc++; + if (ntc >= xdp_ring->count) + ntc = 0; + } + + next_dd_desc->cmd_type_offset_bsz = 0; + xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH; + if (xdp_ring->next_dd > xdp_ring->count) + xdp_ring->next_dd = ICE_TX_THRESH - 1; + xdp_ring->next_to_clean = ntc; + ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes); +} + /** * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission * @data: packet data pointer * @size: packet data size * @xdp_ring: XDP ring for transmission */ -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) +int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring) { u16 i = xdp_ring->next_to_use; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; dma_addr_t dma; + if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH) + ice_clean_xdp_irq(xdp_ring); + if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { xdp_ring->tx_stats.tx_busy++; return ICE_XDP_CONSUMED; @@ -244,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) tx_desc = ICE_TX_DESC(xdp_ring, i); tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, + tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, size, 0); - /* Make certain all of the status bits have been updated - * before next_to_watch is written. 
- */ - smp_wmb(); - i++; - if (i == xdp_ring->count) + if (i == xdp_ring->count) { i = 0; - - tx_buf->next_to_watch = tx_desc; + tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); + tx_desc->cmd_type_offset_bsz |= + cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); + xdp_ring->next_rs = ICE_TX_THRESH - 1; + } xdp_ring->next_to_use = i; + if (i > xdp_ring->next_rs) { + tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); + tx_desc->cmd_type_offset_bsz |= + cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); + xdp_ring->next_rs += ICE_TX_THRESH; + } + return ICE_XDP_TX; } @@ -269,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) * * Returns negative on failure, 0 on success. */ -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) +int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); @@ -281,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) /** * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map - * @rx_ring: Rx ring + * @xdp_ring: XDP ring * @xdp_res: Result of the receive batch * * This function bumps XDP Tx tail and/or flush redirect map, and * should be called when a batch of packets has been processed in the * napi loop. */ -void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res) +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res) { if (xdp_res & ICE_XDP_REDIR) xdp_do_flush_map(); if (xdp_res & ICE_XDP_TX) { - struct ice_ring *xdp_ring = - rx_ring->vsi->xdp_rings[rx_ring->q_index]; - + if (static_branch_unlikely(&ice_xdp_locking_key)) + spin_lock(&xdp_ring->tx_lock); ice_xdp_ring_update_tail(xdp_ring); + if (static_branch_unlikely(&ice_xdp_locking_key)) + spin_unlock(&xdp_ring->tx_lock); } } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 05ac3075290260..11b6c160198662 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) * * This function updates the XDP Tx ring tail register. */ -static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) +static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
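[Editor's note] The XDP Tx path above replaces per-packet next_to_watch tracking with a batched scheme: ice_xmit_xdp_ring() sets the RS (report status) bit only on every ICE_TX_THRESH-th descriptor, tracked via next_rs, and ice_clean_xdp_irq() waits for the DD (descriptor done) bit at next_dd before reclaiming ICE_TX_THRESH buffers in one pass. A simplified illustration of how the two cursors advance (ICE_TX_THRESH is 32 in this patch; the 512-entry ring size is a hypothetical example):

/* Simplified cursor progression for the batched XDP Tx completion scheme */
u16 count = 512;			/* hypothetical ring size */
u16 next_rs = ICE_TX_THRESH - 1;	/* descriptors 31, 63, 95, ... get RS */
u16 next_dd = ICE_TX_THRESH - 1;	/* DD is polled at the same stride */

next_rs += ICE_TX_THRESH;		/* advance after arming a batch ... */
if (next_rs > count)			/* ... wrapping like the code above */
	next_rs = ICE_TX_THRESH - 1;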
@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); } -void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res); -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring); -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring); -void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val); +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res); +int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); +int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring); +void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); void -ice_process_skb_fields(struct ice_ring *rx_ring, +ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 ptype); void -ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index d33d1906103c7c..9e0c2923c62e8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -138,7 +138,9 @@ enum ice_vsi_type { ICE_VSI_PF = 0, ICE_VSI_VF = 1, ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ + ICE_VSI_CHNL = 4, ICE_VSI_LB = 6, + ICE_VSI_SWITCHDEV_CTRL = 7, }; struct ice_link_status { @@ -569,6 +571,8 @@ struct ice_sched_vsi_info { struct list_head list_entry; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS]; + /* bw_t_info saves VSI BW information */ + struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS]; }; /* driver defines the policy */ @@ -604,7 +608,8 @@ struct ice_dcb_app_priority_table { }; #define ICE_MAX_USER_PRIORITY 8 -#define ICE_DCBX_MAX_APPS 32 +#define ICE_DCBX_MAX_APPS 64 +#define ICE_DSCP_NUM_VAL 64 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 @@ -622,7 +627,14 @@ struct ice_dcbx_cfg { struct ice_dcb_ets_cfg etscfg; struct ice_dcb_ets_cfg etsrec; struct ice_dcb_pfc_cfg pfc; +#define ICE_QOS_MODE_VLAN 0x0 +#define ICE_QOS_MODE_DSCP 0x1 + u8 pfc_mode; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; + /* when DSCP mapping defined by user set its bit to 1 */ + DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); + /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */ + u8 dscp_map[ICE_DSCP_NUM_VAL]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 0x2 @@ -668,6 +680,10 @@ struct ice_port_info { struct ice_switch_info { struct list_head vsi_list_map_head; struct ice_sw_recipe *recp_list; + u16 prof_res_bm_init; + u16 max_used_prof_index; + + DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; /* FW logging configuration */ @@ -903,6 +919,7 @@ struct ice_hw { struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; struct ice_mbx_snapshot mbx_snapshot; + u16 io_expander_handle; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e93430ab37f1e9..2ac21484b87669 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -5,7 +5,9 @@ #include "ice_base.h" #include 
"ice_lib.h" #include "ice_fltr.h" +#include "ice_dcb_lib.h" #include "ice_flow.h" +#include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" #define FIELD_SELECTOR(proto_hdr_field) \ @@ -251,7 +253,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { * ice_get_vf_vsi - get VF's VSI based on the stored index * @vf: VF used to get VSI */ -static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) { return vf->pf->vsi[vf->lan_vsi_idx]; } @@ -412,7 +414,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf) * * send a link status message to a single VF */ -static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +void ice_vc_notify_vf_link_state(struct ice_vf *vf) { struct virtchnl_pf_event pfe = { 0 }; struct ice_hw *hw = &vf->pf->hw; @@ -620,6 +622,8 @@ void ice_free_vfs(struct ice_pf *pf) if (!pf->vf) return; + ice_eswitch_release(pf); + while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); @@ -828,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id); + vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); @@ -855,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id); + vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); ice_vf_ctrl_invalidate_vsi(vf); @@ -881,6 +885,40 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; } +/** + * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration + * @vf: VF to re-apply the configuration for + * + * Called after a VF VSI has been re-added/rebuild during reset. The PF driver + * needs to re-apply the host configured Tx rate limiting configuration. 
+ */ +static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int err; + + if (vf->min_tx_rate) { + err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", + vf->min_tx_rate, vf->vf_id, err); + return err; + } + } + + if (vf->max_tx_rate) { + err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", + vf->max_tx_rate, vf->vf_id, err); + return err; + } + } + + return 0; +} + /** * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN * @vf: VF to add MAC filters for @@ -932,6 +970,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) enum ice_status status; u8 broadcast[ETH_ALEN]; + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + eth_broadcast_addr(broadcast); status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { @@ -1414,6 +1455,11 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) if (ice_vf_rebuild_host_vlan_cfg(vf)) dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", vf->vf_id); + + if (ice_vf_rebuild_host_tx_rate_cfg(vf)) + dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", + vf->vf_id); + /* rebuild aggregator node config for main VF VSI */ ice_vf_rebuild_aggregator_node_cfg(vsi); } @@ -1581,6 +1627,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_post_vsi_rebuild(vf); } + if (ice_is_eswitch_mode_switchdev(pf)) + if (ice_eswitch_rebuild(pf)) + dev_warn(dev, "eswitch rebuild failed\n"); + ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -1593,7 +1643,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * * Returns true if the PF or VF is disabled, false otherwise. */ -static bool ice_is_vf_disabled(struct ice_vf *vf) +bool ice_is_vf_disabled(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; @@ -1711,6 +1761,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) } ice_vf_post_vsi_rebuild(vf); + vsi = ice_get_vf_vsi(vf); + ice_eswitch_update_repr(vsi); /* if the VF has been reset allow it to come up again */ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) @@ -1894,6 +1946,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) */ ice_vf_ctrl_invalidate_vsi(vf); ice_vf_fdir_init(vf); + + ice_vc_set_dflt_vf_ops(&vf->vc_ops); } } @@ -1960,6 +2014,11 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) } clear_bit(ICE_VF_DIS, pf->state); + + ret = ice_eswitch_configure(pf); + if (ret) + goto err_unroll_sriov; + return 0; err_unroll_sriov: @@ -2823,7 +2882,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf) * disabled, and initialized so it can be configured and/or queried by a host * administrator. 
*/ -static int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +int ice_check_vf_ready_for_cfg(struct ice_vf *vf) { struct ice_pf *pf; @@ -3013,7 +3072,10 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc); + if (rm_promisc) + ret = ice_cfg_vlan_pruning(vsi, true); + else + ret = ice_cfg_vlan_pruning(vsi, false); if (ret) { dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n"); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -3329,7 +3391,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - struct ice_ring *ring = vsi->tx_rings[vf_q_id]; + struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id]; struct ice_txq_meta txq_meta = { 0 }; if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { @@ -3801,6 +3863,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME); } +/** + * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF + * @vf: VF to update + * @vc_ether_addr: structure from VIRTCHNL with MAC to check + * + * only update cached hardware MAC for legacy VF drivers on delete + * because we cannot guarantee order/type of MAC from the VF driver + */ +static void +ice_update_legacy_cached_mac(struct ice_vf *vf, + struct virtchnl_ether_addr *vc_ether_addr) +{ + if (!ice_is_vc_addr_legacy(vc_ether_addr) || + ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) + return; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); +} + /** * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed * @vf: VF to update @@ -3822,16 +3904,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) */ eth_zero_addr(vf->dev_lan_addr.addr); - /* only update cached hardware MAC for legacy VF drivers on delete - * because we cannot guarantee order/type of MAC from the VF driver - */ - if (ice_is_vc_addr_legacy(vc_ether_addr) && - !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) { - ether_addr_copy(vf->dev_lan_addr.addr, - vf->legacy_last_added_umac.addr); - ether_addr_copy(vf->hw_lan_addr.addr, - vf->legacy_last_added_umac.addr); - } + ice_update_legacy_cached_mac(vf, vc_ether_addr); } /** @@ -4207,7 +4280,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) /* Enable VLAN pruning when non-zero VLAN is added */ if (!vlan_promisc && vid && !ice_vsi_is_vlan_pruning_ena(vsi)) { - status = ice_cfg_vlan_pruning(vsi, true, false); + status = ice_cfg_vlan_pruning(vsi, true); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", @@ -4261,7 +4334,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) /* Disable VLAN pruning when only VLAN 0 is left */ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) - ice_cfg_vlan_pruning(vsi, false, false); + ice_cfg_vlan_pruning(vsi, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ if (vlan_promisc) { @@ -4400,6 +4473,168 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) return ice_vsi_manage_vlan_stripping(vsi, false); } +static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { + .get_ver_msg = ice_vc_get_ver_msg, + .get_vf_res_msg = ice_vc_get_vf_res_msg, + .reset_vf = ice_vc_reset_vf_msg, + .add_mac_addr_msg = 
ice_vc_add_mac_addr_msg, + .del_mac_addr_msg = ice_vc_del_mac_addr_msg, + .cfg_qs_msg = ice_vc_cfg_qs_msg, + .ena_qs_msg = ice_vc_ena_qs_msg, + .dis_qs_msg = ice_vc_dis_qs_msg, + .request_qs_msg = ice_vc_request_qs_msg, + .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, + .config_rss_key = ice_vc_config_rss_key, + .config_rss_lut = ice_vc_config_rss_lut, + .get_stats_msg = ice_vc_get_stats_msg, + .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, + .add_vlan_msg = ice_vc_add_vlan_msg, + .remove_vlan_msg = ice_vc_remove_vlan_msg, + .ena_vlan_stripping = ice_vc_ena_vlan_stripping, + .dis_vlan_stripping = ice_vc_dis_vlan_stripping, + .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, + .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, + .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, +}; + +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) +{ + *ops = ice_vc_vf_dflt_ops; +} + +/** + * ice_vc_repr_add_mac + * @vf: pointer to VF + * @msg: virtchannel message + * + * When port representors are created, we do not add MAC rule + * to firmware, we store it so that PF could report same + * MAC as VF. + */ +static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_vsi *vsi; + struct ice_pf *pf; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + pf = vf->pf; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + for (i = 0; i < al->num_elements; i++) { + u8 *mac_addr = al->list[i].addr; + + if (!is_unicast_ether_addr(mac_addr) || + ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) + continue; + + if (vf->pf_set_mac) { + dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto handle_mac_exit; + } + + ice_vfhw_mac_add(vf, &al->list[i]); + vf->num_mac++; + break; + } + +handle_mac_exit: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + v_ret, NULL, 0); +} + +/** + * ice_vc_repr_del_mac - response with success for deleting MAC + * @vf: pointer to VF + * @msg: virtchannel message + * + * Respond with success to not break normal VF flow. + * For legacy VF driver try to update cached MAC address. 
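ice_vc_set_dflt_vf_ops() above copies the default virtchnl handler table into each VF, and ice_vc_change_ops_to_repr() further below overrides only the handlers that must behave differently once the PF is in switchdev mode; the dispatcher then calls through vf->vc_ops instead of the handlers directly, as the ice_vc_process_vf_msg() hunk later in this patch shows. A minimal standalone illustration of that pattern, with all names hypothetical:

#include <stdio.h>

struct sketch_ops {
	int (*add_mac)(int vf_id);
	int (*add_vlan)(int vf_id);
};

static int dflt_add_mac(int vf_id)  { printf("VF %d: MAC added\n", vf_id); return 0; }
static int dflt_add_vlan(int vf_id) { printf("VF %d: VLAN added\n", vf_id); return 0; }
static int repr_add_vlan(int vf_id) { printf("VF %d: VLAN ignored in switchdev\n", vf_id); return 0; }

static const struct sketch_ops sketch_dflt_ops = {
	.add_mac  = dflt_add_mac,
	.add_vlan = dflt_add_vlan,
};

static void sketch_set_dflt_ops(struct sketch_ops *ops)
{
	*ops = sketch_dflt_ops;		/* every VF starts with the defaults */
}

static void sketch_change_ops_to_repr(struct sketch_ops *ops)
{
	ops->add_vlan = repr_add_vlan;	/* override only what switchdev changes */
}

int main(void)
{
	struct sketch_ops ops;

	sketch_set_dflt_ops(&ops);
	sketch_change_ops_to_repr(&ops);
	return ops.add_mac(0) | ops.add_vlan(0);
}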
+ */ +static int +ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + + ice_update_legacy_cached_mac(vf, &al->list[0]); + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg) +{ + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg) +{ + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf) +{ + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't enable VLAN stripping in switchdev mode for VF %d\n", + vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); +} + +static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf) +{ + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't disable VLAN stripping in switchdev mode for VF %d\n", + vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); +} + +static int +ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg) +{ + dev_dbg(ice_pf_to_dev(vf->pf), + "Can't config promiscuous mode in switchdev mode for VF %d\n", + vf->vf_id); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); +} + +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) +{ + ops->add_mac_addr_msg = ice_vc_repr_add_mac; + ops->del_mac_addr_msg = ice_vc_repr_del_mac; + ops->add_vlan_msg = ice_vc_repr_add_vlan; + ops->remove_vlan_msg = ice_vc_repr_del_vlan; + ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping; + ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping; + ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode; +} + /** * ice_vc_process_vf_msg - Process request from VF * @pf: pointer to the PF structure @@ -4413,6 +4648,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) u32 v_opcode = le32_to_cpu(event->desc.cookie_high); s16 vf_id = le16_to_cpu(event->desc.retval); u16 msglen = event->msg_len; + struct ice_vc_vf_ops *ops; u8 *msg = event->msg_buf; struct ice_vf *vf = NULL; struct device *dev; @@ -4436,6 +4672,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) goto error_handler; } + ops = &vf->vc_ops; + /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (err) { @@ -4463,75 +4701,75 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) switch (v_opcode) { case VIRTCHNL_OP_VERSION: - err = ice_vc_get_ver_msg(vf, msg); + err = ops->get_ver_msg(vf, msg); break; case VIRTCHNL_OP_GET_VF_RESOURCES: - err = ice_vc_get_vf_res_msg(vf, msg); + err = ops->get_vf_res_msg(vf, msg); if (ice_vf_init_vlan_stripping(vf)) dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n", vf->vf_id); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - ice_vc_reset_vf_msg(vf); + ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: - 
err = ice_vc_add_mac_addr_msg(vf, msg); + err = ops->add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: - err = ice_vc_del_mac_addr_msg(vf, msg); + err = ops->del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - err = ice_vc_cfg_qs_msg(vf, msg); + err = ops->cfg_qs_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: - err = ice_vc_ena_qs_msg(vf, msg); + err = ops->ena_qs_msg(vf, msg); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: - err = ice_vc_dis_qs_msg(vf, msg); + err = ops->dis_qs_msg(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: - err = ice_vc_request_qs_msg(vf, msg); + err = ops->request_qs_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - err = ice_vc_cfg_irq_map_msg(vf, msg); + err = ops->cfg_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - err = ice_vc_config_rss_key(vf, msg); + err = ops->config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - err = ice_vc_config_rss_lut(vf, msg); + err = ops->config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_STATS: - err = ice_vc_get_stats_msg(vf, msg); + err = ops->get_stats_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - err = ice_vc_cfg_promiscuous_mode_msg(vf, msg); + err = ops->cfg_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: - err = ice_vc_add_vlan_msg(vf, msg); + err = ops->add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: - err = ice_vc_remove_vlan_msg(vf, msg); + err = ops->remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: - err = ice_vc_ena_vlan_stripping(vf); + err = ops->ena_vlan_stripping(vf); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: - err = ice_vc_dis_vlan_stripping(vf); + err = ops->dis_vlan_stripping(vf); break; case VIRTCHNL_OP_ADD_FDIR_FILTER: - err = ice_vc_add_fdir_fltr(vf, msg); + err = ops->add_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_FDIR_FILTER: - err = ice_vc_del_fdir_fltr(vf, msg); + err = ops->del_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, true); + err = ops->handle_rss_cfg_msg(vf, msg, true); break; case VIRTCHNL_OP_DEL_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, false); + err = ops->handle_rss_cfg_msg(vf, msg, false); break; case VIRTCHNL_OP_UNKNOWN: default: @@ -4588,8 +4826,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; - ivi->max_tx_rate = vf->tx_rate; - ivi->min_tx_rate = 0; + ivi->max_tx_rate = vf->max_tx_rate; + ivi->min_tx_rate = vf->min_tx_rate; return 0; } @@ -4699,6 +4937,11 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) struct ice_vf *vf; int ret; + if (ice_is_eswitch_mode_switchdev(pf)) { + dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); + return -EOPNOTSUPP; + } + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; @@ -4762,6 +5005,122 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) return 0; } +/** + * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs + * @pf: PF associated with VFs + */ +static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) +{ + int rate = 0, i; + + ice_for_each_vf(pf, i) + rate += pf->vf[i].min_tx_rate; + + return rate; +} + +/** + * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription + * @vf: VF trying to configure min_tx_rate + * @min_tx_rate: min Tx rate in Mbps + * + 
* Check if the min_tx_rate being passed in will cause oversubscription of total + * min_tx_rate based on the current link speed and all other VFs configured + * min_tx_rate + * + * Return true if the passed min_tx_rate would cause oversubscription, else + * return false + */ +static bool +ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) +{ + int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); + int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); + + /* this VF's previous rate is being overwritten */ + all_vfs_min_tx_rate -= vf->min_tx_rate; + + if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { + dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", + min_tx_rate, vf->vf_id, + all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, + link_speed_mbps); + return true; + } + + return false; +} + +/** + * ice_set_vf_bw - set min/max VF bandwidth + * @netdev: network interface device structure + * @vf_id: VF identifier + * @min_tx_rate: Minimum Tx rate in Mbps + * @max_tx_rate: Maximum Tx rate in Mbps + */ +int +ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *vsi; + struct device *dev; + struct ice_vf *vf; + int ret; + + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) + return -EINVAL; + + vf = &pf->vf[vf_id]; + ret = ice_check_vf_ready_for_cfg(vf); + if (ret) + return ret; + + vsi = ice_get_vf_vsi(vf); + + /* when max_tx_rate is zero that means no max Tx rate limiting, so only + * check if max_tx_rate is non-zero + */ + if (max_tx_rate && min_tx_rate > max_tx_rate) { + dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", + min_tx_rate, max_tx_rate); + return -EINVAL; + } + + if (min_tx_rate && ice_is_dcb_active(pf)) { + dev_err(dev, "DCB on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); + return -EOPNOTSUPP; + } + + if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) + return -EINVAL; + + if (vf->min_tx_rate != (unsigned int)min_tx_rate) { + ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); + if (ret) { + dev_err(dev, "Unable to set min-tx-rate for VF %d\n", + vf->vf_id); + return ret; + } + + vf->min_tx_rate = min_tx_rate; + } + + if (vf->max_tx_rate != (unsigned int)max_tx_rate) { + ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); + if (ret) { + dev_err(dev, "Unable to set max-tx-rate for VF %d\n", + vf->vf_id); + return ret; + } + + vf->max_tx_rate = max_tx_rate; + } + + return 0; +} + /** * ice_get_vf_stats - populate some stats for the VF * @netdev: the netdev of the PF diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 842cb077df8612..5ff93a08f54c94 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -70,6 +70,32 @@ struct ice_mdd_vf_events { u16 last_printed; }; +struct ice_vf; + +struct ice_vc_vf_ops { + int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); + int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); + void (*reset_vf)(struct ice_vf *vf); + int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*request_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); + int (*config_rss_key)(struct ice_vf *vf, u8 *msg); + int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); + int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_stripping)(struct ice_vf *vf); + int (*dis_vlan_stripping)(struct ice_vf *vf); + int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); + int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); +}; + /* VF information structure */ struct ice_vf { struct ice_pf *pf; @@ -99,7 +125,8 @@ struct ice_vf { * the main LAN VSI for the PF. 
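ice_min_tx_rate_oversubscribed() above rejects a request when the sum of all VFs' minimum guarantees (with this VF's previous value replaced by the new one) would exceed the current link speed; ice_set_vf_bw() then converts the accepted Mbps values to Kbps for ice_set_min_bw_limit()/ice_set_max_bw_limit(). The prototype matches the .ndo_set_vf_rate netdev op, which is presumably where ice_set_vf_bw() gets wired up elsewhere in the driver (not visible in these hunks). A standalone sketch of just the oversubscription check, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool sketch_min_rate_oversubscribed(const int *vf_min_rate, int num_vfs,
					   int vf_id, int new_min, int link_mbps)
{
	int total = 0, i;

	for (i = 0; i < num_vfs; i++)
		total += (i == vf_id) ? new_min : vf_min_rate[i];

	return total > link_mbps;
}

int main(void)
{
	int mins[4] = { 2000, 3000, 0, 0 };

	/* 2000 + 3000 + 6000 = 11000 Mbps > 10000 Mbps link -> rejected */
	printf("oversubscribed: %d\n",
	       sketch_min_rate_oversubscribed(mins, 4, 3, 6000, 10000));
	return 0;
}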
*/ u16 lan_vsi_num; /* ID as used by firmware */ - unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ + unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ u64 num_inval_msgs; /* number of continuous invalid msgs */ @@ -111,9 +138,17 @@ struct ice_vf { struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + + struct ice_repr *repr; + + struct ice_vc_vf_ops vc_ops; + + /* devlink port data */ + struct devlink_port devlink_port; }; #ifdef CONFIG_PCI_IOV +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); @@ -124,6 +159,9 @@ void ice_free_vfs(struct ice_pf *pf); void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); +void ice_vc_notify_vf_link_state(struct ice_vf *vf); +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops); +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); @@ -135,10 +173,18 @@ int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto); +int +ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); + int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); +int ice_check_vf_ready_for_cfg(struct ice_vf *vf); + +bool ice_is_vf_disabled(struct ice_vf *vf); + int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); @@ -164,6 +210,9 @@ static inline void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } static inline void ice_vc_notify_reset(struct ice_pf *pf) { } +static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } +static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { } +static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { } static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { } static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } @@ -171,6 +220,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_vf_disabled(struct ice_vf *vf) +{ + return true; +} + +static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +{ + return NULL; +} + static inline bool ice_is_malicious_vf(struct ice_pf __always_unused *pf, struct ice_rq_event_info __always_unused *event, @@ -244,6 +308,14 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } +static inline int +ice_set_vf_bw(struct net_device __always_unused 
*netdev, + int __always_unused vf_id, int __always_unused min_tx_rate, + int __always_unused max_tx_rate) +{ + return -EOPNOTSUPP; +} + static inline int ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, struct ice_q_vector __always_unused *q_vector) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 5a9f61deeb38da..ff55cb415b110f 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, * @q_vector: queue vector */ static void -ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, +ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, struct ice_q_vector *q_vector) { struct ice_pf *pf = vsi->back; @@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) u16 reg_idx = q_vector->reg_idx; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - struct ice_ring *ring; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; ice_cfg_itr(hw, q_vector); - ice_for_each_ring(ring, q_vector->tx) - ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, + ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx, q_vector->tx.itr_idx); - ice_for_each_ring(ring, q_vector->rx) - ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx, + ice_for_each_rx_ring(rx_ring, q_vector->rx) + ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx, q_vector->rx.itr_idx); ice_flush(hw); @@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) { struct ice_txq_meta txq_meta = { }; - struct ice_ring *tx_ring, *rx_ring; struct ice_q_vector *q_vector; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; int timeout = 50; int err; @@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; if (ice_is_xdp_ena_vsi(vsi)) { - struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; memset(&txq_meta, 0, sizeof(txq_meta)); ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); @@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) { struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_ring *tx_ring, *rx_ring; struct ice_q_vector *q_vector; + struct ice_tx_ring *tx_ring; + struct ice_rx_ring *rx_ring; u16 size; int err; @@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) goto free_buf; if (ice_is_xdp_ena_vsi(vsi)) { - struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; memset(qg_buf, 0, size); qg_buf->num_txqs = 1; @@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) goto free_buf; ice_set_ring_xdp(xdp_ring); - xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); + xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring); } err = ice_vsi_cfg_rxq(rx_ring); @@ -360,56 +363,50 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) * * Returns true if all allocations were successful, false if any fail. 
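The ice_alloc_rx_bufs_zc() rewrite below switches from per-buffer xsk_buff_alloc() to xsk_buff_alloc_batch(): the request is first capped at the space left before the ring wraps, the pool fills an array of xdp_buff pointers in one call, and the function simply reports whether the whole request was satisfied. The standalone sketch below shows only that index arithmetic; the fake allocator and every sketch_/fake_ name are stand-ins, not the XSK API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RING_SIZE 8

static uint16_t sketch_ntu;	/* next_to_use */

/* stand-in for xsk_buff_alloc_batch(); pretend it always fills the request */
static uint16_t fake_alloc_batch(uint16_t max)
{
	return max;
}

static bool sketch_alloc_bufs(uint16_t count)
{
	uint16_t room = (uint16_t)(SKETCH_RING_SIZE - sketch_ntu);
	uint16_t nb = count < room ? count : room;

	nb = fake_alloc_batch(nb);
	if (!nb)
		return false;

	sketch_ntu = (uint16_t)(sketch_ntu + nb);
	if (sketch_ntu == SKETCH_RING_SIZE)
		sketch_ntu = 0;	/* wrapped; the next call restarts at slot 0 */

	return count == nb;	/* false if the wrap cut the batch short */
}

int main(void)
{
	bool ok;

	sketch_ntu = 5;
	ok = sketch_alloc_bufs(6);
	printf("filled all: %d, next_to_use now %u\n", ok, sketch_ntu);
	return 0;
}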
*/ -bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) +bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; - struct ice_rx_buf *rx_buf; - bool ok = true; + struct xdp_buff **xdp; + u32 nb_buffs, i; dma_addr_t dma; - if (!count) - return true; - rx_desc = ICE_RX_DESC(rx_ring, ntu); - rx_buf = &rx_ring->rx_buf[ntu]; + xdp = &rx_ring->xdp_buf[ntu]; - do { - rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); - if (!rx_buf->xdp) { - ok = false; - break; - } + nb_buffs = min_t(u16, count, rx_ring->count - ntu); + nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); + if (!nb_buffs) + return false; - dma = xsk_buff_xdp_get_dma(rx_buf->xdp); + i = nb_buffs; + while (i--) { + dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); - rx_desc->wb.status_error0 = 0; rx_desc++; - rx_buf++; - ntu++; - - if (unlikely(ntu == rx_ring->count)) { - rx_desc = ICE_RX_DESC(rx_ring, 0); - rx_buf = rx_ring->rx_buf; - ntu = 0; - } - } while (--count); + xdp++; + } - if (rx_ring->next_to_use != ntu) { - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.status_error0 = 0; - ice_release_rx_desc(rx_ring, ntu); + ntu += nb_buffs; + if (ntu == rx_ring->count) { + rx_desc = ICE_RX_DESC(rx_ring, 0); + xdp = rx_ring->xdp_buf; + ntu = 0; } - return ok; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.status_error0 = 0; + ice_release_rx_desc(rx_ring, ntu); + + return count == nb_buffs; } /** * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring * @rx_ring: Rx ring */ -static void ice_bump_ntc(struct ice_ring *rx_ring) +static void ice_bump_ntc(struct ice_rx_ring *rx_ring) { int ntc = rx_ring->next_to_clean + 1; @@ -421,19 +418,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring) /** * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer * @rx_ring: Rx ring - * @rx_buf: zero-copy Rx buffer + * @xdp_arr: Pointer to the SW ring of xdp_buff pointers * * This function allocates a new skb from a zero-copy Rx buffer. * * Returns the skb on success, NULL on failure. 
*/ static struct sk_buff * -ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr) { - unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta; - unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data; - unsigned int datasize_hard = rx_buf->xdp->data_end - - rx_buf->xdp->data_hard_start; + struct xdp_buff *xdp = *xdp_arr; + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start; struct sk_buff *skb; skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, @@ -441,13 +438,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (unlikely(!skb)) return NULL; - skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); if (metasize) skb_metadata_set(skb, metasize); - xsk_buff_free(rx_buf->xdp); - rx_buf->xdp = NULL; + xsk_buff_free(xdp); + *xdp_arr = NULL; return skb; } @@ -455,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) * ice_run_xdp_zc - Executes an XDP program in zero-copy path * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program + * @xdp_prog: XDP program to run + * @xdp_ring: ring to be used for XDP_TX action * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static int -ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) +ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) { int err, result = ICE_XDP_PASS; - struct bpf_prog *xdp_prog; - struct ice_ring *xdp_ring; u32 act; - /* ZC patch is enabled only when XDP program is set, - * so here it can not be NULL - */ - xdp_prog = READ_ONCE(rx_ring->xdp_prog); - act = bpf_prog_run_xdp(xdp_prog, xdp); if (likely(act == XDP_REDIRECT)) { @@ -484,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) case XDP_PASS: break; case XDP_TX: - xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; result = ice_xmit_xdp_buff(xdp, xdp_ring); if (result == ICE_XDP_CONSUMED) goto out_failure; @@ -511,17 +503,25 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) * * Returns number of processed packets on success, remaining budget on failure. 
*/ -int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) +int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; + struct bpf_prog *xdp_prog; bool failure = false; + /* ZC patch is enabled only when XDP program is set, + * so here it can not be NULL + */ + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + xdp_ring = rx_ring->xdp_ring; + while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; - struct ice_rx_buf *rx_buf; + struct xdp_buff **xdp; struct sk_buff *skb; u16 stat_err_bits; u16 vlan_tag = 0; @@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - rx_buf->xdp->data_end = rx_buf->xdp->data + size; - xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool); + xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean]; + xsk_buff_set_size(*xdp, size); + xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool); - xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp); + xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring); if (xdp_res) { if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) xdp_xmit |= xdp_res; else - xsk_buff_free(rx_buf->xdp); + xsk_buff_free(*xdp); - rx_buf->xdp = NULL; + *xdp = NULL; total_rx_bytes += size; total_rx_packets++; cleaned_count++; @@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) } /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, rx_buf); + skb = ice_construct_skb_zc(rx_ring, xdp); if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; break; @@ -596,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (cleaned_count >= ICE_RX_BUF_WRITE) failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count); - ice_finalize_xdp_rx(rx_ring, xdp_xmit); + ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { @@ -618,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) * * Returns true if cleanup/transmission is done. */ -static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) +static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget) { struct ice_tx_desc *tx_desc = NULL; bool work_done = true; @@ -669,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) * @tx_buf: Tx buffer to clean */ static void -ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) +ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf) { xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), @@ -684,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) * * Returns true if cleanup/tranmission is done. 
*/ -bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) +bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget) { int total_packets = 0, total_bytes = 0; s16 ntc = xdp_ring->next_to_clean; @@ -757,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_q_vector *q_vector; struct ice_vsi *vsi = np->vsi; - struct ice_ring *ring; + struct ice_tx_ring *ring; if (test_bit(ICE_DOWN, vsi->state)) return -ENETDOWN; @@ -808,17 +808,17 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring * @rx_ring: ring to be cleaned */ -void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) +void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { u16 i; for (i = 0; i < rx_ring->count; i++) { - struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + struct xdp_buff **xdp = &rx_ring->xdp_buf[i]; - if (!rx_buf->xdp) + if (!xdp) continue; - rx_buf->xdp = NULL; + *xdp = NULL; } } @@ -826,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues * @xdp_ring: XDP_Tx ring */ -void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) +void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; u32 xsk_frames = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index ea208808623a84..4c7bd8e9dfc43c 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -11,13 +11,13 @@ struct ice_vsi; #ifdef CONFIG_XDP_SOCKETS int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid); -int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget); -bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget); +int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget); +bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget); int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); -bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count); +bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count); bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); -void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring); -void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring); +void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); +void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); #else static inline int ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, @@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, } static inline int -ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring, +ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, int __always_unused budget) { return 0; } static inline bool -ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring, +ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring, int __always_unused budget) { return false; } static inline bool -ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring, +ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring, u16 __always_unused count) { return false; @@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { } -static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { } +static inline void 
ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } +static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } #endif /* CONFIG_XDP_SOCKETS */ #endif /* !_ICE_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 751de06019a0e8..836be0d3b29105 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -577,16 +577,15 @@ static void igb_set_i2c_data(void *data, int state) struct e1000_hw *hw = &adapter->hw; s32 i2cctl = rd32(E1000_I2CPARAMS); - if (state) - i2cctl |= E1000_I2C_DATA_OUT; - else + if (state) { + i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; + } else { + i2cctl &= ~E1000_I2C_DATA_OE_N; i2cctl &= ~E1000_I2C_DATA_OUT; + } - i2cctl &= ~E1000_I2C_DATA_OE_N; - i2cctl |= E1000_I2C_CLK_OE_N; wr32(E1000_I2CPARAMS, i2cctl); wrfl(); - } /** @@ -603,8 +602,7 @@ static void igb_set_i2c_clk(void *data, int state) s32 i2cctl = rd32(E1000_I2CPARAMS); if (state) { - i2cctl |= E1000_I2C_CLK_OUT; - i2cctl &= ~E1000_I2C_CLK_OE_N; + i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N; } else { i2cctl &= ~E1000_I2C_CLK_OUT; i2cctl &= ~E1000_I2C_CLK_OE_N; @@ -3116,12 +3114,21 @@ static void igb_init_mas(struct igb_adapter *adapter) **/ static s32 igb_init_i2c(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; s32 status = 0; + s32 i2cctl; /* I2C interface supported on i350 devices */ if (adapter->hw.mac.type != e1000_i350) return 0; + i2cctl = rd32(E1000_I2CPARAMS); + i2cctl |= E1000_I2CBB_EN + | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N + | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + /* Initialize the i2c bus which is controlled by the registers. * This bus will use the i2c_algo_bit structure that implements * the protocol through toggling of the 4 bits in the register. 
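From here on, the igb/igbvf/igc/ixgb/ixgbe hunks below replace direct memcpy() into netdev->dev_addr with eth_hw_addr_set() from <linux/etherdevice.h>, so MAC address writes go through the core helper rather than poking the array directly (part of the tree-wide work that lets the core treat netdev->dev_addr as read-only for drivers). A minimal kernel-context sketch of the conversion pattern; sketch_set_hw_mac() is a made-up example, not a function from these drivers.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* old pattern:
 *	memcpy(netdev->dev_addr, mac, netdev->addr_len);
 * new pattern: let the core copy (and track) the address
 */
static void sketch_set_hw_mac(struct net_device *netdev, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return;

	eth_hw_addr_set(netdev, mac);
}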
@@ -3356,7 +3363,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); @@ -4988,7 +4995,7 @@ static int igb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index d32e72d953c8df..74ccd622251a23 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter) spin_unlock_bh(&hw->mbx_lock); if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); return 0; } @@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); } spin_unlock_bh(&hw->mbx_lock); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 84f142f5e472e5..f068b66b802510 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) ctrl = rd32(IGC_CTRL); hw_dbg("Issuing a global reset to MAC\n"); - wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); ret_val = igc_get_auto_rd_done(hw); if (ret_val) { @@ -158,11 +158,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) struct igc_phy_info *phy = &hw->phy; s32 ret_val = 0; - if (hw->phy.media_type != igc_media_type_copper) { - phy->type = igc_phy_none; - goto out; - } - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; phy->reset_delay_us = 100; @@ -207,6 +202,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) case IGC_DEV_ID_I225_K2: case IGC_DEV_ID_I226_K: case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: case IGC_DEV_ID_I225_IT: case IGC_DEV_ID_I226_LM: case IGC_DEV_ID_I226_V: diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index a4bbee74879845..c7fe61509d5b51 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -130,7 +130,7 @@ #define IGC_ERR_SWFW_SYNC 13 /* Device Control */ -#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ #define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ diff 
--git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 4e0203336c6bfe..587db7483f2557 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -24,6 +24,7 @@ #define IGC_DEV_ID_I225_K2 0x3101 #define IGC_DEV_ID_I226_K 0x3102 #define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I226_LMVP 0x5503 #define IGC_DEV_ID_I225_IT 0x0D9F #define IGC_DEV_ID_I226_LM 0x125B #define IGC_DEV_ID_I226_V 0x125C diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 0e19b4d02e628a..8e448288ee2657 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -56,6 +56,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, @@ -949,7 +950,7 @@ static int igc_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ @@ -6377,7 +6378,7 @@ static int igc_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 0f021909b430a0..30568e3544cdab 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -773,7 +773,7 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) { -#if IS_ENABLED(CONFIG_X86_TSC) +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) return convert_art_ns_to_tsc(tstamp); #else return (struct system_counterval_t) { }; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c index a430871d1c27e3..c8d1e815ec6bc2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw, *****************************************************************************/ void ixgb_rar_set(struct ixgb_hw *hw, - u8 *addr, + const u8 *addr, u32 index) { u32 rar_low, rar_high; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 6064583095da79..70bcff5fb3db9c 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw); void ixgb_check_for_link(struct ixgb_hw *hw); bool ixgb_check_for_bad_link(struct ixgb_hw *hw); -void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index); +void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index); /* Filters (multicast, vlan, receive) */ void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list, diff --git 
a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 1588376d4c6734..99d481904ce642 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -362,6 +362,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgb_adapter *adapter; static int cards_found = 0; int pci_using_dac; + u8 addr[ETH_ALEN]; int i; int err; @@ -461,7 +462,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + ixgb_get_ee_mac_addr(&adapter->hw, addr); + eth_hw_addr_set(netdev, addr); if (!is_valid_ether_addr(netdev->dev_addr)) { netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); @@ -1030,7 +1032,7 @@ ixgb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); ixgb_rar_set(&adapter->hw, addr->sa_data, 0); @@ -2227,6 +2229,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); + u8 addr[ETH_ALEN]; if (pci_enable_device(pdev)) { netif_err(adapter, probe, adapter->netdev, @@ -2250,7 +2253,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) "After reset, the EEPROM checksum is not valid\n"); return PCI_ERS_RESULT_DISCONNECT; } - ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + ixgb_get_ee_mac_addr(&adapter->hw, addr); + eth_hw_addr_set(netdev, addr); memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a604552fa634e8..4a69823e6abd6b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -351,6 +351,7 @@ struct ixgbe_ring { }; u16 rx_offset; struct xdp_rxq_info xdp_rxq; + spinlock_t tx_lock; /* used in XDP mode */ struct xsk_buff_pool *xsk_pool; u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ u16 rx_buf_len; @@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 63 +DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); + struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -629,7 +632,7 @@ struct ixgbe_adapter { /* XDP */ int num_xdp_queues; - struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS]; unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */ /* TX */ @@ -772,6 +775,22 @@ struct ixgbe_adapter { #endif /* CONFIG_IXGBE_IPSEC */ }; +static inline int ixgbe_determine_xdp_q_idx(int cpu) +{ + if (static_key_enabled(&ixgbe_xdp_locking_key)) + return cpu % IXGBE_MAX_XDP_QS; + else + return cpu; +} + +static inline +struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter) +{ + int index = ixgbe_determine_xdp_q_idx(smp_processor_id()); + + return adapter->xdp_ring[index]; +} + static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter 
*adapter) { switch (adapter->hw.mac.type) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index beda8e0ef7d421..8362822316a915 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -467,9 +467,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (!bitmap_subset(cmd->link_modes.advertising, - cmd->link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS)) + if (!linkmode_subset(cmd->link_modes.advertising, + cmd->link_modes.supported)) return -EINVAL; /* only allow one speed at a time if no autoneg */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 0218f6c9b9258b..86b11164655e21 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) { - return adapter->xdp_prog ? nr_cpu_ids : 0; + int queues; + + queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids); + return adapter->xdp_prog ? queues : 0; } #define IXGBE_RSS_64Q_MASK 0x3F @@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = xdp_idx; set_ring_xdp(ring); + spin_lock_init(&ring->tx_lock); /* assign ring to adapter */ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); @@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->q_vector[v_idx] = NULL; __netif_napi_del(&q_vector->napi); + if (static_key_enabled(&ixgbe_xdp_locking_key)) + static_branch_dec(&ixgbe_xdp_locking_key); + /* * after a call to __netif_napi_del() napi may still be used and * ixgbe_get_stats64() might access the rings on this vector, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 13c4782b920a79..0f9f022260d70f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); +DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); +EXPORT_SYMBOL(ixgbe_xdp_locking_key); + static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); @@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct 
ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. - */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); @@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (ixgbe_init_rss_key(adapter)) return -ENOMEM; - adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); + adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); if (!adapter->af_xdp_zc_qps) return -ENOMEM; @@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; u32 len, cmd_type; @@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); ixgbe_mac_set_default_filter(adapter); @@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -EINVAL; } - if (nr_cpu_ids > MAX_XDP_QUEUES) + /* if the number of cpus is much larger than the maximum of queues, + * we should stop it and then return with ENOMEM like before. + */ + if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) return -ENOMEM; + else if (nr_cpu_ids > IXGBE_MAX_XDP_QS) + static_branch_inc(&ixgbe_xdp_locking_key); old_prog = xchg(&adapter->xdp_prog, prog); need_reset = (!!prog != !!old_prog); @@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) writel(ring->next_to_use, ring->tail); } +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) +{ + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); +} + static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + ring = adapter->xdp_prog ? 
ixgbe_determine_xdp_ring(adapter) : NULL; if (unlikely(!ring)) return -ENXIO; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) return -ENXIO; + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; - err = ixgbe_xmit_xdp_ring(adapter, xdpf); + err = ixgbe_xmit_xdp_ring(ring, xdpf); if (err != IXGBE_XDP_TX) break; nxmit++; @@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + return nxmit; } @@ -10903,7 +10927,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) eth_platform_get_mac_address(&adapter->pdev->dev, adapter->hw.mac.perm_addr); - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.perm_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 2aeec78029bc5a..a82533f21d368e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -12,7 +12,7 @@ #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf); bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, @@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb); void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring); void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index b1d22e4d5ec9ca..db2bc58dfcfd0f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c714e1ecd30895..d81811ab4ec482 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) } if (is_valid_ether_addr(adapter->hw.mac.addr)) { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } @@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - ether_addr_copy(netdev->dev_addr, hw->mac.addr); + eth_hw_addr_set(netdev, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) ether_addr_copy(hw->mac.addr, addr->sa_data); ether_addr_copy(hw->mac.perm_addr, addr->sa_data); - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 5fc347abab3cb8..d459f5c8e98ff5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -66,9 +66,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 timeout = IXGBE_VF_INIT_TIMEOUT; - s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; u8 *addr = (u8 *)(&msgbuf[1]); + s32 ret_val; /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 1bdc4f23e1e575..439674fc976509 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev) val = jread32(jme, JME_RXUMA_HI); macaddr[4] = (val >> 0) & 0xFF; macaddr[5] = (val >> 8) & 0xFF; - memcpy(netdev->dev_addr, macaddr, ETH_ALEN); + eth_hw_addr_set(netdev, macaddr); spin_unlock_bh(&jme->macaddr_lock); } @@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p) return -EBUSY; spin_lock_bh(&jme->macaddr_lock); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); jme_set_unicastaddr(netdev); spin_unlock_bh(&jme->macaddr_lock); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 3e9f324f1061fc..df9a8eefa007a3 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev) lp = netdev_priv(dev); if (mac_addr) - ether_addr_copy(dev->dev_addr, mac_addr); - else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0) + eth_hw_addr_set(dev, mac_addr); + else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0) eth_hw_addr_random(dev); clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 62f8c52121822d..2258e3f1916107 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -96,6 +96,9 @@ struct ltq_etop_priv { struct ltq_etop_chan 
ch[MAX_DMA_CHAN]; int tx_free[MAX_DMA_CHAN >> 1]; + int tx_burst_len; + int rx_burst_len; + spinlock_t lock; }; @@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev) /* enable crc generation */ ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, rx_burst_len); for (i = 0; i < MAX_DMA_CHAN; i++) { int irq = LTQ_DMA_CH0_INT + i; @@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - /* dma needs to start on a 16 byte aligned address */ - byte_offset = CPHYSADDR(skb->data) % 16; + /* dma needs to start on a burst length value aligned address */ + byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4); ch->skb[ch->dma.desc] = skb; netif_trans_update(dev); @@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); SET_NETDEV_DEV(dev, &pdev->dev); + err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read tx-burst-length property\n"); + return err; + } + + err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read rx-burst-length property\n"); + return err; + } + for (i = 0; i < MAX_DMA_CHAN; i++) { if (IS_TX(i)) netif_napi_add(dev, &priv->ch[i].napi, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index fb78f17d734fe2..0da09ea8198098 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -14,15 +14,18 @@ #include #include +#include + #include #include #include /* DMA */ -#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_DATA_LEN (SZ_64K - 1) #define XRX200_DMA_RX 0 #define XRX200_DMA_TX 1 +#define XRX200_DMA_BURST_LEN 8 /* cpu port mac */ #define PMAC_RX_IPG 0x0024 @@ -106,7 +109,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch) break; desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + (ch->priv->net_dev->mtu + VLAN_ETH_HLEN + + ETH_FCS_LEN); ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM; } @@ -154,19 +158,20 @@ static int xrx200_close(struct net_device *net_dev) static int xrx200_alloc_skb(struct xrx200_chan *ch) { + int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; struct sk_buff *skb = ch->skb[ch->dma.desc]; dma_addr_t mapping; int ret = 0; ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, - XRX200_DMA_DATA_LEN); + len); if (!ch->skb[ch->dma.desc]) { ret = -ENOMEM; goto skip; } mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data, - XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE); + len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) { dev_kfree_skb_any(ch->skb[ch->dma.desc]); ch->skb[ch->dma.desc] = skb; @@ -179,8 +184,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch) wmb(); skip: ch->dma.desc_base[ch->dma.desc].ctl = - LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len; return ret; } @@ -316,8 +320,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(priv->dev, mapping))) goto err_drop; - /* dma needs to start on a 16 byte aligned address */ - byte_offset = mapping % 16; + /* dma needs to start on a burst length value aligned address */ + byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4); desc->addr = mapping - byte_offset; 
/* Make sure the address is written before we give it to HW */ @@ -340,10 +344,57 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int +xrx200_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch_rx = &priv->chan_rx; + int old_mtu = net_dev->mtu; + bool running = false; + struct sk_buff *skb; + int curr_desc; + int ret = 0; + + net_dev->mtu = new_mtu; + + if (new_mtu <= old_mtu) + return ret; + + running = netif_running(net_dev); + if (running) { + napi_disable(&ch_rx->napi); + ltq_dma_close(&ch_rx->dma); + } + + xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM); + curr_desc = ch_rx->dma.desc; + + for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + skb = ch_rx->skb[ch_rx->dma.desc]; + ret = xrx200_alloc_skb(ch_rx); + if (ret) { + net_dev->mtu = old_mtu; + break; + } + dev_kfree_skb_any(skb); + } + + ch_rx->dma.desc = curr_desc; + if (running) { + napi_enable(&ch_rx->napi); + ltq_dma_open(&ch_rx->dma); + ltq_dma_enable_irq(&ch_rx->dma); + } + + return ret; +} + static const struct net_device_ops xrx200_netdev_ops = { .ndo_open = xrx200_open, .ndo_stop = xrx200_close, .ndo_start_xmit = xrx200_start_xmit, + .ndo_change_mtu = xrx200_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -369,7 +420,8 @@ static int xrx200_dma_init(struct xrx200_priv *priv) int ret = 0; int i; - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN, + XRX200_DMA_BURST_LEN); ch_rx->dma.nr = XRX200_DMA_RX; ch_rx->dma.dev = priv->dev; @@ -453,7 +505,7 @@ static int xrx200_probe(struct platform_device *pdev) net_dev->netdev_ops = &xrx200_netdev_ops; SET_NETDEV_DEV(net_dev, dev); net_dev->min_mtu = ETH_ZLEN; - net_dev->max_mtu = XRX200_DMA_DATA_LEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN; /* load the memory ranges */ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); @@ -474,7 +526,7 @@ static int xrx200_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - err = of_get_mac_address(np, net_dev->dev_addr); + err = of_get_ethdev_address(np, net_dev); if (err) eth_hw_addr_random(net_dev); diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig index 63bf01d28f0cf0..f99adbf26ab4e3 100644 --- a/drivers/net/ethernet/litex/Kconfig +++ b/drivers/net/ethernet/litex/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_LITEX config LITEX_LITEETH tristate "LiteX Ethernet support" - depends on OF_NET + depends on OF help If you wish to compile a kernel for hardware with a LiteX LiteEth device then you should answer Y to this. 
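Most of the MAC-address hunks in this series (jme, korina, lantiq_xrx200 above, and the litex/marvell drivers below) follow the same conversion: instead of writing netdev->dev_addr directly with memcpy()/ether_addr_copy() or filling it via of_get_mac_address(), the drivers now go through eth_hw_addr_set(), of_get_ethdev_address() and eth_hw_addr_random(), which lets the core treat dev_addr as read-only and keep its address tracking consistent. A minimal sketch of the pattern, assuming a hypothetical "foo" driver; only the helper calls themselves are the real APIs used in the hunks around this point:

	#include <linux/etherdevice.h>
	#include <linux/of_net.h>

	/* Hypothetical probe-time helper, illustration only. */
	static void foo_probe_mac(struct net_device *ndev, struct device_node *np,
				  const u8 *pdata_addr)
	{
		if (pdata_addr && is_valid_ether_addr(pdata_addr))
			eth_hw_addr_set(ndev, pdata_addr);	/* was: memcpy(ndev->dev_addr, ...) */
		else if (of_get_ethdev_address(np, ndev))	/* was: of_get_mac_address(np, ndev->dev_addr) */
			eth_hw_addr_random(ndev);		/* fall back to a random address */
	}
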
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c index a9bdbf0dcfe1ed..3d9385a4989b7a 100644 --- a/drivers/net/ethernet/litex/litex_liteeth.c +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev) priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size; priv->tx_slot = 0; - err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + err = of_get_ethdev_address(pdev->dev.of_node, netdev); if (err) eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 28d5ad296646ae..bb14fa2241a36e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) addr[5] = mac_l & 0xff; } -static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) +static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr) { wrlp(mp, MAC_ADDR_HIGH, (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); @@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); netif_addr_lock_bh(dev); mv643xx_eth_program_unicast_filter(dev); @@ -2925,10 +2925,14 @@ static void set_params(struct mv643xx_eth_private *mp, struct net_device *dev = mp->dev; unsigned int tx_ring_size; - if (is_valid_ether_addr(pd->mac_addr)) - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); - else - uc_addr_get(mp, dev->dev_addr); + if (is_valid_ether_addr(pd->mac_addr)) { + eth_hw_addr_set(dev, pd->mac_addr); + } else { + u8 addr[ETH_ALEN]; + + uc_addr_get(mp, addr); + eth_hw_addr_set(dev, addr); + } mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; if (pd->rx_queue_size) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d460a27060125..5a7bdca22a6335 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, } /* Set mac address */ -static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, - int queue) +static void mvneta_mac_addr_set(struct mvneta_port *pp, + const unsigned char *addr, int queue) { unsigned int mac_h; unsigned int mac_l; @@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, } /* Handle tx checksum */ -static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) +static u32 mvneta_skb_tx_csum(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; @@ -2595,8 +2595,7 @@ static int mvneta_rx_hwbm(struct napi_struct *napi, } static inline void -mvneta_tso_put_hdr(struct sk_buff *skb, - struct mvneta_port *pp, struct mvneta_tx_queue *txq) +mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq) { int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; @@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb, tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = hdr_len; - tx_desc->command = mvneta_skb_tx_csum(pp, skb); + tx_desc->command = mvneta_skb_tx_csum(skb); tx_desc->command |= MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = txq->tso_hdrs_phys + 
txq->txq_put_index * TSO_HEADER_SIZE; @@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - mvneta_tso_put_hdr(skb, pp, txq); + mvneta_tso_put_hdr(skb, txq); while (data_left > 0) { int size; @@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) /* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); - tx_cmd = mvneta_skb_tx_csum(pp, skb); + tx_cmd = mvneta_skb_tx_csum(skb); tx_desc->data_size = skb_headlen(skb); @@ -3824,8 +3823,6 @@ static void mvneta_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state) { - struct net_device *ndev = to_net_dev(config->dev); - struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; /* We only support QSGMII, SGMII, 802.3z and RGMII modes. @@ -3833,16 +3830,9 @@ static void mvneta_validate(struct phylink_config *config, * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When = 1 (1000BASE-X) this field must be set to 1." */ - if (phy_interface_mode_is_8023z(state->interface)) { - if (!phylink_test(state->advertising, Autoneg)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - return; - } - } else if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_rgmii(state->interface)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + if (phy_interface_mode_is_8023z(state->interface) && + !phylink_test(state->advertising, Autoneg)) { + linkmode_zero(supported); return; } @@ -3854,11 +3844,12 @@ static void mvneta_validate(struct phylink_config *config, phylink_set(mask, Pause); /* Half-duplex at speeds higher than 100Mbit is unsupported */ - if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { + if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); } - if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { + + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { phylink_set(mask, 2500baseT_Full); phylink_set(mask, 2500baseX_Full); } @@ -3871,15 +3862,8 @@ static void mvneta_validate(struct phylink_config *config, phylink_set(mask, 100baseT_Full); } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. - */ - phylink_helper_basex_speed(state); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void mvneta_mac_pcs_get_state(struct phylink_config *config, @@ -5182,6 +5166,31 @@ static int mvneta_probe(struct platform_device *pdev) pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; + phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + pp->phylink_config.supported_interfaces); + if (comphy) { + /* If a COMPHY is present, we can support any of the serdes + * modes and switch between them. 
+ */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + pp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + pp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + pp->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { + /* No COMPHY, with only 2500BASE-X mode supported */ + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + pp->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || + phy_mode == PHY_INTERFACE_MODE_SGMII) { + /* No COMPHY, we can switch between 1000BASE-X and SGMII */ + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + pp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + pp->phylink_config.supported_interfaces); + } phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, phy_mode, &mvneta_phylink_ops); @@ -5242,14 +5251,14 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_ports; } - err = of_get_mac_address(dn, dev->dev_addr); + err = of_get_ethdev_address(dn, dev); if (!err) { mac_from = "device tree"; } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { mac_from = "hardware"; - memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, hw_mac_addr); } else { mac_from = "random"; eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d5c92e43f89e6a..587def69a6f75c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, char hw_mac_addr[ETH_ALEN] = {0}; char fw_mac_addr[ETH_ALEN]; - if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { *mac_from = "firmware node"; - ether_addr_copy(dev->dev_addr, fw_mac_addr); + eth_hw_addr_set(dev, fw_mac_addr); return; } @@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, mvpp21_get_mac_address(port, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { *mac_from = "hardware"; - ether_addr_copy(dev->dev_addr, hw_mac_addr); + eth_hw_addr_set(dev, hw_mac_addr); return; } } @@ -6261,32 +6261,13 @@ static void mvpp2_phylink_validate(struct phylink_config *config, struct mvpp2_port *port = mvpp2_phylink_to_port(config); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - /* Invalid combinations */ - switch (state->interface) { - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_XAUI: - if (!mvpp2_port_supports_xlg(port)) - goto empty_set; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - if (!mvpp2_port_supports_rgmii(port)) - goto empty_set; - break; - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - /* When in 802.3z mode, we must have AN enabled: - * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... - * When = 1 (1000BASE-X) this field must be set to 1. - */ - if (!phylink_test(state->advertising, Autoneg)) - goto empty_set; - break; - default: - break; - } + /* When in 802.3z mode, we must have AN enabled: + * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When = 1 (1000BASE-X) this field must be set to 1. 
+ */ + if (phy_interface_mode_is_8023z(state->interface) && + !phylink_test(state->advertising, Autoneg)) + goto empty_set; phylink_set(mask, Autoneg); phylink_set_port_modes(mask); @@ -6299,19 +6280,12 @@ static void mvpp2_phylink_validate(struct phylink_config *config, switch (state->interface) { case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_XAUI: - case PHY_INTERFACE_MODE_NA: if (mvpp2_port_supports_xlg(port)) { - phylink_set(mask, 10000baseT_Full); - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); + phylink_set_10g_modes(mask); phylink_set(mask, 10000baseKR_Full); } - if (state->interface != PHY_INTERFACE_MODE_NA) - break; - fallthrough; + break; + case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: @@ -6323,35 +6297,28 @@ static void mvpp2_phylink_validate(struct phylink_config *config, phylink_set(mask, 100baseT_Full); phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); - if (state->interface != PHY_INTERFACE_MODE_NA) - break; - fallthrough; + break; + case PHY_INTERFACE_MODE_1000BASEX: + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + break; + case PHY_INTERFACE_MODE_2500BASEX: - if (port->comphy || - state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - if (port->comphy || - state->interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); break; + default: goto empty_set; } - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - - phylink_helper_basex_speed(state); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); return; empty_set: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); } static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, @@ -6943,6 +6910,40 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; + if (mvpp2_port_supports_xlg(port)) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_XAUI, + port->phylink_config.supported_interfaces); + } + + if (mvpp2_port_supports_rgmii(port)) + phy_interface_set_rgmii(port->phylink_config.supported_interfaces); + + if (comphy) { + /* If a COMPHY is present, we can support any of the + * serdes modes and switch between them. 
+ */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { + /* No COMPHY, with only 2500BASE-X mode supported */ + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || + phy_mode == PHY_INTERFACE_MODE_SGMII) { + /* No COMPHY, we can switch between 1000BASE-X and SGMII + */ + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.supported_interfaces); + } + phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 93575800ca92ad..75ba57bd1d46d8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) return err; /* Set addr in the device */ - ether_addr_copy(dev->dev_addr, da); + eth_hw_addr_set(dev, da); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 34a089b71e5542..186d00a9ab35c3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) if (!cgx) return; - if (is_dev_rpm(cgx)) - return; - if (enable) { /* Enable inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); @@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx) int i; if (cgx->cgx_cmd_workq) { - flush_workqueue(cgx->cgx_cmd_workq); destroy_workqueue(cgx->cgx_cmd_workq); cgx->cgx_cmd_workq = NULL; } @@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx) static void cgx_populate_features(struct cgx *cgx) { if (is_dev_rpm(cgx)) - cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); + cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | + RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); else - cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | + RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); } static struct mac_ops cgx_mac_ops = { @@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, .mac_pause_frm_config = cgx_lmac_pause_frm_config, + .mac_enadis_ptp_config = cgx_lmac_ptp_config, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index d9bea13f15b863..8931864ee110c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -191,6 +191,7 @@ enum nix_scheduler { #define NIX_CHAN_SDP_CH_START (0x700ull) #define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) #define NIX_CHAN_SDP_NUM_CHANS 256 +#define NIX_CHAN_CPT_CH_START (0x800ull) /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. 
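The cgx.c hunk above turns inbound PTP timestamping into a regular mac_ops callback (mac_enadis_ptp_config, added to struct mac_ops in lmac_common.h just below) rather than a CGX-only code path, which is why the RPM feature set now advertises RVU_LMAC_FEAT_PTP as well. A hedged sketch of how a caller in the AF could dispatch through the ops table; rvu_foo_ptp_cfg() is hypothetical and get_mac_ops() is assumed to be the existing lookup helper:

	/* Illustration only: toggle inbound PTP timestamping on one LMAC,
	 * letting the ops table select the CGX or RPM implementation. */
	static void rvu_foo_ptp_cfg(void *cgxd, int lmac_id, bool enable)
	{
		struct mac_ops *mac_ops = get_mac_ops(cgxd);	/* assumed helper */

		if (mac_ops && mac_ops->mac_enadis_ptp_config)
			mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	}
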
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index c38306b3384a70..fc6e7423cbd81d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -102,6 +102,11 @@ struct mac_ops { void (*mac_pause_frm_config)(void *cgxd, int lmac_id, bool enable); + + /* Enable/Disable Inbound PTP */ + void (*mac_enadis_ptp_config)(void *cgxd, + int lmac_id, + bool enable); }; struct cgx { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 154877706a0e11..4e79e918a16177 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -84,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0009) +#define OTX2_MBOX_VERSION (0x000a) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ cgx_pause_frm_cfg) \ -M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ -M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ -M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ -M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ -M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ - cgx_set_link_mode_rsp) \ -M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ - cgx_features_info_msg) \ -M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ -M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ - cgx_mac_addr_add_rsp) \ -M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ +M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ +M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ +M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \ msg_rsp) \ -M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \ cgx_max_dmac_entries_get_rsp) \ -M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ -M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ +M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ +M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ + cgx_set_link_mode_rsp) \ +M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ + cgx_features_info_msg) \ +M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ @@ -186,9 +186,12 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \ M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ 
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ cpt_rd_wr_reg_msg) \ +M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \ + cpt_inline_ipsec_cfg_msg, msg_rsp) \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ +M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \ /* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ @@ -229,6 +232,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_req, \ npc_mcam_read_entry_rsp) \ +M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \ + npc_set_pkind, msg_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ @@ -270,6 +275,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ +M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \ + nix_inline_ipsec_cfg, msg_rsp) \ +M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \ + nix_inline_ipsec_lf_cfg, msg_rsp) \ M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ @@ -284,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ #define MBOX_UP_CGX_MESSAGES \ M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) +#define MBOX_UP_CPT_MESSAGES \ +M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp) + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES +MBOX_UP_CPT_MESSAGES #undef M }; @@ -575,10 +588,13 @@ struct cgx_mac_addr_update_req { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ -#define RVU_MAC_VERSION BIT_ULL(2) -#define RVU_MAC_CGX BIT_ULL(3) -#define RVU_MAC_RPM BIT_ULL(4) +#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1) + /* flow control from physical link higig2 messages */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precison time protocol */ +#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */ +#define RVU_MAC_VERSION BIT_ULL(4) +#define RVU_MAC_CGX BIT_ULL(5) +#define RVU_MAC_RPM BIT_ULL(6) struct cgx_features_info_msg { struct mbox_msghdr hdr; @@ -593,6 +609,22 @@ struct rpm_stats_rsp { u64 tx_stats[RPM_TX_STATS_COUNT]; }; +struct npc_set_pkind { + struct mbox_msghdr hdr; +#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0) +#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63) + u64 mode; +#define PKIND_TX BIT_ULL(0) +#define PKIND_RX BIT_ULL(1) + u8 dir; + u8 pkind; /* valid only in case custom flag */ + u8 var_len_off; /* Offset of custom header length field. 
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND + */ + u8 var_len_off_mask; /* Mask for length with in offset */ + u8 shift_dir; /* shift direction to get length of the header at var_len_off */ +}; + /* NPA mbox message formats */ /* NPA mailbox error codes @@ -698,6 +730,8 @@ enum nix_af_status { NIX_AF_ERR_INVALID_BANDPROF = -426, NIX_AF_ERR_IPOLICER_NOTSUPP = -427, NIX_AF_ERR_BANDPROF_INVAL_REQ = -428, + NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, + NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, }; /* For NIX RX vtag action */ @@ -1065,6 +1099,40 @@ struct nix_bp_cfg_rsp { u8 chan_cnt; /* Number of channel for which bpids are assigned */ }; +/* Global NIX inline IPSec configuration */ +struct nix_inline_ipsec_cfg { + struct mbox_msghdr hdr; + u32 cpt_credit; + struct { + u8 egrp; + u8 opcode; + u16 param1; + u16 param2; + } gen_cfg; + struct { + u16 cpt_pf_func; + u8 cpt_slot; + } inst_qsel; + u8 enable; +}; + +/* Per NIX LF inline IPSec configuration */ +struct nix_inline_ipsec_lf_cfg { + struct mbox_msghdr hdr; + u64 sa_base_addr; + struct { + u32 tag_const; + u16 lenm1_max; + u8 sa_pow2_size; + u8 tt; + } ipsec_cfg0; + struct { + u32 sa_idx_max; + u8 sa_idx_w; + } ipsec_cfg1; + u8 enable; +}; + struct nix_hw_info { struct mbox_msghdr hdr; u16 rsvs16; @@ -1357,12 +1425,15 @@ struct npc_mcam_get_stats_rsp { enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, + PTP_OP_GET_TSTMP = 2, + PTP_OP_SET_THRESH = 3, }; struct ptp_req { struct mbox_msghdr hdr; u8 op; s64 scaled_ppm; + u64 thresh; }; struct ptp_rsp { @@ -1399,7 +1470,9 @@ enum cpt_af_status { CPT_AF_ERR_LF_INVALID = -903, CPT_AF_ERR_ACCESS_DENIED = -904, CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905, - CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906 + CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906, + CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907, + CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908 }; /* CPT mbox message formats */ @@ -1420,6 +1493,22 @@ struct cpt_lf_alloc_req_msg { int blkaddr; }; +#define CPT_INLINE_INBOUND 0 +#define CPT_INLINE_OUTBOUND 1 + +/* Mailbox message request format for CPT IPsec + * inline inbound and outbound configuration. + */ +struct cpt_inline_ipsec_cfg_msg { + struct mbox_msghdr hdr; + u8 enable; + u8 slot; + u8 dir; + u8 sso_pf_func_ovrd; + u16 sso_pf_func; /* inbound path SSO_PF_FUNC */ + u16 nix_pf_func; /* outbound path NIX_PF_FUNC */ +}; + /* Mailbox message request and response format for CPT stats. */ struct cpt_sts_req { struct mbox_msghdr hdr; @@ -1478,6 +1567,13 @@ struct cpt_rxc_time_cfg_req { u16 active_limit; }; +/* Mailbox message request format to request for CPT_INST_S lmtst. 
*/ +struct cpt_inst_lmtst_req { + struct mbox_msghdr hdr; + u64 inst[8]; + u64 rsvd; +}; + struct sdp_node_info { /* Node to which this PF belons to */ u8 node_id; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3a819b24accc66..77fd39e2c8dbde 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -8,6 +8,8 @@ #ifndef NPC_H #define NPC_H +#define NPC_KEX_CHAN_MASK 0xFFFULL + enum NPC_LID_E { NPC_LID_LA = 0, NPC_LID_LB, @@ -25,15 +27,12 @@ enum npc_kpu_la_ltype { NPC_LT_LA_8023 = 1, NPC_LT_LA_ETHER, NPC_LT_LA_IH_NIX_ETHER, - NPC_LT_LA_IH_8_ETHER, - NPC_LT_LA_IH_4_ETHER, - NPC_LT_LA_IH_2_ETHER, - NPC_LT_LA_HIGIG2_ETHER, + NPC_LT_LA_HIGIG2_ETHER = 7, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_LT_LA_CH_LEN_90B_ETHER, NPC_LT_LA_CPT_HDR, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_LT_LA_CUSTOM_PRE_L2_ETHER, NPC_LT_LA_CUSTOM0 = 0xE, NPC_LT_LA_CUSTOM1 = 0xF, }; @@ -148,10 +147,11 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. */ -#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND enum npc_pkind_type { NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, NPC_RX_CPT_HDR_PKIND, @@ -162,6 +162,10 @@ enum npc_pkind_type { NPC_TX_DEF_PKIND, /* NIX-TX PKIND */ }; +enum npc_interface_type { + NPC_INTF_MODE_DEF, +}; + /* list of known and supported fields in packet header and * fields present in key structure. */ @@ -549,7 +553,7 @@ struct npc_kpu_profile_fwdata { #define KPU_SIGN 0x00666f727075706b #define KPU_NAME_LEN 32 /** Maximum number of custom KPU entries supported by the built-in profile. 
*/ -#define KPU_MAX_CST_ENT 2 +#define KPU_MAX_CST_ENT 6 /* KPU Profle Header */ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */ u8 name[KPU_NAME_LEN]; /* KPU Profile name */ @@ -589,6 +593,8 @@ struct rvu_npc_mcam_rule { u8 default_rule; bool enable; bool vfvlan_cfg; + u16 chan; + u16 chan_mask; }; #endif /* NPC_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 588822a0cf21e7..0fe7ad35e36fd1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -8,7 +8,7 @@ #ifndef NPC_PROFILE_H #define NPC_PROFILE_H -#define NPC_KPU_PROFILE_VER 0x0000000100060000 +#define NPC_KPU_PROFILE_VER 0x0000000100070000 #define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF)) #define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF)) #define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF)) @@ -176,18 +176,18 @@ enum npc_kpu_parser_state { NPC_S_KPU1_EXDSA, NPC_S_KPU1_HIGIG2, NPC_S_KPU1_IH_NIX_HIGIG2, - NPC_S_KPU1_CUSTOM_L2_90B, + NPC_S_KPU1_CUSTOM_PRE_L2, NPC_S_KPU1_CPT_HDR, - NPC_S_KPU1_CUSTOM_L2_24B, NPC_S_KPU1_VLAN_EXDSA, NPC_S_KPU2_CTAG, NPC_S_KPU2_CTAG2, NPC_S_KPU2_SBTAG, NPC_S_KPU2_QINQ, NPC_S_KPU2_ETAG, - NPC_S_KPU2_PREHEADER, NPC_S_KPU2_EXDSA, NPC_S_KPU2_NGIO, + NPC_S_KPU2_CPT_CTAG, + NPC_S_KPU2_CPT_QINQ, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, @@ -979,8 +979,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, + NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER, 0, 0, 0, 0, 0, @@ -996,27 +996,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 36, 40, 44, 0, 0, - NPC_S_KPU1_CUSTOM_L2_24B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 40, 54, 58, 0, 0, - NPC_S_KPU1_CPT_HDR, 0, 0, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CPT_HDR, 40, 0, NPC_LID_LA, NPC_LT_NA, 0, - 0, 0, 0, 0, + 0, 7, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 102, 106, 110, 0, 0, - NPC_S_KPU1_CUSTOM_L2_90B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, 0, 0, 0, 0, 0, @@ -1060,6 +1060,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { }; static struct npc_kpu_profile_cam kpu1_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -1377,33 +1381,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, 0x0000, }, - { - NPC_S_KPU1_IH, 0xff, - NPC_IH_W | NPC_IH_UTAG, - NPC_IH_W | NPC_IH_UTAG, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_IH, 0xff, - NPC_IH_W, - NPC_IH_W | NPC_IH_UTAG, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_IH, 0xff, - 0x0000, - NPC_IH_W | NPC_IH_UTAG, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, { NPC_S_KPU1_IH, 0xff, 0x0000, @@ -1711,7 +1688,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP, 0xffff, 0x0000, @@ -1720,7 +1697,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP6, 0xffff, 
0x0000, @@ -1729,7 +1706,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ARP, 0xffff, 0x0000, @@ -1738,7 +1715,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_RARP, 0xffff, 0x0000, @@ -1747,7 +1724,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_PTP, 0xffff, 0x0000, @@ -1756,7 +1733,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_FCOE, 0xffff, 0x0000, @@ -1765,7 +1742,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_CTAG, @@ -1774,7 +1751,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1783,7 +1760,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_SBTAG, 0xffff, 0x0000, @@ -1792,7 +1769,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -1801,7 +1778,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ETAG, 0xffff, 0x0000, @@ -1810,7 +1787,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1819,7 +1796,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSM, 0xffff, 0x0000, @@ -1828,7 +1805,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_NSH, 0xffff, 0x0000, @@ -1837,7 +1814,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, 0x0000, 0x0000, 0x0000, @@ -1847,150 +1824,24 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_QINQ, - 0xffff, 0x0000, 0x0000, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, - 0xffff, 0x0000, 0x0000, - NPC_ETYPE_CTAG, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, - 0xffff, - 0x0000, - 0x0000, - NPC_ETYPE_QINQ, - 0xffff, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, 
- 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_PTP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_FCOE, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1999,16 +1850,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_S_KPU1_CPT_HDR, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -2016,51 +1858,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, 0x0000, }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ETAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSU, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSM, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_NSH, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, { NPC_S_KPU1_VLAN_EXDSA, 0xff, NPC_ETYPE_CTAG, @@ -2082,6 +1879,10 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu2_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -2804,114 +2605,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, 0x0000, }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_PTP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_FCOE, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_SBTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_QINQ, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_MPLSU, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_MPLSM, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU2_PREHEADER, 0xff, - NPC_ETYPE_NSH, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, { NPC_S_KPU2_EXDSA, 0xff, NPC_DSA_EDSA, @@ -3065,6 +2758,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, 0x0000, }, + { + 
NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, { NPC_S_NA, 0X00, 0x0000, @@ -3077,6 +2806,10 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu3_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -4054,6 +3787,10 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu4_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -4365,6 +4102,10 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu5_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -5360,6 +5101,10 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu6_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -6031,6 +5776,10 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu7_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -6351,6 +6100,10 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu8_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -7094,6 +6847,10 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu9_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -7492,15 +7249,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = { 0x0000, 0x0000, }, - { - NPC_S_KPU9_GTPU, 0xff, - 0x0000, - 0x0000, - NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, - NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, - 0x0000, - 0x0000, - }, { NPC_S_KPU9_GTPU, 0xff, 0x0000, @@ -7567,6 +7315,10 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu10_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -7734,6 +7486,10 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu11_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -8045,6 +7801,10 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu12_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -8302,6 +8062,10 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu13_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ 
-8316,6 +8080,10 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu14_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -8330,6 +8098,10 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu15_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -8533,6 +8305,10 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = { }; static struct npc_kpu_profile_cam kpu16_cam_entries[] = { + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, + NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, NPC_KPU_NOP_CAM, { @@ -8592,6 +8368,10 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = { }; static struct npc_kpu_profile_action kpu1_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -8879,30 +8659,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE, 0, 0, 0, 0, }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 14, 16, 0, 0, - NPC_S_KPU2_PREHEADER, 8, 1, - NPC_LID_LA, NPC_LT_LA_IH_8_ETHER, - 0, - 1, 0xff, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 14, 16, 0, 0, - NPC_S_KPU2_PREHEADER, 4, 1, - NPC_LID_LA, NPC_LT_LA_IH_4_ETHER, - 0, - 1, 0xff, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 12, 14, 16, 0, 0, - NPC_S_KPU2_PREHEADER, 2, 1, - NPC_LID_LA, NPC_LT_LA_IH_2_ETHER, - 0, - 1, 0xff, 0, 0, - }, { NPC_ERRLEV_LA, NPC_EC_IH_LENGTH, 0, 0, 0, 0, 1, @@ -9192,313 +8948,153 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 102, 1, - NPC_LID_LA, 
NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_S_KPU2_CTAG2, 12, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 60, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_S_KPU2_CTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 60, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 58, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 58, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_S_KPU2_QINQ, 12, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | 
NPC_F_LA_L_WITH_VLAN, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, + 8, 0, 6, 3, 0, + NPC_S_KPU5_CPT_IP, 14, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, + 6, 0, 0, 3, 0, + NPC_S_KPU5_CPT_IP6, 14, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_NSH, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CPT_CTAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_UNK_ETYPE, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CPT_QINQ, 12, 1, + NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, }, { @@ -9520,6 +9116,10 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }; static struct npc_kpu_profile_action kpu2_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -10162,102 +9762,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { NPC_F_LB_U_UNK_ETYPE, 0, 0, 0, 0, }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU5_IP, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, - NPC_S_KPU5_IP6, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_ARP, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_RARP, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_PTP, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 2, 0, - NPC_S_KPU5_FCOE, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 0, 0, 0, - NPC_S_KPU3_CTAG_C, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 20, 0, 0, - 
NPC_S_KPU3_STAG_C, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 0, 0, 0, - NPC_S_KPU3_QINQ_C, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 1, 0, - NPC_S_KPU4_MPLS, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 1, 0, - NPC_S_KPU4_MPLS, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 1, 0, - NPC_S_KPU4_NSH, 14, 0, - NPC_LID_LB, NPC_LT_NA, - 0, - 0, 0, 0, 0, - }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 2, 0, @@ -10394,6 +9898,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, 0, }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, @@ -10405,6 +9941,10 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }; static struct npc_kpu_profile_action kpu3_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -11274,6 +10814,10 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }; static struct npc_kpu_profile_action kpu4_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -11551,6 +11095,10 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { }; static struct npc_kpu_profile_action kpu5_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -12436,6 +11984,10 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }; static struct npc_kpu_profile_action kpu6_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -13033,6 +12585,10 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }; static struct npc_kpu_profile_action kpu7_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -13318,6 +12874,10 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = { }; static struct npc_kpu_profile_action kpu8_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -13979,6 +13539,10 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = { }; static struct npc_kpu_profile_action kpu9_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -14335,16 +13899,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - 
NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LE, NPC_LT_LE_GTPU, - NPC_F_LE_L_GTPU_G_PDU, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU12_TU_IP, 8, 1, + 8, 0, 6, 2, 1, + NPC_S_NA, 0, 1, NPC_LID_LE, NPC_LT_LE_GTPU, 0, 0, 0, 0, 0, @@ -14400,6 +13956,10 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = { }; static struct npc_kpu_profile_action kpu10_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -14549,6 +14109,10 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = { }; static struct npc_kpu_profile_action kpu11_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -14826,6 +14390,10 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = { }; static struct npc_kpu_profile_action kpu12_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -15055,6 +14623,10 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = { }; static struct npc_kpu_profile_action kpu13_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -15068,6 +14640,10 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = { }; static struct npc_kpu_profile_action kpu14_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -15081,6 +14657,10 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = { }; static struct npc_kpu_profile_action kpu15_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { @@ -15262,6 +14842,10 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = { }; static struct npc_kpu_profile_action kpu16_action_entries[] = { + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, + NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, NPC_KPU_NOP_ACTION, { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 9b8e59f4c206d5..d6321de3cc1719 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -27,54 +27,29 @@ #define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 -#define PCI_RST_BAR_NO 0 #define PTP_CLOCK_CFG 0xF00ULL #define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0) +#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1) +#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2) +#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) +#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) +#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) +#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) +#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) + +#define PTP_PPS_HI_INCR 0xF60ULL +#define PTP_PPS_LO_INCR 0xF68ULL +#define PTP_PPS_THRESH_HI 0xF58ULL + #define PTP_CLOCK_LO 0xF08ULL #define PTP_CLOCK_HI 0xF10ULL #define PTP_CLOCK_COMP 0xF18ULL - -#define RST_BOOT 0x1600ULL -#define RST_MUL_BITS GENMASK_ULL(38, 33) -#define CLOCK_BASE_RATE 50000000ULL +#define PTP_TIMESTAMP 0xF20ULL static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; -static u64 get_clock_rate(void) -{ - u64 cfg, ret = CLOCK_BASE_RATE * 16; - struct pci_dev *pdev; 
- void __iomem *base; - - /* To get the input clock frequency with which PTP co-processor - * block is running the base frequency(50 MHz) needs to be multiplied - * with multiplier bits present in RST_BOOT register of RESET block. - * Hence below code gets the multiplier bits from the RESET PCI - * device present in the system. - */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_RST, NULL); - if (!pdev) - goto error; - - base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO); - if (!base) - goto error_put_pdev; - - cfg = readq(base + RST_BOOT); - ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg); - - iounmap(base); - -error_put_pdev: - pci_dev_put(pdev); - -error: - return ret; -} - struct ptp *ptp_get(void) { struct ptp *ptp = first_ptp_block; @@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk) return 0; } +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) +{ + struct pci_dev *pdev; + u64 clock_comp; + u64 clock_cfg; + + if (!ptp) + return; + + pdev = ptp->pdev; + + if (!sclk) { + dev_err(&pdev->dev, "PTP input clock cannot be zero\n"); + return; + } + + /* sclk is in MHz */ + ptp->clock_rate = sclk * 1000000; + + /* Enable PTP clock */ + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + + if (ext_clk_freq) { + ptp->clock_rate = ext_clk_freq; + /* Set GPIO as PTP clock source */ + clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN; + } + + if (extts) { + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE; + /* Set GPIO as timestamping source */ + clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN; + } + + clock_cfg |= PTP_CLOCK_CFG_PTP_EN; + clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + + /* Set 50% duty cycle for 1Hz output */ + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); + + clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + /* Initial compensation value to start the nanosecs counter */ + writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); +} + +static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) +{ + *clk = readq(ptp->reg_base + PTP_TIMESTAMP); + + return 0; +} + +static int ptp_set_thresh(struct ptp *ptp, u64 thresh) +{ + writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI); + + return 0; +} + static int ptp_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct ptp *ptp; - u64 clock_comp; - u64 clock_cfg; int err; ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL); @@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev, ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; - ptp->clock_rate = get_clock_rate(); - - /* Enable PTP clock */ - clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); - clock_cfg |= PTP_CLOCK_CFG_PTP_EN; - writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); - - clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; - /* Initial compensation value to start the nanosecs counter */ - writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); - pci_set_drvdata(pdev, ptp); if (!first_ptp_block) first_ptp_block = ptp; @@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_GET_CLOCK: err = ptp_get_clock(rvu->ptp, &rsp->clk); break; + case PTP_OP_GET_TSTMP: + err = ptp_get_tstmp(rvu->ptp, &rsp->clk); + break; + case PTP_OP_SET_THRESH: + err = ptp_set_thresh(rvu->ptp, req->thresh); + break; default: err = -EINVAL; 
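/*
 * A minimal standalone sketch of the arithmetic ptp_start() performs
 * above, assuming a 1000 MHz sclk: PTP_CLOCK_COMP takes the nanoseconds
 * per input tick in 32.32 fixed point, and the PTP_PPS_HI/LO_INCR value
 * 0x1dcd650000000000 carries a 500 ms half-period (500000000 ns) in its
 * upper 32 bits, which matches the 1 Hz / 50% duty cycle output noted in
 * the code.  The register interpretation is inferred from the driver
 * source above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sclk_mhz = 1000;                    /* assumed sclk value */
	uint64_t clock_rate = sclk_mhz * 1000000;    /* PTP ticks per second */
	uint64_t clock_comp = (1000000000ULL << 32) / clock_rate;

	/* 1 ns per tick -> 0x100000000 in 32.32 fixed point */
	printf("PTP_CLOCK_COMP  = 0x%llx\n", (unsigned long long)clock_comp);
	printf("PPS half-period = %llu ns\n",
	       (unsigned long long)(0x1dcd650000000000ULL >> 32));
	return 0;
}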
break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 76d404b24552e0..1b81a0493cd32b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -20,6 +20,7 @@ struct ptp { struct ptp *ptp_get(void); void ptp_put(struct ptp *ptp); +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts); extern struct pci_driver ptp_driver; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 07b0eafccad87c..e695fa0e82a94c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, + .mac_enadis_ptp_config = rpm_lmac_ptp_config, }; struct mac_ops *rpm_get_mac_ops(void) @@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) return 0; } + +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return; + + cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG); + if (enable) + cfg |= RPMX_RX_TS_PREPEND; + else + cfg &= ~RPMX_RX_TS_PREPEND; + rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index f0b069442dccb9..57c8a687b488a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -14,6 +14,8 @@ #define PCI_DEVID_CN10K_RPM 0xA060 /* Registers */ +#define RPMX_CMRX_CFG 0x00 +#define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_CMRX_SW_INT 0x180 #define RPMX_CMRX_SW_INT_W1S 0x188 #define RPMX_CMRX_SW_INT_ENA_W1S 0x198 @@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause); int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat); +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 35836903b7fbc3..cb56e171ddd4c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -854,6 +854,7 @@ static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr) block->lfcfg_reg = NIX_PRIV_LFX_CFG; block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; block->lfreset_reg = NIX_AF_LF_RST; + block->rvu = rvu; sprintf(block->name, "NIX%d", blkid); rvu->nix_blkaddr[blkid] = blkaddr; return rvu_alloc_bitmap(&block->lf); @@ -883,6 +884,7 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr) block->lfcfg_reg = CPT_PRIV_LFX_CFG; block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; block->lfreset_reg = CPT_AF_LF_RST; + block->rvu = rvu; sprintf(block->name, "CPT%d", blkid); return rvu_alloc_bitmap(&block->lf); } @@ -940,6 +942,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu) block->lfcfg_reg = NPA_PRIV_LFX_CFG; block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; block->lfreset_reg = NPA_AF_LF_RST; + block->rvu = rvu; sprintf(block->name, "NPA"); err = rvu_alloc_bitmap(&block->lf); if (err) { @@ -979,6 +982,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu) 
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; block->lfreset_reg = SSO_AF_LF_HWGRP_RST; + block->rvu = rvu; sprintf(block->name, "SSO GROUP"); err = rvu_alloc_bitmap(&block->lf); if (err) { @@ -1003,6 +1007,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu) block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; block->lfreset_reg = SSOW_AF_LF_HWS_RST; + block->rvu = rvu; sprintf(block->name, "SSOWS"); err = rvu_alloc_bitmap(&block->lf); if (err) { @@ -1028,6 +1033,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu) block->lfcfg_reg = TIM_PRIV_LFX_CFG; block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; block->lfreset_reg = TIM_AF_LF_RST; + block->rvu = rvu; sprintf(block->name, "TIM"); err = rvu_alloc_bitmap(&block->lf); if (err) { @@ -1287,6 +1293,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, return (val & 0xFFF); } +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int numlfs, total_lfs = 0, nr_blocks = 0; + int i, num_blkaddr[BLK_COUNT] = { 0 }; + struct rvu_block *block; + int blkaddr; + u16 start_slot; + + if (!is_blktype_attached(pfvf, blktype)) + return -ENODEV; + + /* Get all the block addresses from which LFs are attached to + * the given pcifunc in num_blkaddr[]. + */ + for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) { + block = &rvu->hw->block[blkaddr]; + if (block->type != blktype) + continue; + if (!is_block_implemented(rvu->hw, blkaddr)) + continue; + + numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr); + if (numlfs) { + total_lfs += numlfs; + num_blkaddr[nr_blocks] = blkaddr; + nr_blocks++; + } + } + + if (global_slot >= total_lfs) + return -ENODEV; + + /* Based on the given global slot number retrieve the + * correct block address out of all attached block + * addresses and slot number in that block. 
+ */ + total_lfs = 0; + blkaddr = -ENODEV; + for (i = 0; i < nr_blocks; i++) { + numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]); + total_lfs += numlfs; + if (global_slot < total_lfs) { + blkaddr = num_blkaddr[i]; + start_slot = total_lfs - numlfs; + *slot_in_block = global_slot - start_slot; + break; + } + } + + return blkaddr; +} + static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -2345,7 +2405,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw) int devid; if (mw->mbox_wq) { - flush_workqueue(mw->mbox_wq); destroy_workqueue(mw->mbox_wq); mw->mbox_wq = NULL; } @@ -2473,7 +2532,8 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) rvu_npa_lf_teardown(rvu, pcifunc, lf); else if ((block->addr == BLKADDR_CPT0) || (block->addr == BLKADDR_CPT1)) - rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot); + rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf, + slot); err = rvu_lf_reset(rvu, block, lf); if (err) { @@ -2671,6 +2731,8 @@ static void rvu_unregister_interrupts(struct rvu *rvu) { int irq; + rvu_cpt_unregister_interrupts(rvu); + /* Disable the Mbox interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); @@ -2880,6 +2942,11 @@ static int rvu_register_interrupts(struct rvu *rvu) goto fail; } rvu->irq_allocated[offset] = true; + + ret = rvu_cpt_register_interrupts(rvu); + if (ret) + goto fail; + return 0; fail: @@ -2890,7 +2957,6 @@ static int rvu_register_interrupts(struct rvu *rvu) static void rvu_flr_wq_destroy(struct rvu *rvu) { if (rvu->flr_wq) { - flush_workqueue(rvu->flr_wq); destroy_workqueue(rvu->flr_wq); rvu->flr_wq = NULL; } @@ -3186,6 +3252,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&rvu->rswitch.switch_lock); + if (rvu->fwdata) + ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, + rvu->fwdata->ptp_ext_tstamp); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 1d9411232f1dab..66e45d733824ef 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -101,6 +101,7 @@ struct rvu_block { u64 msixcfg_reg; u64 lfreset_reg; unsigned char name[NAME_SIZE]; + struct rvu *rvu; }; struct nix_mcast { @@ -220,6 +221,7 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; + bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ @@ -237,6 +239,7 @@ struct rvu_pfvf { bool cgx_in_use; /* this PF/VF using CGX? 
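/*
 * A small userspace sketch of the slot translation that
 * rvu_get_blkaddr_from_slot() implements earlier in this hunk: LF counts
 * are accumulated block by block until the global slot falls inside the
 * running total, and the local slot is the offset from that block's
 * first LF.  The per-block LF counts below are assumptions for
 * illustration only.
 */
#include <stdio.h>

int main(void)
{
	int lfs_per_block[2] = { 3, 2 };	/* assumed: 3 LFs on CPT0, 2 on CPT1 */
	int global_slot = 4;
	int total = 0;

	for (int blk = 0; blk < 2; blk++) {
		total += lfs_per_block[blk];
		if (global_slot < total) {
			int start_slot = total - lfs_per_block[blk];

			printf("global slot %d -> block %d, local slot %d\n",
			       global_slot, blk, global_slot - start_slot);
			break;
		}
	}
	return 0;
}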
*/ int cgx_users; /* number of cgx users - used only by PFs */ + int intf_mode; u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ @@ -394,7 +397,9 @@ struct rvu_fwdata { u64 mcam_addr; u64 mcam_sz; u64 msixtr_base; -#define FWDATA_RESERVED_MEM 1023 + u32 ptp_ext_clk_rate; + u32 ptp_ext_tstamp; +#define FWDATA_RESERVED_MEM 1022 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 5 #define CGX_LMACS_MAX 4 @@ -656,6 +661,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block); /* RVU HW reg validation */ enum regmap_block { @@ -794,6 +801,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); @@ -805,7 +813,11 @@ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index); /* CPT APIs */ -int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); +int rvu_cpt_register_interrupts(struct rvu *rvu); +void rvu_cpt_unregister_interrupts(struct rvu *rvu); +int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, + int slot); +int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc); /* CN10K RVU */ int rvu_set_channels_base(struct rvu *rvu); @@ -827,4 +839,7 @@ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 81e8ea9ee30ea0..2ca182a4ce8236 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu) static void rvu_cgx_wq_destroy(struct rvu *rvu) { if (rvu->cgx_evh_wq) { - flush_workqueue(rvu->cgx_evh_wq); destroy_workqueue(rvu->cgx_evh_wq); rvu->cgx_evh_wq = NULL; } @@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu) * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not. 
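/*
 * The rvu_fwdata change above trades one reserved u64 for the two new
 * u32 PTP fields, dropping FWDATA_RESERVED_MEM from 1023 to 1022 so the
 * firmware-shared layout keeps its size.  A compile-time check in the
 * same spirit, modelling only the tail of the structure:
 */
#include <stdint.h>
#include <stdio.h>

struct fwdata_tail_old {
	uint64_t msixtr_base;
	uint64_t reserved[1023];
};

struct fwdata_tail_new {
	uint64_t msixtr_base;
	uint32_t ptp_ext_clk_rate;
	uint32_t ptp_ext_tstamp;
	uint64_t reserved[1022];
};

_Static_assert(sizeof(struct fwdata_tail_old) == sizeof(struct fwdata_tail_new),
	       "fwdata layout size must not change");

int main(void)
{
	printf("fwdata tail size: %zu bytes\n", sizeof(struct fwdata_tail_new));
	return 0;
}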
*/ -static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) +inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) @@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); - cgx_lmac_ptp_config(cgxd, lmac_id, enable); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; + /* This flag is required to clean up CGX conf if app gets killed */ + pfvf->hw_rx_tstamp_en = enable; return 0; } @@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { + if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + return -EPERM; + return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 46a41cfff5751b..7dbbc115cde426 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu) /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set */ - if (cpt_chan_base <= 0x800) { - hw->cpt_chan_base = 0x800; + if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) { + hw->cpt_chan_base = NIX_CHAN_CPT_CH_START; } else { dev_err(rvu->dev, "CPT channels could not fit in the range 2048-4095\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 1f90a7403392d5..45357deecabbf9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -37,6 +37,236 @@ (_rsp)->free_sts_##etype = free_sts; \ }) +static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr) +{ + struct rvu_block *block = ptr; + struct rvu *rvu = block->rvu; + int blkaddr = block->addr; + u64 reg0, reg1, reg2; + + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); + reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); + if (!is_rvu_otx2(rvu)) { + reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2)); + dev_err_ratelimited(rvu->dev, + "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx", + reg0, reg1, reg2); + } else { + dev_err_ratelimited(rvu->dev, + "Received CPTAF FLT irq : 0x%llx, 0x%llx", + reg0, reg1); + } + + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1); + if (!is_rvu_otx2(rvu)) + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2); + + return IRQ_HANDLED; +} + +static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr) +{ + struct rvu_block *block = ptr; + struct rvu *rvu = block->rvu; + int blkaddr = block->addr; + 
u64 reg; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); + dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg); + + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg); + return IRQ_HANDLED; +} + +static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr) +{ + struct rvu_block *block = ptr; + struct rvu *rvu = block->rvu; + int blkaddr = block->addr; + u64 reg; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); + dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg); + + rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg); + return IRQ_HANDLED; +} + +static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs, + irq_handler_t handler, + const char *name) +{ + struct rvu *rvu = block->rvu; + int ret; + + ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0, + name, block); + if (ret) { + dev_err(rvu->dev, "RVUAF: %s irq registration failed", name); + return ret; + } + + WARN_ON(rvu->irq_allocated[irq_offs]); + rvu->irq_allocated[irq_offs] = true; + return 0; +} + +static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) +{ + struct rvu *rvu = block->rvu; + int blkaddr = block->addr; + int i; + + /* Disable all CPT AF interrupts */ + for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++) + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); + + for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++) + if (rvu->irq_allocated[off + i]) { + free_irq(pci_irq_vector(rvu->pdev, off + i), block); + rvu->irq_allocated[off + i] = false; + } +} + +static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int i, offs; + + if (!is_block_implemented(rvu->hw, blkaddr)) + return; + offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF; + if (!offs) { + dev_warn(rvu->dev, + "Failed to get CPT_AF_INT vector offsets\n"); + return; + } + block = &hw->block[blkaddr]; + if (!is_rvu_otx2(rvu)) + return cpt_10k_unregister_interrupts(block, offs); + + /* Disable all CPT AF interrupts */ + for (i = 0; i < CPT_AF_INT_VEC_RVU; i++) + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); + + for (i = 0; i < CPT_AF_INT_VEC_CNT; i++) + if (rvu->irq_allocated[offs + i]) { + free_irq(pci_irq_vector(rvu->pdev, offs + i), block); + rvu->irq_allocated[offs + i] = false; + } +} + +void rvu_cpt_unregister_interrupts(struct rvu *rvu) +{ + cpt_unregister_interrupts(rvu, BLKADDR_CPT0); + cpt_unregister_interrupts(rvu, BLKADDR_CPT1); +} + +static int cpt_10k_register_interrupts(struct rvu_block *block, int off) +{ + struct rvu *rvu = block->rvu; + int blkaddr = block->addr; + char irq_name[16]; + int i, ret; + + for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { + snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + ret = rvu_cpt_do_register_interrupt(block, off + i, + rvu_cpt_af_flt_intr_handler, + irq_name); + if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + } + + ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, + rvu_cpt_af_rvu_intr_handler, + "CPTAF RVU"); + if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); + + ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS, + rvu_cpt_af_ras_intr_handler, + "CPTAF RAS"); 
+ if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1); + + return 0; +err: + rvu_cpt_unregister_interrupts(rvu); + return ret; +} + +static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int i, offs, ret = 0; + char irq_name[16]; + + if (!is_block_implemented(rvu->hw, blkaddr)) + return 0; + + block = &hw->block[blkaddr]; + offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF; + if (!offs) { + dev_warn(rvu->dev, + "Failed to get CPT_AF_INT vector offsets\n"); + return 0; + } + + if (!is_rvu_otx2(rvu)) + return cpt_10k_register_interrupts(block, offs); + + for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) { + snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + ret = rvu_cpt_do_register_interrupt(block, offs + i, + rvu_cpt_af_flt_intr_handler, + irq_name); + if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + } + + ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU, + rvu_cpt_af_rvu_intr_handler, + "CPTAF RVU"); + if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); + + ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS, + rvu_cpt_af_ras_intr_handler, + "CPTAF RAS"); + if (ret) + goto err; + rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1); + + return 0; +err: + rvu_cpt_unregister_interrupts(rvu); + return ret; +} + +int rvu_cpt_register_interrupts(struct rvu *rvu) +{ + int ret; + + ret = cpt_register_interrupts(rvu, BLKADDR_CPT0); + if (ret) + return ret; + + return cpt_register_interrupts(rvu, BLKADDR_CPT1); +} + static int get_cpt_pf_num(struct rvu *rvu) { int i, domain_nr, cpt_pf_num = -1; @@ -147,9 +377,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); - /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */ - val = (u64)req->nix_pf_func << 48 | - (u64)req->sso_pf_func << 32; + /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set + * on reset. 
+ */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32)); + val |= ((u64)req->nix_pf_func << 48 | + (u64)req->sso_pf_func << 32); rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); } @@ -159,7 +393,7 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr) { u16 pcifunc = req->hdr.pcifunc; - int num_lfs, cptlf, slot; + int num_lfs, cptlf, slot, err; struct rvu_block *block; block = &rvu->hw->block[blkaddr]; @@ -173,10 +407,15 @@ static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr) if (cptlf < 0) return CPT_AF_ERR_LF_INVALID; - /* Reset CPT LF group and priority */ - rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0); - /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */ - rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0); + /* Perform teardown */ + rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot); + + /* Reset LF */ + err = rvu_lf_reset(rvu, block, cptlf); + if (err) { + dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", + block->addr, cptlf); + } } return 0; @@ -197,6 +436,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, return ret; } +static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 sso_pf_func = req->sso_pf_func; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(16))) { + /* IPSec inline outbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_INB_ENA; + } + /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ + if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO)) + return CPT_AF_ERR_SSO_PF_FUNC_INVALID; + + nix_sel = (blkaddr == BLKADDR_CPT1) ? 
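/*
 * A plain-C sketch of the CPT_AF_LFX_CTL2 update at the top of this hunk:
 * the register is now read back and only the NIX_PF_FUNC (bits 63:48) and
 * SSO_PF_FUNC (bits 47:32) fields are replaced, so bits the hardware sets
 * at reset, such as EXE_LDWB, survive.  The field values and the position
 * of the preserved bit below are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ctl2 = 1ULL << 4;	/* stand-in for a reset-time bit */
	uint64_t nix_pf_func = 0x400;	/* assumed PF/FUNC values */
	uint64_t sso_pf_func = 0x500;

	ctl2 &= ~((0xffffULL << 48) | (0xffffULL << 32));
	ctl2 |= (nix_pf_func << 48) | (sso_pf_func << 32);

	/* prints 0x0400050000000010: the bit set before the update survives */
	printf("CTL2 = 0x%016llx\n", (unsigned long long)ctl2);
	return 0;
}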
1 : 0; + /* Enable CPT LF for IPsec inline inbound operations */ + if (req->enable) + val |= BIT_ULL(9); + else + val &= ~BIT_ULL(9); + + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (sso_pf_func) { + /* Set SSO_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)sso_pf_func << 32; + val |= (u64)req->nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + } + if (req->sso_pf_func_ovrd) + /* Set SSO_PF_FUNC_OVRD for inline IPSec */ + rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1); + + /* Configure the X2P Link register with the cpt base channel number and + * range of channels it should propagate to X2P + */ + if (!is_rvu_otx2(rvu)) { + val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16); + val |= rvu->hw->cpt_chan_base; + + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val); + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val); + } + + return 0; +} + +static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 nix_pf_func = req->nix_pf_func; + int nix_blkaddr; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(9))) { + /* IPSec inline inbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA; + } + + /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ + if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX)) + return CPT_AF_ERR_NIX_PF_FUNC_INVALID; + + /* Enable CPT LF for IPsec inline outbound operations */ + if (req->enable) + val |= BIT_ULL(16); + else + val &= ~BIT_ULL(16); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (nix_pf_func) { + /* Set NIX_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + + nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func); + nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 
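/*
 * The inbound and outbound configuration paths above enforce the HRM rule
 * that a CPT LF must not have both inline-IPsec directions enabled at
 * once: bit 9 of CPT_AF_LFX_CTL gates the inbound path, bit 16 the
 * outbound path, and bit 8 selects the NIX block.  A standalone sketch of
 * that mutual-exclusion check, with bit positions taken from the code
 * above:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTL_INLINE_INB_ENA	(1ULL << 9)
#define CTL_INLINE_OUTB_ENA	(1ULL << 16)
#define CTL_NIX_SEL		(1ULL << 8)

static bool can_enable_inbound(uint64_t ctl)
{
	return !(ctl & CTL_INLINE_OUTB_ENA);
}

static bool can_enable_outbound(uint64_t ctl)
{
	return !(ctl & CTL_INLINE_INB_ENA);
}

int main(void)
{
	uint64_t ctl = 0;

	if (can_enable_inbound(ctl))
		ctl |= CTL_INLINE_INB_ENA | CTL_NIX_SEL;	/* inbound via NIX1 */

	/* now rejected, mirroring CPT_AF_ERR_INLINE_IPSEC_OUT_ENA */
	printf("outbound allowed: %s\n",
	       can_enable_outbound(ctl) ? "yes" : "no");
	return 0;
}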
0 : 1; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + } + + return 0; +} + +int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, + struct cpt_inline_ipsec_cfg_msg *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr, ret; + u16 actual_slot; + + blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, + req->slot, &actual_slot); + if (blkaddr < 0) + return CPT_AF_ERR_LF_INVALID; + + block = &rvu->hw->block[blkaddr]; + + cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + + switch (req->dir) { + case CPT_INLINE_INBOUND: + ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req); + break; + + case CPT_INLINE_OUTBOUND: + ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req); + break; + + default: + return CPT_AF_ERR_PARAM; + } + + return ret; +} + static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) { u64 offset = req->reg_offset; @@ -421,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc); +} + +static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) +{ + struct cpt_rxc_time_cfg_req req; + int timeout = 2000; + u64 reg; + + if (is_rvu_otx2(rvu)) + return; + + /* Set time limit to minimum values, so that rxc entries will be + * flushed out quickly. + */ + req.step = 1; + req.zombie_thres = 1; + req.zombie_limit = 1; + req.active_thres = 1; + req.active_limit = 1; + + cpt_rxc_time_cfg(rvu, &req, blkaddr); + + do { + reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); + udelay(1); + if (FIELD_GET(RXC_ACTIVE_COUNT, reg)) + timeout--; + else + break; + } while (timeout); + + if (timeout == 0) + dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n"); + + timeout = 2000; + do { + reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); + udelay(1); + if (FIELD_GET(RXC_ZOMBIE_COUNT, reg)) + timeout--; + else + break; + } while (timeout); + + if (timeout == 0) + dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n"); +} + #define INPROG_INFLIGHT(reg) ((reg) & 0x1FF) #define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31)) #define INPROG_GRB(reg) (((reg) >> 32) & 0xFF) @@ -485,14 +911,12 @@ static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot) dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n"); } -int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot) +int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot) { - int blkaddr; u64 reg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc); - if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) - return -EINVAL; + if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc)) + cpt_rxc_teardown(rvu, blkaddr); /* Enable BAR2 ALIAS for this pcifunc. 
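/*
 * cpt_rxc_teardown() above shrinks the RXC timers and then polls the
 * active and zombie counters with a hard iteration limit rather than
 * waiting forever.  A standalone sketch of that bounded-poll pattern,
 * with the hardware counter replaced by a simulated one:
 */
#include <stdio.h>

static unsigned int rxc_active = 5;	/* simulated CPT_AF_RXC_ACTIVE_STS count */

static unsigned int read_active_count(void)
{
	return rxc_active ? rxc_active-- : 0;
}

int main(void)
{
	int timeout = 2000;

	while (timeout && read_active_count())
		timeout--;

	if (!timeout)
		printf("Poll for RXC active count hits hard loop counter\n");
	else
		printf("RXC drained, %d iterations left\n", timeout);
	return 0;
}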
*/ reg = BIT_ULL(16) | pcifunc; @@ -509,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot) return 0; } + +#define CPT_RES_LEN 16 +#define CPT_SE_IE_EGRP 1ULL + +static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr, + int nix_blkaddr) +{ + int cpt_pf_num = get_cpt_pf_num(rvu); + struct cpt_inst_lmtst_req *req; + dma_addr_t res_daddr; + int timeout = 3000; + u8 cpt_idx; + u64 *inst; + u16 *res; + int rc; + + res = kzalloc(CPT_RES_LEN, GFP_KERNEL); + if (!res) + return -ENOMEM; + + res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(rvu->dev, res_daddr)) { + dev_err(rvu->dev, "DMA mapping failed for CPT result\n"); + rc = -EFAULT; + goto res_free; + } + *res = 0xFFFF; + + /* Send mbox message to CPT PF */ + req = (struct cpt_inst_lmtst_req *) + otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up, + cpt_pf_num, sizeof(*req), + sizeof(struct msg_rsp)); + if (!req) { + rc = -ENOMEM; + goto res_daddr_unmap; + } + req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.id = MBOX_MSG_CPT_INST_LMTST; + + inst = req->inst; + /* Prepare CPT_INST_S */ + inst[0] = 0; + inst[1] = res_daddr; + /* AF PF FUNC */ + inst[2] = 0; + /* Set QORD */ + inst[3] = 1; + inst[4] = 0; + inst[5] = 0; + inst[6] = 0; + /* Set EGRP */ + inst[7] = CPT_SE_IE_EGRP << 61; + + /* Subtract 1 from the NIX-CPT credit count to preserve + * credit counts. + */ + cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1; + rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + BIT_ULL(22) - 1); + + otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num); + rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num); + if (rc) + dev_warn(rvu->dev, "notification to pf %d failed\n", + cpt_pf_num); + /* Wait for CPT instruction to be completed */ + do { + mdelay(1); + if (*res == 0xFFFF) + timeout--; + else + break; + } while (timeout); + + if (timeout == 0) + dev_warn(rvu->dev, "Poll for result hits hard loop counter\n"); + +res_daddr_unmap: + dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL); +res_free: + kfree(res); + + return 0; +} + +#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46) +#define CTX_CAM_CPTR GENMASK_ULL(45, 0) + +int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc) +{ + int nix_blkaddr, blkaddr; + u16 max_ctx_entries, i; + int slot = 0, num_lfs; + u64 reg, cam_data; + int rc; + + nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (nix_blkaddr < 0) + return -EINVAL; + + if (is_rvu_otx2(rvu)) + return 0; + + blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0; + + /* Submit CPT_INST_S to track when all packets have been + * flushed through for the NIX PF FUNC in inline inbound case. + */ + rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr); + if (rc) + return rc; + + /* Wait for rxc entries to be flushed out */ + cpt_rxc_teardown(rvu, blkaddr); + + reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0); + max_ctx_entries = (reg >> 48) & 0xFFF; + + mutex_lock(&rvu->rsrc_lock); + + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), + blkaddr); + if (num_lfs == 0) { + dev_warn(rvu->dev, "CPT LF is not configured\n"); + goto unlock; + } + + /* Enable BAR2 ALIAS for this pcifunc. 
*/ + reg = BIT_ULL(16) | pcifunc; + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); + + for (i = 0; i < max_ctx_entries; i++) { + cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i)); + + if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) && + FIELD_GET(CTX_CAM_CPTR, cam_data)) { + reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data); + rvu_write64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH), + reg); + } + } + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); + +unlock: + mutex_unlock(&rvu->rsrc_lock); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 49d822a98adab7..c7fd466a0efdc5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -95,7 +95,7 @@ static char *cgx_tx_stats_fields[] = { [CGX_STAT5] = "Total frames sent on the interface", [CGX_STAT6] = "Packets sent with an octet count < 64", [CGX_STAT7] = "Packets sent with an octet count == 64", - [CGX_STAT8] = "Packets sent with an octet count of 65–127", + [CGX_STAT8] = "Packets sent with an octet count of 65-127", [CGX_STAT9] = "Packets sent with an octet count of 128-255", [CGX_STAT10] = "Packets sent with an octet count of 256-511", [CGX_STAT11] = "Packets sent with an octet count of 512-1023", @@ -125,7 +125,7 @@ static char *rpm_rx_stats_fields[] = { "Total frames received on interface", "Packets received with an octet count < 64", "Packets received with an octet count == 64", - "Packets received with an octet count of 65ñ€“127", + "Packets received with an octet count of 65-127", "Packets received with an octet count of 128-255", "Packets received with an octet count of 256-511", "Packets received with an octet count of 512-1023", @@ -164,7 +164,7 @@ static char *rpm_tx_stats_fields[] = { "Packets sent to the multicast DMAC", "Packets sent to a broadcast DMAC", "Packets sent with an octet count == 64", - "Packets sent with an octet count of 65ñ€“127", + "Packets sent with an octet count of 65-127", "Packets sent with an octet count of 128-255", "Packets sent with an octet count of 256-511", "Packets sent with an octet count of 512-1023", @@ -226,6 +226,96 @@ static const struct file_operations rvu_dbg_##name##_fops = { \ static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); +#define LMT_MAPTBL_ENTRY_SIZE 16 +/* Dump LMTST map table */ +static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rvu *rvu = filp->private_data; + u64 lmt_addr, val, tbl_base; + int pf, vf, num_vfs, hw_vfs; + void __iomem *lmt_map_base; + int index = 0, off = 0; + int bytes_not_copied; + int buf_size = 10240; + char *buf; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + kfree(buf); + return false; + } + + off += scnprintf(&buf[off], buf_size - 1 - off, + "\n\t\t\t\t\tLmtst Map Table Entries"); + off += scnprintf(&buf[off], buf_size - 1 - off, + "\n\t\t\t\t\t======================="); + off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - 
off, + "Lmtline Base (word 0)\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - off, + "Lmt Map Entry (word 1)"); + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", + pf); + + index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", + (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\t\t", lmt_addr); + index += 8; + val = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n", + val); + /* Reading num of VFs per PF */ + rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); + for (vf = 0; vf < num_vfs; vf++) { + index = (pf * rvu->hw->total_vfs * 16) + + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); + off += scnprintf(&buf[off], buf_size - 1 - off, + "PF%d:VF%d \t\t", pf, vf); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%llx\t\t", (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\t\t", lmt_addr); + index += 8; + val = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\n", val); + } + } + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + + bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(buf); + + iounmap(lmt_map_base); + if (bytes_not_copied) + return -EFAULT; + + *ppos = off; + return off; +} + +RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); + static void get_lf_str_list(struct rvu_block block, int pcifunc, char *lfs) { @@ -1960,7 +2050,7 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return -ENODEV; mac_ops = get_mac_ops(cgxd); - + /* There can be no CGX devices at all */ if (!mac_ops) return 0; @@ -2038,13 +2128,13 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) if (err) return err; - if (is_rvu_otx2(rvu)) - seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], - tx_stat); - else - seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], - tx_stat); - stat++; + if (is_rvu_otx2(rvu)) + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], + tx_stat); + else + seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], + tx_stat); + stat++; } return err; @@ -2482,6 +2572,8 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) seq_printf(s, "VF%d", vf); } seq_puts(s, "\n"); + seq_printf(s, "\tchannel: 0x%x\n", iter->chan); + seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask); } rvu_dbg_npc_mcam_show_action(s, iter); @@ -2754,6 +2846,10 @@ void rvu_dbg_init(struct rvu *rvu) debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rsrc_status_fops); + if (!is_rvu_otx2(rvu)) + debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, + rvu, &rvu_dbg_lmtst_map_table_fops); + if (!cgx_get_cgxcnt_max()) goto create; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 274d3abe30eb43..70bacd38a6d9d0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(rvu->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; 
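/*
 * The debugfs dump above walks the LMTST map table as an array of
 * 16-byte entries: word 0 holds the LMT line base and word 1 the map
 * entry, a PF's slice starts at pf * total_vfs entries, and VF n sits
 * n + 1 entries into that slice.  A standalone sketch of the offset
 * arithmetic, with an assumed total_vfs value:
 */
#include <stdint.h>
#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16

static uint64_t lmt_entry_offset(int pf, int vf, int total_vfs)
{
	uint64_t off = (uint64_t)pf * total_vfs * LMT_MAPTBL_ENTRY_SIZE;

	if (vf >= 0)			/* vf < 0 addresses the PF itself */
		off += (uint64_t)(vf + 1) * LMT_MAPTBL_ENTRY_SIZE;
	return off;
}

int main(void)
{
	int total_vfs = 64;		/* assumed rvu->hw->total_vfs */

	printf("PF2 entry offset:     0x%llx\n",
	       (unsigned long long)lmt_entry_offset(2, -1, total_vfs));
	printf("PF2/VF3 entry offset: 0x%llx\n",
	       (unsigned long long)lmt_entry_offset(2, 3, total_vfs));
	return 0;
}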
rvu_dl->rvu = rvu; @@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu) goto err_dl_health; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl_health: rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); return err; } @@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu) struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct devlink *dl = rvu_dl->dl; - if (!dl) - return; - + devlink_unregister(dl); devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 6970540dc47091..d8b1948aaa0aef 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); +static const char *nix_get_ctx_name(int ctype); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } +static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, u8 ctype) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + int rc, word; + + if (req->ctype != NIX_AQ_CTYPE_CQ) + return 0; + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + req->hdr.pcifunc, ctype, req->qidx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", + __func__, nix_get_ctx_name(ctype), req->qidx, + req->hdr.pcifunc); + return rc; + } + + /* Make copy of original context & mask which are required + * for resubmission + */ + memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + + /* exclude fields which HW can update */ + aq_req.cq_mask.cq_err = 0; + aq_req.cq_mask.wrptr = 0; + aq_req.cq_mask.tail = 0; + aq_req.cq_mask.head = 0; + aq_req.cq_mask.avg_level = 0; + aq_req.cq_mask.update_time = 0; + aq_req.cq_mask.substream = 0; + + /* Context mask (cq_mask) holds mask value of fields which + * are changed in AQ WRITE operation. 
+ * for example cq.drop = 0xa; + * cq_mask.drop = 0xff; + * Below logic performs '&' between cq and cq_mask so that non + * updated fields are masked out for request and response + * comparison + */ + for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + word++) { + *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + *(u64 *)((u8 *)&aq_req.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + } + + if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; + + return 0; +} + static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; + int err, retries = 5; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); @@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); +retry: + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); + + /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' + * As a work around perfrom CQ context read after each AQ write. If AQ + * read shows AQ write is not updated perform AQ write again. + */ + if (!err && req->op == NIX_AQ_INSTOP_WRITE) { + err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); + if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { + if (retries--) + goto retry; + else + return NIX_AF_ERR_CQ_CTX_WRITE_ERR; + } + } + + return err; } static const char *nix_get_ctx_name(int ctype) @@ -4439,10 +4515,17 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, return rvu_cgx_start_stop_io(rvu, pcifunc, false); } +#define RX_SA_BASE GENMASK_ULL(52, 7) + void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + u64 sa_base; + void *cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; @@ -4479,9 +4562,33 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) dev_err(rvu->dev, "CQ ctx disable failed\n"); } + /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + nix_ctx_free(rvu, pfvf); nix_free_all_bandprof(rvu, pcifunc); + + sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); + if (FIELD_GET(RX_SA_BASE, sa_base)) { + err = rvu_cpt_ctx_flush(rvu, pcifunc); + if (err) + dev_err(rvu->dev, + "CPT ctx flush failed with error: %d\n", err); + } } #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) @@ -4582,6 +4689,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, return 0; } +#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) +#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) +#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) +#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) + +#define CPT_INST_QSEL_BLOCK 
GENMASK_ULL(28, 24) +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) +#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) + +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, + int blkaddr) +{ + u8 cpt_idx, cpt_blkaddr; + u64 val; + + cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; + if (req->enable) { + val = 0; + /* Enable context prefetching */ + if (!is_rvu_otx2(rvu)) + val |= BIT_ULL(51); + + /* Set OPCODE and EGRP */ + val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); + val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); + + /* Set CPT queue for inline IPSec */ + val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); + val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, + req->inst_qsel.cpt_pf_func); + + if (!is_rvu_otx2(rvu)) { + cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : + BLKADDR_CPT1; + val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); + } + + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + val); + + /* Set CPT credit */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + req->cpt_credit); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF); + } +} + +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, + struct nix_inline_ipsec_cfg *req, + struct msg_rsp *rsp) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); + + return 0; +} + +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, + struct nix_inline_ipsec_lf_cfg *req, + struct msg_rsp *rsp) +{ + int lf, blkaddr, err; + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); + if (err) + return err; + + if (req->enable) { + /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ + val = (u64)req->ipsec_cfg0.tt << 44 | + (u64)req->ipsec_cfg0.tag_const << 20 | + (u64)req->ipsec_cfg0.sa_pow2_size << 16 | + req->ipsec_cfg0.lenm1_max; + + if (blkaddr == BLKADDR_NIX1) + val |= BIT_ULL(46); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); + + /* Set SA_IDX_W and SA_IDX_MAX */ + val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | + req->ipsec_cfg1.sa_idx_max; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); + + /* Set SA base address */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + req->sa_base_addr); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + 0x0); + } + + return 0; +} void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 5efb4174e82dff..bb6b42bbefa44d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } +static int 
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, + u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) +{ + struct npc_kpu_action0 *act0; + u8 shift_count = 0; + int blkaddr; + u64 val; + + if (!var_len_off_mask) + return -EINVAL; + + if (var_len_off_mask != 0xff) { + if (shift_dir) + shift_count = __ffs(var_len_off_mask); + else + shift_count = (8 - __fls(var_len_off_mask)); + } + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -EINVAL; + } + val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); + act0 = (struct npc_kpu_action0 *)&val; + act0->var_len_shift = shift_count; + act0->var_len_right = shift_dir; + act0->var_len_mask = var_len_off_mask; + act0->var_len_offset = var_len_off; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); + return 0; +} + +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir) + +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr, nixlf, rc, intf_mode; + int pf = rvu_get_pf(pcifunc); + u64 rxpkind, txpkind; + u8 cgx_id, lmac_id; + + /* use default pkind to disable edsa/higig */ + rxpkind = rvu_npc_get_pkind(rvu, pf); + txpkind = NPC_TX_DEF_PKIND; + intf_mode = NPC_INTF_MODE_DEF; + + if (mode & OTX2_PRIV_FLAGS_CUSTOM) { + if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { + rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, + var_len_off, + var_len_off_mask, + shift_dir); + if (rc) + return rc; + } + rxpkind = pkind; + txpkind = pkind; + } + + if (dir & PKIND_RX) { + /* rx pkind set req valid only for cgx mapped PFs */ + if (!is_cgx_config_permitted(rvu, pcifunc)) + return 0; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + rxpkind); + if (rc) + return rc; + } + + if (dir & PKIND_TX) { + /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), + txpkind); + } + + pfvf->intf_mode = intf_mode; + return 0; +} + +int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req, + struct msg_rsp *rsp) +{ + return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, + req->dir, req->pkind, req->var_len_off, + req->var_len_off_mask, req->shift_dir); +} + int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 51ddc7b81d0bd5..ff2b21999f36f7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1119,6 +1119,9 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, rule->default_rule = req->default_rule; rule->owner = owner; rule->enable = enable; + rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; + rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK; + rule->chan &= rule->chan_mask; if (is_npc_intf_tx(req->intf)) rule->intf = pfvf->nix_tx_intf; else diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 21f1ed4e222f75..22cd751613cdd3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -236,6 +236,8 @@ #define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8) #define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) +#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) @@ -525,6 +527,7 @@ #define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull) #define CPT_AF_CTX_PSH_PC (0x49450ull) #define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull) +#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3) #define CPT_AF_RXC_TIME (0x50010ull) #define CPT_AF_RXC_TIME_CFG (0x50018ull) #define CPT_AF_RXC_DFRG (0x50020ull) @@ -542,6 +545,7 @@ #define CPT_LF_CTL 0x10 #define CPT_LF_INPROG 0x40 #define CPT_LF_Q_GRP_PTR 0x120 +#define CPT_LF_CTX_FLUSH 0x510 #define NPC_AF_BLK_RST (0x00040) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 77ac96693f04d5..edc9367b1b95e5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -62,6 +62,24 @@ enum rvu_af_int_vec_e { RVU_AF_INT_VEC_CNT = 0x5, }; +/* CPT Admin function Interrupt Vector Enumeration */ +enum cpt_af_int_vec_e { + CPT_AF_INT_VEC_FLT0 = 0x0, + CPT_AF_INT_VEC_FLT1 = 0x1, + CPT_AF_INT_VEC_RVU = 0x2, + CPT_AF_INT_VEC_RAS = 0x3, + CPT_AF_INT_VEC_CNT = 0x4, +}; + +enum cpt_10k_af_int_vec_e { + CPT_10K_AF_INT_VEC_FLT0 = 0x0, + CPT_10K_AF_INT_VEC_FLT1 = 0x1, + CPT_10K_AF_INT_VEC_FLT2 = 0x2, + CPT_10K_AF_INT_VEC_RVU = 0x3, + CPT_10K_AF_INT_VEC_RAS = 0x4, + CPT_10K_AF_INT_VEC_CNT = 0x5, +}; + /* NPA Admin function Interrupt Vector Enumeration */ enum npa_af_int_vec_e { NPA_AF_INT_VEC_RVU = 0x0, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index b92c267628b872..0048b594671216 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -3,11 +3,11 @@ # Makefile for Marvell's RVU Ethernet device drivers # -obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o -obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o +obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o +obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ + otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o rvu_nicvf-y := otx2_vf.o otx2_devlink.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 95f21dfdba483b..fd4f083c699ecb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 78df173e6df240..66da31f30d3ebb 100644 
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, return PTR_ERR(msghdr); } rsp = (struct nix_get_mac_addr_rsp *)msghdr; - ether_addr_copy(netdev->dev_addr, rsp->mac_addr); + eth_hw_addr_set(netdev, rsp->mac_addr); mutex_unlock(&pfvf->mbox.lock); return 0; @@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); /* update dmac field in vlan offload rule */ if (netif_running(netdev) && pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) @@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) return -ENOMEM; } - req->maxlen = pfvf->max_frs; + req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); @@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) u64 schq, parent; u64 dwrr_val; - dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) @@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) /* Set topology e.t.c configuration */ if (lvl == NIX_TXSCH_LVL_SMQ) { req->reg[0] = NIX_AF_SMQX_CFG(schq); - req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8) - | OTX2_MIN_MTU; - + req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | (0x2ULL << 36); req->num_regs++; @@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { incr = (u64)qidx << 32; while (timeout) { val = otx2_atomic64_add(incr, ptr); @@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; @@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; - err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, - TSO_HEADER_SIZE); - if (err) - return err; + if (qidx < pfvf->hw.tx_queues) { + err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, + TSO_HEADER_SIZE); + if (err) + return err; + } sq->sqe_base = sq->sqe->base; sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); if (!sq->sg) return -ENOMEM; - if (pfvf->ptp) { + if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); if (err) @@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; + int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; - int err, pool_id; cq = &qset->cq[qidx]; cq->cq_idx = qidx; + 
non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; if (qidx < pfvf->hw.rx_queues) { cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - } else { + if (pfvf->xdp_prog) + xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; cq->cqe_cnt = qset->sqe_cnt; + } else { + cq->cq_type = CQ_XDP; + cq->cint_idx = qidx - non_xdp_queues; + cq->cqe_cnt = qset->sqe_cnt; } cq->cqe_size = pfvf->qset.xqe_size; @@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) } /* Initialize TX queues */ - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); err = otx2_sq_init(pfvf, qidx, sqb_aura); @@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) return err; } + pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, + NIX_LF_CQ_OP_STATUS); + /* Initialize work queue for receive buffer refill */ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, sizeof(struct refill_work), GFP_KERNEL); @@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) /* Set RQ/SQ/CQ counts */ nixlf->rq_cnt = pfvf->hw.rx_queues; - nixlf->sq_cnt = pfvf->hw.tx_queues; + nixlf->sq_cnt = pfvf->hw.tot_tx_queues; nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; @@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf) int sqb, qidx; u64 iova, pa; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; if (!sq->sqb_ptrs) continue; @@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); @@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) goto fail; /* Allocate pointers and free them to aura/pool */ - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); pool = &pfvf->qset.pool[pool_id]; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index a51ecd771d0755..61e52812983fa3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -171,6 +171,8 @@ struct otx2_hw { struct otx2_rss_info rss_info; u16 rx_queues; u16 tx_queues; + u16 xdp_queues; + u16 tot_tx_queues; u16 max_queues; u16 pool_cnt; u16 rqpool_cnt; @@ -223,6 +225,7 @@ struct otx2_hw { #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 +#define CN10K_RPM 3 unsigned long cap_flag; #define LMT_LINE_SIZE 128 @@ -263,6 +266,12 @@ struct otx2_ptp { struct cyclecounter cycle_counter; struct timecounter time_counter; + + struct delayed_work extts_work; + u64 last_extts; + u64 thresh; + + struct ptp_pin_desc extts_config; }; #define OTX2_HW_TIMESTAMP_LEN 8 @@ -317,7 +326,7 @@ struct otx2_nic { struct net_device *netdev; struct dev_hw_ops *hw_ops; void *iommu_domain; - u16 max_frs; + u16 tx_max_pktlen; u16 rbsize; /* Receive buffer size */ #define 
OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0) @@ -336,7 +345,9 @@ struct otx2_nic { #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; + u64 *cq_op_addr; + struct bpf_prog *xdp_prog; struct otx2_qset qset; struct otx2_hw hw; struct pci_dev *pdev; @@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) if (!is_dev_otx2(pfvf->pdev)) { __set_bit(CN10K_MBOX, &hw->cap_flag); __set_bit(CN10K_LMTST, &hw->cap_flag); + __set_bit(CN10K_RPM, &hw->cap_flag); } } @@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd); +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr); + /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); int otx2vf_mcam_flow_init(struct otx2_nic *pfvf); @@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 7ac3ef2fa06a26..777a27047c8e8a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(pfvf->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - otx2_dl = devlink_priv(dl); otx2_dl->dl = dl; otx2_dl->pfvf = pfvf; @@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf) goto err_dl; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl: - devlink_unregister(dl); devlink_free(dl); return err; } @@ -141,16 +132,10 @@ int otx2_register_dl(struct otx2_nic *pfvf) void otx2_unregister_dl(struct otx2_nic *pfvf) { struct otx2_devlink *otx2_dl = pfvf->dl; - struct devlink *dl; - - if (!otx2_dl || !otx2_dl->dl) - return; - - dl = otx2_dl->dl; + struct devlink *dl = otx2_dl->dl; + devlink_unregister(dl); devlink_params_unregister(dl, otx2_dl_params, ARRAY_SIZE(otx2_dl_params)); - - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index dbfa3bc39e34e6..80d4ce61f442d1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) otx2_get_qset_strings(pfvf, &data, 0); - for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { - sprintf(data, "cgx_rxstat%d: ", stats); - data += ETH_GSTRING_LEN; - } + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { + sprintf(data, "cgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } - for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { - sprintf(data, "cgx_txstat%d: 
", stats); - data += ETH_GSTRING_LEN; + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { + sprintf(data, "cgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } } strcpy(data, "reset_count"); @@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, [otx2_drv_stats[stat].index]); otx2_get_qset_stats(pfvf, stats, &data); - otx2_update_lmac_stats(pfvf); - for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_rx_stats[stat]; - for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_tx_stats[stat]; + + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + otx2_update_lmac_stats(pfvf); + for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_rx_stats[stat]; + for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_tx_stats[stat]; + } + *(data++) = pfvf->reset_count; fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; @@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, static int otx2_get_sset_count(struct net_device *netdev, int sset) { struct otx2_nic *pfvf = netdev_priv(netdev); - int qstats_count; + int qstats_count, mac_stats = 0; if (sset != ETH_SS_STATS) return -EINVAL; qstats_count = otx2_n_queue_stats * (pfvf->hw.rx_queues + pfvf->hw.tx_queues); + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) + mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT; otx2_update_lmac_fec_stats(pfvf); return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + - CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT - + 1; + mac_stats + OTX2_FEC_STATS_CNT + 1; } /* Get no of queues device supports and current queue count */ @@ -1168,9 +1175,8 @@ static int otx2_set_link_ksettings(struct net_device *netdev, otx2_get_link_ksettings(netdev, &cur_ks); /* Check requested modes against supported modes by hardware */ - if (!bitmap_subset(cmd->link_modes.advertising, - cur_ks.link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS)) + if (!linkmode_subset(cmd->link_modes.advertising, + cur_ks.link_modes.supported)) return -EINVAL; mutex_lock(&mbox->lock); @@ -1340,6 +1346,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_pauseparam = otx2_get_pauseparam, .set_pauseparam = otx2_set_pauseparam, .get_link_ksettings = otx2vf_get_link_ksettings, + .get_ts_info = otx2_get_ts_info, }; void otx2vf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53df7fff92c403..1e0d0c9c1dac3a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "otx2_reg.h" #include "otx2_common.h" @@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); static int otx2_change_mtu(struct net_device *netdev, int new_mtu) { + struct otx2_nic *pf = netdev_priv(netdev); bool if_up = netif_running(netdev); int err = 0; + if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } if (if_up) otx2_stop(netdev); @@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) } /* SQ */ - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); 
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | @@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf) otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); /* Free SQB pointers */ otx2_sq_free_sqbs(pf); - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; qmem_free(pf->dev, sq->sqe); qmem_free(pf->dev, sq->tso_hdrs); @@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) * NIX transfers entire data using 6 segments/buffers and writes * a CQE_RX descriptor with those segment addresses. First segment * has additional data prepended to packet. Also software omits a - * headroom of 128 bytes and sizeof(struct skb_shared_info) in - * each segment. Hence the total size of memory needed - * to receive a packet with 'mtu' is: + * headroom of 128 bytes in each segment. Hence the total size of + * memory needed to receive a packet with 'mtu' is: * frame size = mtu + additional data; - * memory = frame_size + (headroom + struct skb_shared_info size) * 6; + * memory = frame_size + headroom * 6; * each receive buffer size = memory / 6; */ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; - total_size = frame_size + (OTX2_HEAD_ROOM + - OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6; + total_size = frame_size + OTX2_HEAD_ROOM * 6; rbuf_size = total_size / 6; return ALIGN(rbuf_size, 2048); @@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) * so, aura count = pool count. */ hw->rqpool_cnt = hw->rx_queues; - hw->sqpool_cnt = hw->tx_queues; + hw->sqpool_cnt = hw->tot_tx_queues; hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; - pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + /* Maximum hardware supported transmit length */ + pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); @@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static void otx2_do_set_rx_mode(struct otx2_nic *pf) +{ + struct net_device *netdev = pf->netdev; + struct nix_rx_mode *req; + bool promisc = false; + + if (!(netdev->flags & IFF_UP)) + return; + + if ((netdev->flags & IFF_PROMISC) || + (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + promisc = true; + } + + /* Write unicast address to mcam entries or del from mcam */ + if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) + __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + if (promisc) + req->mode |= NIX_RX_MODE_PROMISC; + if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + req->mode |= NIX_RX_MODE_USE_MCE; + + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev) netif_carrier_off(netdev); - pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues; /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. 
*/ @@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev) if (!qset->cq) goto err_free_mem; - qset->sq = kcalloc(pf->hw.tx_queues, + qset->sq = kcalloc(pf->hw.tot_tx_queues, sizeof(struct otx2_snd_queue), GFP_KERNEL); if (!qset->sq) goto err_free_mem; @@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev) /* RQ0 & SQ0 are mapped to CINT0 and so on.. * 'cq_ids[0]' points to RQ's CQ and * 'cq_ids[1]' points to SQ's CQ and + * 'cq_ids[2]' points to XDP's CQ and */ cq_poll->cq_ids[CQ_RX] = (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? qidx + pf->hw.rx_queues : CINT_INVALID_CQ; + if (pf->xdp_prog) + cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? + (qidx + pf->hw.rx_queues + + pf->hw.tx_queues) : + CINT_INVALID_CQ; + else + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->dev = (void *)pf; netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler, NAPI_POLL_WEIGHT); @@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev) if (err) goto err_tx_stop_queues; + otx2_do_set_rx_mode(pf); + return 0; err_tx_stop_queues: @@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) /* Check for minimum and maximum packet length */ if (skb->len <= ETH_HLEN || - (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) { + (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev) queue_work(pf->otx2_wq, &pf->rx_mode_work); } -static void otx2_do_set_rx_mode(struct work_struct *work) +static void otx2_rx_mode_wrk_handler(struct work_struct *work) { struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); - struct net_device *netdev = pf->netdev; - struct nix_rx_mode *req; - bool promisc = false; - - if (!(netdev->flags & IFF_UP)) - return; - - if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { - promisc = true; - } - - /* Write unicast address to mcam entries or del from mcam */ - if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) - __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); - - mutex_lock(&pf->mbox.lock); - req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); - if (!req) { - mutex_unlock(&pf->mbox.lock); - return; - } - - req->mode = NIX_RX_MODE_UCAST; - - if (promisc) - req->mode |= NIX_RX_MODE_PROMISC; - if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) - req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; - - otx2_sync_mbox_msg(&pf->mbox); - mutex_unlock(&pf->mbox.lock); + otx2_do_set_rx_mode(pf); } static int otx2_set_features(struct net_device *netdev, @@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) return 0; } -static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config config; @@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } +EXPORT_SYMBOL(otx2_config_hwtstamp); -static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config *cfg = &pfvf->tstamp; @@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_ioctl); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { @@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, + int qidx) +{ + struct page *page; + u64 dma_addr; + int err = 0; + + dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data), + offset_in_page(xdpf->data), xdpf->len, + DMA_TO_DEVICE); + if (dma_mapping_error(pf->dev, dma_addr)) + return -ENOMEM; + + err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); + if (!err) { + otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); + page = virt_to_page(xdpf->data); + put_page(page); + return -ENOMEM; + } + return 0; +} + +static int otx2_xdp_xmit(struct net_device *netdev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int qidx = smp_processor_id(); + struct otx2_snd_queue *sq; + int drops = 0, i; + + if (!netif_running(netdev)) + return -ENETDOWN; + + qidx += pf->hw.tx_queues; + sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL; + + /* Abort xmit if xdp queue is not */ + if (unlikely(!sq)) + return -ENXIO; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = otx2_xdp_xmit_tx(pf, xdpf, qidx); + if (err) + drops++; + } + return n - drops; +} + +static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) +{ + struct net_device *dev = pf->netdev; + bool if_up = netif_running(pf->netdev); + struct bpf_prog *old_prog; + + if (prog && dev->mtu > MAX_XDP_MTU) { + netdev_warn(dev, "Jumbo frames not yet supported with XDP\n"); + return -EOPNOTSUPP; + } + + if (if_up) + otx2_stop(pf->netdev); + + old_prog = xchg(&pf->xdp_prog, prog); + + if (old_prog) + bpf_prog_put(old_prog); + + if (pf->xdp_prog) + bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1); + + /* Network stack and XDP shared same rx queues. + * Use separate tx queues for XDP and network stack. 
+ */ + if (pf->xdp_prog) + pf->hw.xdp_queues = pf->hw.rx_queues; + else + pf->hw.xdp_queues = 0; + + pf->hw.tot_tx_queues += pf->hw.xdp_queues; + + if (if_up) + otx2_open(pf->netdev); + + return 0; +} + +static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return otx2_xdp_setup(pf, xdp->prog); + default: + return -EINVAL; + } +} + static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, int req_perm) { @@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, + .ndo_bpf = otx2_xdp, + .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; @@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf) if (!pf->otx2_wq) return -ENOMEM; - INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode); + INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); INIT_WORK(&pf->reset_task, otx2_reset_task); return 0; } @@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->pdev = pdev; hw->rx_queues = qcount; hw->tx_queues = qcount; + hw->tot_tx_queues = qcount; hw->max_queues = qcount; num_vec = pci_msix_vec_count(pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index ec9e49985c2cef..0ef68fdd1f26bf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) return otx2_sync_mbox_msg(&ptp->nic->mbox); } +static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) +{ + struct ptp_req *req; + + if (!ptp->nic) + return -ENODEV; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return -ENOMEM; + + req->op = PTP_OP_SET_THRESH; + req->thresh = thresh; + + return otx2_sync_mbox_msg(&ptp->nic->mbox); +} + static u64 ptp_cc_read(const struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); @@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc) return rsp->clk; } +static u64 ptp_tstmp_read(struct otx2_ptp *ptp) +{ + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_TSTMP; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, return 0; } +static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + case PTP_PF_PEROUT: + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +static void otx2_ptp_extts_check(struct work_struct *work) +{ + struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, + extts_work.work); + struct ptp_clock_event event; + u64 tstmp, new_thresh; + + 
mutex_lock(&ptp->nic->mbox.lock); + tstmp = ptp_tstmp_read(ptp); + mutex_unlock(&ptp->nic->mbox.lock); + + if (tstmp != ptp->last_extts) { + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); + ptp_clock_event(ptp->ptp_clock, &event); + ptp->last_extts = tstmp; + + new_thresh = tstmp % 500000000; + if (ptp->thresh != new_thresh) { + mutex_lock(&ptp->nic->mbox.lock); + ptp_set_thresh(ptp, new_thresh); + mutex_unlock(&ptp->nic->mbox.lock); + ptp->thresh = new_thresh; + } + } + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); +} + static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + int pin; + + if (!ptp->nic) + return -ENODEV; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); + else + cancel_delayed_work_sync(&ptp->extts_work); + return 0; + default: + break; + } return -EOPNOTSUPP; } @@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf) struct ptp_req *req; int err; + if (is_otx2_lbkvf(pfvf->pdev)) { + pfvf->ptp = NULL; + return 0; + } + mutex_lock(&pfvf->mbox.lock); /* check if PTP block is available */ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); @@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf) timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, ktime_to_ns(ktime_get_real())); + snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); + ptp_ptr->extts_config.index = 0; + ptp_ptr->extts_config.func = PTP_PF_NONE; + ptp_ptr->ptp_info = (struct ptp_clock_info) { .owner = THIS_MODULE, .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, - .n_ext_ts = 0, - .n_pins = 0, + .n_ext_ts = 1, + .n_pins = 1, .pps = 0, + .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, .adjtime = otx2_ptp_adjtime, .gettime64 = otx2_ptp_gettime, .settime64 = otx2_ptp_settime, .enable = otx2_ptp_enable, + .verify = otx2_ptp_verify_pin, }; + INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); + ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) { err = ptp_ptr->ptp_clock ? 
@@ -176,6 +297,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf) error: return err; } +EXPORT_SYMBOL_GPL(otx2_ptp_init); void otx2_ptp_destroy(struct otx2_nic *pfvf) { @@ -188,6 +310,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf) kfree(ptp); pfvf->ptp = NULL; } +EXPORT_SYMBOL_GPL(otx2_ptp_destroy); int otx2_ptp_clock_index(struct otx2_nic *pfvf) { @@ -196,6 +319,7 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf) return ptp_clock_index(pfvf->ptp->ptp_clock); } +EXPORT_SYMBOL_GPL(otx2_ptp_clock_index); int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) { @@ -206,3 +330,8 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) return 0; } +EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time); + +MODULE_AUTHOR("Sunil Goutham "); +MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f42b1d4e0c679e..0cc6353254bf07 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include "otx2_reg.h" #include "otx2_common.h" @@ -17,6 +19,35 @@ #include "cn10k.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) +static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq); + +static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq) +{ + u64 incr = (u64)(cq->cq_idx) << 32; + u64 status; + + status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); + + if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) || + status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) { + dev_err(pfvf->dev, "CQ stopped due to error"); + return -EINVAL; + } + + cq->cq_tail = status & 0xFFFFF; + cq->cq_head = (status >> 20) & 0xFFFFF; + if (cq->cq_tail < cq->cq_head) + cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + + cq->cq_tail; + else + cq->pend_cqe = cq->cq_tail - cq->cq_head; + + return 0; +} static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) { @@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) sg->num_segs = 0; } +static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, + struct otx2_snd_queue *sq, + struct nix_cqe_tx_s *cqe) +{ + struct nix_send_comp_s *snd_comp = &cqe->comp; + struct sg_list *sg; + struct page *page; + u64 pa; + + sg = &sq->sg[snd_comp->sqe_id]; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); + otx2_dma_unmap_page(pfvf, sg->dma_addr[0], + sg->size[0], DMA_TO_DEVICE); + page = virt_to_page(phys_to_virt(pa)); + put_page(page); +} + static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, struct otx2_snd_queue *sq, @@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf, skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); } -static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, - u64 iova, int len, struct nix_rx_parse_s *parse) +static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, + u64 iova, int len, struct nix_rx_parse_s *parse, + int qidx) { struct page *page; int off = 0; @@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, } page = virt_to_page(va); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - va - page_address(page) + off, len - off, pfvf->rbsize); + if 
(likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + va - page_address(page) + off, + len - off, pfvf->rbsize); + + otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, + pfvf->rbsize, DMA_FROM_DEVICE); + return true; + } - otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, - pfvf->rbsize, DMA_FROM_DEVICE); + /* If more than MAX_SKB_FRAGS fragments are received then + * give back those buffer pointers to hardware for reuse. + */ + pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); + + return false; } static void otx2_set_rxhash(struct otx2_nic *pfvf, @@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, return; } + if (pfvf->xdp_prog) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq)) + return; + skb = napi_get_frags(napi); if (unlikely(!skb)) return; @@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, seg_addr = &sg->seg_addr; seg_size = (void *)sg; for (seg = 0; seg < sg->segs; seg++, seg_addr++) { - otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg], - parse); - cq->pool_ptrs++; + if (otx2_skb_add_frag(pfvf, skb, *seg_addr, + seg_size[seg], parse, cq->cq_idx)) + cq->pool_ptrs++; } start += sizeof(*sg); } @@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe; int processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || !cqe->sg.seg_addr) { @@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; cqe->sg.seg_addr = 0x00; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); - if (unlikely(!cq->pool_ptrs)) - return 0; - /* Refill pool with new buffers */ - pfvf->hw_ops->refill_pool_ptrs(pfvf, cq); - return processed_cqe; } @@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { - int tx_pkts = 0, tx_bytes = 0; + int tx_pkts = 0, tx_bytes = 0, qidx; struct nix_cqe_tx_s *cqe; int processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { if (!processed_cqe) return 0; break; } - otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], - cqe, budget, &tx_pkts, &tx_bytes); - + if (cq->cq_type == CQ_XDP) { + qidx = cq->cq_idx - pfvf->hw.rx_queues; + otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], + cqe); + } else { + otx2_snd_pkt_handler(pfvf, cq, + &pfvf->qset.sq[cq->cint_idx], + cqe, budget, &tx_pkts, &tx_bytes); + } cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ @@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int otx2_napi_handler(struct napi_struct *napi, int budget) { + struct otx2_cq_queue *rx_cq = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; struct otx2_cq_queue *cq; @@ -412,17 
+495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) pfvf = (struct otx2_nic *)cq_poll->dev; qset = &pfvf->qset; - for (i = CQS_PER_CINT - 1; i >= 0; i--) { + for (i = 0; i < CQS_PER_CINT; i++) { cq_idx = cq_poll->cq_ids[i]; if (unlikely(cq_idx == CINT_INVALID_CQ)) continue; cq = &qset->cq[cq_idx]; if (cq->cq_type == CQ_RX) { - /* If the RQ refill WQ task is running, skip napi - * scheduler for this queue. - */ - if (cq->refill_task_sched) - continue; + rx_cq = cq; workdone += otx2_rx_napi_handler(pfvf, napi, cq, budget); } else { @@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } } + if (rx_cq && rx_cq->pool_ptrs) + pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) int processed_cqe = 0; u64 iova, pa; - while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { - if (!cqe->sg.subdc) - continue; + if (pfvf->xdp_prog) + xdp_rxq_info_unreg(&cq->xdp_rxq); + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; if (cqe->sg.segs > 1) { otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); continue; @@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) sq = &pfvf->qset.sq[cq->cint_idx]; - while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) { + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); + processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { @@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) dev_kfree_skb_any(skb); sg->skb = (u64)NULL; } - processed_cqe++; } /* Free CQEs to HW */ @@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) mutex_unlock(&pfvf->mbox.lock); return err; } + +static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, + int len, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u64 *iova = NULL; + + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 1; + sg->seg1_size = len; + iova = (void *)sg + sizeof(*sg); + *iova = dma_addr; + *offset += sizeof(*sg) + sizeof(u64); + + sq->sg[sq->head].dma_addr[0] = dma_addr; + sq->sg[sq->head].size[0] = len; + sq->sg[sq->head].num_segs = 1; +} + +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset, free_sqe; + + sq = &pfvf->qset.sq[qidx]; + free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + if (free_sqe < sq->sqe_thresh) + return false; + + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, iova, len, &offset); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); + + return true; +} + +static bool otx2_xdp_rcv_pkt_handler(struct 
otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq) +{ + unsigned char *hard_start, *data; + int qidx = cq->cq_idx; + struct xdp_buff xdp; + struct page *page; + u64 iova, pa; + u32 act; + int err; + + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + page = virt_to_page(phys_to_virt(pa)); + + xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); + + data = (unsigned char *)phys_to_virt(pa); + hard_start = page_address(page); + xdp_prepare_buff(&xdp, hard_start, data - hard_start, + cqe->sg.seg_size, false); + + act = bpf_prog_run_xdp(prog, &xdp); + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + qidx += pfvf->hw.tx_queues; + cq->pool_ptrs++; + return otx2_xdp_sq_append_pkt(pfvf, iova, + cqe->sg.seg_size, qidx); + case XDP_REDIRECT: + cq->pool_ptrs++; + err = xdp_do_redirect(pfvf->netdev, &xdp, prog); + + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + if (!err) + return true; + put_page(page); + break; + default: + bpf_warn_invalid_xdp_action(act); + break; + case XDP_ABORTED: + trace_xdp_exception(pfvf->netdev, prog, act); + break; + case XDP_DROP: + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + put_page(page); + cq->pool_ptrs++; + return true; + } + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 3ff1ad79c0011d..f1a04cf9210c35 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -11,6 +11,7 @@ #include #include #include +#include #define LBK_CHAN_BASE 0x000 #define SDP_CHAN_BASE 0x700 @@ -25,6 +26,8 @@ #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 +#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN) + /* Rx buffer size should be in multiples of 128bytes */ #define RCV_FRAG_LEN1(x) \ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \ @@ -36,9 +39,7 @@ #define RCV_FRAG_LEN(x) \ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x)) -#define DMA_BUFFER_LEN(x) \ - ((x) - OTX2_HEAD_ROOM - \ - OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM) /* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT] * is equal to this value. 
@@ -56,6 +57,9 @@ */ #define CQ_QCOUNT_DEFAULT 1 +#define CQ_OP_STAT_OP_ERR 63 +#define CQ_OP_STAT_CQ_ERR 46 + struct queue_stats { u64 bytes; u64 pkts; @@ -96,7 +100,8 @@ struct otx2_snd_queue { enum cq_type { CQ_RX, CQ_TX, - CQS_PER_CINT = 2, /* RQ + SQ */ + CQ_XDP, + CQS_PER_CINT = 3, /* RQ + SQ + XDP */ }; struct otx2_cq_poll { @@ -122,9 +127,12 @@ struct otx2_cq_queue { u16 pool_ptrs; u32 cqe_cnt; u32 cq_head; + u32 cq_tail; + u32 pend_cqe; void *cqe_base; struct qmem *cqe; struct otx2_pool *rbpool; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct otx2_qset { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 03b4ec630432be..e6cb8cd0787d30 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -8,9 +8,11 @@ #include #include #include +#include #include "otx2_common.h" #include "otx2_reg.h" +#include "otx2_ptp.h" #include "cn10k.h" #define DRV_NAME "rvu_nicvf" @@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf) struct mbox *mbox = &vf->mbox; if (vf->mbox_wq) { - flush_workqueue(vf->mbox_wq); destroy_workqueue(vf->mbox_wq); vf->mbox_wq = NULL; } @@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, + .ndo_do_ioctl = otx2_ioctl, }; static int otx2_wq_init(struct otx2_nic *vf) @@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->rx_queues = qcount; hw->tx_queues = qcount; hw->max_queues = qcount; + hw->tot_tx_queues = qcount; hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); @@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; + /* Don't check for error. 
Proceed without ptp */ + otx2_ptp_init(vf); + /* Assign default mac address */ otx2_get_mac_from_af(netdev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index f18fe664b3738e..2a4c14c704c0d9 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -53,6 +53,8 @@ struct prestera_port_stats { u64 good_octets_sent; }; +#define PRESTERA_AP_PORT_MAX (10) + struct prestera_port_caps { u64 supp_link_modes; u8 supp_fec; @@ -69,6 +71,39 @@ struct prestera_lag { struct prestera_flow_block; +struct prestera_port_mac_state { + u32 mode; + u32 speed; + bool oper; + u8 duplex; + u8 fc; + u8 fec; +}; + +struct prestera_port_phy_state { + u64 lmode_bmap; + struct { + bool pause; + bool asym_pause; + } remote_fc; + u8 mdix; +}; + +struct prestera_port_mac_config { + u32 mode; + u32 speed; + bool admin; + u8 inband; + u8 duplex; + u8 fec; +}; + +struct prestera_port_phy_config { + u32 mode; + bool admin; + u8 mdix; +}; + struct prestera_port { struct net_device *dev; struct prestera_switch *sw; @@ -91,6 +126,10 @@ struct prestera_port { struct prestera_port_stats stats; struct delayed_work caching_dw; } cached_hw_stats; + struct prestera_port_mac_config cfg_mac; + struct prestera_port_phy_config cfg_phy; + struct prestera_port_mac_state state_mac; + struct prestera_port_phy_state state_phy; }; struct prestera_device { @@ -107,7 +146,7 @@ struct prestera_device { int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size); /* called by higher layer to send request to the firmware */ - int (*send_req)(struct prestera_device *dev, void *in_msg, + int (*send_req)(struct prestera_device *dev, int qid, void *in_msg, size_t in_size, void *out_msg, size_t out_size, unsigned int wait); }; @@ -129,13 +168,28 @@ enum prestera_rxtx_event_id { enum prestera_port_event_id { PRESTERA_PORT_EVENT_UNSPEC, - PRESTERA_PORT_EVENT_STATE_CHANGED, + PRESTERA_PORT_EVENT_MAC_STATE_CHANGED, }; struct prestera_port_event { u32 port_id; union { - u32 oper_state; + struct { + u32 mode; + u32 speed; + u8 oper; + u8 duplex; + u8 fc; + u8 fec; + } mac; + struct { + u64 lmode_bmap; + struct { + bool pause; + bool asym_pause; + } remote_fc; + u8 mdix; + } phy; } data; }; @@ -223,11 +277,16 @@ void prestera_device_unregister(struct prestera_device *dev); struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, u32 dev_id, u32 hw_id); -int prestera_port_autoneg_set(struct prestera_port *port, bool enable, - u64 adver_link_modes, u8 adver_fec); +int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes); struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id); +int prestera_port_cfg_mac_read(struct prestera_port *port, + struct prestera_port_mac_config *cfg); + +int prestera_port_cfg_mac_write(struct prestera_port *port, + struct prestera_port_mac_config *cfg); + struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev); int prestera_port_pvid_set(struct prestera_port *port, u16 vid); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index 68b442eb6d6943..06279cd6da6746 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = { }, }; -static void prestera_devlink_traps_fini(struct prestera_switch *sw); - 
static int prestera_drop_counter_get(struct devlink *devlink, const struct devlink_trap *trap, u64 *p_drops); @@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink, enum devlink_trap_action action, struct netlink_ext_ack *extack); -static int prestera_devlink_traps_register(struct prestera_switch *sw); - static const struct devlink_ops prestera_dl_ops = { .info_get = prestera_dl_info_get, .trap_init = prestera_trap_init, @@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw) devlink_free(dl); } -int prestera_devlink_register(struct prestera_switch *sw) +void prestera_devlink_register(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); - int err; - - err = devlink_register(dl); - if (err) { - dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); - return err; - } - err = prestera_devlink_traps_register(sw); - if (err) { - devlink_unregister(dl); - dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n", - err); - return err; - } - - return 0; + devlink_register(dl); } void prestera_devlink_unregister(struct prestera_switch *sw) { - struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); - prestera_devlink_traps_fini(sw); devlink_unregister(dl); - - kfree(trap_data->trap_items_arr); - kfree(trap_data); } int prestera_devlink_port_register(struct prestera_port *port) @@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev) return &port->dl_port; } -static int prestera_devlink_traps_register(struct prestera_switch *sw) +int prestera_devlink_traps_register(struct prestera_switch *sw) { const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); @@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink, cpu_code_type, p_drops); } -static void prestera_devlink_traps_fini(struct prestera_switch *sw) +void prestera_devlink_traps_unregister(struct prestera_switch *sw) { + struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); const struct devlink_trap *trap; int i; @@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw) devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, ARRAY_SIZE(prestera_trap_groups_arr)); + kfree(trap_data->trap_items_arr); + kfree(trap_data); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index cc34c3db13a2f8..b322295bad3ad3 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -9,7 +9,7 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); void prestera_devlink_free(struct prestera_switch *sw); -int prestera_devlink_register(struct prestera_switch *sw); +void prestera_devlink_register(struct prestera_switch *sw); void prestera_devlink_unregister(struct prestera_switch *sw); int prestera_devlink_port_register(struct prestera_port *port); @@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev); void prestera_devlink_trap_report(struct prestera_port *port, struct sk_buff *skb, u8 cpu_code); +int prestera_devlink_traps_register(struct prestera_switch *sw); +void prestera_devlink_traps_unregister(struct prestera_switch *sw); #endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c 
b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c index 93a5e2baf8085e..6011454dba71f2 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c @@ -323,7 +323,6 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd, { u32 new_mode = PRESTERA_LINK_MODE_MAX; u32 type, mode; - int err; for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) { if (port_types[type].eth_type == ecmd->base.port && @@ -348,13 +347,8 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd, } } - if (new_mode < PRESTERA_LINK_MODE_MAX) - err = prestera_hw_port_link_mode_set(port, new_mode); - else - err = -EINVAL; - - if (err) - return err; + if (new_mode >= PRESTERA_LINK_MODE_MAX) + return -EINVAL; port->caps.type = type; port->autoneg = false; @@ -434,27 +428,33 @@ static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd, static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { + struct prestera_port_phy_state *state = &port->state_phy; bool asym_pause; bool pause; u64 bitmap; int err; - err = prestera_hw_port_remote_cap_get(port, &bitmap); - if (!err) { - prestera_modes_to_eth(ecmd->link_modes.lp_advertising, - bitmap, 0, PRESTERA_PORT_TYPE_NONE); + err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap, + &state->remote_fc.pause, + &state->remote_fc.asym_pause); + if (err) + netdev_warn(port->dev, "Remote link caps get failed %d", + port->caps.transceiver); - if (!bitmap_empty(ecmd->link_modes.lp_advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS)) { - ethtool_link_ksettings_add_link_mode(ecmd, - lp_advertising, - Autoneg); - } + bitmap = state->lmode_bmap; + + prestera_modes_to_eth(ecmd->link_modes.lp_advertising, + bitmap, 0, PRESTERA_PORT_TYPE_NONE); + + if (!bitmap_empty(ecmd->link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + ethtool_link_ksettings_add_link_mode(ecmd, + lp_advertising, + Autoneg); } - err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause); - if (err) - return; + pause = state->remote_fc.pause; + asym_pause = state->remote_fc.asym_pause; if (pause) ethtool_link_ksettings_add_link_mode(ecmd, @@ -466,30 +466,46 @@ static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd, Asym_Pause); } -static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd, - struct prestera_port *port) +static void prestera_port_link_mode_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) { + struct prestera_port_mac_state *state = &port->state_mac; u32 speed; + u8 duplex; int err; - err = prestera_hw_port_speed_get(port, &speed); - ecmd->base.speed = err ? SPEED_UNKNOWN : speed; + if (!port->state_mac.oper) + return; + + if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) { + err = prestera_hw_port_mac_mode_get(port, NULL, &speed, + &duplex, NULL); + if (err) { + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + } else { + state->speed = speed; + state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ? 
+ DUPLEX_FULL : DUPLEX_HALF; + } + } + + ecmd->base.speed = port->state_mac.speed; + ecmd->base.duplex = port->state_mac.duplex; } -static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd, - struct prestera_port *port) +static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd, + struct prestera_port *port) { - u8 duplex; - int err; + struct prestera_port_phy_state *state = &port->state_phy; - err = prestera_hw_port_duplex_get(port, &duplex); - if (err) { - ecmd->base.duplex = DUPLEX_UNKNOWN; - return; + if (prestera_hw_port_phy_mode_get(port, &state->mdix, NULL, NULL, NULL)) { + netdev_warn(port->dev, "MDIX params get failed"); + state->mdix = ETH_TP_MDI_INVALID; } - ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ? - DUPLEX_FULL : DUPLEX_HALF; + ecmd->base.eth_tp_mdix = port->state_phy.mdix; + ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix; } static int @@ -501,6 +517,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev, ethtool_link_ksettings_zero_link_mode(ecmd, supported); ethtool_link_ksettings_zero_link_mode(ecmd, advertising); ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising); + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; @@ -521,13 +539,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev, prestera_port_supp_types_get(ecmd, port); - if (netif_carrier_ok(dev)) { - prestera_port_speed_get(ecmd, port); - prestera_port_duplex_get(ecmd, port); - } else { - ecmd->base.speed = SPEED_UNKNOWN; - ecmd->base.duplex = DUPLEX_UNKNOWN; - } + if (netif_carrier_ok(dev)) + prestera_port_link_mode_get(ecmd, port); ecmd->base.port = prestera_port_type_get(port); @@ -545,8 +558,7 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev, if (port->caps.type == PRESTERA_PORT_TYPE_TP && port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) - prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix, - &ecmd->base.eth_tp_mdix_ctrl); + prestera_port_mdix_get(ecmd, port); return 0; } @@ -555,12 +567,17 @@ static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID && - port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && - port->caps.type == PRESTERA_PORT_TYPE_TP) - return prestera_hw_port_mdix_set(port, - ecmd->base.eth_tp_mdix_ctrl); - + port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && + port->caps.type == PRESTERA_PORT_TYPE_TP) { + port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl; + return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } return 0; + } static int prestera_port_link_mode_set(struct prestera_port *port, @@ -568,12 +585,15 @@ static int prestera_port_link_mode_set(struct prestera_port *port, { u32 new_mode = PRESTERA_LINK_MODE_MAX; u32 mode; + int err; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { - if (speed != port_link_modes[mode].speed) + if (speed != SPEED_UNKNOWN && + speed != port_link_modes[mode].speed) continue; - if (duplex != port_link_modes[mode].duplex) + if (duplex != DUPLEX_UNKNOWN && + duplex != port_link_modes[mode].duplex) continue; if (!(port_link_modes[mode].pr_mask & @@ -590,36 +610,31 @@ static int prestera_port_link_mode_set(struct prestera_port *port, if (new_mode == PRESTERA_LINK_MODE_MAX) return -EOPNOTSUPP; - return prestera_hw_port_link_mode_set(port, new_mode); + err = 
prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + false, new_mode, 0, + port->cfg_phy.mdix); + if (err) + return err; + + port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); + port->adver_link_modes = 0; + port->cfg_phy.mode = new_mode; + port->autoneg = false; + + return 0; } static int prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { - u32 curr_mode; - u8 duplex; - u32 speed; - int err; - - err = prestera_hw_port_link_mode_get(port, &curr_mode); - if (err) - return err; - if (curr_mode >= PRESTERA_LINK_MODE_MAX) - return -EINVAL; + u8 duplex = DUPLEX_UNKNOWN; if (ecmd->base.duplex != DUPLEX_UNKNOWN) duplex = ecmd->base.duplex == DUPLEX_FULL ? PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF; - else - duplex = port_link_modes[curr_mode].duplex; - if (ecmd->base.speed != SPEED_UNKNOWN) - speed = ecmd->base.speed; - else - speed = port_link_modes[curr_mode].speed; - - return prestera_port_link_mode_set(port, speed, duplex, + return prestera_port_link_mode_set(port, ecmd->base.speed, duplex, port->caps.type); } @@ -645,19 +660,12 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev, prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes, &adver_fec, port->caps.type); - err = prestera_port_autoneg_set(port, - ecmd->base.autoneg == AUTONEG_ENABLE, - adver_modes, adver_fec); - if (err) - return err; - - if (ecmd->base.autoneg == AUTONEG_DISABLE) { + if (ecmd->base.autoneg == AUTONEG_ENABLE) + err = prestera_port_autoneg_set(port, adver_modes); + else err = prestera_port_speed_duplex_set(ecmd, port); - if (err) - return err; - } - return 0; + return err; } static int prestera_ethtool_get_fecparam(struct net_device *dev, @@ -668,7 +676,7 @@ static int prestera_ethtool_get_fecparam(struct net_device *dev, u32 mode; int err; - err = prestera_hw_port_fec_get(port, &active); + err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active); if (err) return err; @@ -693,18 +701,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { struct prestera_port *port = netdev_priv(dev); - u8 fec, active; + struct prestera_port_mac_config cfg_mac; u32 mode; - int err; + u8 fec; if (port->autoneg) { netdev_err(dev, "FEC set is not allowed while autoneg is on\n"); return -EINVAL; } - err = prestera_hw_port_fec_get(port, &active); - if (err) - return err; + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + netdev_err(dev, "FEC set is not allowed on non-SFP ports\n"); + return -EINVAL; + } fec = PRESTERA_PORT_FEC_MAX; for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { @@ -715,13 +724,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev, } } - if (fec == active) + prestera_port_cfg_mac_read(port, &cfg_mac); + + if (fec == cfg_mac.fec) return 0; - if (fec == PRESTERA_PORT_FEC_MAX) - return -EOPNOTSUPP; + if (fec == PRESTERA_PORT_FEC_MAX) { + netdev_err(dev, "Unsupported FEC requested"); + return -EINVAL; + } + + cfg_mac.fec = fec; - return prestera_hw_port_fec_set(port, fec); + return prestera_port_cfg_mac_write(port, &cfg_mac); } static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset) @@ -766,6 +781,28 @@ static int prestera_ethtool_nway_reset(struct net_device *dev) return -EINVAL; } +void prestera_ethtool_port_state_changed(struct prestera_port *port, + struct prestera_port_event *evt) +{ + struct prestera_port_mac_state *smac = &port->state_mac; + + smac->oper = evt->data.mac.oper; + + if (smac->oper) { + smac->mode = 
evt->data.mac.mode; + smac->speed = evt->data.mac.speed; + smac->duplex = evt->data.mac.duplex; + smac->fc = evt->data.mac.fc; + smac->fec = evt->data.mac.fec; + } else { + smac->mode = PRESTERA_MAC_MODE_MAX; + smac->speed = SPEED_UNKNOWN; + smac->duplex = DUPLEX_UNKNOWN; + smac->fc = 0; + smac->fec = 0; + } +} + const struct ethtool_ops prestera_ethtool_ops = { .get_drvinfo = prestera_ethtool_get_drvinfo, .get_link_ksettings = prestera_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h index 523ef1f592ce28..9eb18e99dea676 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h @@ -6,6 +6,12 @@ #include +struct prestera_port_event; +struct prestera_port; + extern const struct ethtool_ops prestera_ethtool_ops; +void prestera_ethtool_port_state_changed(struct prestera_port *port, + struct prestera_port_event *evt); + #endif /* _PRESTERA_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index c1297859e471f0..41ba17cb296573 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -47,7 +47,6 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531, PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, - PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901, @@ -76,16 +75,12 @@ enum { PRESTERA_CMD_PORT_ATTR_LEARNING = 7, PRESTERA_CMD_PORT_ATTR_FLOOD = 8, PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9, - PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10, - PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11, - PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12, + PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12, PRESTERA_CMD_PORT_ATTR_TYPE = 13, - PRESTERA_CMD_PORT_ATTR_FEC = 14, - PRESTERA_CMD_PORT_ATTR_AUTONEG = 15, - PRESTERA_CMD_PORT_ATTR_DUPLEX = 16, PRESTERA_CMD_PORT_ATTR_STATS = 17, - PRESTERA_CMD_PORT_ATTR_MDIX = 18, - PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19, + PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18, + PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19, + PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22, }; enum { @@ -169,12 +164,12 @@ struct prestera_fw_event_handler { }; struct prestera_msg_cmd { - u32 type; + __le32 type; }; struct prestera_msg_ret { struct prestera_msg_cmd cmd; - u32 status; + __le32 status; }; struct prestera_msg_common_req { @@ -187,102 +182,144 @@ struct prestera_msg_common_resp { union prestera_msg_switch_param { u8 mac[ETH_ALEN]; - u32 ageing_timeout_ms; -}; + __le32 ageing_timeout_ms; +} __packed; struct prestera_msg_switch_attr_req { struct prestera_msg_cmd cmd; - u32 attr; + __le32 attr; union prestera_msg_switch_param param; }; struct prestera_msg_switch_init_resp { struct prestera_msg_ret ret; - u32 port_count; - u32 mtu_max; + __le32 port_count; + __le32 mtu_max; u8 switch_id; u8 lag_max; u8 lag_member_max; -}; + __le32 size_tbl_router_nexthop; +} __packed __aligned(4); -struct prestera_msg_port_autoneg_param { - u64 link_mode; - u8 enable; - u8 fec; -}; +struct prestera_msg_event_port_param { + union { + struct { + u8 oper; + __le32 mode; + __le32 speed; + u8 duplex; + u8 fc; + u8 fec; + } __packed mac; + struct { + u8 mdix; + __le64 lmode_bmap; + u8 fc; + } __packed phy; + } __packed; +} __packed __aligned(4); struct prestera_msg_port_cap_param { - u64 link_mode; + __le64 link_mode; u8 type; u8 fec; + u8 fc; 
u8 transceiver; }; -struct prestera_msg_port_mdix_param { - u8 status; - u8 admin_mode; -}; - struct prestera_msg_port_flood_param { u8 type; u8 enable; }; union prestera_msg_port_param { - u8 admin_state; - u8 oper_state; - u32 mtu; - u8 mac[ETH_ALEN]; - u8 accept_frm_type; - u32 speed; + u8 admin_state; + u8 oper_state; + __le32 mtu; + u8 mac[ETH_ALEN]; + u8 accept_frm_type; + __le32 speed; u8 learning; u8 flood; - u32 link_mode; - u8 type; - u8 duplex; - u8 fec; - u8 fc; - struct prestera_msg_port_mdix_param mdix; - struct prestera_msg_port_autoneg_param autoneg; + __le32 link_mode; + u8 type; + u8 duplex; + u8 fec; + u8 fc; + + union { + struct { + u8 admin:1; + u8 fc; + u8 ap_enable; + union { + struct { + __le32 mode; + u8 inband:1; + __le32 speed; + u8 duplex; + u8 fec; + u8 fec_supp; + } __packed reg_mode; + struct { + __le32 mode; + __le32 speed; + u8 fec; + u8 fec_supp; + } __packed ap_modes[PRESTERA_AP_PORT_MAX]; + } __packed; + } __packed mac; + struct { + u8 admin:1; + u8 adv_enable; + __le64 modes; + __le32 mode; + u8 mdix; + } __packed phy; + } __packed link; + struct prestera_msg_port_cap_param cap; struct prestera_msg_port_flood_param flood_ext; -}; + struct prestera_msg_event_port_param link_evt; +} __packed; struct prestera_msg_port_attr_req { struct prestera_msg_cmd cmd; - u32 attr; - u32 port; - u32 dev; + __le32 attr; + __le32 port; + __le32 dev; union prestera_msg_port_param param; -}; +} __packed __aligned(4); + struct prestera_msg_port_attr_resp { struct prestera_msg_ret ret; union prestera_msg_port_param param; -}; +} __packed __aligned(4); + struct prestera_msg_port_stats_resp { struct prestera_msg_ret ret; - u64 stats[PRESTERA_PORT_CNT_MAX]; + __le64 stats[PRESTERA_PORT_CNT_MAX]; }; struct prestera_msg_port_info_req { struct prestera_msg_cmd cmd; - u32 port; + __le32 port; }; struct prestera_msg_port_info_resp { struct prestera_msg_ret ret; - u32 hw_id; - u32 dev_id; - u16 fp_id; + __le32 hw_id; + __le32 dev_id; + __le16 fp_id; }; struct prestera_msg_vlan_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; - u16 vid; + __le32 port; + __le32 dev; + __le16 vid; u8 is_member; u8 is_tagged; }; @@ -292,113 +329,114 @@ struct prestera_msg_fdb_req { u8 dest_type; union { struct { - u32 port; - u32 dev; + __le32 port; + __le32 dev; }; - u16 lag_id; + __le16 lag_id; } dest; u8 mac[ETH_ALEN]; - u16 vid; + __le16 vid; u8 dynamic; - u32 flush_mode; -}; + __le32 flush_mode; +} __packed __aligned(4); struct prestera_msg_bridge_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; - u16 bridge; + __le32 port; + __le32 dev; + __le16 bridge; }; struct prestera_msg_bridge_resp { struct prestera_msg_ret ret; - u16 bridge; + __le16 bridge; }; struct prestera_msg_acl_action { - u32 id; + __le32 id; + __le32 reserved[5]; }; struct prestera_msg_acl_match { - u32 type; + __le32 type; union { struct { u8 key; u8 mask; - } u8; + } __packed u8; struct { - u16 key; - u16 mask; + __le16 key; + __le16 mask; } u16; struct { - u32 key; - u32 mask; + __le32 key; + __le32 mask; } u32; struct { - u64 key; - u64 mask; + __le64 key; + __le64 mask; } u64; struct { u8 key[ETH_ALEN]; u8 mask[ETH_ALEN]; - } mac; - } __packed keymask; + } __packed mac; + } keymask; }; struct prestera_msg_acl_rule_req { struct prestera_msg_cmd cmd; - u32 id; - u32 priority; - u16 ruleset_id; + __le32 id; + __le32 priority; + __le16 ruleset_id; u8 n_actions; u8 n_matches; }; struct prestera_msg_acl_rule_resp { struct prestera_msg_ret ret; - u32 id; + __le32 id; }; struct prestera_msg_acl_rule_stats_resp { struct 
prestera_msg_ret ret; - u64 packets; - u64 bytes; + __le64 packets; + __le64 bytes; }; struct prestera_msg_acl_ruleset_bind_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; - u16 ruleset_id; + __le32 port; + __le32 dev; + __le16 ruleset_id; }; struct prestera_msg_acl_ruleset_req { struct prestera_msg_cmd cmd; - u16 id; + __le16 id; }; struct prestera_msg_acl_ruleset_resp { struct prestera_msg_ret ret; - u16 id; + __le16 id; }; struct prestera_msg_span_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; + __le32 port; + __le32 dev; u8 id; -} __packed __aligned(4); +}; struct prestera_msg_span_resp { struct prestera_msg_ret ret; u8 id; -} __packed __aligned(4); +}; struct prestera_msg_stp_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; - u16 vid; + __le32 port; + __le32 dev; + __le16 vid; u8 state; }; @@ -409,20 +447,14 @@ struct prestera_msg_rxtx_req { struct prestera_msg_rxtx_resp { struct prestera_msg_ret ret; - u32 map_addr; -}; - -struct prestera_msg_rxtx_port_req { - struct prestera_msg_cmd cmd; - u32 port; - u32 dev; + __le32 map_addr; }; struct prestera_msg_lag_req { struct prestera_msg_cmd cmd; - u32 port; - u32 dev; - u16 lag_id; + __le32 port; + __le32 dev; + __le16 lag_id; }; struct prestera_msg_cpu_code_counter_req { @@ -433,22 +465,18 @@ struct prestera_msg_cpu_code_counter_req { struct mvsw_msg_cpu_code_counter_ret { struct prestera_msg_ret ret; - u64 packet_count; + __le64 packet_count; }; struct prestera_msg_event { - u16 type; - u16 id; -}; - -union prestera_msg_event_port_param { - u32 oper_state; + __le16 type; + __le16 id; }; struct prestera_msg_event_port { struct prestera_msg_event id; - u32 port_id; - union prestera_msg_event_port_param param; + __le32 port_id; + struct prestera_msg_event_port_param param; }; union prestera_msg_event_fdb_param { @@ -459,12 +487,52 @@ struct prestera_msg_event_fdb { struct prestera_msg_event id; u8 dest_type; union { - u32 port_id; - u16 lag_id; + __le32 port_id; + __le16 lag_id; } dest; - u32 vid; + __le32 vid; union prestera_msg_event_fdb_param param; -}; +} __packed __aligned(4); + +static inline void prestera_hw_build_tests(void) +{ + /* check requests */ + BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4); + BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 120); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28); + BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8); + + /* check responses */ + BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 112); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248); + BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12); + 
BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12); + + /* check events */ + BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20); +} + +static u8 prestera_hw_mdix_to_eth(u8 mode); +static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause); static int __prestera_cmd_ret(struct prestera_switch *sw, enum prestera_cmd_type_t type, @@ -475,15 +543,15 @@ static int __prestera_cmd_ret(struct prestera_switch *sw, struct prestera_device *dev = sw->dev; int err; - cmd->type = type; + cmd->type = __cpu_to_le32(type); - err = dev->send_req(dev, cmd, clen, ret, rlen, waitms); + err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms); if (err) return err; - if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK) + if (__le32_to_cpu(ret->cmd.type) != PRESTERA_CMD_TYPE_ACK) return -EBADE; - if (ret->status != PRESTERA_CMD_ACK_OK) + if (__le32_to_cpu(ret->status) != PRESTERA_CMD_ACK_OK) return -EINVAL; return 0; @@ -517,13 +585,24 @@ static int prestera_cmd(struct prestera_switch *sw, static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt) { - struct prestera_msg_event_port *hw_evt = msg; + struct prestera_msg_event_port *hw_evt; - if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED) - return -EINVAL; + hw_evt = (struct prestera_msg_event_port *)msg; - evt->port_evt.data.oper_state = hw_evt->param.oper_state; - evt->port_evt.port_id = hw_evt->port_id; + evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id); + + if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { + evt->port_evt.data.mac.oper = hw_evt->param.mac.oper; + evt->port_evt.data.mac.mode = + __le32_to_cpu(hw_evt->param.mac.mode); + evt->port_evt.data.mac.speed = + __le32_to_cpu(hw_evt->param.mac.speed); + evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex; + evt->port_evt.data.mac.fc = hw_evt->param.mac.fc; + evt->port_evt.data.mac.fec = hw_evt->param.mac.fec; + } else { + return -EINVAL; + } return 0; } @@ -535,17 +614,17 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt) switch (hw_evt->dest_type) { case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT: evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT; - evt->fdb_evt.dest.port_id = hw_evt->dest.port_id; + evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id); break; case PRESTERA_HW_FDB_ENTRY_TYPE_LAG: evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG; - evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id; + evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id); break; default: return -EINVAL; } - evt->fdb_evt.vid = hw_evt->vid; + evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid); ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac); @@ -597,20 +676,22 @@ static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size struct prestera_msg_event *msg = buf; struct prestera_fw_event_handler eh; struct prestera_event evt; + u16 msg_type; int err; - if (msg->type >= PRESTERA_EVENT_TYPE_MAX) + msg_type = __le16_to_cpu(msg->type); + if (msg_type >= PRESTERA_EVENT_TYPE_MAX) return -EINVAL; - if (!fw_event_parsers[msg->type].func) + if (!fw_event_parsers[msg_type].func) return -ENOENT; - err = prestera_find_event_handler(sw, msg->type, &eh); + err = prestera_find_event_handler(sw, 
msg_type, &eh); if (err) return err; - evt.id = msg->id; + evt.id = __le16_to_cpu(msg->id); - err = fw_event_parsers[msg->type].func(buf, &evt); + err = fw_event_parsers[msg_type].func(buf, &evt); if (err) return err; @@ -635,11 +716,39 @@ static void prestera_pkt_recv(struct prestera_device *dev) eh.func(sw, &ev, eh.arg); } +static u8 prestera_hw_mdix_to_eth(u8 mode) +{ + switch (mode) { + case PRESTERA_PORT_TP_MDI: + return ETH_TP_MDI; + case PRESTERA_PORT_TP_MDIX: + return ETH_TP_MDI_X; + case PRESTERA_PORT_TP_AUTO: + return ETH_TP_MDI_AUTO; + default: + return ETH_TP_MDI_INVALID; + } +} + +static u8 prestera_hw_mdix_from_eth(u8 mode) +{ + switch (mode) { + case ETH_TP_MDI: + return PRESTERA_PORT_TP_MDI; + case ETH_TP_MDI_X: + return PRESTERA_PORT_TP_MDIX; + case ETH_TP_MDI_AUTO: + return PRESTERA_PORT_TP_AUTO; + default: + return PRESTERA_PORT_TP_NA; + } +} + int prestera_hw_port_info_get(const struct prestera_port *port, u32 *dev_id, u32 *hw_id, u16 *fp_id) { struct prestera_msg_port_info_req req = { - .port = port->id, + .port = __cpu_to_le32(port->id), }; struct prestera_msg_port_info_resp resp; int err; @@ -649,9 +758,9 @@ int prestera_hw_port_info_get(const struct prestera_port *port, if (err) return err; - *dev_id = resp.dev_id; - *hw_id = resp.hw_id; - *fp_id = resp.fp_id; + *dev_id = __le32_to_cpu(resp.dev_id); + *hw_id = __le32_to_cpu(resp.hw_id); + *fp_id = __le16_to_cpu(resp.fp_id); return 0; } @@ -659,7 +768,7 @@ int prestera_hw_port_info_get(const struct prestera_port *port, int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac) { struct prestera_msg_switch_attr_req req = { - .attr = PRESTERA_CMD_SWITCH_ATTR_MAC, + .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC), }; ether_addr_copy(req.param.mac, mac); @@ -676,6 +785,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw) INIT_LIST_HEAD(&sw->event_handlers); + prestera_hw_build_tests(); + err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT, &req.cmd, sizeof(req), &resp.ret, sizeof(resp), @@ -685,9 +796,9 @@ int prestera_hw_switch_init(struct prestera_switch *sw) sw->dev->recv_msg = prestera_evt_recv; sw->dev->recv_pkt = prestera_pkt_recv; - sw->port_count = resp.port_count; + sw->port_count = __le32_to_cpu(resp.port_count); sw->mtu_min = PRESTERA_MIN_MTU; - sw->mtu_max = resp.mtu_max; + sw->mtu_max = __le32_to_cpu(resp.mtu_max); sw->id = resp.switch_id; sw->lag_member_max = resp.lag_member_max; sw->lag_max = resp.lag_max; @@ -703,9 +814,9 @@ void prestera_hw_switch_fini(struct prestera_switch *sw) int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms) { struct prestera_msg_switch_attr_req req = { - .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING, + .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING), .param = { - .ageing_timeout_ms = ageing_ms, + .ageing_timeout_ms = __cpu_to_le32(ageing_ms), }, }; @@ -713,15 +824,56 @@ int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms) &req.cmd, sizeof(req)); } -int prestera_hw_port_state_set(const struct prestera_port *port, - bool admin_state) +int prestera_hw_port_mac_mode_get(const struct prestera_port *port, + u32 *mode, u32 *speed, u8 *duplex, u8 *fec) +{ + struct prestera_msg_port_attr_resp resp; + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id) + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, 
sizeof(resp)); + if (err) + return err; + + if (mode) + *mode = __le32_to_cpu(resp.param.link_evt.mac.mode); + + if (speed) + *speed = __le32_to_cpu(resp.param.link_evt.mac.speed); + + if (duplex) + *duplex = resp.param.link_evt.mac.duplex; + + if (fec) + *fec = resp.param.link_evt.mac.fec; + + return err; +} + +int prestera_hw_port_mac_mode_set(const struct prestera_port *port, + bool admin, u32 mode, u8 inband, + u32 speed, u8 duplex, u8 fec) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { - .admin_state = admin_state, + .link = { + .mac = { + .admin = admin, + .reg_mode.mode = __cpu_to_le32(mode), + .reg_mode.inband = inband, + .reg_mode.speed = __cpu_to_le32(speed), + .reg_mode.duplex = duplex, + .reg_mode.fec = fec + } + } } }; @@ -729,14 +881,70 @@ int prestera_hw_port_state_set(const struct prestera_port *port, &req.cmd, sizeof(req)); } +int prestera_hw_port_phy_mode_get(const struct prestera_port *port, + u8 *mdix, u64 *lmode_bmap, + bool *fc_pause, bool *fc_asym) +{ + struct prestera_msg_port_attr_resp resp; + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id) + }; + int err; + + err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + if (mdix) + *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix); + + if (lmode_bmap) + *lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap); + + if (fc_pause && fc_asym) + prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc, + fc_pause, fc_asym); + + return err; +} + +int prestera_hw_port_phy_mode_set(const struct prestera_port *port, + bool admin, bool adv, u32 mode, u64 modes, + u8 mdix) +{ + struct prestera_msg_port_attr_req req = { + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .param = { + .link = { + .phy = { + .admin = admin, + .adv_enable = adv ? 
1 : 0, + .mode = __cpu_to_le32(mode), + .modes = __cpu_to_le64(modes), + } + } + } + }; + + req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix); + + return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, + &req.cmd, sizeof(req)); +} + int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_MTU, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { - .mtu = mtu, + .mtu = __cpu_to_le32(mtu), } }; @@ -747,9 +955,9 @@ int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu) int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_MAC, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; ether_addr_copy(req.param.mac, mac); @@ -762,9 +970,9 @@ int prestera_hw_port_accept_frm_type(struct prestera_port *port, enum prestera_accept_frm_type type) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { .accept_frm_type = type, } @@ -778,9 +986,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port, struct prestera_port_caps *caps) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; @@ -790,7 +998,7 @@ int prestera_hw_port_cap_get(const struct prestera_port *port, if (err) return err; - caps->supp_link_modes = resp.param.cap.link_mode; + caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode); caps->transceiver = resp.param.cap.transceiver; caps->supp_fec = resp.param.cap.fec; caps->type = resp.param.cap.type; @@ -798,44 +1006,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port, return err; } -int prestera_hw_port_remote_cap_get(const struct prestera_port *port, - u64 *link_mode_bitmap) +static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause) { - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *link_mode_bitmap = resp.param.cap.link_mode; - - return 0; -} - -int prestera_hw_port_remote_fc_get(const struct prestera_port *port, - bool *pause, bool *asym_pause) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - switch (resp.param.fc) { + switch (fc) { case PRESTERA_FC_SYMMETRIC: *pause = 
true; *asym_pause = false; @@ -852,8 +1025,6 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port, *pause = false; *asym_pause = false; } - - return 0; } int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) @@ -867,7 +1038,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) if (err) return err; - *ruleset_id = resp.id; + *ruleset_id = __le16_to_cpu(resp.id); return 0; } @@ -875,7 +1046,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id) { struct prestera_msg_acl_ruleset_req req = { - .id = ruleset_id, + .id = __cpu_to_le16(ruleset_id), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE, @@ -890,7 +1061,7 @@ static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action, int i = 0; list_for_each_entry(a_entry, a_list, list) { - action[i].id = a_entry->id; + action[i].id = __cpu_to_le32(a_entry->id); switch (a_entry->id) { case PRESTERA_ACL_RULE_ACTION_ACCEPT: @@ -916,7 +1087,7 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, int i = 0; list_for_each_entry(m_entry, m_list, list) { - match[i].type = m_entry->type; + match[i].type = __cpu_to_le32(m_entry->type); switch (m_entry->type) { case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE: @@ -924,8 +1095,10 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST: case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID: case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID: - match[i].keymask.u16.key = m_entry->keymask.u16.key; - match[i].keymask.u16.mask = m_entry->keymask.u16.mask; + match[i].keymask.u16.key = + __cpu_to_le16(m_entry->keymask.u16.key); + match[i].keymask.u16.mask = + __cpu_to_le16(m_entry->keymask.u16.mask); break; case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE: case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE: @@ -946,12 +1119,16 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST: case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC: case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST: - match[i].keymask.u32.key = m_entry->keymask.u32.key; - match[i].keymask.u32.mask = m_entry->keymask.u32.mask; + match[i].keymask.u32.key = + __cpu_to_le32(m_entry->keymask.u32.key); + match[i].keymask.u32.mask = + __cpu_to_le32(m_entry->keymask.u32.mask); break; case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT: - match[i].keymask.u64.key = m_entry->keymask.u64.key; - match[i].keymask.u64.mask = m_entry->keymask.u64.mask; + match[i].keymask.u64.key = + __cpu_to_le64(m_entry->keymask.u64.key); + match[i].keymask.u64.mask = + __cpu_to_le64(m_entry->keymask.u64.mask); break; default: return -EINVAL; @@ -1001,8 +1178,8 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw, if (err) goto free_buff; - req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule); - req->priority = prestera_acl_rule_priority_get(rule); + req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule)); + req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule)); req->n_actions = prestera_acl_rule_action_len(rule); req->n_matches = prestera_acl_rule_match_len(rule); @@ -1011,7 +1188,7 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw, if (err) goto free_buff; - *rule_id = resp.id; + *rule_id = __le32_to_cpu(resp.id); free_buff: kfree(buff); return err; @@ 
-1020,7 +1197,7 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw, int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id) { struct prestera_msg_acl_rule_req req = { - .id = rule_id + .id = __cpu_to_le32(rule_id) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE, @@ -1032,7 +1209,7 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, { struct prestera_msg_acl_rule_stats_resp resp; struct prestera_msg_acl_rule_req req = { - .id = rule_id + .id = __cpu_to_le32(rule_id) }; int err; @@ -1041,8 +1218,8 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, if (err) return err; - *packets = resp.packets; - *bytes = resp.bytes; + *packets = __le64_to_cpu(resp.packets); + *bytes = __le64_to_cpu(resp.bytes); return 0; } @@ -1050,9 +1227,9 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id) { struct prestera_msg_acl_ruleset_bind_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .ruleset_id = ruleset_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .ruleset_id = __cpu_to_le16(ruleset_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND, @@ -1063,9 +1240,9 @@ int prestera_hw_acl_port_unbind(const struct prestera_port *port, u16 ruleset_id) { struct prestera_msg_acl_ruleset_bind_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .ruleset_id = ruleset_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .ruleset_id = __cpu_to_le16(ruleset_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND, @@ -1076,8 +1253,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id) { struct prestera_msg_span_resp resp; struct prestera_msg_span_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; int err; @@ -1094,8 +1271,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id) int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id) { struct prestera_msg_span_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .id = span_id, }; @@ -1106,8 +1283,8 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id) int prestera_hw_span_unbind(const struct prestera_port *port) { struct prestera_msg_span_req req = { - .port = port->hw_id, - .dev = port->dev_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND, @@ -1127,9 +1304,9 @@ int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id) int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_TYPE, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; @@ -1144,146 +1321,12 @@ int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) return 0; } -int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_FEC, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct 
prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *fec = resp.param.fec; - - return 0; -} - -int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_FEC, - .port = port->hw_id, - .dev = port->dev_id, - .param = { - .fec = fec, - } - }; - - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, - &req.cmd, sizeof(req)); -} - -static u8 prestera_hw_mdix_to_eth(u8 mode) -{ - switch (mode) { - case PRESTERA_PORT_TP_MDI: - return ETH_TP_MDI; - case PRESTERA_PORT_TP_MDIX: - return ETH_TP_MDI_X; - case PRESTERA_PORT_TP_AUTO: - return ETH_TP_MDI_AUTO; - default: - return ETH_TP_MDI_INVALID; - } -} - -static u8 prestera_hw_mdix_from_eth(u8 mode) -{ - switch (mode) { - case ETH_TP_MDI: - return PRESTERA_PORT_TP_MDI; - case ETH_TP_MDI_X: - return PRESTERA_PORT_TP_MDIX; - case ETH_TP_MDI_AUTO: - return PRESTERA_PORT_TP_AUTO; - default: - return PRESTERA_PORT_TP_NA; - } -} - -int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status, - u8 *admin_mode) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_MDIX, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *status = prestera_hw_mdix_to_eth(resp.param.mdix.status); - *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode); - - return 0; -} - -int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_MDIX, - .port = port->hw_id, - .dev = port->dev_id, - }; - - req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode); - - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, - &req.cmd, sizeof(req)); -} - -int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE, - .port = port->hw_id, - .dev = port->dev_id, - .param = { - .link_mode = mode, - } - }; - - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, - &req.cmd, sizeof(req)); -} - -int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *mode = resp.param.link_mode; - - return 0; -} - int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_SPEED, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; @@ -1293,73 +1336,33 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed) if (err) return err; - *speed = resp.param.speed; + *speed = __le32_to_cpu(resp.param.speed); return 0; } -int prestera_hw_port_autoneg_set(const 
struct prestera_port *port, - bool autoneg, u64 link_modes, u8 fec) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG, - .port = port->hw_id, - .dev = port->dev_id, - .param = { - .autoneg = { - .link_mode = link_modes, - .enable = autoneg, - .fec = fec, - } - } - }; - - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, - &req.cmd, sizeof(req)); -} - int prestera_hw_port_autoneg_restart(struct prestera_port *port) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } -int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex) -{ - struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX, - .port = port->hw_id, - .dev = port->dev_id, - }; - struct prestera_msg_port_attr_resp resp; - int err; - - err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *duplex = resp.param.duplex; - - return 0; -} - int prestera_hw_port_stats_get(const struct prestera_port *port, struct prestera_port_stats *st) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_STATS, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_stats_resp resp; - u64 *hw = resp.stats; + __le64 *hw = resp.stats; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, @@ -1367,36 +1370,56 @@ int prestera_hw_port_stats_get(const struct prestera_port *port, if (err) return err; - st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]; - st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]; - st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]; - st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]; - st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]; - st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT]; - st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT]; - st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT]; - st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT]; - st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]; - st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]; - st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]; - st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]; - st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]; - st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT]; - st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]; - st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT]; - st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]; - st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]; - st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]; - st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT]; - st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]; - st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT]; - st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT]; - st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]; - st->unicast_frames_received = 
hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]; - st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]; - st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]; - st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]; - st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]; + st->good_octets_received = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]); + st->bad_octets_received = + __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]); + st->mac_trans_error = + __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]); + st->broadcast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]); + st->multicast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]); + st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]); + st->frames_65_to_127_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]); + st->frames_128_to_255_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]); + st->frames_256_to_511_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]); + st->frames_512_to_1023_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]); + st->frames_1024_to_max_octets = + __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]); + st->excessive_collision = + __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]); + st->multicast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]); + st->broadcast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]); + st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]); + st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]); + st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]); + st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]); + st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]); + st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]); + st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]); + st->rx_error_frame_received = + __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]); + st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]); + st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]); + st->late_collision = + __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]); + st->unicast_frames_received = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]); + st->unicast_frames_sent = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]); + st->sent_multiple = + __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]); + st->sent_deferred = + __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]); + st->good_octets_sent = + __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]); return 0; } @@ -1404,9 +1427,9 @@ int prestera_hw_port_stats_get(const struct prestera_port *port, int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_LEARNING, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { .learning = enable, } @@ -1419,9 +1442,9 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), + .port = 
__cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { .flood_ext = { .type = PRESTERA_PORT_FLOOD_TYPE_UC, @@ -1437,9 +1460,9 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood) static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { .flood_ext = { .type = PRESTERA_PORT_FLOOD_TYPE_MC, @@ -1455,9 +1478,9 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood) static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { - .attr = PRESTERA_CMD_PORT_ATTR_FLOOD, - .port = port->hw_id, - .dev = port->dev_id, + .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), .param = { .flood = flood, } @@ -1505,7 +1528,7 @@ int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask, int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) { struct prestera_msg_vlan_req req = { - .vid = vid, + .vid = __cpu_to_le16(vid), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE, @@ -1515,7 +1538,7 @@ int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid) { struct prestera_msg_vlan_req req = { - .vid = vid, + .vid = __cpu_to_le16(vid), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE, @@ -1526,9 +1549,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, bool is_member, bool untagged) { struct prestera_msg_vlan_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .vid = vid, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), .is_member = is_member, .is_tagged = !untagged, }; @@ -1540,9 +1563,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid) { struct prestera_msg_vlan_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .vid = vid, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET, @@ -1552,9 +1575,9 @@ int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid) int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state) { struct prestera_msg_stp_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .vid = vid, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .vid = __cpu_to_le16(vid), .state = state, }; @@ -1567,10 +1590,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, { struct prestera_msg_fdb_req req = { .dest = { - .dev = port->dev_id, - .port = port->hw_id, + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), }, - .vid = vid, + .vid = __cpu_to_le16(vid), .dynamic = dynamic, }; @@ -1585,10 +1608,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, { struct prestera_msg_fdb_req req = { .dest = { - .dev = port->dev_id, - .port = port->hw_id, + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), }, - .vid = vid, + .vid = 
__cpu_to_le16(vid), }; ether_addr_copy(req.mac, mac); @@ -1603,9 +1626,9 @@ int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { - .lag_id = lag_id, + .lag_id = __cpu_to_le16(lag_id), }, - .vid = vid, + .vid = __cpu_to_le16(vid), .dynamic = dynamic, }; @@ -1621,9 +1644,9 @@ int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { - .lag_id = lag_id, + .lag_id = __cpu_to_le16(lag_id), }, - .vid = vid, + .vid = __cpu_to_le16(vid), }; ether_addr_copy(req.mac, mac); @@ -1636,10 +1659,10 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) { struct prestera_msg_fdb_req req = { .dest = { - .dev = port->dev_id, - .port = port->hw_id, + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), }, - .flush_mode = mode, + .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, @@ -1649,8 +1672,8 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode) { struct prestera_msg_fdb_req req = { - .vid = vid, - .flush_mode = mode, + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN, @@ -1662,11 +1685,11 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, { struct prestera_msg_fdb_req req = { .dest = { - .dev = port->dev_id, - .port = port->hw_id, + .dev = __cpu_to_le32(port->dev_id), + .port = __cpu_to_le32(port->hw_id), }, - .vid = vid, - .flush_mode = mode, + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, @@ -1679,9 +1702,9 @@ int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { - .lag_id = lag_id, + .lag_id = __cpu_to_le16(lag_id), }, - .flush_mode = mode, + .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, @@ -1694,10 +1717,10 @@ int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { - .lag_id = lag_id, + .lag_id = __cpu_to_le16(lag_id), }, - .vid = vid, - .flush_mode = mode, + .vid = __cpu_to_le16(vid), + .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, @@ -1716,7 +1739,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) if (err) return err; - *bridge_id = resp.bridge; + *bridge_id = __le16_to_cpu(resp.bridge); return 0; } @@ -1724,7 +1747,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id) { struct prestera_msg_bridge_req req = { - .bridge = bridge_id, + .bridge = __cpu_to_le16(bridge_id), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE, @@ -1734,9 +1757,9 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id) int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id) { struct prestera_msg_bridge_req req = { - .bridge = bridge_id, - .port = port->hw_id, - .dev = port->dev_id, + .bridge = __cpu_to_le16(bridge_id), + .port = __cpu_to_le32(port->hw_id), + .dev = 
__cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD, @@ -1746,9 +1769,9 @@ int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id) int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) { struct prestera_msg_bridge_req req = { - .bridge = bridge_id, - .port = port->hw_id, - .dev = port->dev_id, + .bridge = __cpu_to_le16(bridge_id), + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE, @@ -1769,28 +1792,17 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw, if (err) return err; - params->map_addr = resp.map_addr; + params->map_addr = __le32_to_cpu(resp.map_addr); return 0; } -int prestera_hw_rxtx_port_init(struct prestera_port *port) -{ - struct prestera_msg_rxtx_port_req req = { - .port = port->hw_id, - .dev = port->dev_id, - }; - - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT, - &req.cmd, sizeof(req)); -} - int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id) { struct prestera_msg_lag_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .lag_id = lag_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD, @@ -1800,9 +1812,9 @@ int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id) int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id) { struct prestera_msg_lag_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .lag_id = lag_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE, @@ -1813,9 +1825,9 @@ int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, bool enable) { struct prestera_msg_lag_req req = { - .port = port->hw_id, - .dev = port->dev_id, - .lag_id = lag_id, + .port = __cpu_to_le32(port->hw_id), + .dev = __cpu_to_le32(port->dev_id), + .lag_id = __cpu_to_le16(lag_id), }; u32 cmd; @@ -1842,7 +1854,7 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, if (err) return err; - *packet_count = resp.packet_count; + *packet_count = __le64_to_cpu(resp.packet_count); return 0; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index 546d5fd8240dce..57a3c2e5b112f7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -19,6 +19,23 @@ enum prestera_fdb_flush_mode { | PRESTERA_FDB_FLUSH_MODE_STATIC, }; +enum { + PRESTERA_MAC_MODE_INTERNAL, + PRESTERA_MAC_MODE_SGMII, + PRESTERA_MAC_MODE_1000BASE_X, + PRESTERA_MAC_MODE_KR, + PRESTERA_MAC_MODE_KR2, + PRESTERA_MAC_MODE_KR4, + PRESTERA_MAC_MODE_CR, + PRESTERA_MAC_MODE_CR2, + PRESTERA_MAC_MODE_CR4, + PRESTERA_MAC_MODE_SR_LR, + PRESTERA_MAC_MODE_SR_LR2, + PRESTERA_MAC_MODE_SR_LR4, + + PRESTERA_MAC_MODE_MAX +}; + enum { PRESTERA_LINK_MODE_10baseT_Half, PRESTERA_LINK_MODE_10baseT_Full, @@ -116,32 +133,29 @@ int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac); /* Port API */ int prestera_hw_port_info_get(const struct prestera_port *port, u32 *dev_id, u32 *hw_id, u16 *fp_id); -int prestera_hw_port_state_set(const struct prestera_port *port, - bool admin_state); + +int prestera_hw_port_mac_mode_get(const struct prestera_port *port, + u32 
*mode, u32 *speed, u8 *duplex, u8 *fec); +int prestera_hw_port_mac_mode_set(const struct prestera_port *port, + bool admin, u32 mode, u8 inband, + u32 speed, u8 duplex, u8 fec); +int prestera_hw_port_phy_mode_get(const struct prestera_port *port, + u8 *mdix, u64 *lmode_bmap, + bool *fc_pause, bool *fc_asym); +int prestera_hw_port_phy_mode_set(const struct prestera_port *port, + bool admin, bool adv, u32 mode, u64 modes, + u8 mdix); + int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu); int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu); int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac); int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac); int prestera_hw_port_cap_get(const struct prestera_port *port, struct prestera_port_caps *caps); -int prestera_hw_port_remote_cap_get(const struct prestera_port *port, - u64 *link_mode_bitmap); -int prestera_hw_port_remote_fc_get(const struct prestera_port *port, - bool *pause, bool *asym_pause); int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type); -int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec); -int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec); -int prestera_hw_port_autoneg_set(const struct prestera_port *port, - bool autoneg, u64 link_modes, u8 fec); int prestera_hw_port_autoneg_restart(struct prestera_port *port); -int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex); int prestera_hw_port_stats_get(const struct prestera_port *port, struct prestera_port_stats *stats); -int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode); -int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode); -int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status, - u8 *admin_mode); -int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode); int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed); int prestera_hw_port_learning_set(struct prestera_port *port, bool enable); int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask, @@ -206,7 +220,6 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw, /* RX/TX */ int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params); -int prestera_hw_rxtx_port_init(struct prestera_port *port); /* LAG API */ int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 44c670807fb3cd..625b40149facfb 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -80,27 +80,76 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id) return port; } -static int prestera_port_open(struct net_device *dev) +int prestera_port_cfg_mac_read(struct prestera_port *port, + struct prestera_port_mac_config *cfg) +{ + *cfg = port->cfg_mac; + return 0; +} + +int prestera_port_cfg_mac_write(struct prestera_port *port, + struct prestera_port_mac_config *cfg) { - struct prestera_port *port = netdev_priv(dev); int err; - err = prestera_hw_port_state_set(port, true); + err = prestera_hw_port_mac_mode_set(port, cfg->admin, + cfg->mode, cfg->inband, cfg->speed, + cfg->duplex, cfg->fec); if (err) return err; + port->cfg_mac = *cfg; + return 0; +} + +static int prestera_port_open(struct 
net_device *dev) +{ + struct prestera_port *port = netdev_priv(dev); + struct prestera_port_mac_config cfg_mac; + int err = 0; + + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (!err) { + cfg_mac.admin = true; + err = prestera_port_cfg_mac_write(port, &cfg_mac); + } + } else { + port->cfg_phy.admin = true; + err = prestera_hw_port_phy_mode_set(port, true, port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } + netif_start_queue(dev); - return 0; + return err; } static int prestera_port_close(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); + struct prestera_port_mac_config cfg_mac; + int err = 0; netif_stop_queue(dev); - return prestera_hw_port_state_set(port, false); + if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { + err = prestera_port_cfg_mac_read(port, &cfg_mac); + if (!err) { + cfg_mac.admin = false; + prestera_port_cfg_mac_write(port, &cfg_mac); + } + } else { + port->cfg_phy.admin = false; + err = prestera_hw_port_phy_mode_set(port, false, port->autoneg, + port->cfg_phy.mode, + port->adver_link_modes, + port->cfg_phy.mdix); + } + + return err; } static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, @@ -137,7 +186,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p) if (err) return err; - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -228,46 +277,23 @@ static const struct net_device_ops prestera_netdev_ops = { .ndo_get_devlink_port = prestera_devlink_get_port, }; -int prestera_port_autoneg_set(struct prestera_port *port, bool enable, - u64 adver_link_modes, u8 adver_fec) +int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes) { - bool refresh = false; - u64 link_modes; int err; - u8 fec; - - if (port->caps.type != PRESTERA_PORT_TYPE_TP) - return enable ? 
-EINVAL : 0; - - if (!enable) - goto set_autoneg; - - link_modes = port->caps.supp_link_modes & adver_link_modes; - fec = port->caps.supp_fec & adver_fec; - - if (!link_modes && !fec) - return -EOPNOTSUPP; - - if (link_modes && port->adver_link_modes != link_modes) { - port->adver_link_modes = link_modes; - refresh = true; - } - - if (fec && port->adver_fec != fec) { - port->adver_fec = fec; - refresh = true; - } -set_autoneg: - if (port->autoneg == enable && !refresh) + if (port->autoneg && port->adver_link_modes == link_modes) return 0; - err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes, - port->adver_fec); + err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, + true, 0, link_modes, + port->cfg_phy.mdix); if (err) return err; - port->autoneg = enable; + port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); + port->adver_link_modes = link_modes; + port->cfg_phy.mode = 0; + port->autoneg = true; return 0; } @@ -288,6 +314,7 @@ static void prestera_port_list_del(struct prestera_port *port) static int prestera_port_create(struct prestera_switch *sw, u32 id) { + struct prestera_port_mac_config cfg_mac; struct prestera_port *port; struct net_device *dev; int err; @@ -338,11 +365,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) goto err_port_init; } + eth_hw_addr_gen(dev, sw->base_mac, port->fp_id); /* firmware requires that port's MAC address consist of the first * 5 bytes of the base MAC address */ - memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1); - dev->dev_addr[dev->addr_len - 1] = port->fp_id; + if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) { + dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id); + dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1); + } err = prestera_hw_port_mac_set(port, dev->dev_addr); if (err) { @@ -356,16 +386,43 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) goto err_port_init; } - port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); - prestera_port_autoneg_set(port, true, port->caps.supp_link_modes, - port->caps.supp_fec); + port->adver_link_modes = port->caps.supp_link_modes; + port->adver_fec = 0; + port->autoneg = true; + + /* initialize config mac */ + if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { + cfg_mac.admin = true; + cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL; + } else { + cfg_mac.admin = false; + cfg_mac.mode = PRESTERA_MAC_MODE_MAX; + } + cfg_mac.inband = false; + cfg_mac.speed = 0; + cfg_mac.duplex = DUPLEX_UNKNOWN; + cfg_mac.fec = PRESTERA_PORT_FEC_OFF; - err = prestera_hw_port_state_set(port, false); + err = prestera_port_cfg_mac_write(port, &cfg_mac); if (err) { - dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id); + dev_err(prestera_dev(sw), "Failed to set port(%u) mac mode\n", id); goto err_port_init; } + /* initialize config phy (if this is integral) */ + if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { + port->cfg_phy.mdix = ETH_TP_MDI_AUTO; + port->cfg_phy.admin = false; + err = prestera_hw_port_phy_mode_set(port, + port->cfg_phy.admin, + false, 0, 0, + port->cfg_phy.mdix); + if (err) { + dev_err(prestera_dev(sw), "Failed to set port(%u) phy mode\n", id); + goto err_port_init; + } + } + err = prestera_rxtx_port_init(port); if (err) goto err_port_init; @@ -446,8 +503,10 @@ static void prestera_port_handle_event(struct prestera_switch *sw, caching_dw = &port->cached_hw_stats.caching_dw; - if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) { - if (evt->port_evt.data.oper_state) { + 
prestera_ethtool_port_state_changed(port, &evt->port_evt); + + if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { + if (port->state_mac.oper) { netif_carrier_on(port->dev); if (!delayed_work_pending(caching_dw)) queue_delayed_work(prestera_wq, caching_dw, 0); @@ -851,7 +910,7 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_span_init; - err = prestera_devlink_register(sw); + err = prestera_devlink_traps_register(sw); if (err) goto err_dl_register; @@ -863,12 +922,13 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_ports_create; + prestera_devlink_register(sw); return 0; err_ports_create: prestera_lag_fini(sw); err_lag_init: - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); err_dl_register: prestera_span_fini(sw); err_span_init: @@ -888,9 +948,10 @@ static int prestera_switch_init(struct prestera_switch *sw) static void prestera_switch_fini(struct prestera_switch *sw) { + prestera_devlink_unregister(sw); prestera_destroy_ports(sw); prestera_lag_fini(sw); - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); prestera_event_handlers_unregister(sw); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index a250d394da3805..5d4d410b07c8cc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -14,10 +14,10 @@ #define PRESTERA_MSG_MAX_SIZE 1500 -#define PRESTERA_SUPP_FW_MAJ_VER 3 +#define PRESTERA_SUPP_FW_MAJ_VER 4 #define PRESTERA_SUPP_FW_MIN_VER 0 -#define PRESTERA_PREV_FW_MAJ_VER 2 +#define PRESTERA_PREV_FW_MAJ_VER 4 #define PRESTERA_PREV_FW_MIN_VER 0 #define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img" @@ -102,23 +102,30 @@ struct prestera_fw_evtq_regs { u32 len; }; +#define PRESTERA_CMD_QNUM_MAX 4 + +struct prestera_fw_cmdq_regs { + u32 req_ctl; + u32 req_len; + u32 rcv_ctl; + u32 rcv_len; + u32 offs; + u32 len; +}; + struct prestera_fw_regs { u32 fw_ready; - u32 pad; u32 cmd_offs; u32 cmd_len; + u32 cmd_qnum; u32 evt_offs; u32 evt_qnum; - u32 cmd_req_ctl; - u32 cmd_req_len; - u32 cmd_rcv_ctl; - u32 cmd_rcv_len; - u32 fw_status; u32 rx_status; - struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX]; + struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX]; + struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX]; }; #define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f) @@ -130,14 +137,22 @@ struct prestera_fw_regs { #define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs) #define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len) +#define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum) #define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs) #define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum) -#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl) -#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len) +#define PRESTERA_CMDQ_REG_OFFSET(q, f) \ + (PRESTERA_FW_REG_OFFSET(cmdq_list) + \ + (q) * sizeof(struct prestera_fw_cmdq_regs) + \ + offsetof(struct prestera_fw_cmdq_regs, f)) + +#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl) +#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len) +#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl) +#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, 
rcv_len) +#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs) +#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len) -#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl) -#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len) #define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status) #define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status) @@ -174,6 +189,13 @@ struct prestera_fw_evtq { size_t len; }; +struct prestera_fw_cmdq { + /* serialize access to dev->send_req */ + struct mutex cmd_mtx; + u8 __iomem *addr; + size_t len; +}; + struct prestera_fw { struct prestera_fw_rev rev_supp; const struct firmware *bin; @@ -183,9 +205,10 @@ struct prestera_fw { u8 __iomem *ldr_ring_buf; u32 ldr_buf_len; u32 ldr_wr_idx; - struct mutex cmd_mtx; /* serialize access to dev->send_req */ size_t cmd_mbox_len; u8 __iomem *cmd_mbox; + struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX]; + u8 cmd_qnum; struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX]; u8 evt_qnum; struct work_struct evt_work; @@ -324,7 +347,27 @@ static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); } -static int prestera_fw_cmd_send(struct prestera_fw *fw, +static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid) +{ + mutex_lock(&fw->cmd_queue[qid].cmd_mtx); +} + +static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid) +{ + mutex_unlock(&fw->cmd_queue[qid].cmd_mtx); +} + +static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid) +{ + return fw->cmd_queue[qid].len; +} + +static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid) +{ + return fw->cmd_queue[qid].addr; +} + +static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid, void *in_msg, size_t in_size, void *out_msg, size_t out_size, unsigned int waitms) @@ -335,30 +378,32 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw, if (!waitms) waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS; - if (ALIGN(in_size, 4) > fw->cmd_mbox_len) + if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid)) return -EMSGSIZE; /* wait for finish previous reply from FW */ - err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30); + err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30); if (err) { dev_err(fw->dev.dev, "finish reply from FW is timed out\n"); return err; } - prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size); - memcpy_toio(fw->cmd_mbox, in_msg, in_size); + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size); + + memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size); - prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT); + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), + PRESTERA_CMD_F_REQ_SENT); /* wait for reply from FW */ - err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, + err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), PRESTERA_CMD_F_REPL_SENT, waitms); if (err) { dev_err(fw->dev.dev, "reply from FW is timed out\n"); goto cmd_exit; } - ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG); + ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid)); if (ret_size > out_size) { dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n", ret_size, out_size); @@ -366,14 +411,15 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw, goto cmd_exit; } - memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size); + memcpy_fromio(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size); 
cmd_exit: - prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD); + prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), + PRESTERA_CMD_F_REPL_RCVD); return err; } -static int prestera_fw_send_req(struct prestera_device *dev, +static int prestera_fw_send_req(struct prestera_device *dev, int qid, void *in_msg, size_t in_size, void *out_msg, size_t out_size, unsigned int waitms) { @@ -382,9 +428,10 @@ static int prestera_fw_send_req(struct prestera_device *dev, fw = container_of(dev, struct prestera_fw, dev); - mutex_lock(&fw->cmd_mtx); - ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms); - mutex_unlock(&fw->cmd_mtx); + prestera_fw_cmdq_lock(fw, qid); + ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size, + waitms); + prestera_fw_cmdq_unlock(fw, qid); return ret; } @@ -414,7 +461,16 @@ static int prestera_fw_init(struct prestera_fw *fw) fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG); fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG); - mutex_init(&fw->cmd_mtx); + fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG); + + for (qid = 0; qid < fw->cmd_qnum; qid++) { + u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid)); + struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid]; + + cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid)); + cmdq->addr = fw->cmd_mbox + offs; + mutex_init(&cmdq->cmd_mtx); + } fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG); fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 73d2eba5262f08..e452cdeaf703a3 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -794,14 +794,7 @@ void prestera_rxtx_switch_fini(struct prestera_switch *sw) int prestera_rxtx_port_init(struct prestera_port *port) { - int err; - - err = prestera_hw_rxtx_port_init(port); - if (err) - return err; - port->dev->needed_headroom = PRESTERA_DSA_HLEN; - return 0; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index fab53c9b8380cc..1d607bc6b59e84 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr) * Outputs * return the calculated entry. 
*/ -static u32 hash_function(unsigned char *mac_addr_orig) +static u32 hash_function(const unsigned char *mac_addr_orig) { u32 hash_result; u32 addr0; @@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig) * -ENOSPC if table full */ static int add_del_hash_entry(struct pxa168_eth_private *pep, - unsigned char *mac_addr, + const unsigned char *mac_addr, u32 rd, u32 skip, int del) { struct addr_table_entry *entry, *start; @@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep, */ static void update_hash_table_mac_address(struct pxa168_eth_private *pep, unsigned char *oaddr, - unsigned char *addr) + const unsigned char *addr) { /* Delete old entry */ if (oaddr) @@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); mac_h = dev->dev_addr[0] << 24; mac_h |= dev->dev_addr[1] << 16; @@ -977,8 +977,7 @@ static int pxa168_init_phy(struct net_device *dev) cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; - bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES); cmd.base.autoneg = AUTONEG_ENABLE; if (cmd.base.speed != 0) @@ -1434,11 +1433,15 @@ static int pxa168_eth_probe(struct platform_device *pdev) INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); - err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr); + err = of_get_ethdev_address(pdev->dev.of_node, dev); if (err) { + u8 addr[ETH_ALEN]; + /* try reading the mac address, if set by the bootloader */ - pxa168_eth_get_mac_address(dev, dev->dev_addr); - if (!is_valid_ether_addr(dev->dev_addr)) { + pxa168_eth_get_mac_address(dev, addr); + if (is_valid_ether_addr(addr)) { + eth_hw_addr_set(dev, addr); + } else { dev_info(&pdev->dev, "Using random mac address\n"); eth_hw_addr_random(dev); } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 051dd3fb5b0382..0c864e5bf0a641 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) { memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); @@ -3810,6 +3810,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, { struct skge_port *skge; struct net_device *dev = alloc_etherdev(sizeof(*skge)); + u8 addr[ETH_ALEN]; if (!dev) return NULL; @@ -3862,7 +3863,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, } /* read the mac address */ - memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + eth_hw_addr_set(dev, addr); return dev; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index e9fc74e54b22e5..28b5b93411450d 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - 
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); memcpy_toio(hw->regs + B2_MAC_1 + port * 8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port * 8, @@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = { static struct dentry *sky2_debug; - -/* - * Read and parse the first part of Vital Product Data - */ -#define VPD_SIZE 128 -#define VPD_MAGIC 0x82 - -static const struct vpd_tag { - char tag[2]; - char *label; -} vpd_tags[] = { - { "PN", "Part Number" }, - { "EC", "Engineering Level" }, - { "MN", "Manufacturer" }, - { "SN", "Serial Number" }, - { "YA", "Asset Tag" }, - { "VL", "First Error Log Message" }, - { "VF", "Second Error Log Message" }, - { "VB", "Boot Agent ROM Configuration" }, - { "VE", "EFI UNDI Configuration" }, -}; - -static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) -{ - size_t vpd_size; - loff_t offs; - u8 len; - unsigned char *buf; - u16 reg2; - - reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); - vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); - - seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); - buf = kmalloc(vpd_size, GFP_KERNEL); - if (!buf) { - seq_puts(seq, "no memory!\n"); - return; - } - - if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { - seq_puts(seq, "VPD read failed\n"); - goto out; - } - - if (buf[0] != VPD_MAGIC) { - seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); - goto out; - } - len = buf[1]; - if (len == 0 || len > vpd_size - 4) { - seq_printf(seq, "Invalid id length: %d\n", len); - goto out; - } - - seq_printf(seq, "%.*s\n", len, buf + 3); - offs = len + 3; - - while (offs < vpd_size - 4) { - int i; - - if (!memcmp("RW", buf + offs, 2)) /* end marker */ - break; - len = buf[offs + 2]; - if (offs + len + 3 >= vpd_size) - break; - - for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { - if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { - seq_printf(seq, " %s: %.*s\n", - vpd_tags[i].label, len, buf + offs + 3); - break; - } - } - offs += len + 3; - } -out: - kfree(buf); -} - static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; @@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) unsigned idx, last; int sop; - sky2_show_vpd(seq, hw); - - seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + seq_printf(seq, "IRQ src=%x mask=%x control=%x\n", sky2_read32(hw, B0_ISRC), sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); @@ -4802,10 +4720,13 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, * 1) from device tree data * 2) from internal registers set by bootloader */ - ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr); - if (ret) - memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, - ETH_ALEN); + ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev); + if (ret) { + u8 addr[ETH_ALEN]; + + memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); + eth_hw_addr_set(dev, addr); + } /* if the address is invalid, use a random value */ if (!is_valid_ether_addr(dev->dev_addr)) { @@ -4989,7 +4910,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); if (sizeof(dma_addr_t) > sizeof(u32) && - !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) { + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { using_dac = 1; err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (err < 0) { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 
398c23cec8151f..75d67d1b5f6b26 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev) struct mtk_eth *eth = mac->hw; int ret; - ret = of_get_mac_address(mac->of_node, dev->dev_addr); + ret = of_get_ethdev_address(mac->of_node, dev); if (ret) { /* If the mac address is invalid, use random mac address */ eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 1d5dd2015453fb..89ca7960b225ec 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv) static void mtk_star_set_mac_addr(struct net_device *ndev) { struct mtk_star_priv *priv = netdev_priv(ndev); - u8 *mac_addr = ndev->dev_addr; + const u8 *mac_addr = ndev->dev_addr; unsigned int high, low; high = mac_addr[0] << 8 | mac_addr[1] << 0; @@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev) if (ret) return ret; - ret = eth_platform_get_mac_address(dev, ndev->dev_addr); + ret = platform_get_ethdev_address(dev, ndev); if (ret || !is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 8d751383530b43..e10b7b04b89410 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) return 0; err_thread: - flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); err_slaves: while (i--) { @@ -2587,7 +2586,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) int i, port; if (mlx4_is_master(dev)) { - flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); for (i = 0; i < dev->num_slaves; i++) { for (port = 1; port <= MLX4_MAX_PORTS; port++) @@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac) return -EPERM; } - s_info->mac = mlx4_mac_to_u64(mac); + s_info->mac = ether_addr_to_u64(mac); mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; @@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; - mlx4_u64_to_mac(mac, s_info->mac); + u64_to_ether_addr(s_info->mac, mac); if (setting && !is_valid_ether_addr(mac)) { mlx4_info(dev, "Illegal MAC with spoofchk\n"); return -EPERM; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index f7053a74e6a80f..4d4f9cf9facb87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) buf += PAGE_SIZE; } } else { - err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? + err = copy_to_user((void __user *)buf, init_ents, + array_size(entries, cqe_size)) ? 
-EFAULT : 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ef518b1040f723..066d79e4ecfc28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "mlx4_en.h" #include "en_port.h" @@ -197,6 +198,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* xdp statistics */ "rx_xdp_drop", + "rx_xdp_redirect", + "rx_xdp_redirect_fail", "rx_xdp_tx", "rx_xdp_tx_full", @@ -428,6 +431,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, data[index++] = priv->rx_ring[i]->bytes; data[index++] = priv->rx_ring[i]->dropped; data[index++] = priv->rx_ring[i]->xdp_drop; + data[index++] = priv->rx_ring[i]->xdp_redirect; + data[index++] = priv->rx_ring[i]->xdp_redirect_fail; data[index++] = priv->rx_ring[i]->xdp_tx; data[index++] = priv->rx_ring[i]->xdp_tx_full; } @@ -519,6 +524,10 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_dropped", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_drop", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect_fail", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_tx", i); sprintf(data + (index++) * ETH_GSTRING_LEN, @@ -643,10 +652,8 @@ static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg, unsigned int i; \ cfg = &ptys2ethtool_map[reg_]; \ cfg->speed = speed_; \ - bitmap_zero(cfg->supported, \ - __ETHTOOL_LINK_MODE_MASK_NBITS); \ - bitmap_zero(cfg->advertised, \ - __ETHTOOL_LINK_MODE_MASK_NBITS); \ + linkmode_zero(cfg->supported); \ + linkmode_zero(cfg->advertised); \ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ __set_bit(modes[i], cfg->supported); \ __set_bit(modes[i], cfg->advertised); \ @@ -702,10 +709,8 @@ static void ptys2ethtool_update_link_modes(unsigned long *link_modes, int i; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { if (eth_proto & MLX4_PROT_MASK(i)) - bitmap_or(link_modes, link_modes, - ptys2ethtool_link_mode(&ptys2ethtool_map[i], - report), - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_or(link_modes, link_modes, + ptys2ethtool_link_mode(&ptys2ethtool_map[i], report)); } } @@ -716,11 +721,9 @@ static u32 ethtool2ptys_link_modes(const unsigned long *link_modes, u32 ptys_modes = 0; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (bitmap_intersects( - ptys2ethtool_link_mode(&ptys2ethtool_map[i], - report), - link_modes, - __ETHTOOL_LINK_MODE_MASK_NBITS)) + ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i], + report); + if (linkmode_intersects(map_mode, link_modes)) ptys_modes |= 1 << i; } return ptys_modes; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 109472d6b61f19..f1259bdb1a2965 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) if (mdev->pndev[i]) mlx4_en_destroy_netdev(mdev->pndev[i]); - flush_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue); (void) mlx4_mr_free(dev, &mdev->mr); iounmap(mdev->uar_map); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8af7f282732255..3f6d5c38463723 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -527,18 
+527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, return err; } -static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac) { - int i; - for (i = ETH_ALEN - 1; i >= 0; --i) { - dst_mac[i] = src_mac & 0xff; - src_mac >>= 8; - } - memset(&dst_mac[ETH_ALEN], 0, 2); + u8 addr[ETH_ALEN]; + + u64_to_ether_addr(src_mac, addr); + eth_hw_addr_set(dev, addr); } -static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, +static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, + const unsigned char *addr, int qpn, u64 *reg_id) { int err; @@ -559,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, - unsigned char *mac, int *qpn, u64 *reg_id) + const unsigned char *mac, int *qpn, u64 *reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; @@ -611,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, } static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, - unsigned char *mac, int qpn, u64 reg_id) + const unsigned char *mac, + int qpn, u64 reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; @@ -644,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) int index = 0; int err = 0; int *qpn = &priv->base_qpn; - u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + u64 mac = ether_addr_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", priv->dev->dev_addr); @@ -683,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) int qpn = priv->base_qpn; if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { - u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + u64 mac = ether_addr_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", priv->dev->dev_addr); mlx4_unregister_mac(dev, priv->port, mac); @@ -701,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int err = 0; - u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); + u64 new_mac_u64 = ether_addr_to_u64(new_mac); if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { struct hlist_head *bucket; unsigned int mac_hash; struct mlx4_mac_entry *entry; struct hlist_node *tmp; - u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); + u64 prev_mac_u64 = ether_addr_to_u64(prev_mac); bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { @@ -797,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr) if (err) goto out; - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, saddr->sa_data); mlx4_en_update_user_mac(priv, new_mac); out: mutex_unlock(&mdev->state_lock); @@ -1076,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, mlx4_en_cache_mclist(dev); netif_addr_unlock_bh(dev); list_for_each_entry(mclist, &priv->mc_list, list) { - mcast_addr = mlx4_mac_to_u64(mclist->addr); + mcast_addr = ether_addr_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -1169,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, found = true; if (!found) { - mac = mlx4_mac_to_u64(entry->mac); + mac = ether_addr_to_u64(entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, priv->base_qpn, entry->reg_id); @@ -1212,7 
+1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; break; } - mac = mlx4_mac_to_u64(ha->addr); + mac = ether_addr_to_u64(ha->addr); memcpy(entry->mac, ha->addr, ETH_ALEN); err = mlx4_register_mac(mdev->dev, priv->port, mac); if (err < 0) { @@ -1348,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - mac = mlx4_mac_to_u64(entry->mac); + mac = ether_addr_to_u64(entry->mac); en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, @@ -3267,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Set default MAC */ dev->addr_len = ETH_ALEN; - mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); + mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", priv->port, dev->dev_addr); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0158b88bea5b6d..532997eba69847 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete = 0; priv->port_stats.rx_alloc_pages = 0; priv->xdp_stats.rx_xdp_drop = 0; + priv->xdp_stats.rx_xdp_redirect = 0; + priv->xdp_stats.rx_xdp_redirect_fail = 0; priv->xdp_stats.rx_xdp_tx = 0; priv->xdp_stats.rx_xdp_tx_full = 0; for (i = 0; i < priv->rx_ring_num; i++) { @@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete); priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages); priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); + priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect); + priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail); priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 7f6d3b82c29b29..650e6a1844aea0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct bpf_prog *xdp_prog; int cq_ring = cq->ring; bool doorbell_pending; + bool xdp_redir_flush; struct mlx4_cqe *cqe; struct xdp_buff xdp; int polled = 0; @@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp_prog = rcu_dereference_bh(ring->xdp_prog); xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq); doorbell_pending = false; + xdp_redir_flush = false; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of @@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud switch (act) { case XDP_PASS: break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) { + ring->xdp_redirect++; + xdp_redir_flush = true; + frags[0].page = NULL; + goto next; + } + ring->xdp_redirect_fail++; + 
trace_xdp_exception(dev, xdp_prog, act); + goto xdp_drop_no_cnt; case XDP_TX: if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length, cq_ring, @@ -897,6 +909,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud break; } + if (xdp_redir_flush) + xdp_do_flush(); + if (likely(polled)) { if (doorbell_pending) { priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c56b9dba4c7189..817f4154b86d59 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->bf_enabled = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); } + ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL; ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->queue_index = queue_index; @@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) #else iowrite32be( #endif - (__force u32)ring->doorbell_qpn, - ring->bf.uar->map + MLX4_SEND_DOORBELL); + (__force u32)ring->doorbell_qpn, ring->doorbell_address); } static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index dc4ac1a2b6b676..42c96c9d7fb16d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev) dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { eth_random_addr(mac_addr); dev->port_random_macs |= 1 << i; - dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); + dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr); } } EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5a6b0fcaf7f8a5..b187c210d4d66e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); - ret = devlink_register(devlink); - if (ret) - goto err_persist_free; ret = devlink_params_register(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); if (ret) @@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_params_unregister; - devlink_params_publish(devlink); - devlink_reload_enable(devlink); pci_save_state(pdev); + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; err_params_unregister: devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: - devlink_unregister(devlink); -err_persist_free: kfree(dev->persist); err_devlink_free: devlink_free(devlink); @@ -4140,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; - devlink_reload_disable(devlink); + devlink_unregister(devlink); if (mlx4_is_slave(dev)) persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; @@ -4176,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_pci_disable_device(dev); devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); - devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); } diff --git 
a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f1b4ad9c66d216..f1716a83a4d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) } EXPORT_SYMBOL_GPL(mlx4_flow_detach); -int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr, int port, int qpn, u16 prio, u64 *reg_id) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 6bf558c5ec1070..e132ff4c82f2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -283,6 +283,7 @@ struct mlx4_en_tx_ring { struct mlx4_bf bf; /* Following part should be mostly read */ + void __iomem *doorbell_address; __be32 doorbell_qpn; __be32 mr_key; u32 size; /* number of TXBBs */ @@ -340,6 +341,8 @@ struct mlx4_en_rx_ring { unsigned long csum_complete; unsigned long rx_alloc_pages; unsigned long xdp_drop; + unsigned long xdp_redirect; + unsigned long xdp_redirect_fail; unsigned long xdp_tx; unsigned long xdp_tx_full; unsigned long dropped; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 7b51ae8cf75997..e9cd4bb6f83d7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -42,9 +42,11 @@ struct mlx4_en_port_stats { struct mlx4_en_xdp_stats { unsigned long rx_xdp_drop; + unsigned long rx_xdp_redirect; + unsigned long rx_xdp_redirect_fail; unsigned long rx_xdp_tx; unsigned long rx_xdp_tx_full; -#define NUM_XDP_STATS 3 +#define NUM_XDP_STATS 5 }; struct mlx4_en_phy_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 63032cd6efb17b..e63bb9ceb9c062 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ - fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ + fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \ - fw_reset.o qos.o + fw_reset.o qos.o lib/tout.o # # Netdev basic @@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o -mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \ +mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \ en_rep.o en/rep/bond.o en/mod_hdr.o \ en/mapping.o mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ @@ -45,7 +45,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ esw/indir_table.o en/tc_tun_encap.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \ - en/tc/post_act.o + en/tc/post_act.o en/tc/int_port.o mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o 
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index db5dfff585c990..f71ec4d9d68e38 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -45,6 +45,7 @@ #include "mlx5_core.h" #include "lib/eq.h" +#include "lib/tout.h" enum { CMD_IF_REV = 5, @@ -225,9 +226,13 @@ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) static void poll_timeout(struct mlx5_cmd_work_ent *ent) { - unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); + struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); + u64 cmd_to_ms = mlx5_tout_ms(dev, CMD); + unsigned long poll_end; u8 own; + poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000); + do { own = READ_ONCE(ent->lay->status_own); if (!(own & CMD_OWNER_HW)) { @@ -925,15 +930,18 @@ static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; - struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); - unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + bool poll_cmd = ent->polling; struct mlx5_cmd_layout *lay; + struct mlx5_core_dev *dev; + unsigned long cb_timeout; struct semaphore *sem; unsigned long flags; - bool poll_cmd = ent->polling; int alloc_ret; int cmd_mode; + dev = container_of(cmd, struct mlx5_core_dev, cmd); + cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); + complete(&ent->handling); sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); @@ -1073,7 +1081,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { - unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); struct mlx5_cmd *cmd = &dev->cmd; int err; @@ -2058,7 +2066,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) return -EINVAL; } - cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL); + cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL); if (!cmd->stats) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index e8093c4e09d4e3..a8b84d53dfb061 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "mlx5_core.h" /* intf dev list mutex */ @@ -537,6 +538,16 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) return add_drivers(dev); } +static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) +{ + u64 fsystem_guid, psystem_guid; + + fsystem_guid = mlx5_query_nic_system_image_guid(dev); + psystem_guid = mlx5_query_nic_system_image_guid(peer_dev); + + return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid); +} + static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) { return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | @@ -556,7 +567,8 @@ static int next_phys_dev(struct device *dev, const void *data) if (mdev == curr) return 0; - if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) + if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) && + mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) return 0; return 1; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index dcf9f27ba2efdf..1c98652b244a0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); + struct pci_dev *pdev = dev->pdev; bool sf_dev_allocated; sf_dev_allocated = mlx5_sf_dev_allocated(dev); @@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, return -EOPNOTSUPP; } + if (pci_num_vf(pdev)) { + NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); + } + switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: mlx5_unload_one(dev); @@ -449,7 +454,8 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id, struct mlx5_core_dev *dev = devlink_priv(devlink); bool new_state = val.vbool; - if (new_state && !MLX5_CAP_GEN(dev, roce)) { + if (new_state && !MLX5_CAP_GEN(dev, roce) && + !MLX5_CAP_GEN(dev, roce_rw_supported)) { NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE"); return -EOPNOTSUPP; } @@ -625,7 +631,6 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, value); - devlink_param_publish(devlink, &enable_eth_param); return 0; } @@ -636,7 +641,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink) if (!mlx5_eth_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_eth_param); devlink_param_unregister(devlink, &enable_eth_param); } @@ -672,7 +676,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -681,7 +684,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; - devlink_param_unpublish(devlink, &enable_rdma_param); devlink_param_unregister(devlink, &enable_rdma_param); } @@ -706,7 +708,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -717,7 +718,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink) if (!mlx5_vnet_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_vnet_param); devlink_param_unregister(devlink, &enable_vnet_param); } @@ -797,18 +797,15 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink) int mlx5_devlink_register(struct devlink *devlink) { + struct mlx5_core_dev *dev = devlink_priv(devlink); int err; - err = devlink_register(devlink); - if (err) - return err; - err = devlink_params_register(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); if (err) - goto params_reg_err; + return err; + mlx5_devlink_set_params_init_values(devlink); - devlink_params_publish(devlink); err = mlx5_devlink_auxdev_params_register(devlink); if (err) @@ -818,6 +815,9 @@ int mlx5_devlink_register(struct devlink *devlink) if (err) goto traps_reg_err; + if (!mlx5_core_is_mp_slave(dev)) + devlink_set_features(devlink, DEVLINK_F_RELOAD); + return 0; traps_reg_err: @@ -825,8 +825,6 @@ int mlx5_devlink_register(struct devlink 
*devlink) auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); -params_reg_err: - devlink_unregister(devlink); return err; } @@ -834,8 +832,6 @@ void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); mlx5_devlink_auxdev_params_unregister(devlink); - devlink_params_unpublish(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); - devlink_unregister(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 87d65f6b53104f..7841ef6c193c7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p, const char *ret = trace_seq_buffer_ptr(p); switch (dst->type) { + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: + trace_seq_printf(p, "uplink\n"); + break; case MLX5_FLOW_DESTINATION_TYPE_VPORT: trace_seq_printf(p, "vport=%u\n", dst->vport.num); break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index f9cf9fb3154797..eae9aa9c081185 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -745,7 +745,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); MLX5_SET(mtrc_conf, in, log_trace_buffer_size, ilog2(TRACER_BUFFER_PAGE_NUM)); - MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTRC_CONF, 0, 1); @@ -1028,7 +1028,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) err_notifier_unregister: mlx5_eq_notifier_unregister(dev, &tracer->nb); - mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(dev, tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); err_cancel_work: @@ -1051,7 +1051,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer); - mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey); mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); } @@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_clean_saved_traces_array(tracer); mlx5_fw_tracer_free_strings_db(tracer); mlx5_fw_tracer_destroy_log_buf(tracer); - flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); kvfree(tracer); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 97252a85d65e6b..4762b55b0b0eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -89,7 +89,7 @@ struct mlx5_fw_tracer { void *log_buf; dma_addr_t dma; u32 size; - struct mlx5_core_mkey mkey; + u32 mkey; u32 consumer_index; } buff; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index ed4fb79b4db763..538adab6878b5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -30,7 +30,7 @@ static const char *const mlx5_rsc_sgmt_name[] = { struct mlx5_rsc_dump { u32 pdn; - struct mlx5_core_mkey mkey; + u32 mkey; u16 fw_segment_type[MLX5_SGMT_TYPE_NUM]; }; @@ -89,7 +89,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump return -ENOMEM; in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num); - MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key); + MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey); MLX5_SET64(resource_dump, cmd->cmd, address, dma); err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd, @@ -202,7 +202,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) } static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, - struct mlx5_core_mkey *mkey) + u32 *mkey) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; @@ -276,7 +276,7 @@ int mlx5_rsc_dump_init(struct mlx5_core_dev *dev) return err; destroy_mkey: - mlx5_core_destroy_mkey(dev, &rsc_dump->mkey); + mlx5_core_destroy_mkey(dev, rsc_dump->mkey); free_pd: mlx5_core_dealloc_pd(dev, rsc_dump->pdn); return err; @@ -287,6 +287,6 @@ void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev) if (IS_ERR_OR_NULL(dev->rsc_dump)) return; - mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey); + mlx5_core_destroy_mkey(dev, dev->rsc_dump->mkey); mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 03a7a4ce5cd5ec..f0ac6b0d965357 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -79,6 +79,11 @@ struct page_pool; SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MLX5E_RX_MAX_HEAD (256) +#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) +#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) +#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) +#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) +#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ @@ -152,6 +157,25 @@ struct page_pool; #define MLX5E_UMR_WQEBBS \ (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) +#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ + (sizeof(struct mlx5e_umr_wqe) +\ + (sizeof(struct mlx5_klm) * (sgl_len))) + +#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ + (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) + +#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ + (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS)) + +#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ + (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) + +#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ + ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) + +#define MLX5E_MAX_KLM_PER_WQE(mdev) \ + MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE) + #define MLX5E_MSG_LEVEL NETIF_MSG_LINK #define mlx5e_dbg(mlevel, priv, format, ...) 
\ @@ -217,11 +241,12 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - struct mlx5_mtt inline_mtts[0]; + union { + struct mlx5_mtt inline_mtts[0]; + struct mlx5_klm inline_klms[0]; + }; }; -extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, MLX5E_PFLAG_TX_CQE_BASED_MODER, @@ -244,6 +269,21 @@ enum mlx5e_priv_flag { #define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) +enum packet_merge { + MLX5E_PACKET_MERGE_NONE, + MLX5E_PACKET_MERGE_LRO, + MLX5E_PACKET_MERGE_SHAMPO, +}; + +struct mlx5e_packet_merge_param { + enum packet_merge type; + u32 timeout; + struct { + u8 match_criteria_type; + u8 alignment_granularity; + } shampo; +}; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -253,18 +293,20 @@ struct mlx5e_params { u16 mode; u8 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + struct { + struct mlx5e_mqprio_rl *rl; + } channel; } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; struct dim_cq_moder tx_cq_moderation; - bool lro_en; + struct mlx5e_packet_merge_param packet_merge; u8 tx_min_inline_mode; bool vlan_strip_disable; bool scatter_fcs_en; bool rx_dim_enabled; bool tx_dim_enabled; - u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; struct mlx5e_xsk *xsk; @@ -286,7 +328,8 @@ enum { MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ - MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */ + MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ + MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ }; struct mlx5e_cq { @@ -577,6 +620,7 @@ typedef struct sk_buff * struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); +typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool); int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk); void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params); @@ -598,6 +642,25 @@ struct mlx5e_rq_frags_info { u8 wqe_bulk; }; +struct mlx5e_shampo_hd { + u32 mkey; + struct mlx5e_dma_info *info; + struct page *last_page; + u16 hd_per_wq; + u16 hd_per_wqe; + unsigned long *bitmap; + u16 pi; + u16 ci; + __be32 key; + u64 last_addr; +}; + +struct mlx5e_hw_gro_data { + struct sk_buff *skb; + struct flow_keys fk; + int second_ip_id; +}; + struct mlx5e_rq { /* data path */ union { @@ -619,6 +682,7 @@ struct mlx5e_rq { u8 umr_in_progress; u8 umr_last_bulk; u8 umr_completed; + struct mlx5e_shampo_hd *shampo; } mpwqe; }; struct { @@ -638,6 +702,8 @@ struct mlx5e_rq { struct mlx5e_icosq *icosq; struct mlx5e_priv *priv; + struct mlx5e_hw_gro_data *hw_gro_data; + mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_post_rx_wqes post_wqes; mlx5e_fp_dealloc_wqe dealloc_wqe; @@ -665,7 +731,7 @@ struct mlx5e_rq { u8 wq_type; u32 rqn; struct mlx5_core_dev *mdev; - struct mlx5_core_mkey umr_mkey; + u32 umr_mkey; struct mlx5e_dma_info wqe_overflow; /* XDP read-mostly */ @@ -879,11 +945,13 @@ struct mlx5e_priv { #endif struct mlx5e_scratchpad scratchpad; struct mlx5e_htb htb; + struct mlx5e_mqprio_rl *mqprio_rl; }; struct mlx5e_rx_handlers { mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; 
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo; }; extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; @@ -913,11 +981,13 @@ void mlx5e_build_ptys2ethtool_map(void); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); @@ -1003,7 +1073,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p); int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid); + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index 86e079310ac399..ae52e7f3830615 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -24,7 +24,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) if (mlx5_core_is_pf(priv->mdev)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn); + attrs.phys.port_number = mlx5_get_dev_index(priv->mdev); if (MLX5_ESWITCH_MANAGER(priv->mdev)) { mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid); memcpy(attrs.switch_id.id, ppid.id, ppid.id_len); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index a88a1a48229f60..678ffbb48a25f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -125,15 +125,15 @@ struct mlx5e_ethtool_steering { void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); -int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); -int mlx5e_ethtool_get_rxnfc(struct net_device *dev, +int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd); +int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, u32 *rule_locs); #else static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { } static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { } -static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd) { return -EOPNOTSUPP; } -static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev, +static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, u32 *rule_locs) { return -EOPNOTSUPP; } #endif /* CONFIG_MLX5_EN_RXNFC */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h 
index 018262d0164b3f..d5b7110a4265bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -32,7 +32,6 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 -#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000 struct mlx5e_err_ctx { int (*recover)(void *ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 3cbb596821e891..f8c29022dbb220 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -87,7 +87,8 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params, u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk), mlx5e_rx_get_linear_frag_sz(params, NULL)); - return !params->lro_en && linear_frag_sz <= PAGE_SIZE; + return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE && + linear_frag_sz <= PAGE_SIZE; } bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, @@ -138,6 +139,27 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params, return params->log_rq_mtu_frames - log_pkts_per_wqe; } +u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE)); +} + +u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE); +} + +u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + PAGE_SIZE; + + return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); +} + u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -164,19 +186,8 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, mlx5e_rx_is_linear_skb(params, xsk) : mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk); - return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0; -} - -struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params) -{ - struct mlx5e_lro_param lro_param; - - lro_param = (struct mlx5e_lro_param) { - .enabled = params->lro_en, - .timeout = params->lro_timeout, - }; - - return lro_param; + return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ? + mlx5e_get_linear_rq_headroom(params, xsk) : 0; } u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) @@ -453,6 +464,23 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev, MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); } +static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) +{ + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); + int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); + u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); + int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk)); + int wqe_size = BIT(log_stride_sz) * num_strides; + + /* +1 is for the case that the pkt_per_rsrv dont consume the reservation + * so we get a filler cqe for the rest of the reservation. 
+ */ + return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1)); +} + static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, @@ -464,9 +492,12 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) + - mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index); + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) + log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk); + else + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) + + mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); break; default: /* MLX5_WQ_TYPE_CYCLIC */ log_cq_size = params->log_rq_mtu_frames; @@ -485,10 +516,11 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { + bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && MLX5_CAP_GEN(mdev, relaxed_ordering_write); - return ro && params->lro_en ? + return ro && lro_en ? MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; } @@ -520,6 +552,22 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, MLX5_SET(wq, wq, log_wqe_stride_size, log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + MLX5_SET(wq, wq, shampo_enable, true); + MLX5_SET(wq, wq, log_reservation_size, + mlx5e_shampo_get_log_rsrv_size(mdev, params)); + MLX5_SET(wq, wq, + log_max_num_of_packets_per_reservation, + mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); + MLX5_SET(wq, wq, log_headers_entry_size, + mlx5e_shampo_get_log_hd_entry_size(mdev, params)); + MLX5_SET(rqc, rqc, reservation_timeout, + params->packet_merge.timeout); + MLX5_SET(rqc, rqc, shampo_match_criteria_type, + params->packet_merge.shampo.match_criteria_type); + MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity, + params->packet_merge.shampo.alignment_granularity); + } break; } default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -620,17 +668,80 @@ static u8 mlx5e_get_rq_log_wq_sz(void *rqc) return MLX5_GET(wq, wq, log_wq_sz); } -static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, +/* This function calculates the maximum number of headers entries that are needed + * per WQE, the formula is based on the size of the reservations and the + * restriction we have about max packets for reservation that is equal to max + * headers per reservation. + */ +u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param) +{ + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); + int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); + u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); + int wqe_size = BIT(log_stride_sz) * num_strides; + u32 hd_per_wqe; + + /* Assumption: hd_per_wqe % 8 == 0. 
*/ + hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv; + mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n", + __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv); + return hd_per_wqe; +} + +/* This function calculates the maximum number of headers entries that are needed + * for the WQ, this value is uesed to allocate the header buffer in HW, thus + * must be a pow of 2. + */ +u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param) +{ + void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq); + int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); + u32 hd_per_wqe, hd_per_wq; + + hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); + hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size); + return hd_per_wq; +} + +static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param) +{ + int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest; + void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq); + int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); + u32 wqebbs; + + max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); + max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; + rest = max_hd_per_wqe % max_klm_per_umr; + wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe; + if (rest) + wqebbs += MLX5E_KLM_UMR_WQEBBS(rest); + wqebbs *= wq_size; + return wqebbs; +} + +static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, struct mlx5e_rq_param *rqp) { - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, - order_base_2(MLX5E_UMR_WQEBBS) + - mlx5e_get_rq_log_wq_sz(rqp->rqc)); - default: /* MLX5_WQ_TYPE_CYCLIC */ + u32 wqebbs; + + /* MLX5_WQ_TYPE_CYCLIC */ + if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - } + + wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc)); + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) + wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs)); } static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev) @@ -697,7 +808,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev, if (err) return err; - icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); + icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq); async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev); mlx5e_build_sq_param(mdev, params, &cparam->txq_sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 879ad46d754e1b..433e6967692d98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -11,11 +11,6 @@ struct mlx5e_xsk_param { u16 chunk_size; }; -struct mlx5e_lro_param { - bool enabled; - u32 timeout; -}; - struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; @@ -116,6 +111,18 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk); u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); +u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u8 
mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param); +u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param); u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); @@ -125,7 +132,6 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); -struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params); /* Build queue parameters */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 3a86f66d129558..18d542b1c5cbd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -682,7 +682,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->tstamp = &priv->tstamp; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->stats = &priv->ptp_stats.ch; c->lag_port = lag_port; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index e8a8d78e3e4d54..50977f01a0503d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -7,6 +7,21 @@ #define BYTES_IN_MBIT 125000 +int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes) +{ + if (nbytes < BYTES_IN_MBIT) { + qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n", + nbytes, BYTES_IN_MBIT); + return -EINVAL; + } + return 0; +} + +static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes) +{ + return div_u64(nbytes, BYTES_IN_MBIT); +} + int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) { return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev)); @@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs if (err) goto err_free_sq; err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params, - ¶m_sq, sq, 0, node->hw_id, node->qid); + ¶m_sq, sq, 0, node->hw_id, + priv->htb.qos_sq_stats[node->qid]); if (err) goto err_close_cq; @@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce return err; } + +struct mlx5e_mqprio_rl { + struct mlx5_core_dev *mdev; + u32 root_id; + u32 *leaves_id; + u8 num_tc; +}; + +struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL); +} + +void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl) +{ + kvfree(rl); +} + +int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, + u64 max_rate[]) +{ + int err; + int tc; + + if (!mlx5_qos_is_supported(mdev)) { + qos_warn(mdev, "Missing QoS capabilities. 
Try disabling SRIOV or use a supported device."); + return -EOPNOTSUPP; + } + if (num_tc > mlx5e_qos_max_leaf_nodes(mdev)) + return -EINVAL; + + rl->mdev = mdev; + rl->num_tc = num_tc; + rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL); + if (!rl->leaves_id) + return -ENOMEM; + + err = mlx5_qos_create_root_node(mdev, &rl->root_id); + if (err) + goto err_free_leaves; + + qos_dbg(mdev, "Root created, id %#x\n", rl->root_id); + + for (tc = 0; tc < num_tc; tc++) { + u32 max_average_bw; + + max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]); + err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw, + &rl->leaves_id[tc]); + if (err) + goto err_destroy_leaves; + + qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n", + tc, rl->leaves_id[tc], max_average_bw); + } + return 0; + +err_destroy_leaves: + while (--tc >= 0) + mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]); + mlx5_qos_destroy_node(mdev, rl->root_id); +err_free_leaves: + kvfree(rl->leaves_id); + return err; +} + +void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl) +{ + int tc; + + for (tc = 0; tc < rl->num_tc; tc++) + mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]); + mlx5_qos_destroy_node(rl->mdev, rl->root_id); + kvfree(rl->leaves_id); +} + +int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id) +{ + if (tc >= rl->num_tc) + return -EINVAL; + + *hw_id = rl->leaves_id[tc]; + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h index 757682b7c0e042..b7558907ba2086 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h @@ -12,6 +12,7 @@ struct mlx5e_priv; struct mlx5e_channels; struct mlx5e_channel; +int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes); int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv); @@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack); +/* MQPRIO TX rate limit */ +struct mlx5e_mqprio_rl; +struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void); +void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl); +int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, + u64 max_rate[]); +void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl); +int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index de03684528bbf9..fcb0892c08a9f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -18,10 +19,13 @@ #include "en/tc_tun.h" #include "lib/port_tun.h" #include "en/tc/sample.h" +#include "en_accel/ipsec_rxtx.h" +#include "en/tc/int_port.h" struct mlx5e_rep_indr_block_priv { struct net_device *netdev; struct mlx5e_rep_priv *rpriv; + enum flow_block_binder_type binder_type; struct list_head list; }; @@ -296,14 +300,16 @@ int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) static struct mlx5e_rep_indr_block_priv * mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, - struct net_device *netdev) + struct net_device *netdev, + enum 
flow_block_binder_type binder_type) { struct mlx5e_rep_indr_block_priv *cb_priv; list_for_each_entry(cb_priv, &rpriv->uplink_priv.tc_indr_block_priv_list, list) - if (cb_priv->netdev == netdev) + if (cb_priv->netdev == netdev && + cb_priv->binder_type == binder_type) return cb_priv; return NULL; @@ -341,9 +347,13 @@ mlx5e_rep_indr_offload(struct net_device *netdev, static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type, void *type_data, void *indr_priv) { - unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); + unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_rep_indr_block_priv *priv = indr_priv; + flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ? + MLX5_TC_FLAG(EGRESS) : + MLX5_TC_FLAG(INGRESS); + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_indr_offload(priv->netdev, type_data, priv, @@ -409,6 +419,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv) static LIST_HEAD(mlx5e_block_cb_list); +static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PASSTHRU; +} + static int mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct mlx5e_rep_priv *rpriv, @@ -418,14 +435,30 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, void (*cleanup)(struct flow_block_cb *block_cb)) { struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + bool is_ovs_int_port = netif_is_ovs_master(netdev); struct mlx5e_rep_indr_block_priv *indr_priv; struct flow_block_cb *block_cb; if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && - !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) + !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) && + !is_ovs_int_port) { + if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev)) + return -EOPNOTSUPP; + if (!mlx5e_rep_macvlan_mode_supported(netdev)) { + netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode"); + return -EOPNOTSUPP; + } + } + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) return -EOPNOTSUPP; - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port) + return -EOPNOTSUPP; + + if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw)) return -EOPNOTSUPP; f->unlocked_driver_cb = true; @@ -433,7 +466,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, switch (f->command) { case FLOW_BLOCK_BIND: - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type); if (indr_priv) return -EEXIST; @@ -443,6 +476,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, indr_priv->netdev = netdev; indr_priv->rpriv = rpriv; + indr_priv->binder_type = f->binder_type; list_add(&indr_priv->list, &rpriv->uplink_priv.tc_indr_block_priv_list); @@ -460,7 +494,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, return 0; case FLOW_BLOCK_UNBIND: - indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type); if (!indr_priv) return -ENOENT; @@ -597,8 +631,8 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, 
struct sk_buff *skb, return false; } - /* Set tun_dev so we do dev_put() after datapath */ - tc_priv->tun_dev = dev; + /* Set fwd_dev so we do dev_put() after datapath */ + tc_priv->fwd_dev = dev; skb->dev = dev; @@ -638,6 +672,12 @@ static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1, return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); } +static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) +{ + if (tc_priv->fwd_dev) + dev_put(tc_priv->fwd_dev); +} + static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj, struct mlx5e_tc_update_priv *tc_priv) @@ -647,25 +687,54 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk "Failed to restore tunnel info for sampled packet\n"); return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_skb(skb, mapped_obj); -#endif /* CONFIG_MLX5_TC_SAMPLE */ mlx5_rep_tc_post_napi_receive(tc_priv); } -bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, - struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv) +static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb, + struct mlx5_mapped_obj *mapped_obj, + struct mlx5e_tc_update_priv *tc_priv, + bool *forward_tx, + u32 reg_c1) +{ + u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + + /* Tunnel restore takes precedence over int port restore */ + if (tunnel_id) + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb, + mapped_obj->int_port_metadata, forward_tx)) { + /* Set fwd_dev for future dev_put */ + tc_priv->fwd_dev = skb->dev; + + return true; + } + + return false; +} + +void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, + struct sk_buff *skb) { + u32 reg_c1 = be32_to_cpu(cqe->ft_metadata); + struct mlx5e_tc_update_priv tc_priv = {}; struct mlx5_mapped_obj mapped_obj; struct mlx5_eswitch *esw; + bool forward_tx = false; struct mlx5e_priv *priv; u32 reg_c0; int err; reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) - return true; + goto forward; /* If reg_c0 is not equal to the default flow tag then skb->mark * is not supported and must be reset back to 0. 
@@ -679,26 +748,35 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, netdev_dbg(priv->netdev, "Couldn't find mapped object for reg_c0: %d, err: %d\n", reg_c0, err); - return false; + goto free_skb; } if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { - u32 reg_c1 = be32_to_cpu(cqe->ft_metadata); - - return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv); + if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) && + !mlx5_ipsec_is_rx_flow(cqe)) + goto free_skb; } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { - mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv); - return false; + mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv); + goto free_skb; + } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) { + if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv, + &forward_tx, reg_c1)) + goto free_skb; } else { netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); - return false; + goto free_skb; } - return true; -} +forward: + if (forward_tx) + dev_queue_xmit(skb); + else + napi_gro_receive(rq->cq.napi, skb); -void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) -{ - if (tc_priv->tun_dev) - dev_put(tc_priv->tun_dev); + mlx5_rep_tc_post_napi_receive(&tc_priv); + + return; + +free_skb: + dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h index d0661578467bea..d6c7c81690ebda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h @@ -36,10 +36,8 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); -bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, - struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv); -void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv); +void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, + struct sk_buff *skb); #else /* CONFIG_MLX5_CLS_ACT */ @@ -66,13 +64,9 @@ static inline int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { return -EOPNOTSUPP; } -struct mlx5e_tc_update_priv; -static inline bool -mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, - struct sk_buff *skb, - struct mlx5e_tc_update_priv *tc_priv) { return true; } static inline void -mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {} +mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, + struct sk_buff *skb) {} #endif /* CONFIG_MLX5_CLS_ACT */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 0eb125316fe201..74086eb556ae58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -6,6 +6,7 @@ #include "txrx.h" #include "devlink.h" #include "ptp.h" +#include "lib/tout.h" static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) { @@ -32,8 +33,10 @@ static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq) { - unsigned long exp_time = jiffies + - msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC); + struct mlx5_core_dev *dev = icosq->channel->mdev; + unsigned long exp_time; + + exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR)); 
while (time_before(jiffies, exp_time)) { if (icosq->cc == icosq->pc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index bb682fd751c981..4f4bc8726ec4fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -4,11 +4,14 @@ #include "health.h" #include "en/ptp.h" #include "en/devlink.h" +#include "lib/tout.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { - unsigned long exp_time = jiffies + - msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC); + struct mlx5_core_dev *dev = sq->mdev; + unsigned long exp_time; + + exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR)); while (time_before(jiffies, exp_time)) { if (sq->cc == sq->pc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 625cd49ef96c59..c1cdd8c2e37ae2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -127,7 +127,7 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt) static int mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, - const struct mlx5e_lro_param *init_lro_param, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, bool inner) { struct mlx5e_rss_params_traffic_type rss_tt; @@ -161,7 +161,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss, rqtn = mlx5e_rqt_get_rqtn(&rss->rqt); mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn, rqtn, rss->inner_ft_support); - mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param); rss_tt = mlx5e_rss_get_tt_config(rss, tt); mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); @@ -198,14 +198,14 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types } static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss, - const struct mlx5e_lro_param *init_lro_param, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, bool inner) { enum mlx5_traffic_types tt, max_tt; int err; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner); if (err) goto err_destroy_tirs; } @@ -297,7 +297,7 @@ int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_lro_param *init_lro_param) + const struct mlx5e_packet_merge_param *init_pkt_merge_param) { int err; @@ -305,12 +305,12 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, if (err) goto err_out; - err = mlx5e_rss_create_tirs(rss, init_lro_param, false); + err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false); if (err) goto err_destroy_rqt; if (inner_ft_support) { - err = mlx5e_rss_create_tirs(rss, init_lro_param, true); + err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true); if (err) goto err_destroy_tirs; } @@ -372,7 +372,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, */ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, - const struct mlx5e_lro_param *init_lro_param, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, bool inner, u32 *tirn) { struct mlx5e_tir *tir; 
@@ -381,7 +381,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, if (!tir) { /* TIR doesn't exist, create one */ int err; - err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner); if (err) return err; tir = rss_get_tir(rss, tt, inner); @@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, return 0; } -static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) { int err; @@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r if (err) mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", mlx5e_rqt_get_rqtn(&rss->rqt), err); + return err; } void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) @@ -418,7 +419,8 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss) mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err); } -int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param) +int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, + struct mlx5e_packet_merge_param *pkt_merge_param) { struct mlx5e_tir_builder *builder; enum mlx5_traffic_types tt; @@ -428,7 +430,7 @@ int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_p if (!builder) return -ENOMEM; - mlx5e_tir_builder_build_lro(builder, lro_param); + mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); final_err = 0; @@ -490,6 +492,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, { bool changed_indir = false; bool changed_hash = false; + struct mlx5e_rss *old_rss; + int err = 0; + + old_rss = mlx5e_rss_alloc(); + if (!old_rss) + return -ENOMEM; + + *old_rss = *rss; if (hfunc && *hfunc != rss->hash.hfunc) { switch (*hfunc) { @@ -497,7 +507,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, case ETH_RSS_HASH_TOP: break; default: - return -EINVAL; + err = -EINVAL; + goto out; } changed_hash = true; changed_indir = true; @@ -520,13 +531,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, rss->indir.table[i] = indir[i]; } - if (changed_indir && rss->enabled) - mlx5e_rss_apply(rss, rqns, num_rqns); + if (changed_indir && rss->enabled) { + err = mlx5e_rss_apply(rss, rqns, num_rqns); + if (err) { + *rss = *old_rss; + goto out; + } + } if (changed_hash) mlx5e_rss_update_tirs(rss); - return 0; +out: + mlx5e_rss_free(old_rss); + return err; } struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index d522a10dadf334..c6b21641634407 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -17,7 +17,7 @@ struct mlx5e_rss *mlx5e_rss_alloc(void); void mlx5e_rss_free(struct mlx5e_rss *rss); int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_lro_param *init_lro_param); + const struct mlx5e_packet_merge_param *init_pkt_merge_param); int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn); int mlx5e_rss_cleanup(struct mlx5e_rss *rss); @@ -30,13 +30,14 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner); int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, - const 
struct mlx5e_lro_param *init_lro_param, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, bool inner, u32 *tirn); void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns); void mlx5e_rss_disable(struct mlx5e_rss *rss); -int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param); +int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, + struct mlx5e_packet_merge_param *pkt_merge_param); int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc); int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, const u8 *key, const u8 *hfunc, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 13056cb9757d4b..14295384799606 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -34,7 +34,7 @@ struct mlx5e_rx_res { /* API for rx_res_rss_* */ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, - const struct mlx5e_lro_param *init_lro_param, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; @@ -49,7 +49,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, return -ENOMEM; err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, - init_lro_param); + init_pkt_merge_param); if (err) goto err_rss_free; @@ -275,7 +275,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) } static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, - const struct mlx5e_lro_param *init_lro_param) + const struct mlx5e_packet_merge_param *init_pkt_merge_param) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_tir_builder *builder; @@ -306,7 +306,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), inner_ft_support); - mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); @@ -336,7 +336,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), inner_ft_support); - mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true); @@ -437,7 +437,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res) int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, unsigned int init_nch) { int err; @@ -447,11 +447,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, res->max_nch = max_nch; res->drop_rqn = drop_rqn; - err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch); + err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch); if (err) goto err_out; - err = mlx5e_rx_res_channels_init(res, init_lro_param); + err = 
mlx5e_rx_res_channels_init(res, init_pkt_merge_param); if (err) goto err_rss_destroy; @@ -645,7 +645,8 @@ int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix) return err; } -int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param) +int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, + struct mlx5e_packet_merge_param *pkt_merge_param) { struct mlx5e_tir_builder *builder; int err, final_err; @@ -655,7 +656,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param if (!builder) return -ENOMEM; - mlx5e_tir_builder_build_lro(builder, lro_param); + mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); final_err = 0; @@ -665,7 +666,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param if (!rss) continue; - err = mlx5e_rss_lro_set_param(rss, lro_param); + err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param); if (err) final_err = final_err ? : err; } @@ -673,7 +674,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param for (ix = 0; ix < res->max_nch; ix++) { err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder); if (err) { - mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n", + mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n", mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err); if (!final_err) final_err = err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 4a15942d79f7d8..d09f7d174a5180 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -25,7 +25,7 @@ enum mlx5e_rx_res_features { struct mlx5e_rx_res *mlx5e_rx_res_alloc(void); int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, unsigned int init_nch); void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res); void mlx5e_rx_res_free(struct mlx5e_rx_res *res); @@ -57,7 +57,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, u8 rx_hash_fields); -int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param); +int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, + struct mlx5e_packet_merge_param *pkt_merge_param); int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch); int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c new file mode 100644 index 00000000000000..ca834bbcb44f19 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include +#include "en/mapping.h" +#include "en/tc/int_port.h" +#include "en.h" +#include "en_rep.h" +#include "en_tc.h" + +struct mlx5e_tc_int_port { + enum mlx5e_tc_int_port_type type; + int ifindex; + u32 match_metadata; + u32 mapping; + struct list_head list; + struct mlx5_flow_handle *rx_rule; + refcount_t refcnt; + struct rcu_head rcu_head; +}; + +struct mlx5e_tc_int_port_priv { + struct mlx5_core_dev *dev; + struct mutex int_ports_lock; /* Protects int ports list */ + struct list_head int_ports; /* Uses int_ports_lock */ + u16 num_ports; + bool ul_rep_rx_ready; /* Set when uplink is performing teardown */ + struct mapping_ctx *metadata_mapping; /* Metadata for source port rewrite and matching */ +}; + +bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw) +{ + return mlx5_eswitch_vport_match_metadata_enabled(esw) && + MLX5_CAP_GEN(esw->dev, reg_c_preserve); +} + +u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port) +{ + return int_port->match_metadata; +} + +int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port) +{ + /* For egress forwarding we can have the case + * where the packet came from a vport and redirected + * to int port or it came from the uplink, going + * via internal port and hairpinned back to uplink + * so we set the source to any port in this case. + */ + return int_port->type == MLX5E_TC_INT_PORT_EGRESS ? + MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT : + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; +} + +u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port) +{ + return int_port->match_metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); +} + +static struct mlx5_flow_handle * +mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw, + struct mlx5e_tc_int_port *int_port, + struct mlx5_flow_destination *dest) + +{ + struct mlx5_flow_context *flow_context; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_spec *spec; + void *misc; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5e_tc_int_port_get_metadata_for_match(int_port)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_mask()); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + /* Overwrite flow tag with the int port metadata mapping + * instead of the chain mapping. 
+ */ + flow_context = &spec->flow_context; + flow_context->flags |= FLOW_CONTEXT_HAS_TAG; + flow_context->flow_tag = int_port->mapping; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, + &flow_act, dest, 1); + if (IS_ERR(flow_rule)) + mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n", + PTR_ERR(flow_rule)); + + kvfree(spec); + + return flow_rule; +} + +static struct mlx5e_tc_int_port * +mlx5e_int_port_lookup(struct mlx5e_tc_int_port_priv *priv, + int ifindex, + enum mlx5e_tc_int_port_type type) +{ + struct mlx5e_tc_int_port *int_port; + + if (!priv->ul_rep_rx_ready) + goto not_found; + + list_for_each_entry(int_port, &priv->int_ports, list) + if (int_port->ifindex == ifindex && int_port->type == type) { + refcount_inc(&int_port->refcnt); + return int_port; + } + +not_found: + return NULL; +} + +static int mlx5e_int_port_metadata_alloc(struct mlx5e_tc_int_port_priv *priv, + int ifindex, enum mlx5e_tc_int_port_type type, + u32 *id) +{ + u32 mapped_key[2] = {type, ifindex}; + int err; + + err = mapping_add(priv->metadata_mapping, mapped_key, id); + if (err) + return err; + + /* Fill upper 4 bits of PFNUM with reserved value */ + *id |= 0xf << ESW_VPORT_BITS; + + return 0; +} + +static void mlx5e_int_port_metadata_free(struct mlx5e_tc_int_port_priv *priv, + u32 id) +{ + id &= (1 << ESW_VPORT_BITS) - 1; + mapping_remove(priv->metadata_mapping, id); +} + +/* Must be called with priv->int_ports_lock held */ +static struct mlx5e_tc_int_port * +mlx5e_int_port_add(struct mlx5e_tc_int_port_priv *priv, + int ifindex, + enum mlx5e_tc_int_port_type type) +{ + struct mlx5_eswitch *esw = priv->dev->priv.eswitch; + struct mlx5_mapped_obj mapped_obj = {}; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_int_port *int_port; + struct mlx5_flow_destination dest; + struct mapping_ctx *ctx; + u32 match_metadata; + u32 mapping; + int err; + + if (priv->num_ports == MLX5E_TC_MAX_INT_PORT_NUM) { + mlx5_core_dbg(priv->dev, "Cannot add a new int port, max supported %d", + MLX5E_TC_MAX_INT_PORT_NUM); + return ERR_PTR(-ENOSPC); + } + + int_port = kzalloc(sizeof(*int_port), GFP_KERNEL); + if (!int_port) + return ERR_PTR(-ENOMEM); + + err = mlx5e_int_port_metadata_alloc(priv, ifindex, type, &match_metadata); + if (err) { + mlx5_core_warn(esw->dev, "Cannot add a new internal port, metadata allocation failed for ifindex %d", + ifindex); + goto err_metadata; + } + + /* map metadata to reg_c0 object for miss handling */ + ctx = esw->offloads.reg_c0_obj_pool; + mapped_obj.type = MLX5_MAPPED_OBJ_INT_PORT_METADATA; + mapped_obj.int_port_metadata = match_metadata; + err = mapping_add(ctx, &mapped_obj, &mapping); + if (err) + goto err_map; + + int_port->type = type; + int_port->ifindex = ifindex; + int_port->match_metadata = match_metadata; + int_port->mapping = mapping; + + /* Create a match on internal vport metadata in vport table */ + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = uplink_rpriv->root_ft; + + int_port->rx_rule = mlx5e_int_port_create_rx_rule(esw, int_port, &dest); + if (IS_ERR(int_port->rx_rule)) { + err = PTR_ERR(int_port->rx_rule); + mlx5_core_warn(esw->dev, "Can't add internal port rx rule, err %d", err); + goto err_rx_rule; + } + + refcount_set(&int_port->refcnt, 1); + list_add_rcu(&int_port->list, &priv->int_ports); + priv->num_ports++; + + return int_port; + +err_rx_rule: + mapping_remove(ctx, 
int_port->mapping); + +err_map: + mlx5e_int_port_metadata_free(priv, match_metadata); + +err_metadata: + kfree(int_port); + + return ERR_PTR(err); +} + +/* Must be called with priv->int_ports_lock held */ +static void +mlx5e_int_port_remove(struct mlx5e_tc_int_port_priv *priv, + struct mlx5e_tc_int_port *int_port) +{ + struct mlx5_eswitch *esw = priv->dev->priv.eswitch; + struct mapping_ctx *ctx; + + ctx = esw->offloads.reg_c0_obj_pool; + + list_del_rcu(&int_port->list); + + /* The following parameters are not used by the + * rcu readers of this int_port object so it is + * safe to release them. + */ + if (int_port->rx_rule) + mlx5_del_flow_rules(int_port->rx_rule); + mapping_remove(ctx, int_port->mapping); + mlx5e_int_port_metadata_free(priv, int_port->match_metadata); + kfree_rcu(int_port); + priv->num_ports--; +} + +/* Must be called with rcu_read_lock held */ +static struct mlx5e_tc_int_port * +mlx5e_int_port_get_from_metadata(struct mlx5e_tc_int_port_priv *priv, + u32 metadata) +{ + struct mlx5e_tc_int_port *int_port; + + list_for_each_entry_rcu(int_port, &priv->int_ports, list) + if (int_port->match_metadata == metadata) + return int_port; + + return NULL; +} + +struct mlx5e_tc_int_port * +mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv, + int ifindex, + enum mlx5e_tc_int_port_type type) +{ + struct mlx5e_tc_int_port *int_port; + + if (!priv) + return ERR_PTR(-EOPNOTSUPP); + + mutex_lock(&priv->int_ports_lock); + + /* Reject request if ul rep not ready */ + if (!priv->ul_rep_rx_ready) { + int_port = ERR_PTR(-EOPNOTSUPP); + goto done; + } + + int_port = mlx5e_int_port_lookup(priv, ifindex, type); + if (int_port) + goto done; + + /* Alloc and add new int port to list */ + int_port = mlx5e_int_port_add(priv, ifindex, type); + +done: + mutex_unlock(&priv->int_ports_lock); + + return int_port; +} + +void +mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv, + struct mlx5e_tc_int_port *int_port) +{ + if (!refcount_dec_and_mutex_lock(&int_port->refcnt, &priv->int_ports_lock)) + return; + + mlx5e_int_port_remove(priv, int_port); + mutex_unlock(&priv->int_ports_lock); +} + +struct mlx5e_tc_int_port_priv * +mlx5e_tc_int_port_init(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_int_port_priv *int_port_priv; + u64 mapping_id; + + if (!mlx5e_tc_int_port_supported(esw)) + return NULL; + + int_port_priv = kzalloc(sizeof(*int_port_priv), GFP_KERNEL); + if (!int_port_priv) + return NULL; + + mapping_id = mlx5_query_nic_system_image_guid(priv->mdev); + + int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT, + sizeof(u32) * 2, + (1 << ESW_VPORT_BITS) - 1, true); + if (IS_ERR(int_port_priv->metadata_mapping)) { + mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n", + PTR_ERR(int_port_priv->metadata_mapping)); + goto err_mapping; + } + + int_port_priv->dev = priv->mdev; + mutex_init(&int_port_priv->int_ports_lock); + INIT_LIST_HEAD(&int_port_priv->int_ports); + + return int_port_priv; + +err_mapping: + kfree(int_port_priv); + + return NULL; +} + +void +mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv) +{ + if (!priv) + return; + + mutex_destroy(&priv->int_ports_lock); + mapping_destroy(priv->metadata_mapping); + kfree(priv); +} + +/* Int port rx rules reside in ul rep rx tables. + * It is possible the ul rep will go down while there are + * still int port rules in its rx table so proper cleanup + * is required to free resources. 
+ */ +void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_tc_int_port_priv *ppriv; + struct mlx5e_rep_priv *uplink_rpriv; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + ppriv = uplink_priv->int_port_priv; + + if (!ppriv) + return; + + mutex_lock(&ppriv->int_ports_lock); + ppriv->ul_rep_rx_ready = true; + mutex_unlock(&ppriv->int_ports_lock); +} + +void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_tc_int_port_priv *ppriv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_int_port *int_port; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + ppriv = uplink_priv->int_port_priv; + + if (!ppriv) + return; + + mutex_lock(&ppriv->int_ports_lock); + + ppriv->ul_rep_rx_ready = false; + + list_for_each_entry(int_port, &ppriv->int_ports, list) { + if (!IS_ERR_OR_NULL(int_port->rx_rule)) + mlx5_del_flow_rules(int_port->rx_rule); + + int_port->rx_rule = NULL; + } + + mutex_unlock(&ppriv->int_ports_lock); +} + +bool +mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv, + struct sk_buff *skb, u32 int_vport_metadata, + bool *forward_tx) +{ + enum mlx5e_tc_int_port_type fwd_type; + struct mlx5e_tc_int_port *int_port; + struct net_device *dev; + int ifindex; + + if (!priv) + return false; + + rcu_read_lock(); + int_port = mlx5e_int_port_get_from_metadata(priv, int_vport_metadata); + if (!int_port) { + rcu_read_unlock(); + mlx5_core_dbg(priv->dev, "Unable to find int port with metadata 0x%.8x\n", + int_vport_metadata); + return false; + } + + ifindex = int_port->ifindex; + fwd_type = int_port->type; + rcu_read_unlock(); + + dev = dev_get_by_index(&init_net, ifindex); + if (!dev) { + mlx5_core_dbg(priv->dev, + "Couldn't find internal port device with ifindex: %d\n", + ifindex); + return false; + } + + skb->skb_iif = dev->ifindex; + skb->dev = dev; + + if (fwd_type == MLX5E_TC_INT_PORT_INGRESS) { + skb->pkt_type = PACKET_HOST; + skb_set_redirected(skb, true); + *forward_tx = false; + } else { + skb_reset_network_header(skb); + skb_push_rcsum(skb, skb->mac_len); + skb_set_redirected(skb, false); + *forward_tx = true; + } + + return true; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h new file mode 100644 index 00000000000000..e72c79d308d747 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_EN_TC_INT_PORT_H__ +#define __MLX5_EN_TC_INT_PORT_H__ + +#include "en.h" + +struct mlx5e_tc_int_port; +struct mlx5e_tc_int_port_priv; + +enum mlx5e_tc_int_port_type { + MLX5E_TC_INT_PORT_INGRESS, + MLX5E_TC_INT_PORT_EGRESS, +}; + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) +bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw); + +struct mlx5e_tc_int_port_priv * +mlx5e_tc_int_port_init(struct mlx5e_priv *priv); +void +mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv); + +void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv); +void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv); + +bool +mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv, + struct sk_buff *skb, u32 int_vport_metadata, + bool *forward_tx); +struct mlx5e_tc_int_port * +mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv, + int ifindex, + enum mlx5e_tc_int_port_type type); +void +mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv, + struct mlx5e_tc_int_port *int_port); + +u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port); +u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port); +int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port); +#else /* CONFIG_MLX5_CLS_ACT */ +static inline u32 +mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port) +{ + return 0; +} + +static inline int +mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port) +{ + return 0; +} + +static inline bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw) +{ + return false; +} + +static inline void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv) {} +static inline void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv) {} + +#endif /* CONFIG_MLX5_CLS_ACT */ +#endif /* __MLX5_EN_TC_INT_PORT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index a3e43e898a5688..31b4e39be2d370 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -4,6 +4,7 @@ #include "en_tc.h" #include "post_act.h" #include "mlx5_core.h" +#include "fs_core.h" struct mlx5e_post_act { enum mlx5_flow_namespace_type ns_type; @@ -28,16 +29,14 @@ struct mlx5e_post_act * mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, enum mlx5_flow_namespace_type ns_type) { + enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ? 
+ FS_FT_FDB : FS_FT_NIC_RX; struct mlx5e_post_act *post_act; int err; - if (ns_type == MLX5_FLOW_NAMESPACE_FDB && - !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) { - mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); - err = -EOPNOTSUPP; - goto err_check; - } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { - mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) { + if (priv->mdev->coredev_type != MLX5_COREDEV_VF) + mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); err = -EOPNOTSUPP; goto err_check; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index 6552ecee3f9b96..df6888c4793c7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -509,13 +509,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, if (IS_ERR_OR_NULL(tc_psample)) return ERR_PTR(-EOPNOTSUPP); - /* If slow path flag is set, eg. when the neigh is invalid for encap, - * don't offload sample action. - */ - esw = tc_psample->esw; - if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) - return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); - sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL); if (!sample_flow) return ERR_PTR(-ENOMEM); @@ -527,6 +520,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, * Only match the fte id instead of the same match in the * original flow table. */ + esw = tc_psample->esw; if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) || attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { struct mlx5_flow_table *ft; @@ -602,7 +596,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, } sample_flow->pre_attr = pre_attr; - return sample_flow->post_rule; + return sample_flow->pre_rule; err_pre_offload_rule: kfree(pre_attr); @@ -613,7 +607,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, err_obj_id: sampler_put(tc_psample, sample_flow->sampler); err_sampler: - if (!post_act_handle) + if (sample_flow->post_rule) del_post_rule(esw, sample_flow, attr); err_post_rule: if (post_act_handle) @@ -628,45 +622,26 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { - struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; struct mlx5e_sample_flow *sample_flow; - struct mlx5_vport_tbl_attr tbl_attr; struct mlx5_eswitch *esw; if (IS_ERR_OR_NULL(tc_psample)) return; - /* If slow path flag is set, sample action is not offloaded. - * No need to delete sample rule. - */ - esw = tc_psample->esw; - if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { - mlx5_eswitch_del_offloaded_rule(esw, rule, attr); - return; - } - /* The following delete order can't be changed, otherwise, * will hit fw syndromes. 
*/ + esw = tc_psample->esw; sample_flow = attr->sample_attr->sample_flow; mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr); - if (!sample_flow->post_act_handle) - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, - sample_flow->post_attr); sample_restore_put(tc_psample, sample_flow->restore); mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id); sampler_put(tc_psample, sample_flow->sampler); - if (sample_flow->post_act_handle) { + if (sample_flow->post_act_handle) mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle); - } else { - tbl_attr.chain = attr->chain; - tbl_attr.prio = attr->prio; - tbl_attr.vport = esw_attr->in_rep->vport; - tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - mlx5_esw_vporttbl_put(esw, &tbl_attr); - kfree(sample_flow->post_attr); - } + else + del_post_rule(esw, sample_flow, attr); kfree(sample_flow->pre_attr); kfree(sample_flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h index db0146df9b3038..9ef8a49d780148 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h @@ -19,6 +19,8 @@ struct mlx5e_sample_attr { struct mlx5e_sample_flow *sample_flow; }; +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); struct mlx5_flow_handle * @@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act); void mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample); +#else /* CONFIG_MLX5_TC_SAMPLE */ + +static inline struct mlx5_flow_handle * +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + u32 tunnel_id) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr) {} + +static inline struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {} + +static inline void +mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {} + +#endif /* CONFIG_MLX5_TC_SAMPLE */ #endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 6c949abcd2e140..c1c6e74c79c4f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -889,7 +889,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv) return ERR_PTR(-ENOMEM); counter->is_shared = false; - counter->counter = mlx5_fc_create(ct_priv->dev, true); + counter->counter = mlx5_fc_create_ex(ct_priv->dev, true); if (IS_ERR(counter->counter)) { ct_dbg("Failed to create counter for ct entry"); ret = PTR_ERR(counter->counter); @@ -2039,25 +2039,36 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, static int mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns_type, - struct mlx5e_post_act *post_act, - const char **err_msg) + struct mlx5e_post_act *post_act) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + const char *err_msg = NULL; + int err = 0; #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) /* 
cannot restore chain ID on HW miss */ - *err_msg = "tc skb extension missing"; - return -EOPNOTSUPP; + err_msg = "tc skb extension missing"; + err = -EOPNOTSUPP; + goto out_err; #endif if (IS_ERR_OR_NULL(post_act)) { - *err_msg = "tc ct offload not supported, post action is missing"; - return -EOPNOTSUPP; + /* Ignore_flow_level support isn't supported by default for VFs and so post_act + * won't be supported. Skip showing error msg. + */ + if (priv->mdev->coredev_type != MLX5_COREDEV_VF) + err_msg = "post action is missing"; + err = -EOPNOTSUPP; + goto out_err; } if (ns_type == MLX5_FLOW_NAMESPACE_FDB) - return mlx5_tc_ct_init_check_esw_support(esw, err_msg); - return 0; + err = mlx5_tc_ct_init_check_esw_support(esw, &err_msg); + +out_err: + if (err && err_msg) + netdev_dbg(priv->netdev, "tc ct offload not supported, %s\n", err_msg); + return err; } #define INIT_ERR_PREFIX "tc ct offload init failed" @@ -2070,16 +2081,13 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, { struct mlx5_tc_ct_priv *ct_priv; struct mlx5_core_dev *dev; - const char *msg; u64 mapping_id; int err; dev = priv->mdev; - err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg); - if (err) { - mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg); + err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act); + if (err) goto err_support; - } ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL); if (!ct_priv) @@ -2127,12 +2135,21 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); - rhashtable_init(&ct_priv->zone_ht, &zone_params); - rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); - rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params); + if (rhashtable_init(&ct_priv->zone_ht, &zone_params)) + goto err_ct_zone_ht; + if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params)) + goto err_ct_tuples_ht; + if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params)) + goto err_ct_tuples_nat_ht; return ct_priv; +err_ct_tuples_nat_ht: + rhashtable_destroy(&ct_priv->ct_tuples_ht); +err_ct_tuples_ht: + rhashtable_destroy(&ct_priv->zone_ht); +err_ct_zone_ht: + mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); err_ct_nat_tbl: mlx5_chains_destroy_global_table(chains, ct_priv->ct); err_ct_tbl: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index d1599b7b944bf0..8f64f2c8895a94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -173,4 +173,6 @@ void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); +struct mlx5e_tc_int_port_priv * +mlx5e_get_int_port_priv(struct mlx5e_priv *priv); #endif /* __MLX5_EN_TC_PRIV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 4a13ef561587d8..a5e4509732257c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -10,8 +10,8 @@ #include "en_tc.h" #include "rep/tc.h" #include "rep/neigh.h" -#include "lag.h" -#include "lag_mp.h" +#include "lag/lag.h" +#include "lag/mp.h" struct mlx5e_tc_tun_route_attr { struct net_device *out_dev; @@ -83,7 +83,8 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, */ *route_dev = dev; if 
(!netdev_port_same_parent_id(priv->netdev, real_dev) || - dst_is_lag_dev || is_vlan_dev(*route_dev)) + dst_is_lag_dev || is_vlan_dev(*route_dev) || + netif_is_ovs_master(*route_dev)) *out_dev = uplink_dev; else if (mlx5e_eswitch_rep(dev) && mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) @@ -120,6 +121,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); attr->fl.fl4.flowi4_oif = uplink_dev->ifindex; + } else { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); + + if (tunnel && tunnel->get_remote_ifindex) + attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev); } rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4); @@ -437,12 +443,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_tc_tun_route_attr *attr) { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); struct net_device *route_dev; struct net_device *out_dev; struct dst_entry *dst; struct neighbour *n; int ret; + if (tunnel && tunnel->get_remote_ifindex) + attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev); dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6, NULL); if (IS_ERR(dst)) @@ -702,6 +711,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, struct mlx5_flow_attr *flow_attr) { struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; + struct mlx5e_tc_int_port *int_port; TC_TUN_ROUTE_ATTR_INIT(attr); u16 vport_num; int err = 0; @@ -726,17 +736,25 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, if (err) return err; - if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops || - !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) - goto out; - - err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num); - if (err) - goto out; + if (attr.route_dev->netdev_ops == &mlx5e_netdev_ops && + mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) { + err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num); + if (err) + goto out; - esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, - misc_parameters.vxlan_vni); - esw_attr->rx_tun_attr->decap_vport = vport_num; + esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, + misc_parameters.vxlan_vni); + esw_attr->rx_tun_attr->decap_vport = vport_num; + } else if (netif_is_ovs_master(attr.route_dev)) { + int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), + attr.route_dev->ifindex, + MLX5E_TC_INT_PORT_INGRESS); + if (IS_ERR(int_port)) { + err = PTR_ERR(int_port); + goto out; + } + esw_attr->int_port = int_port; + } out: if (flow_attr->tun_ip_version == 4) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 9350ca05ce65a9..aa092eaeaec3e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel { void *headers_v); bool (*encap_info_equal)(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b); + int (*get_remote_ifindex)(struct net_device *mirred_dev); }; extern struct mlx5e_tc_tunnel vxlan_tunnel; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 1c44c6c345f5dc..660cca73c36c8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ 
-13,6 +13,30 @@ enum { MLX5E_ROUTE_ENTRY_VALID = BIT(0), }; +static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct mlx5e_encap_entry *e, + int out_index) +{ + struct net_device *route_dev; + int err = 0; + + route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex); + + if (!route_dev || !netif_is_ovs_master(route_dev)) + goto out; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex, + MLX5E_TC_INT_PORT_EGRESS, + &attr->action, out_index); + +out: + if (route_dev) + dev_put(route_dev); + + return err; +} + struct mlx5e_route_key { int ip_version; union { @@ -809,6 +833,17 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, if (err) goto out_err; + err = mlx5e_set_int_port_tunnel(priv, attr, e, out_index); + if (err == -EOPNOTSUPP) { + /* If device doesn't support int port offload, + * redirect to uplink vport. + */ + mlx5_core_dbg(priv->mdev, "attaching int port as encap dev not supported, using uplink\n"); + err = 0; + } else if (err) { + goto out_err; + } + flow->encaps[out_index].e = e; list_add(&flow->encaps[out_index].list, &e->flows); flow->encaps[out_index].index = out_index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 4267f3a1059e7f..fd07c4cbfd1d25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, return 0; } +static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev) +{ + const struct vxlan_dev *vxlan = netdev_priv(mirred_dev); + const struct vxlan_rdst *dst = &vxlan->default_dst; + + return dst->remote_ifindex; +} + struct mlx5e_tc_tunnel vxlan_tunnel = { .tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN, .match_level = MLX5_MATCH_L4, @@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = { .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan, .parse_tunnel = mlx5e_tc_tun_parse_vxlan, .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic, + .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index de936dc4bc4832..da169b81666505 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -70,24 +70,30 @@ void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support); } -void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, - const struct mlx5e_lro_param *lro_param) +void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder, + const struct mlx5e_packet_merge_param *pkt_merge_param) { void *tirc = mlx5e_tir_builder_get_tirc(builder); const unsigned int rough_max_l2_l3_hdr_sz = 256; if (builder->modify) - MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1); - - if (!lro_param->enabled) - return; - - MLX5_SET(tirc, tirc, lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout); + MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1); + + switch (pkt_merge_param->type) { + case MLX5E_PACKET_MERGE_LRO: + MLX5_SET(tirc, tirc, 
packet_merge_mask, + MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO | + MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout); + break; + case MLX5E_PACKET_MERGE_SHAMPO: + MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO); + break; + default: + break; + } } static int mlx5e_hfunc_to_hw(u8 hfunc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h index e45149a78ed9d2..857a84bcd53aff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -18,7 +18,7 @@ struct mlx5e_rss_params_traffic_type { }; struct mlx5e_tir_builder; -struct mlx5e_lro_param; +struct mlx5e_packet_merge_param; struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify); void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder); @@ -27,8 +27,8 @@ void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder); void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn); void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqtn, bool inner_ft_support); -void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, - const struct mlx5e_lro_param *lro_param); +void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder, + const struct mlx5e_packet_merge_param *pkt_merge_param); void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, const struct mlx5e_rss_params_hash *rss_hash, const struct mlx5e_rss_params_traffic_type *rss_tt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index d54607a427403f..a55b066746cb76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -137,7 +137,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) t->tstamp = &priv->tstamp; t->pdev = mlx5_core_dma_dev(priv->mdev); t->netdev = priv->netdev; - t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); + t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); t->stats = &priv->trap_stats.ch; netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 055c3bc2373393..4cdf8e5b24c229 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -36,6 +36,7 @@ ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ enum mlx5e_icosq_wqe_type { MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_UMR_RX, + MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, #ifdef CONFIG_MLX5_EN_TLS MLX5E_ICOSQ_WQE_UMR_TLS, MLX5E_ICOSQ_WQE_SET_PSV_TLS, @@ -166,6 +167,10 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +struct mlx5e_shampo_umr { + u16 len; +}; + struct mlx5e_icosq_wqe_info { u8 wqe_type; u8 num_wqebbs; @@ -175,6 +180,7 @@ struct mlx5e_icosq_wqe_info { struct { struct mlx5e_rq *rq; } umr; + struct mlx5e_shampo_umr shampo; #ifdef CONFIG_MLX5_EN_TLS struct { struct mlx5e_ktls_offload_context_rx *priv_rx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 5120a59361e6a3..b98db50c3418d9 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -127,6 +127,25 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +static inline bool +mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + + if (!mlx5e_ipsec_eseg_meta(eseg)) + return false; + + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; + if (xo->inner_ipproto) { + eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; + } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + sq->stats->csum_partial_inner++; + } + + return true; +} #else static inline void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, @@ -143,6 +162,13 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; static inline netdev_features_t mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) { return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } + +static inline bool +mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ + return false; +} #endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5E_IPSEC_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 84eb7201c142e2..c0f409c195bf8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -47,7 +47,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, - struct mlx5_core_mkey *mkey) + u32 *mkey) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; @@ -108,7 +108,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) return 0; err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &res->mkey); + mlx5_core_destroy_mkey(mdev, res->mkey); err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); err_dealloc_pd: @@ -121,7 +121,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; mlx5_free_bfreg(mdev, &res->bfreg); - mlx5_core_destroy_mkey(mdev, &res->mkey); + mlx5_core_destroy_mkey(mdev, res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); memset(res, 0, sizeof(*res)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 9d451b8ee467c7..c2ea5fad48ddf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) break; case ETH_SS_TEST: - for (i = 0; i < mlx5e_self_test_num(priv); i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx5e_self_tests[i]); + mlx5e_self_test_fill_strings(priv, data); break; case ETH_SS_STATS: @@ -1902,6 +1900,11 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val return -EINVAL; } + if (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + netdev_warn(priv->netdev, "Can't set CQE compression with HW-GRO, disable it first.\n"); + return -EINVAL; + } 
+ new_params = priv->channels.params; MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); if (rx_filter) @@ -1954,8 +1957,8 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EOPNOTSUPP; if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) return -EINVAL; - } else if (priv->channels.params.lro_en) { - netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n"); + } else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n"); return -EINVAL; } @@ -2139,12 +2142,14 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return 0; } - return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs); + return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs); } int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { - return mlx5e_ethtool_set_rxnfc(dev, cmd); + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_set_rxnfc(priv, cmd); } static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d226cc5ab1d168..aeff1d972a4649 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node { bool mpfs; }; -static inline int mlx5e_hash_l2(u8 *addr) +static inline int mlx5e_hash_l2(const u8 *addr) { return addr[5]; } -static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) +static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr) { struct mlx5e_l2_hash_node *hn; int ix = mlx5e_hash_l2(addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 03693fa74a704a..ad0d234632a3c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -411,7 +411,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv, u32 rss_context, u32 *tirn) { if (fs->flow_type & FLOW_RSS) { - struct mlx5e_lro_param lro_param; + struct mlx5e_packet_merge_param pkt_merge_param; struct mlx5e_rss *rss; u32 flow_type; int err; @@ -426,8 +426,8 @@ static int flow_get_tirn(struct mlx5e_priv *priv, if (tt < 0) return -EINVAL; - lro_param = mlx5e_get_lro_param(&priv->channels.params); - err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn); + pkt_merge_param = priv->channels.params.packet_merge; + err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn); if (err) return err; eth_rule->rss = rss; @@ -937,9 +937,8 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, return 0; } -int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd) { - struct mlx5e_priv *priv = netdev_priv(dev); int err = 0; switch (cmd->cmd) { @@ -960,10 +959,9 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return err; } -int mlx5e_ethtool_get_rxnfc(struct net_device *dev, +int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, u32 *rule_locs) { - struct mlx5e_priv *priv = netdev_priv(dev); int err = 0; switch (info->cmd) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 
41ef6eb70a5852..65571593ec5c19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -218,6 +218,45 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node) +{ + rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), + GFP_KERNEL, node); + if (!rq->mpwqe.shampo) + return -ENOMEM; + return 0; +} + +static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq) +{ + kvfree(rq->mpwqe.shampo); +} + +static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + + shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, + node); + if (!shampo->bitmap) + return -ENOMEM; + + shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, + sizeof(*shampo->info)), + GFP_KERNEL, node); + if (!shampo->info) { + kvfree(shampo->bitmap); + return -ENOMEM; + } + return 0; +} + +static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) +{ + kvfree(rq->mpwqe.shampo->bitmap); + kvfree(rq->mpwqe.shampo->info); +} + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); @@ -233,10 +272,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) return 0; } -static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, - u64 npages, u8 page_shift, - struct mlx5_core_mkey *umr_mkey, - dma_addr_t filler_addr) +static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev, + u64 npages, u8 page_shift, u32 *umr_mkey, + dma_addr_t filler_addr) { struct mlx5_mtt *mtt; int inlen; @@ -284,12 +322,59 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, return err; } +static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, + u64 nentries, + u32 *umr_mkey) +{ + int inlen; + void *mkc; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); + mlx5e_mkey_set_relaxed_ordering(mdev, mkc); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); + MLX5_SET(mkc, mkc, translations_octword_size, nentries); + MLX5_SET(mkc, mkc, length64, 1); + err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); + + kvfree(in); + return err; +} + static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); - return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey, - rq->wqe_overflow.addr); + return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT, + &rq->umr_mkey, rq->wqe_overflow.addr); +} + +static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, + struct mlx5e_rq *rq) +{ + u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + + if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) { + mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", + max_klm_size, rq->mpwqe.shampo->hd_per_wq); + return -EINVAL; + } + return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + &rq->mpwqe.shampo->mkey); } static u64 
mlx5e_get_mpwqe_offset(u16 wqe_ix) @@ -403,6 +488,65 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); } +static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rqp, + struct mlx5e_rq *rq, + u32 *pool_size, + int node) +{ + void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); + int wq_size; + int err; + + if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + return 0; + err = mlx5e_rq_shampo_hd_alloc(rq, node); + if (err) + goto out; + rq->mpwqe.shampo->hd_per_wq = + mlx5e_shampo_hd_per_wq(mdev, params, rqp); + err = mlx5e_create_rq_hd_umr_mkey(mdev, rq); + if (err) + goto err_shampo_hd; + err = mlx5e_rq_shampo_hd_info_alloc(rq, node); + if (err) + goto err_shampo_info; + rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node); + if (!rq->hw_gro_data) { + err = -ENOMEM; + goto err_hw_gro_data; + } + rq->mpwqe.shampo->key = + cpu_to_be32(rq->mpwqe.shampo->mkey); + rq->mpwqe.shampo->hd_per_wqe = + mlx5e_shampo_hd_per_wqe(mdev, params, rqp); + wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); + *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + return 0; + +err_hw_gro_data: + mlx5e_rq_shampo_hd_info_free(rq); +err_shampo_info: + mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey); +err_shampo_hd: + mlx5e_rq_shampo_hd_free(rq); +out: + return err; +} + +static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq) +{ + if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + return; + + kvfree(rq->hw_gro_data); + mlx5e_rq_shampo_hd_info_free(rq); + mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey); + mlx5e_rq_shampo_hd_free(rq); +} + static int mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, @@ -455,11 +599,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_drop_page; - rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); + rq->mkey_be = cpu_to_be32(rq->umr_mkey); err = mlx5e_rq_alloc_mpwqe_info(rq, node); if (err) goto err_rq_mkey; + + err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node); + if (err) + goto err_free_by_rq_type; + break; default: /* MLX5_WQ_TYPE_CYCLIC */ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, @@ -487,7 +636,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, if (err) goto err_rq_frags; - rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); + rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey); } if (xsk) { @@ -512,14 +661,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, if (IS_ERR(rq->page_pool)) { err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; - goto err_free_by_rq_type; + goto err_free_shampo; } if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); } if (err) - goto err_free_by_rq_type; + goto err_free_shampo; for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { @@ -528,8 +677,10 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, u32 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; u64 dma_offset = mlx5e_get_mpwqe_offset(i); + u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ? 
+ 0 : rq->buff.headroom; - wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); + wqe->data[0].addr = cpu_to_be64(dma_offset + headroom); wqe->data[0].byte_count = cpu_to_be32(byte_count); wqe->data[0].lkey = rq->mkey_be; } else { @@ -569,12 +720,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, return 0; +err_free_shampo: + mlx5e_rq_free_shampo(rq); err_free_by_rq_type: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); err_rq_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(mdev, rq->umr_mkey); err_rq_drop_page: mlx5e_free_mpwqe_rq_drop_page(rq); break; @@ -607,8 +760,9 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); - mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey); mlx5e_free_mpwqe_rq_drop_page(rq); + mlx5e_rq_free_shampo(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ kvfree(rq->wqe.frags); @@ -662,6 +816,12 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + MLX5_SET(wq, wq, log_headers_buffer_entry_num, + order_base_2(rq->mpwqe.shampo->hd_per_wq)); + MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey); + } + mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); @@ -801,6 +961,15 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) head = mlx5_wq_ll_get_wqe_next_ix(wq, head); } + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + u16 len; + + len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) & + (rq->mpwqe.shampo->hd_per_wq - 1); + mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false); + rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci; + } + rq->mpwqe.actual_wq_head = wq->head; rq->mpwqe.umr_in_progress = 0; rq->mpwqe.umr_completed = 0; @@ -826,6 +995,10 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) mlx5_wq_ll_pop(wq, wqe_ix_be, &wqe->next.next_wqe_index); } + + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq, + 0, true); } else { struct mlx5_wq_cyc *wq = &rq->wqe.wq; @@ -845,6 +1018,9 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, struct mlx5_core_dev *mdev = rq->mdev; int err; + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) + __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state); + err = mlx5e_alloc_rq(params, xsk, param, node, rq); if (err) return err; @@ -930,9 +1106,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + size_t size; - xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, - GFP_KERNEL, numa); + size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); + xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) return -ENOMEM; @@ -946,10 +1123,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + size_t size; int err; - sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, - GFP_KERNEL, numa); + size = array_size(sizeof(*sq->db.wqe_info), wq_sz); + sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, 
numa); if (!sq->db.wqe_info) return -ENOMEM; @@ -1298,7 +1476,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; @@ -1308,10 +1487,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, if (err) return err; - if (qos_queue_group_id) - sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; - else - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stats = sq_stats; csp.tisn = tisn; csp.tis_lst_sz = 1; @@ -1705,6 +1881,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) mlx5e_close_cq(&c->sq[tc].cq); } +static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq) +{ + int tc; + + for (tc = 0; tc < TC_MAX_QUEUE; tc++) + if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count) + return tc; + + WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq); + return -ENOENT; +} + +static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix, + u32 *hw_id) +{ + int tc; + + if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL || + !params->mqprio.channel.rl) { + *hw_id = 0; + return 0; + } + + tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix); + if (tc < 0) + return tc; + + return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id); +} + static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1713,9 +1919,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; + u32 qos_queue_group_id; + + err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); + if (err) + goto err_close_sqs; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); + params, &cparam->txq_sq, &c->sq[tc], tc, + qos_queue_group_id, + &c->priv->channel_stats[c->ix].sq[tc]); if (err) goto err_close_sqs; } @@ -1991,7 +2204,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; @@ -2185,17 +2398,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) chs->num = 0; } -static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) +static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv) { struct mlx5e_rx_res *res = priv->rx_res; - struct mlx5e_lro_param lro_param; - - lro_param = mlx5e_get_lro_param(&priv->channels.params); - return mlx5e_rx_res_lro_set_param(res, &lro_param); + return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge); } -static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); +static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge); static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 mtu) @@ -2340,6 +2550,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) netdev_warn(netdev, 
"netif_set_real_num_rx_queues failed, %d\n", err); goto err_txqs; } + if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) { + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + priv->mqprio_rl = priv->channels.params.mqprio.channel.rl; + } return 0; @@ -2901,15 +3118,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc) { params->mqprio.mode = TC_MQPRIO_MODE_DCB; params->mqprio.num_tc = num_tc; + params->mqprio.channel.rl = NULL; mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, params->num_channels); } static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt *qopt, + struct mlx5e_mqprio_rl *rl) { params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; params->mqprio.num_tc = qopt->num_tc; + params->mqprio.channel.rl = rl; mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); } @@ -2969,9 +3189,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, netdev_err(netdev, "Min tx rate is not supported\n"); return -EINVAL; } + if (mqprio->max_rate[i]) { - netdev_err(netdev, "Max tx rate is not supported\n"); - return -EINVAL; + int err; + + err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]); + if (err) + return err; } if (mqprio->qopt.offset[i] != agg_count) { @@ -2990,11 +3214,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, return 0; } +static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio) +{ + int tc; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) + if (mqprio->max_rate[tc]) + return true; + return false; +} + static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, struct tc_mqprio_qopt_offload *mqprio) { mlx5e_fp_preactivate preactivate; struct mlx5e_params new_params; + struct mlx5e_mqprio_rl *rl; bool nch_changed; int err; @@ -3002,13 +3237,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, if (err) return err; + rl = NULL; + if (mlx5e_mqprio_rate_limit(mqprio)) { + rl = mlx5e_mqprio_rl_alloc(); + if (!rl) + return -ENOMEM; + err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc, + mqprio->max_rate); + if (err) { + mlx5e_mqprio_rl_free(rl); + return err; + } + } + new_params = priv->channels.params; - mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt); + mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl); nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; preactivate = nch_changed ? 
mlx5e_num_channels_changed_ctx : mlx5e_update_netdev_queues_ctx; - return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + if (err && rl) { + mlx5e_mqprio_rl_cleanup(rl); + mlx5e_mqprio_rl_free(rl); + } + + return err; } static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, @@ -3226,7 +3480,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) return -EADDRNOTAVAIL; netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, saddr->sa_data); + eth_hw_addr_set(netdev, saddr->sa_data); netif_addr_unlock_bh(netdev); mlx5e_nic_set_rx_mode(priv); @@ -3270,16 +3524,59 @@ static int set_feature_lro(struct net_device *netdev, bool enable) } new_params = *cur_params; - new_params.lro_en = enable; - if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == - mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) - reset = false; + if (enable) + new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO; + else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO) + new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE; + else + goto out; + + if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO && + new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) { + if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) + reset = false; + } } err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_modify_tirs_lro_ctx, NULL, reset); + mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); +out: + mutex_unlock(&priv->state_lock); + return err; +} + +static int set_feature_hw_gro(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params new_params; + bool reset = true; + int err = 0; + + mutex_lock(&priv->state_lock); + new_params = priv->channels.params; + + if (enable) { + if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n"); + err = -EINVAL; + goto out; + } + new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO; + new_params.packet_merge.shampo.match_criteria_type = + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED; + new_params.packet_merge.shampo.alignment_granularity = + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE; + } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE; + } else { + goto out; + } + + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; @@ -3458,6 +3755,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc); @@ -3518,6 +3816,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); features &= ~NETIF_F_LRO; } + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "Disabling 
HW-GRO, not supported in legacy RQ\n"); + features &= ~NETIF_F_GRO_HW; + } } if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { @@ -3606,7 +3908,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, goto out; } - if (params->lro_en) + if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO) reset = false; if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { @@ -4063,8 +4365,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) struct net_device *netdev = priv->netdev; struct mlx5e_params new_params; - if (priv->channels.params.lro_en) { - netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); + if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n"); return -EINVAL; } @@ -4321,9 +4623,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { /* No XSK params: checking the availability of striding RQ in general. */ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) - params->lro_en = !slow_pci_heuristic(mdev); + params->packet_merge.type = slow_pci_heuristic(mdev) ? + MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO; } - params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? @@ -4351,13 +4654,17 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + u8 addr[ETH_ALEN]; - mlx5_query_mac_address(priv->mdev, netdev->dev_addr); - if (is_zero_ether_addr(netdev->dev_addr) && + mlx5_query_mac_address(priv->mdev, addr); + if (is_zero_ether_addr(addr) && !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { eth_hw_addr_random(netdev); mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); + return; } + + eth_hw_addr_set(netdev, addr); } static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, @@ -4454,6 +4761,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + if (!!MLX5_CAP_GEN(mdev, shampo) && + mlx5e_check_fragmented_striding_rq_cap(mdev)) + netdev->hw_features |= NETIF_F_GRO_HW; + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4504,6 +4815,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (fcs_enabled) netdev->features &= ~NETIF_F_RXALL; netdev->features &= ~NETIF_F_LRO; + netdev->features &= ~NETIF_F_GRO_HW; netdev->features &= ~NETIF_F_RXFCS; #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) @@ -4608,7 +4920,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; enum mlx5e_rx_res_features features; - struct mlx5e_lro_param lro_param; int err; priv->rx_res = mlx5e_rx_res_alloc(); @@ -4626,9 +4937,9 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP; if (priv->channels.params.tunneled_offload_en) features |= MLX5E_RX_RES_FEATURE_INNER_FT; - lro_param = 
mlx5e_get_lro_param(&priv->channels.params); err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, - priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (err) goto err_close_drop_rq; @@ -4862,6 +5173,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats); + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + memset(priv, 0, sizeof(*priv)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0684ac6699b2de..e58a9ec4255322 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -53,6 +53,7 @@ #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" #include "en_accel/ipsec.h" +#include "en/tc/int_port.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) @@ -793,7 +794,6 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup) static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_lro_param lro_param; int err; priv->rx_res = mlx5e_rx_res_alloc(); @@ -808,9 +808,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return err; } - lro_param = mlx5e_get_lro_param(&priv->channels.params); err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (err) goto err_close_drop_rq; @@ -858,12 +858,22 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) { + int err; + mlx5e_create_q_counters(priv); - return mlx5e_init_rep_rx(priv); + err = mlx5e_init_rep_rx(priv); + if (err) + goto out; + + mlx5e_tc_int_port_init_rep_rx(priv); + +out: + return err; } static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) { + mlx5e_tc_int_port_cleanup_rep_rx(priv); mlx5e_cleanup_rep_rx(priv); mlx5e_destroy_q_counters(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 48a203a9e7d9f2..b01dacb6f527c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -58,6 +58,7 @@ struct mlx5e_neigh_update_table { }; struct mlx5_tc_ct_priv; +struct mlx5_tc_int_port_priv; struct mlx5e_rep_bond; struct mlx5e_tc_tun_encap; struct mlx5e_post_act; @@ -98,6 +99,9 @@ struct mlx5_rep_uplink_priv { /* tc tunneling encapsulation private data */ struct mlx5e_tc_tun_encap *encap; + + /* OVS internal port support */ + struct mlx5e_tc_int_port_priv *int_port_priv; }; struct mlx5e_rep_priv { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 29a6586ef28dc1..96967b0a24418c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,9 +33,12 @@ #include #include #include +#include #include #include #include +#include +#include #include "en.h" #include "en/txrx.h" #include "en_tc.h" @@ -62,10 +65,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w u16 cqe_bcnt, u32 head_offset, u32 page_idx); static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, 
struct mlx5_cqe64 *cqe); static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { .handle_rx_cqe = mlx5e_handle_rx_cqe, .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, + .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo, }; static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) @@ -185,8 +190,9 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, mlx5e_read_mini_arr_slot(wq, cqd, cqcc); mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); - INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, &cqd->title); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, + rq, &cqd->title); } mlx5e_cqes_update_owner(wq, cqcc - wq->cc); wq->cc = cqcc; @@ -206,8 +212,9 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, mlx5e_read_title_slot(rq, wq, cc); mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); mlx5e_decompress_cqe(rq, wq, cc); - INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, &cqd->title); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, + rq, &cqd->title); cqd->mini_arr_idx++; return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; @@ -448,13 +455,13 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, static inline void mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, struct mlx5e_dma_info *dma_info, - int offset_from, u32 headlen) + int offset_from, int dma_offset, u32 headlen) { const void *from = page_address(dma_info->page) + offset_from; /* Aligning len to sizeof(long) optimizes memcpy performance */ unsigned int len = ALIGN(headlen, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, + dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, from, len); } @@ -494,6 +501,157 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) mlx5_wq_ll_update_db_record(wq); } +/* This function returns the size of the continuous free space inside a bitmap + * that starts from first and no longer than len including circular ones. 
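+ * The free window may wrap around the end of the bitmap; in that case the run continues from bit 0 up to the first set (in-use) bit.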
+ */ +static int bitmap_find_window(unsigned long *bitmap, int len, + int bitmap_size, int first) +{ + int next_one, count; + + next_one = find_next_bit(bitmap, bitmap_size, first); + if (next_one == bitmap_size) { + if (bitmap_size - first >= len) + return len; + next_one = find_next_bit(bitmap, bitmap_size, 0); + count = next_one + bitmap_size - first; + } else { + count = next_one - first; + } + + return min(len, count); +} + +static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, + __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) +{ + memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); + umr_wqe->ctrl.opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + umr_wqe->ctrl.umr_mkey = key; + umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) + | MLX5E_KLM_UMR_DS_CNT(klm_len)); + umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; + umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); + umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); + umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); +} + +static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, + struct mlx5e_icosq *sq, + u16 klm_entries, u16 index) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries; + u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; + struct page *page = shampo->last_page; + u64 addr = shampo->last_addr; + struct mlx5e_dma_info *dma_info; + struct mlx5e_umr_wqe *umr_wqe; + int headroom; + + headroom = rq->buff.headroom; + new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); + entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT); + wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); + pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); + umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); + build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); + + for (i = 0; i < entries; i++, index++) { + dma_info = &shampo->info[index]; + if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < + MLX5_UMR_KLM_ALIGNMENT)) + goto update_klm; + header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; + if (!(header_offset & (PAGE_SIZE - 1))) { + err = mlx5e_page_alloc(rq, dma_info); + if (unlikely(err)) + goto err_unmap; + addr = dma_info->addr; + page = dma_info->page; + } else { + dma_info->addr = addr + header_offset; + dma_info->page = page; + } + +update_klm: + umr_wqe->inline_klms[i].bcount = + cpu_to_be32(MLX5E_RX_MAX_HEAD); + umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); + umr_wqe->inline_klms[i].va = + cpu_to_be64(dma_info->addr + headroom); + } + + sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, + .num_wqebbs = wqe_bbs, + .shampo.len = new_entries, + }; + + shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); + shampo->last_page = page; + shampo->last_addr = addr; + sq->pc += wqe_bbs; + sq->doorbell_cseg = &umr_wqe->ctrl; + + return 0; + +err_unmap: + while (--i >= 0) { + if (--index < 0) + index = shampo->hd_per_wq - 1; + dma_info = &shampo->info[index]; + if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { + dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); + mlx5e_page_release(rq, dma_info, true); + } + } + rq->stats->buff_alloc_err++; + return err; +} + +static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u16 klm_entries, 
num_wqe, index, entries_before; + struct mlx5e_icosq *sq = rq->icosq; + int i, err, max_klm_entries, len; + + max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); + klm_entries = bitmap_find_window(shampo->bitmap, + shampo->hd_per_wqe, + shampo->hd_per_wq, shampo->pi); + if (!klm_entries) + return 0; + + klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); + index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT); + entries_before = shampo->hd_per_wq - index; + + if (unlikely(entries_before < klm_entries)) + num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + + DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); + else + num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); + + for (i = 0; i < num_wqe; i++) { + len = (klm_entries > max_klm_entries) ? max_klm_entries : + klm_entries; + if (unlikely(index + len > shampo->hd_per_wq)) + len = shampo->hd_per_wq - index; + err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); + if (unlikely(err)) + return err; + index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); + klm_entries -= len; + } + + return 0; +} + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -514,6 +672,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) goto err; } + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + err = mlx5e_alloc_rx_hd_mpwqe(rq); + if (unlikely(err)) + goto err; + } + pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS); umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts)); @@ -558,6 +722,44 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) return err; } +/* This function is responsible to dealloc SHAMPO header buffer. + * close == true specifies that we are in the middle of closing RQ operation so + * we go over all the entries and if they are not in use we free them, + * otherwise we only go over a specific range inside the header buffer that are + * not in use. 
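+ * In both cases the decision is driven by shampo->bitmap: with close == true entries whose bit is clear are skipped, with close == false every entry in the [start, start + len) window is visited.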
+ */ +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + int hd_per_wq = shampo->hd_per_wq; + struct page *deleted_page = NULL; + struct mlx5e_dma_info *hd_info; + int i, index = start; + + for (i = 0; i < len; i++, index++) { + if (index == hd_per_wq) + index = 0; + + if (close && !test_bit(index, shampo->bitmap)) + continue; + + hd_info = &shampo->info[index]; + hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); + if (hd_info->page != deleted_page) { + deleted_page = hd_info->page; + mlx5e_page_release(rq, hd_info, false); + } + } + + if (start + len > hd_per_wq) { + len -= hd_per_wq - start; + bitmap_clear(shampo->bitmap, start, hd_per_wq - start); + start = 0; + } + + bitmap_clear(shampo->bitmap, start, len); +} + static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -629,6 +831,28 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) sq->cc = sqcc; } +static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr, + struct mlx5e_icosq *sq) +{ + struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq); + struct mlx5e_shampo_hd *shampo; + /* assume 1:1 relationship between RQ and icosq */ + struct mlx5e_rq *rq = &c->rq; + int end, from, len = umr.len; + + shampo = rq->mpwqe.shampo; + end = shampo->hd_per_wq; + from = shampo->ci; + if (from + len > shampo->hd_per_wq) { + len -= end - from; + bitmap_set(shampo->bitmap, from, end - from); + from = 0; + } + + bitmap_set(shampo->bitmap, from, len); + shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1); +} + int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); @@ -685,6 +909,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; case MLX5E_ICOSQ_WQE_NOP: break; + case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR: + mlx5e_handle_shampo_hd_umr(wi->shampo, sq); + break; #ifdef CONFIG_MLX5_EN_TLS case MLX5E_ICOSQ_WQE_UMR_TLS: break; @@ -782,8 +1009,8 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) if (tcp_ack) { tcp->ack = 1; - tcp->ack_seq = cqe->lro_ack_seq_num; - tcp->window = cqe->lro_tcp_win; + tcp->ack_seq = cqe->lro.ack_seq_num; + tcp->window = cqe->lro.tcp_win; } } @@ -809,7 +1036,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, tcp = ip_p + sizeof(struct iphdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - ipv4->ttl = cqe->lro_min_ttl; + ipv4->ttl = cqe->lro.min_ttl; ipv4->tot_len = cpu_to_be16(tot_len); ipv4->check = 0; ipv4->check = ip_fast_csum((unsigned char *)ipv4, @@ -829,7 +1056,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, tcp = ip_p + sizeof(struct ipv6hdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - ipv6->hop_limit = cqe->lro_min_ttl; + ipv6->hop_limit = cqe->lro.min_ttl; ipv6->payload_len = cpu_to_be16(payload_len); mlx5e_lro_update_tcp_hdr(cqe, tcp); @@ -841,6 +1068,142 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, } } +static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) +{ + struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; + u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + + return page_address(last_head->page) + head_offset; +} + +static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) +{ + int udp_off = rq->hw_gro_data->fk.control.thoff; + struct 
sk_buff *skb = rq->hw_gro_data->skb; + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + udp_off); + uh->len = htons(skb->len - udp_off); + + if (uh->check) + uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr, + ipv4->daddr, 0); + + skb->csum_start = (unsigned char *)uh - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; +} + +static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6) +{ + int udp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + udp_off); + uh->len = htons(skb->len - udp_off); + + if (uh->check) + uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr, + &ipv6->daddr, 0); + + skb->csum_start = (unsigned char *)uh - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; +} + +static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct tcphdr *skb_tcp_hd) +{ + u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + struct tcphdr *last_tcp_hd; + void *last_hd_addr; + + last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); + last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff; + tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH); +} + +static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, + struct mlx5_cqe64 *cqe, bool match) +{ + int tcp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct tcphdr *tcp; + + tcp = (struct tcphdr *)(skb->data + tcp_off); + if (match) + mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); + + tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr, + ipv4->daddr, 0); + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; + + skb->csum_start = (unsigned char *)tcp - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + + if (tcp->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; +} + +static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, + struct mlx5_cqe64 *cqe, bool match) +{ + int tcp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct tcphdr *tcp; + + tcp = (struct tcphdr *)(skb->data + tcp_off); + if (match) + mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); + + tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr, + &ipv6->daddr, 0); + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + skb->csum_start = (unsigned char *)tcp - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + + if (tcp->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; +} + +static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) +{ + bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)); + struct sk_buff *skb = rq->hw_gro_data->skb; + + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + skb->ip_summed = CHECKSUM_PARTIAL; + + if (is_ipv4) { + int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr); + struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff); + __be16 newlen = htons(skb->len - nhoff); + + csum_replace2(&ipv4->check, ipv4->tot_len, newlen); + ipv4->tot_len = newlen; + + if (ipv4->protocol == IPPROTO_TCP) + 
mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match); + else + mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4); + } else { + int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr); + struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff); + + ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6)); + + if (ipv6->nexthdr == IPPROTO_TCP) + mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match); + else + mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6); + } +} + static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { @@ -1090,6 +1453,27 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, stats->mcast_packets++; } +static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + struct mlx5e_rq_stats *stats = rq->stats; + + stats->packets++; + stats->gro_packets++; + stats->bytes += cqe_bcnt; + stats->gro_bytes += cqe_bcnt; + if (NAPI_GRO_CB(skb)->count != 1) + return; + mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); + skb_reset_network_header(skb); + if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) { + napi_gro_receive(rq->cq.napi, skb); + rq->hw_gro_data->skb = NULL; + } +} + static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, @@ -1199,7 +1583,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } /* copy header */ - mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset, + headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@ -1275,7 +1660,6 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5e_tc_update_priv tc_priv = {}; struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; struct sk_buff *skb; @@ -1311,15 +1695,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) if (rep->vlan && skb_vlan_tag_present(skb)) skb_vlan_pop(skb); - if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) && - !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) { - dev_kfree_skb_any(skb); - goto free_wqe; - } - - napi_gro_receive(rq->cq.napi, skb); - - mlx5_rep_tc_post_napi_receive(&tc_priv); + mlx5e_rep_tc_receive(cqe, rq, skb); free_wqe: mlx5e_free_rx_wqe(rq, wi, true); @@ -1336,7 +1712,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; - struct mlx5e_tc_update_priv tc_priv = {}; struct mlx5e_rx_wqe_ll *wqe; struct mlx5_wq_ll *wq; struct sk_buff *skb; @@ -1369,15 +1744,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); - if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) && - !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) { - dev_kfree_skb_any(skb); - goto mpwrq_cqe_out; - } - - napi_gro_receive(rq->cq.napi, skb); - - mlx5_rep_tc_post_napi_receive(&tc_priv); + mlx5e_rep_tc_receive(cqe, rq, skb); mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) @@ -1395,6 +1762,30 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { }; #endif +static void 
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + u32 data_bcnt, u32 data_offset) +{ + net_prefetchw(skb->data); + + while (data_bcnt) { + u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt); + unsigned int truesize; + + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + truesize = pg_consumed_bytes; + else + truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); + + mlx5e_add_skb_frag(rq, skb, di, data_offset, + pg_consumed_bytes, truesize); + + data_bcnt -= pg_consumed_bytes; + data_offset = 0; + di++; + } +} + static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) @@ -1420,20 +1811,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w frag_offset -= PAGE_SIZE; } - while (byte_cnt) { - u32 pg_consumed_bytes = - min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - unsigned int truesize = - ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - - mlx5e_add_skb_frag(rq, skb, di, frag_offset, - pg_consumed_bytes, truesize); - byte_cnt -= pg_consumed_bytes; - frag_offset = 0; - di++; - } + mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset); /* copy header */ - mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@ -1487,6 +1867,181 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return skb; } +static void +mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + struct mlx5_cqe64 *cqe, u16 header_index) +{ + struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; + u16 head_offset = head->addr & (PAGE_SIZE - 1); + u16 head_size = cqe->shampo.header_size; + u16 rx_headroom = rq->buff.headroom; + struct sk_buff *skb = NULL; + void *hdr, *data; + u32 frag_size; + + hdr = page_address(head->page) + head_offset; + data = hdr + rx_headroom; + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); + + if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { + /* build SKB around header */ + dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); + prefetchw(hdr); + prefetch(data); + skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); + + if (unlikely(!skb)) + return; + + /* queue up for recycling/reuse */ + page_ref_inc(head->page); + + } else { + /* allocate SKB and copy header for large header */ + rq->stats->gro_large_hds++; + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(head_size, sizeof(long))); + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return; + } + + prefetchw(skb->data); + mlx5e_copy_skb_header(rq->pdev, skb, head, + head_offset + rx_headroom, + rx_headroom, head_size); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += head_size; + skb->len += head_size; + } + rq->hw_gro_data->skb = skb; + NAPI_GRO_CB(skb)->count = 1; + skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; +} + +static void +mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz) +{ + skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + unsigned int frag_size = skb_frag_size(last_frag); + unsigned int frag_truesize; + + frag_truesize = ALIGN(frag_size, BIT(log_stride_sz)); + 
skb->truesize += frag_truesize - frag_size; +} + +static void +mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) +{ + struct sk_buff *skb = rq->hw_gro_data->skb; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->gro_skbs++; + if (likely(skb_shinfo(skb)->nr_frags)) + mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz); + if (NAPI_GRO_CB(skb)->count > 1) + mlx5e_shampo_update_hdr(rq, cqe, match); + napi_gro_receive(rq->cq.napi, skb); + rq->hw_gro_data->skb = NULL; +} + +static bool +mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) +{ + int nr_frags = skb_shinfo(skb)->nr_frags; + + return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE; +} + +static void +mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u64 addr = shampo->info[header_index].addr; + + if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { + shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE); + mlx5e_page_release(rq, &shampo->info[header_index], true); + } + bitmap_clear(shampo->bitmap, header_index, 1); +} + +static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; + u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset); + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); + u32 data_offset = wqe_offset & (PAGE_SIZE - 1); + u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); + u16 wqe_id = be16_to_cpu(cqe->wqe_id); + u32 page_idx = wqe_offset >> PAGE_SHIFT; + struct sk_buff **skb = &rq->hw_gro_data->skb; + bool flush = cqe->shampo.flush; + bool match = cqe->shampo.match; + struct mlx5e_rq_stats *stats = rq->stats; + struct mlx5e_rx_wqe_ll *wqe; + struct mlx5e_dma_info *di; + struct mlx5e_mpw_info *wi; + struct mlx5_wq_ll *wq; + + wi = &rq->mpwqe.info[wqe_id]; + wi->consumed_strides += cstrides; + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + trigger_report(rq, cqe); + stats->wqe_err++; + goto mpwrq_cqe_out; + } + + if (unlikely(mpwrq_is_filler_cqe(cqe))) { + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; + goto mpwrq_cqe_out; + } + + stats->gro_match_packets += match; + + if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) { + match = false; + mlx5e_shampo_flush_skb(rq, cqe, match); + } + + if (!*skb) { + mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + if (unlikely(!*skb)) + goto free_hd_entry; + } else { + NAPI_GRO_CB(*skb)->count++; + if (NAPI_GRO_CB(*skb)->count == 2 && + rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) { + void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); + int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff - + sizeof(struct iphdr); + struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff); + + rq->hw_gro_data->second_ip_id = ntohs(iph->id); + } + } + + di = &wi->umr.dma_info[page_idx]; + mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + + mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); + if (flush) + mlx5e_shampo_flush_skb(rq, cqe, match); +free_hd_entry: + mlx5e_free_rx_shampo_hd_entry(rq, header_index); +mpwrq_cqe_out: + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) + return; + + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); + mlx5e_free_rx_mpwqe(rq, wi, true); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); +} + static void 
mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); @@ -1579,11 +2134,15 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) mlx5_cqwq_pop(cqwq); - INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, cqe); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo, + rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); out: + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb) + mlx5e_shampo_flush_skb(rq, NULL, false); + if (rcu_access_pointer(rq->xdp_prog)) mlx5e_xdp_rx_poll_complete(rq); @@ -1784,15 +2343,24 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; if (mlx5_fpga_is_ipsec_device(mdev)) { netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); return -EINVAL; } - if (!rq->handle_rx_cqe) { - netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); - return -EINVAL; + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; + if (!rq->handle_rx_cqe) { + netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n"); + return -EINVAL; + } + } else { + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; + if (!rq->handle_rx_cqe) { + netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); + return -EINVAL; + } } + break; default: /* MLX5_WQ_TYPE_CYCLIC */ rq->wqe.skb_from_cqe = xsk ? diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index ce8ab1f0187693..8c9163d2c64684 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -35,30 +35,7 @@ #include #include "en.h" #include "en/port.h" - -enum { - MLX5E_ST_LINK_STATE, - MLX5E_ST_LINK_SPEED, - MLX5E_ST_HEALTH_INFO, -#ifdef CONFIG_INET - MLX5E_ST_LOOPBACK, -#endif - MLX5E_ST_NUM, -}; - -const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = { - "Link Test", - "Speed Test", - "Health Test", -#ifdef CONFIG_INET - "Loopback Test", -#endif -}; - -int mlx5e_self_test_num(struct mlx5e_priv *priv) -{ - return ARRAY_SIZE(mlx5e_self_tests); -} +#include "eswitch.h" static int mlx5e_test_health_info(struct mlx5e_priv *priv) { @@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, mlx5e_refresh_tirs(priv, false, false); } +static int mlx5e_cond_loopback(struct mlx5e_priv *priv) +{ + if (is_mdev_switchdev_mode(priv->mdev)) + return -EOPNOTSUPP; + + return 0; +} + #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) static int mlx5e_test_loopback(struct mlx5e_priv *priv) { @@ -313,37 +298,47 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv) } #endif -static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = { - mlx5e_test_link_state, - mlx5e_test_link_speed, - mlx5e_test_health_info, +typedef int (*mlx5e_st_func)(struct mlx5e_priv *); + +struct mlx5e_st { + char name[ETH_GSTRING_LEN]; + mlx5e_st_func st_func; + mlx5e_st_func cond_func; +}; + +static struct mlx5e_st mlx5e_sts[] = { + { "Link Test", mlx5e_test_link_state }, + { "Speed Test", mlx5e_test_link_speed }, + { "Health Test", mlx5e_test_health_info }, #ifdef CONFIG_INET - 
mlx5e_test_loopback, + { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback }, #endif }; +#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts) + void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) { struct mlx5e_priv *priv = netdev_priv(ndev); - int i; - - memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM); + int i, count = 0; mutex_lock(&priv->state_lock); netdev_info(ndev, "Self test begin..\n"); for (i = 0; i < MLX5E_ST_NUM; i++) { - netdev_info(ndev, "\t[%d] %s start..\n", - i, mlx5e_self_tests[i]); - buf[i] = mlx5e_st_func[i](priv); - netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", - i, mlx5e_self_tests[i], buf[i]); + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); + buf[count] = st.st_func(priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); } mutex_unlock(&priv->state_lock); - for (i = 0; i < MLX5E_ST_NUM; i++) { + for (i = 0; i < count; i++) { if (buf[i]) { etest->flags |= ETH_TEST_FL_FAILED; break; @@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, netdev_info(ndev, "Self test out: status flags(0x%x)\n", etest->flags); } + +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data) +{ + int i, count = 0; + + for (i = 0; i < MLX5E_ST_NUM; i++) { + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + if (data) + strcpy(data + count * ETH_GSTRING_LEN, st.name); + count++; + } + return count; +} + +int mlx5e_self_test_num(struct mlx5e_priv *priv) +{ + return mlx5e_self_test_fill_strings(priv, NULL); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e1dd17019030e6..2a9bfc3ffa2ec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -128,6 +128,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, @@ -313,6 +318,11 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_gro_packets += rq_stats->gro_packets; + s->rx_gro_bytes += rq_stats->gro_bytes; + s->rx_gro_skbs += rq_stats->gro_skbs; + s->rx_gro_match_packets += rq_stats->gro_match_packets; + s->rx_gro_large_hds += rq_stats->gro_large_hds; s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; @@ -1760,6 +1770,11 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { 
MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 139e59f30db000..2c1ed5b81be676 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -144,6 +144,11 @@ struct mlx5e_sw_stats { u64 tx_mpwqe_pkts; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_gro_packets; + u64 rx_gro_bytes; + u64 rx_gro_skbs; + u64 rx_gro_match_packets; + u64 rx_gro_large_hds; u64 rx_mcast_packets; u64 rx_ecn_mark; u64 rx_removed_vlan_packets; @@ -322,6 +327,11 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 gro_packets; + u64 gro_bytes; + u64 gro_skbs; + u64 gro_match_packets; + u64 gro_large_hds; u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 129ff7e0d65cc4..835caa1c7b7459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -59,7 +60,6 @@ #include "en/mapping.h" #include "en/tc_ct.h" #include "en/mod_hdr.h" -#include "en/tc_priv.h" #include "en/tc_tun_encap.h" #include "en/tc/sample.h" #include "lib/devcom.h" @@ -67,8 +67,8 @@ #include "lib/fs_chains.h" #include "diag/en_tc_tracepoint.h" #include -#include "lag.h" -#include "lag_mp.h" +#include "lag/lag.h" +#include "lag/mp.h" #define nic_chains(priv) ((priv)->fs.tc.chains) #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) @@ -231,6 +231,23 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, return err; } +struct mlx5e_tc_int_port_priv * +mlx5e_get_int_port_priv(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + + if (is_mdev_switchdev_mode(priv->mdev)) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + + return uplink_priv->int_port_priv; + } + + return NULL; +} + static struct mlx5_tc_ct_priv * get_ct_priv(struct mlx5e_priv *priv) { @@ -248,7 +265,6 @@ get_ct_priv(struct mlx5e_priv *priv) return priv->fs.tc.ct; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) static struct mlx5e_tc_psample * get_sample_priv(struct mlx5e_priv *priv) { @@ -265,7 +281,6 @@ get_sample_priv(struct mlx5e_priv *priv) return NULL; } -#endif struct mlx5_flow_handle * mlx5_tc_rule_insert(struct mlx5e_priv *priv, @@ -1148,11 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), flow, spec, attr, mod_hdr_acts); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) } else if (flow_flag_test(flow, SAMPLE)) { rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr, mlx5e_tc_get_flow_tun_id(flow)); -#endif } else { rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } @@ -1188,12 
+1201,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) if (flow_flag_test(flow, SAMPLE)) { mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); return; } -#endif if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -1390,6 +1401,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, int err = 0; int out_index; + parse_attr = attr->parse_attr; + esw_attr = attr->esw_attr; + /* We check chain range only for tc flows. * For ft flows, we checked attr->chain was originally 0 and set it to * FDB_FT_CHAIN which is outside tc range. @@ -1415,6 +1429,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5e_attach_decap_route(priv, flow); if (err) goto err_out; + + if (!attr->chain && esw_attr->int_port) { + /* If decap route device is internal port, change the + * source vport value in reg_c0 back to uplink just in + * case the rule performs goto chain > 0. If we have a miss + * on chain > 0 we want the metadata regs to hold the + * chain id so SW will resume handling of this packet + * from the proper chain. + */ + u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw, + esw_attr->in_rep->vport); + + err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts, + MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, + metadata); + if (err) + return err; + } } if (flow_flag_test(flow, L3_TO_L2_DECAP)) { @@ -1423,8 +1455,31 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, goto err_out; } - parse_attr = attr->parse_attr; - esw_attr = attr->esw_attr; + if (netif_is_ovs_master(parse_attr->filter_dev)) { + struct mlx5e_tc_int_port *int_port; + + if (attr->chain) { + NL_SET_ERR_MSG_MOD(extack, + "Internal port rule is only supported on chain 0"); + return -EOPNOTSUPP; + } + + if (attr->dest_chain) { + NL_SET_ERR_MSG_MOD(extack, + "Internal port rule offload doesn't support goto action"); + return -EOPNOTSUPP; + } + + int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), + parse_attr->filter_dev->ifindex, + flow_flag_test(flow, EGRESS) ? 
+ MLX5E_TC_INT_PORT_EGRESS : + MLX5E_TC_INT_PORT_INGRESS); + if (IS_ERR(int_port)) + return PTR_ERR(int_port); + + esw_attr->int_port = int_port; + } for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { struct net_device *out_dev; @@ -1447,7 +1502,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, goto err_out; if (esw_attr->dests[out_index].flags & - MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && + !esw_attr->dest_int_port) vf_tun = true; out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; @@ -1555,7 +1611,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { if (esw_attr->dests[out_index].flags & - MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && + !esw_attr->dest_int_port) vf_tun = true; if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) { mlx5e_detach_encap(priv, flow, out_index); @@ -1579,6 +1636,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) mlx5_fc_destroy(esw_attr->counter_dev, attr->counter); + if (esw_attr->int_port) + mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port); + + if (esw_attr->dest_int_port) + mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port); + if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); @@ -1690,8 +1753,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, if (opt->opt_class != htons(U16_MAX) || opt->type != U8_MAX) { - NL_SET_ERR_MSG(extack, - "Partial match of tunnel options in chain > 0 isn't supported"); + NL_SET_ERR_MSG_MOD(extack, + "Partial match of tunnel options in chain > 0 isn't supported"); netdev_warn(priv->netdev, "Partial match of tunnel options in chain > 0 isn't supported"); return -EOPNOTSUPP; @@ -1898,8 +1961,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, bool needs_mapping, sets_mapping; int err; - if (!mlx5e_is_eswitch_flow(flow)) + if (!mlx5e_is_eswitch_flow(flow)) { + NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported"); return -EOPNOTSUPP; + } needs_mapping = !!flow->attr->chain; sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f); @@ -1907,8 +1972,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, if ((needs_mapping || sets_mapping) && !mlx5_eswitch_reg_c1_loopback_enabled(esw)) { - NL_SET_ERR_MSG(extack, - "Chains on tunnel devices isn't supported without register loopback support"); + NL_SET_ERR_MSG_MOD(extack, + "Chains on tunnel devices isn't supported without register loopback support"); netdev_warn(priv->netdev, "Chains on tunnel devices isn't supported without register loopback support"); return -EOPNOTSUPP; @@ -2271,8 +2336,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, addr_type = match.key->addr_type; /* the HW doesn't support frag first/later */ - if (match.mask->flags & FLOW_DIS_FIRST_FRAG) + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { + NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported"); return -EOPNOTSUPP; + } if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); @@ -2439,8 +2506,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, switch (ip_proto) { case IPPROTO_ICMP: if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_ICMP)) + MLX5_FLEX_PROTO_ICMP)) { + NL_SET_ERR_MSG_MOD(extack, + "Match on Flex protocols for 
ICMP is not supported"); return -EOPNOTSUPP; + } MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type, match.mask->type); MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type, @@ -2452,8 +2522,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, break; case IPPROTO_ICMPV6: if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_ICMPV6)) + MLX5_FLEX_PROTO_ICMPV6)) { + NL_SET_ERR_MSG_MOD(extack, + "Match on Flex protocols for ICMPV6 is not supported"); return -EOPNOTSUPP; + } MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type, match.mask->type); MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type, @@ -2559,15 +2632,19 @@ static int pedit_header_offsets[] = { #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, - struct pedit_headers_action *hdrs) + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) { u32 *curr_pmask, *curr_pval; curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); - if (*curr_pmask & mask) /* disallow acting twice on the same location */ + if (*curr_pmask & mask) { /* disallow acting twice on the same location */ + NL_SET_ERR_MSG_MOD(extack, + "curr_pmask and new mask same. Acting twice on same location"); goto out_err; + } *curr_pmask |= mask; *curr_pval |= (val & mask); @@ -2900,7 +2977,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, val = act->mangle.val; offset = act->mangle.offset; - err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]); + err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack); if (err) goto out_err; @@ -2912,16 +2989,17 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, } static int -parse_pedit_to_reformat(struct mlx5e_priv *priv, - const struct flow_action_entry *act, +parse_pedit_to_reformat(const struct flow_action_entry *act, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { u32 mask, val, offset; u32 *p; - if (act->id != FLOW_ACTION_MANGLE) + if (act->id != FLOW_ACTION_MANGLE) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action id"); return -EOPNOTSUPP; + } if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); @@ -2945,7 +3023,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) - return parse_pedit_to_reformat(priv, act, parse_attr, extack); + return parse_pedit_to_reformat(act, parse_attr, extack); return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack); @@ -3027,10 +3105,10 @@ struct ipv6_hoplimit_word { __u8 hop_limit; }; -static int is_action_keys_supported(const struct flow_action_entry *act, - bool ct_flow, bool *modify_ip_header, - bool *modify_tuple, - struct netlink_ext_ack *extack) +static bool +is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow, + bool *modify_ip_header, bool *modify_tuple, + struct netlink_ext_ack *extack) { u32 mask, offset; u8 htype; @@ -3058,7 +3136,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv4 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { struct ipv6_hoplimit_word *hoplimit_word = @@ -3076,7 +3154,7 @@ static int 
is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv6 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) { @@ -3084,11 +3162,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of transport header ports with action ct"); - return -EOPNOTSUPP; + return false; } } - return 0; + return true; } static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, @@ -3135,7 +3213,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, void *headers_v; u16 ethertype; u8 ip_proto; - int i, err; + int i; headers_c = get_match_headers_criteria(actions, spec); headers_v = get_match_headers_value(actions, spec); @@ -3153,11 +3231,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, act->id != FLOW_ACTION_ADD) continue; - err = is_action_keys_supported(act, ct_flow, - &modify_ip_header, - &modify_tuple, extack); - if (err) - return err; + if (!is_action_keys_supported(act, ct_flow, + &modify_ip_header, + &modify_tuple, extack)) + return false; } if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, @@ -3178,37 +3255,65 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, return true; } -static bool actions_match_supported(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +static bool +actions_match_supported_fdb(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { - bool ct_flow = false, ct_clear = false; - u32 actions; + struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; + bool ct_flow, ct_clear; - ct_clear = flow->attr->ct_attr.ct_action & - TCA_CT_ACT_CLEAR; + ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; ct_flow = flow_flag_test(flow, CT) && !ct_clear; - actions = flow->attr->action; - if (mlx5e_is_eswitch_flow(flow)) { - if (flow->attr->esw_attr->split_count && ct_flow && - !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) { - /* All registers used by ct are cleared when using - * split rules. - */ - NL_SET_ERR_MSG_MOD(extack, - "Can't offload mirroring with action ct"); - return false; - } + if (esw_attr->split_count && ct_flow && + !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) { + /* All registers used by ct are cleared when using + * split rules. 
+ */ + NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct"); + return false; } - if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(priv, &parse_attr->spec, - flow_action, actions, - ct_flow, ct_clear, - extack); + if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); + netdev_warn_once(priv->netdev, + "current firmware doesn't support split rule for port mirroring\n"); + return false; + } + + return true; +} + +static bool +actions_match_supported(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + u32 actions = flow->attr->action; + bool ct_flow, ct_clear; + + ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; + ct_flow = flow_flag_test(flow, CT) && !ct_clear; + + if (!(actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); + return false; + } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + !modify_header_match_supported(priv, &parse_attr->spec, flow_action, + actions, ct_flow, ct_clear, extack)) + return false; + + if (mlx5e_is_eswitch_flow(flow) && + !actions_match_supported_fdb(priv, parse_attr, flow, extack)) + return false; return true; } @@ -3357,10 +3462,50 @@ static int validate_goto_chain(struct mlx5e_priv *priv, return 0; } -static int parse_tc_nic_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, +static int +actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct pedit_headers_action *hdrs, struct netlink_ext_ack *extack) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + enum mlx5_flow_namespace_type ns_type; + int err; + + if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits && + !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) + return 0; + + ns_type = get_flow_name_space(flow); + + err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs, + &attr->action, extack); + if (err) + return err; + + /* In case all pedit actions are skipped, remove the MOD_HDR flag. 
*/ + if (parse_attr->mod_hdr_acts.num_actions > 0) + return 0; + + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + + if (ns_type != MLX5_FLOW_NAMESPACE_FDB) + return 0; + + if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) + attr->esw_attr->split_count = 0; + + return 0; +} + +static int +parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; @@ -3370,12 +3515,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, u32 action = 0; int err, i; - if (!flow_action_has_entries(flow_action)) + if (!flow_action_has_entries(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); return -EINVAL; + } if (!flow_action_hw_stats_check(flow_action, extack, - FLOW_ACTION_HW_STATS_DELAYED_BIT)) + FLOW_ACTION_HW_STATS_DELAYED_BIT)) { + NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); return -EOPNOTSUPP; + } nic_attr = attr->nic_attr; nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; @@ -3453,7 +3602,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -3464,38 +3614,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, flow_flag_set(flow, CT); break; default: - NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + NL_SET_ERR_MSG_MOD(extack, + "The offload action is not supported in NIC action"); return -EOPNOTSUPP; } } - if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || - hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, hdrs, &action, extack); - if (err) - return err; - /* in case all pedit actions are skipped, remove the MOD_HDR - * flag. 
- */ - if (parse_attr->mod_hdr_acts.num_actions == 0) { - action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - } - } - attr->action = action; - if (attr->dest_chain) { - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (attr->dest_chain && parse_attr->mirred_ifindex[0]) { + NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); + if (err) + return err; if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; @@ -3519,19 +3653,25 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, static int parse_tc_vlan_action(struct mlx5e_priv *priv, const struct flow_action_entry *act, struct mlx5_esw_flow_attr *attr, - u32 *action) + u32 *action, + struct netlink_ext_ack *extack) { u8 vlan_idx = attr->total_vlan; - if (vlan_idx >= MLX5_FS_VLAN_DEPTH) + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) { + NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported"); return -EOPNOTSUPP; + } switch (act->id) { case FLOW_ACTION_VLAN_POP: if (vlan_idx) { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) + MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, + "vlan pop action is not supported"); return -EOPNOTSUPP; + } *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; } else { @@ -3547,20 +3687,27 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, if (vlan_idx) { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) + MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, + "vlan push action is not supported for vlan depth > 1"); return -EOPNOTSUPP; + } *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; } else { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && (act->vlan.proto != htons(ETH_P_8021Q) || - act->vlan.prio)) + act->vlan.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "vlan push action is not supported"); return -EOPNOTSUPP; + } *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; } break; default: + NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN"); return -EINVAL; } @@ -3594,7 +3741,8 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev, static int add_vlan_push_action(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, struct net_device **out_dev, - u32 *action) + u32 *action, + struct netlink_ext_ack *extack) { struct net_device *vlan_dev = *out_dev; struct flow_action_entry vlan_act = { @@ -3605,7 +3753,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv, }; int err; - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action); + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); if (err) return err; @@ -3616,14 +3764,15 @@ static int add_vlan_push_action(struct mlx5e_priv *priv, return -ENODEV; if (is_vlan_dev(*out_dev)) - err = add_vlan_push_action(priv, attr, out_dev, action); + err = add_vlan_push_action(priv, attr, out_dev, action, extack); return err; } static int add_vlan_pop_action(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, - u32 *action) + u32 *action, + struct netlink_ext_ack *extack) { struct flow_action_entry vlan_act = { .id = FLOW_ACTION_VLAN_POP, @@ -3633,7 +3782,7 @@ static 
int add_vlan_pop_action(struct mlx5e_priv *priv, nest_level = attr->parse_attr->filter_dev->lower_level - priv->netdev->lower_level; while (nest_level--) { - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action); + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); if (err) return err; } @@ -3734,6 +3883,45 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv, return 0; } +int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + int ifindex, + enum mlx5e_tc_int_port_type type, + u32 *action, + int out_index) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5e_tc_int_port_priv *int_port_priv; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_tc_int_port *dest_int_port; + int err; + + parse_attr = attr->parse_attr; + int_port_priv = mlx5e_get_int_port_priv(priv); + + dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type); + if (IS_ERR(dest_int_port)) + return PTR_ERR(dest_int_port); + + err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts, + MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, + mlx5e_tc_int_port_get_metadata(dest_int_port)); + if (err) { + mlx5e_tc_int_port_put(int_port_priv, dest_int_port); + return err; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + esw_attr->dest_int_port = dest_int_port; + esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; + + /* Forward to root fdb for matching against the new source vport */ + attr->dest_chain = 0; + + return 0; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, @@ -3753,20 +3941,39 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, bool encap = false, decap = false; u32 action = attr->action; int err, i, if_count = 0; + bool ptype_host = false; bool mpls_push = false; - if (!flow_action_has_entries(flow_action)) + if (!flow_action_has_entries(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); return -EINVAL; + } if (!flow_action_hw_stats_check(flow_action, extack, - FLOW_ACTION_HW_STATS_DELAYED_BIT)) + FLOW_ACTION_HW_STATS_DELAYED_BIT)) { + NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); return -EOPNOTSUPP; + } esw_attr = attr->esw_attr; parse_attr = attr->parse_attr; flow_action_for_each(i, act, flow_action) { switch (act->id) { + case FLOW_ACTION_ACCEPT: + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; + break; + case FLOW_ACTION_PTYPE: + if (act->ptype != PACKET_HOST) { + NL_SET_ERR_MSG_MOD(extack, + "skbedit ptype is only supported with type host"); + return -EOPNOTSUPP; + } + + ptype_host = true; + break; case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; @@ -3830,6 +4037,50 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, break; return -EOPNOTSUPP; + case FLOW_ACTION_REDIRECT_INGRESS: { + struct net_device *out_dev; + + out_dev = act->dev; + if (!out_dev) + return -EOPNOTSUPP; + + if (!netif_is_ovs_master(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is supported only for OVS internal ports"); + return -EOPNOTSUPP; + } + + if (netif_is_ovs_master(parse_attr->filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is not supported from internal port"); + return -EOPNOTSUPP; + } + + if (!ptype_host) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port 
ingress requires ptype=host action"); + return -EOPNOTSUPP; + } + + if (esw_attr->out_count) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port ingress is supported only as single destination"); + return -EOPNOTSUPP; + } + + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, + MLX5E_TC_INT_PORT_INGRESS, + &action, esw_attr->out_count); + if (err) + return err; + + esw_attr->out_count++; + + break; + } case FLOW_ACTION_REDIRECT: case FLOW_ACTION_MIRRED: { struct mlx5e_priv *out_priv; @@ -3904,18 +4155,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (is_vlan_dev(out_dev)) { err = add_vlan_push_action(priv, attr, &out_dev, - &action); + &action, extack); if (err) return err; } if (is_vlan_dev(parse_attr->filter_dev)) { err = add_vlan_pop_action(priv, attr, - &action); + &action, extack); if (err) return err; } + if (netif_is_macvlan(out_dev)) + out_dev = macvlan_dev_real_dev(out_dev); + err = verify_uplink_forwarding(priv, flow, out_dev, extack); if (err) return err; @@ -3936,6 +4190,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, rpriv = out_priv->ppriv; esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; + esw_attr->out_count++; + } else if (netif_is_ovs_master(out_dev)) { + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, + out_dev->ifindex, + MLX5E_TC_INT_PORT_EGRESS, + &action, + esw_attr->out_count); + if (err) + return err; + esw_attr->out_count++; } else if (parse_attr->filter_dev != priv->netdev) { /* All mlx5 devices are called to configure @@ -3957,10 +4221,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, break; case FLOW_ACTION_TUNNEL_ENCAP: info = act->tunnel; - if (info) + if (info) { encap = true; - else + } else { + NL_SET_ERR_MSG_MOD(extack, + "Zero tunnel attributes is not supported"); return -EOPNOTSUPP; + } break; case FLOW_ACTION_VLAN_PUSH: @@ -3974,7 +4241,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, act, parse_attr, hdrs, &action, extack); } else { - err = parse_tc_vlan_action(priv, act, esw_attr, &action); + err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack); } if (err) return err; @@ -4000,7 +4267,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -4027,11 +4295,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, flow_flag_set(flow, SAMPLE); break; default: - NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + NL_SET_ERR_MSG_MOD(extack, + "The offload action is not supported in FDB action"); return -EOPNOTSUPP; } } + /* Forward to/from internal port can only have 1 dest */ + if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && + esw_attr->out_count > 1) { + NL_SET_ERR_MSG_MOD(extack, + "Rules with internal port can have only one destination"); + return -EOPNOTSUPP; + } + /* always set IP version for indirect table handling */ attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); @@ -4047,60 +4324,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; } - if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || - hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, - 
parse_attr, hdrs, &action, extack); - if (err) - return err; - /* in case all pedit actions are skipped, remove the MOD_HDR - * flag. we might have set split_count either by pedit or - * pop/push. if there is no pop/push either, reset it too. - */ - if (parse_attr->mod_hdr_acts.num_actions == 0) { - action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || - (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) - esw_attr->split_count = 0; - } - } - attr->action = action; - if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) - return -EOPNOTSUPP; - if (attr->dest_chain) { - if (decap) { - /* It can be supported if we'll create a mapping for - * the tunnel device only (without tunnel), and set - * this tunnel id with this decap flow. - * - * On restore (miss), we'll just set this saved tunnel - * device. - */ - - NL_SET_ERR_MSG(extack, - "Decap with goto isn't supported"); - netdev_warn(priv->netdev, - "Decap with goto isn't supported"); - return -EOPNOTSUPP; - } - - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } + err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); + if (err) + return err; - if (!(attr->action & - (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { - NL_SET_ERR_MSG_MOD(extack, - "Rule must have at least one forward/drop action"); + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; - } - if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { - NL_SET_ERR_MSG_MOD(extack, - "current firmware doesn't support split rule for port mirroring"); - netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); + if (attr->dest_chain && decap) { + /* It can be supported if we'll create a mapping for + * the tunnel device only (without tunnel), and set + * this tunnel id with this decap flow. + * + * On restore (miss), we'll just set this saved tunnel + * device. + */ + + NL_SET_ERR_MSG(extack, "Decap with goto isn't supported"); + netdev_warn(priv->netdev, "Decap with goto isn't supported"); return -EOPNOTSUPP; } @@ -4735,8 +4978,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (!flow_action_basic_hw_stats_check(flow_action, extack)) + if (!flow_action_basic_hw_stats_check(flow_action, extack)) { + NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); return -EOPNOTSUPP; + } flow_action_for_each(i, act, flow_action) { switch (act->id) { @@ -5008,9 +5253,9 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) MLX5_FLOW_NAMESPACE_FDB, uplink_priv->post_act); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev)); + uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); -#endif mapping_id = mlx5_query_nic_system_image_guid(esw->dev); @@ -5024,9 +5269,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) } uplink_priv->tunnel_mapping = mapping; - /* 0xFFF is reserved for stack devices slow path table mark */ + /* Two last values are reserved for stack devices slow path table mark + * and bridge ingress push mark. 
+ */ mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, - sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); + sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5054,9 +5301,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif + mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); mlx5_tc_ct_clean(uplink_priv->ct_priv); netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); @@ -5076,9 +5322,8 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); mapping_destroy(uplink_priv->tunnel_mapping); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif + mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); mlx5_tc_ct_clean(uplink_priv->ct_priv); mlx5e_tc_post_act_destroy(uplink_priv->post_act); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 1a4cd882f0fba2..fdb222793027a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -38,6 +38,7 @@ #include "eswitch.h" #include "en/tc_ct.h" #include "en/tc_tun.h" +#include "en/tc/int_port.h" #include "en_rep.h" #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff @@ -56,7 +57,7 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); struct mlx5e_tc_update_priv { - struct net_device *tun_dev; + struct net_device *fwd_dev; }; struct mlx5_nic_flow_attr { @@ -104,6 +105,8 @@ struct mlx5_rx_tun_attr { #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) +#define MLX5E_TC_MAX_INT_PORT_NUM (8) + #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) struct tunnel_match_key { @@ -283,6 +286,12 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport); +int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + int ifindex, + enum mlx5e_tc_int_port_type type, + u32 *action, + int out_index); #else /* CONFIG_MLX5_CLS_ACT */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 188994d091c54f..7fd33b356cc8d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -38,6 +38,7 @@ #include "en/txrx.h" #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" +#include "en_accel/ipsec_rxtx.h" #include "en/ptp.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) @@ -213,30 +214,13 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); } -static void -ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5_wqe_eth_seg *eseg) -{ - struct xfrm_offload *xo = xfrm_offload(skb); - - eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; - if (xo->inner_ipproto) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; - } 
else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; - sq->stats->csum_partial_inner++; - } -} - static inline void mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_state *accel, struct mlx5_wqe_eth_seg *eseg) { - if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) { - ipsec_txwqe_build_eseg_csum(sq, skb, eseg); + if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg))) return; - } if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 605c8ecc3610f7..792e0d6aa86188 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) mlx5_eq_notifier_register(dev, &table->cq_err_nb); param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_NUM_CMD_EQE, .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; @@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_NUM_ASYNC_EQE, }; @@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) goto err2; param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = /* TODO: sriov max_vf + */ 1, .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, }; @@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) ncomp_eqs = table->num_comp_eqs; nent = MLX5_COMP_EQ_SIZE; for (i = 0; i < ncomp_eqs; i++) { - int vecidx = i + MLX5_IRQ_VEC_COMP_BASE; struct mlx5_eq_param param = {}; + int vecidx = i; eq = kzalloc(sizeof(*eq), GFP_KERNEL); if (!eq) { @@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev) goto err_out; } - vecidx = MLX5_IRQ_VEC_COMP_BASE; - for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE; - vecidx++) { + for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) { err = irq_cpu_rmap_add(eq_table->rmap, pci_irq_vector(mdev->pdev, vecidx)); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 7e221038df8d54..f690f430f40f80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -28,7 +28,10 @@ #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1) #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1) -#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \ + (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1) #define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0 @@ -61,6 +64,9 @@ struct mlx5_esw_bridge { struct mlx5_flow_table *egress_ft; struct mlx5_flow_group *egress_vlan_fg; struct mlx5_flow_group *egress_mac_fg; + struct mlx5_flow_group *egress_miss_fg; + struct mlx5_pkt_reformat *egress_miss_pkt_reformat; + struct mlx5_flow_handle *egress_miss_handle; unsigned long ageing_time; u32 flags; }; @@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct 
mlx5_esw_bridge_fdb_entry *entry) SWITCHDEV_FDB_DEL_TO_BRIDGE); } +static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw) +{ + return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) && + MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) && + MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >= + offsetof(struct vlan_ethhdr, h_vlan_proto); +} + +static struct mlx5_pkt_reformat * +mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw) +{ + struct mlx5_pkt_reformat_params reformat_params = {}; + + reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR; + reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START; + reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto); + reformat_params.size = sizeof(struct vlan_hdr); + return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); +} + static struct mlx5_flow_table * mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw) { @@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_ return fg; } +static struct mlx5_flow_group * +mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + u32 *in, *match; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return ERR_PTR(-ENOMEM); + + MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); + match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + + MLX5_SET(create_flow_group_in, in, start_flow_index, + MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM); + MLX5_SET(create_flow_group_in, in, end_flow_index, + MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO); + + fg = mlx5_create_flow_group(egress_ft, in); + if (IS_ERR(fg)) + esw_warn(esw->dev, + "Failed to create bridge egress table miss flow group (err=%ld)\n", + PTR_ERR(fg)); + kvfree(in); + return fg; +} + static int mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg; struct mlx5_flow_table *ingress_ft, *skip_ft; + struct mlx5_eswitch *esw = br_offloads->esw; int err; - if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw)) + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) return -EOPNOTSUPP; ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE, - br_offloads->esw); + esw); if (IS_ERR(ingress_ft)) return PTR_ERR(ingress_ft); skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE, - br_offloads->esw); + esw); if (IS_ERR(skip_ft)) { err = PTR_ERR(skip_ft); goto err_skip_tbl; } - vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft); + vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft); if (IS_ERR(vlan_fg)) { err = PTR_ERR(vlan_fg); goto err_vlan_fg; } - filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft); + filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft); if (IS_ERR(filter_fg)) { err = PTR_ERR(filter_fg); goto err_filter_fg; } - mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft); + mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft); if (IS_ERR(mac_fg)) { err = 
PTR_ERR(mac_fg); goto err_mac_fg; @@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa br_offloads->ingress_ft = NULL; } +static struct mlx5_flow_handle * +mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft, + struct mlx5_flow_table *skip_ft, + struct mlx5_pkt_reformat *pkt_reformat); + static int mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads, struct mlx5_esw_bridge *bridge) { - struct mlx5_flow_group *mac_fg, *vlan_fg; + struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg; + struct mlx5_pkt_reformat *miss_pkt_reformat = NULL; + struct mlx5_flow_handle *miss_handle = NULL; + struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_flow_table *egress_ft; int err; egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE, - br_offloads->esw); + esw); if (IS_ERR(egress_ft)) return PTR_ERR(egress_ft); - vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft); + vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft); if (IS_ERR(vlan_fg)) { err = PTR_ERR(vlan_fg); goto err_vlan_fg; } - mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft); + mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft); if (IS_ERR(mac_fg)) { err = PTR_ERR(mac_fg); goto err_mac_fg; } + if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) { + miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft); + if (IS_ERR(miss_fg)) { + esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n", + PTR_ERR(miss_fg)); + miss_fg = NULL; + goto skip_miss_flow; + } + + miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw); + if (IS_ERR(miss_pkt_reformat)) { + esw_warn(esw->dev, + "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n", + PTR_ERR(miss_pkt_reformat)); + miss_pkt_reformat = NULL; + mlx5_destroy_flow_group(miss_fg); + miss_fg = NULL; + goto skip_miss_flow; + } + + miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft, + br_offloads->skip_ft, + miss_pkt_reformat); + if (IS_ERR(miss_handle)) { + esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n", + PTR_ERR(miss_handle)); + miss_handle = NULL; + mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat); + miss_pkt_reformat = NULL; + mlx5_destroy_flow_group(miss_fg); + miss_fg = NULL; + goto skip_miss_flow; + } + } +skip_miss_flow: + bridge->egress_ft = egress_ft; bridge->egress_vlan_fg = vlan_fg; bridge->egress_mac_fg = mac_fg; + bridge->egress_miss_fg = miss_fg; + bridge->egress_miss_pkt_reformat = miss_pkt_reformat; + bridge->egress_miss_handle = miss_handle; return 0; err_mac_fg: @@ -403,6 +507,13 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads, static void mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) { + if (bridge->egress_miss_handle) + mlx5_del_flow_rules(bridge->egress_miss_handle); + if (bridge->egress_miss_pkt_reformat) + mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev, + bridge->egress_miss_pkt_reformat); + if (bridge->egress_miss_fg) + mlx5_destroy_flow_group(bridge->egress_miss_fg); mlx5_destroy_flow_group(bridge->egress_mac_fg); mlx5_destroy_flow_group(bridge->egress_vlan_fg); mlx5_destroy_flow_table(bridge->egress_ft); @@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); if (vlan && vlan->pkt_reformat_push) { - 
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; flow_act.pkt_reformat = vlan->pkt_reformat_push; + flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark; } else if (vlan) { MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.cvlan_tag); @@ -564,6 +677,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u if (!rule_spec) return ERR_PTR(-ENOMEM); + if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) && + vport_num == MLX5_VPORT_UPLINK) + rule_spec->flow_context.flow_source = + MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, @@ -599,6 +716,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u return handle; } +static struct mlx5_flow_handle * +mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft, + struct mlx5_flow_table *skip_ft, + struct mlx5_pkt_reformat *pkt_reformat) +{ + struct mlx5_flow_destination dest = { + .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, + .ft = skip_ft, + }; + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, + .flags = FLOW_ACT_NO_APPEND, + .pkt_reformat = pkt_reformat, + }; + struct mlx5_flow_spec *rule_spec; + struct mlx5_flow_handle *handle; + + rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL); + if (!rule_spec) + return ERR_PTR(-ENOMEM); + + rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + MLX5_SET(fte_match_param, rule_spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1, + ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK); + + handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1); + + kvfree(rule_spec); + return handle; +} + static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads) { @@ -736,14 +888,20 @@ mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry, kvfree(entry); } +static void +mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry, + struct mlx5_esw_bridge *bridge) +{ + mlx5_esw_bridge_fdb_del_notify(entry); + mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); +} + static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge) { struct mlx5_esw_bridge_fdb_entry *entry, *tmp; - list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) { - mlx5_esw_bridge_fdb_del_notify(entry); - mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); - } + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) + mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge); } static struct mlx5_esw_bridge_vlan * @@ -798,24 +956,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5 static int mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw) { - struct mlx5_pkt_reformat_params reformat_params = {}; struct mlx5_pkt_reformat *pkt_reformat; - if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) || - MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) || - MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) < - offsetof(struct vlan_ethhdr, h_vlan_proto)) { + if 
(!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) { esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n"); return -EOPNOTSUPP; } - reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR; - reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START; - reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto); - reformat_params.size = sizeof(struct vlan_hdr); - pkt_reformat = mlx5_packet_reformat_alloc(esw->dev, - &reformat_params, - MLX5_FLOW_NAMESPACE_FDB); + pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw); if (IS_ERR(pkt_reformat)) { esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n", PTR_ERR(pkt_reformat)); @@ -833,6 +981,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_ vlan->pkt_reformat_pop = NULL; } +static int +mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_modify_hdr *pkt_mod_hdr; + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); + MLX5_SET(set_action_in, action, offset, 8); + MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS); + MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN); + + pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action); + if (IS_ERR(pkt_mod_hdr)) + return PTR_ERR(pkt_mod_hdr); + + vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr; + return 0; +} + +static void +mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw) +{ + mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark); + vlan->pkt_mod_hdr_push_mark = NULL; +} + static struct mlx5_esw_bridge_vlan * mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port, struct mlx5_eswitch *esw) @@ -852,6 +1027,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por err = mlx5_esw_bridge_vlan_push_create(vlan, esw); if (err) goto err_vlan_push; + + err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw); + if (err) + goto err_vlan_push_mark; } if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { err = mlx5_esw_bridge_vlan_pop_create(vlan, esw); @@ -870,6 +1049,9 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por if (vlan->pkt_reformat_pop) mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw); err_vlan_pop: + if (vlan->pkt_mod_hdr_push_mark) + mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw); +err_vlan_push_mark: if (vlan->pkt_reformat_push) mlx5_esw_bridge_vlan_push_cleanup(vlan, esw); err_vlan_push: @@ -886,17 +1068,18 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port, static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge *bridge) { + struct mlx5_eswitch *esw = bridge->br_offloads->esw; struct mlx5_esw_bridge_fdb_entry *entry, *tmp; - list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) { - mlx5_esw_bridge_fdb_del_notify(entry); - mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); - } + list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) + mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge); if (vlan->pkt_reformat_pop) - mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw); + mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw); + if (vlan->pkt_mod_hdr_push_mark) + 
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw); if (vlan->pkt_reformat_push) - mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw); + mlx5_esw_bridge_vlan_push_cleanup(vlan, esw); } static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port, @@ -948,6 +1131,17 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id, return vlan; } +static struct mlx5_esw_bridge_fdb_entry * +mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge, + const unsigned char *addr, u16 vid) +{ + struct mlx5_esw_bridge_fdb_key key = {}; + + ether_addr_copy(key.addr, addr); + key.vid = vid; + return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); +} + static struct mlx5_esw_bridge_fdb_entry * mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr, u16 vid, bool added_by_user, bool peer, @@ -966,6 +1160,10 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow return ERR_CAST(vlan); } + entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid); + if (entry) + mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge); + entry = kvzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); @@ -1265,7 +1463,6 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 struct switchdev_notifier_fdb_info *fdb_info) { struct mlx5_esw_bridge_fdb_entry *entry; - struct mlx5_esw_bridge_fdb_key key; struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge *bridge; @@ -1274,13 +1471,11 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 return; bridge = port->bridge; - ether_addr_copy(key.addr, fdb_info->addr); - key.vid = fdb_info->vid; - entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); + entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid); if (!entry) { esw_debug(br_offloads->esw->dev, "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n", - key.addr, key.vid, vport_num); + fdb_info->addr, fdb_info->vid, vport_num); return; } @@ -1322,7 +1517,6 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o { struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_esw_bridge_fdb_entry *entry; - struct mlx5_esw_bridge_fdb_key key; struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge *bridge; @@ -1331,18 +1525,15 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o return; bridge = port->bridge; - ether_addr_copy(key.addr, fdb_info->addr); - key.vid = fdb_info->vid; - entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); + entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid); if (!entry) { esw_warn(esw->dev, "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n", - key.addr, key.vid, vport_num); + fdb_info->addr, fdb_info->vid, vport_num); return; } - mlx5_esw_bridge_fdb_del_notify(entry); - mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); + mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge); } void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) @@ -1358,13 +1549,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) continue; - if (time_after(lastuse, entry->lastuse)) { + if (time_after(lastuse, entry->lastuse)) mlx5_esw_bridge_fdb_entry_refresh(entry); - } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) && - 
time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) { - mlx5_esw_bridge_fdb_del_notify(entry); - mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); - } + else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) && + time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) + mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge); } } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h index 52964a82d6a68c..878311fe950a47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h @@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan { struct list_head fdb_list; struct mlx5_pkt_reformat *pkt_reformat_push; struct mlx5_pkt_reformat *pkt_reformat_pop; + struct mlx5_modify_hdr *pkt_mod_hdr_push_mark; }; struct mlx5_esw_bridge_port { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 20af557ae30cb8..7f9b96d9537ebd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -36,7 +36,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 return NULL; mlx5_esw_get_port_parent_id(dev, &ppid); - pfnum = PCI_FUNC(dev->pdev->devfn); + pfnum = mlx5_get_dev_index(dev); external = mlx5_core_is_ecpf_esw_manager(dev); if (external) controller_num = dev->priv.eswitch->offloads.host_number + 1; @@ -149,7 +149,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p if (IS_ERR(vport)) return PTR_ERR(vport); - pfnum = PCI_FUNC(dev->pdev->devfn); + pfnum = mlx5_get_dev_index(dev); mlx5_esw_get_port_parent_id(dev, &ppid); memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2c7444101bb93e..42f8ee2e5d9f9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -51,6 +51,7 @@ enum mlx5_mapped_obj_type { MLX5_MAPPED_OBJ_CHAIN, MLX5_MAPPED_OBJ_SAMPLE, + MLX5_MAPPED_OBJ_INT_PORT_METADATA, }; struct mlx5_mapped_obj { @@ -63,6 +64,7 @@ struct mlx5_mapped_obj { u32 trunc_size; u32 tunnel_id; } sample; + u32 int_port_metadata; }; }; @@ -88,6 +90,7 @@ enum { MAPPING_TYPE_TUNNEL_ENC_OPTS, MAPPING_TYPE_LABELS, MAPPING_TYPE_ZONE, + MAPPING_TYPE_INT_PORT, }; struct vport_ingress { @@ -336,6 +339,9 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); +bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); +int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); + /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); @@ -433,7 +439,7 @@ enum mlx5_flow_match_level { }; /* current maximum for flow based vport multicasting */ -#define MLX5_MAX_FLOW_FWD_VPORTS 2 +#define MLX5_MAX_FLOW_FWD_VPORTS 32 enum { MLX5_ESW_DEST_ENCAP = BIT(0), @@ -447,12 +453,22 @@ enum { MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2), MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3), MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4), + MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5), }; +/* Returns true if any of the flags that require skipping 
further TC/NF processing are set. */ +static inline bool +mlx5_esw_attr_flags_skip(u32 attr_flags) +{ + return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT); +} + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_core_dev *in_mdev; struct mlx5_core_dev *counter_dev; + struct mlx5e_tc_int_port *dest_int_port; + struct mlx5e_tc_int_port *int_port; int split_count; int out_count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0d461e38add372..f4eaa589388601 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -86,12 +86,18 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { - if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && - attr && attr->in_rep) - spec->flow_context.flow_source = - attr->in_rep->vport == MLX5_VPORT_UPLINK ? - MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK : - MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep) + return; + + if (attr->int_port) { + spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port); + + return; + } + + spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ? + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK : + MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; } /* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits @@ -121,6 +127,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_eswitch *src_esw, u16 vport) { + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + u32 metadata; void *misc2; void *misc; @@ -130,10 +138,16 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { if (mlx5_esw_indir_table_decap_vport(attr)) vport = mlx5_esw_indir_table_decap_vport(attr); + + if (esw_attr->int_port) + metadata = + mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); + else + metadata = + mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport); + misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); - MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(src_esw, - vport)); + MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata); misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, @@ -290,8 +304,11 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest, err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); if (err) goto err_setup_chain; - flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat; + + if (esw_attr->dests[j].pkt_reformat) { + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat; + } } return 0; @@ -315,7 +332,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) int i; for (i = esw_attr->split_count; i < esw_attr->out_count; i++) - if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + if (esw_attr->dests[i].rep && + mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, esw_attr->dests[i].mdev)) return true; return false; @@ -440,7 
+458,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); (*i)++; - } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { + } else if (mlx5_esw_attr_flags_skip(attr->flags)) { esw_setup_slow_path_dest(dest, flow_act, chains, *i); (*i)++; } else if (attr->dest_chain) { @@ -467,7 +485,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw, if (attr->dest_ft) { esw_cleanup_decap_indir(esw, attr); - } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) { + } else if (!mlx5_esw_attr_flags_skip(attr->flags)) { if (attr->dest_chain) esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); else if (esw_is_indir_table(esw, attr)) @@ -482,12 +500,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) { - struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; struct mlx5_fs_chains *chains = esw_chains(esw); bool split = !!(esw_attr->split_count); struct mlx5_vport_tbl_attr fwd_attr; + struct mlx5_flow_destination *dest; struct mlx5_flow_handle *rule; struct mlx5_flow_table *fdb; int i = 0; @@ -495,6 +513,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (esw->mode != MLX5_ESWITCH_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); + dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL); + if (!dest) + return ERR_PTR(-ENOMEM); + flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) @@ -574,6 +596,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, else atomic64_inc(&esw->offloads.num_flows); + kfree(dest); return rule; err_add_rule: @@ -584,6 +607,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, err_esw_get: esw_cleanup_dests(esw, attr); err_create_goto_table: + kfree(dest); return rule; } @@ -592,16 +616,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) { - struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; struct mlx5_fs_chains *chains = esw_chains(esw); struct mlx5_vport_tbl_attr fwd_attr; + struct mlx5_flow_destination *dest; struct mlx5_flow_table *fast_fdb; struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_handle *rule; int i, err = 0; + dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL); + if (!dest) + return ERR_PTR(-ENOMEM); + fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0); if (IS_ERR(fast_fdb)) { rule = ERR_CAST(fast_fdb); @@ -654,6 +682,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, atomic64_inc(&esw->offloads.num_flows); + kfree(dest); return rule; err_chain_src_rewrite: esw_put_dest_tables_loop(esw, attr, 0, i); @@ -661,6 +690,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, err_get_fwd: mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); err_get_fast: + kfree(dest); return rule; } @@ -678,7 +708,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, mlx5_del_flow_rules(rule); - if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) { + if (!mlx5_esw_attr_flags_skip(attr->flags)) { /* unref the term table */ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { if 
(esw_attr->dests[i].termtbl) @@ -1009,7 +1039,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) u16 vport_num; num_vfs = esw->esw_funcs.num_vfs; - flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL); if (!flows) return -ENOMEM; @@ -1188,7 +1218,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, peer_miss_rules_setup(esw, peer_dev, spec, &dest); - flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); if (!flows) { err = -ENOMEM; goto alloc_flows_err; @@ -1845,6 +1875,17 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) atomic64_set(&esw->user_count, 0); } +static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw) +{ + int nvports; + + nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; + if (mlx5e_tc_int_port_supported(esw)) + nvports += MLX5E_TC_MAX_INT_PORT_NUM; + + return nvports; +} + static int esw_create_offloads_table(struct mlx5_eswitch *esw) { struct mlx5_flow_table_attr ft_attr = {}; @@ -1859,7 +1900,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) return -EOPNOTSUPP; } - ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS; + ft_attr.max_fte = esw_get_offloads_ft_size(esw); ft_attr.prio = 1; ft_offloads = mlx5_create_flow_table(ns, &ft_attr); @@ -1888,7 +1929,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) int nvports; int err = 0; - nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; + nvports = esw_get_offloads_ft_size(esw); flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -2793,12 +2834,13 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) { u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; - u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1; + /* Reserve 0xf for internal port offload */ + u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2; u32 pf_num; int id; /* Only 4 bits of pf_num */ - pf_num = PCI_FUNC(esw->dev->pdev->devfn); + pf_num = mlx5_get_dev_index(esw->dev); if (pf_num > max_pf_num) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index b459549058450a..182306bbefaa87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -219,8 +219,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) || - attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH || - !mlx5_eswitch_offload_is_uplink_port(esw, spec)) + mlx5_esw_attr_flags_skip(attr->flags) || + (!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port)) return false; /* push vlan on RX */ @@ -229,7 +229,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, /* hairpin */ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) - if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK) + if (!esw_attr->dest_int_port && esw_attr->dests[i].rep && + esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK) return true; return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 306279b7f9e704..12abe991583a87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -115,7 +115,7 @@ static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn, ix = conn->qp.rq.pc & (conn->qp.rq.size - 1); data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix); data->byte_count = cpu_to_be32(buf->sg[0].size); - data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); + data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey); data->addr = cpu_to_be64(buf->sg[0].dma_addr); conn->qp.rq.pc++; @@ -155,7 +155,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn, if (!buf->sg[sgi].data) break; data->byte_count = cpu_to_be32(buf->sg[sgi].size); - data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key); + data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey); data->addr = cpu_to_be64(buf->sg[sgi].dma_addr); data++; size++; @@ -221,7 +221,7 @@ static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn) } static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, - struct mlx5_core_mkey *mkey) + u32 *mkey) { int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; @@ -978,7 +978,7 @@ int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev) mlx5_fpga_err(fdev, "create mkey failed, %d\n", err); goto err_dealloc_pd; } - mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key); + mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey); return 0; @@ -994,7 +994,7 @@ int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev) void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev) { - mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey); + mlx5_core_destroy_mkey(fdev->mdev, fdev->conn_res.mkey); mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn); mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar); mlx5_nic_vport_disable_roce(fdev->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 52c9dee91ea465..2a984e82ae16ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -54,7 +54,7 @@ struct mlx5_fpga_device { /* QP Connection resources */ struct { u32 pdn; - struct mlx5_core_mkey mkey; + u32 mkey; struct mlx5_uars_page *uar; } conn_res; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 7db8df64a60e24..750b21124a1ae0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master, return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); } +static int +mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns, + int definer_id) +{ + return 0; +} + +static int +mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns, + u16 format_id, u32 *match_mask) +{ + return 0; +} + static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) @@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = dst->dest_attr.ft->id; break; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: - id = dst->dest_attr.vport.num; MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id_valid, !!(dst->dest_attr.vport.flags & @@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, 
MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id, dst->dest_attr.vport.vhca_id); + if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) { + /* destination_id is reserved */ + id = 0; + break; + } + id = dst->dest_attr.vport.num; if (extended_dest && dst->dest_attr.vport.pkt_reformat) { MLX5_SET(dest_format_struct, in_dests, @@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in); } +static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns, + int definer_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_OBJ_TYPE_MATCH_DEFINER); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id); + + return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns, + u16 format_id, u32 *match_mask) +{ + u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {}; + u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {}; + struct mlx5_core_dev *dev = ns->dev; + void *ptr; + int err; + + MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type, + MLX5_OBJ_TYPE_MATCH_DEFINER); + + ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context); + MLX5_SET(match_definer, ptr, format_id, format_id); + + ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask); + memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask)); + + err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out); + return err ? 
err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +} + static const struct mlx5_flow_cmds mlx5_flow_cmds = { .create_flow_table = mlx5_cmd_create_flow_table, .destroy_flow_table = mlx5_cmd_destroy_flow_table, @@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = { .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc, .modify_header_alloc = mlx5_cmd_modify_header_alloc, .modify_header_dealloc = mlx5_cmd_modify_header_dealloc, + .create_match_definer = mlx5_cmd_create_match_definer, + .destroy_match_definer = mlx5_cmd_destroy_match_definer, .set_peer = mlx5_cmd_stub_set_peer, .create_ns = mlx5_cmd_stub_create_ns, .destroy_ns = mlx5_cmd_stub_destroy_ns, @@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc, .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc, .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc, + .create_match_definer = mlx5_cmd_stub_create_match_definer, + .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer, .set_peer = mlx5_cmd_stub_set_peer, .create_ns = mlx5_cmd_stub_create_ns, .destroy_ns = mlx5_cmd_stub_destroy_ns, @@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_NIC_TX: case FS_FT_RDMA_RX: case FS_FT_RDMA_TX: + case FS_FT_PORT_SEL: return mlx5_fs_cmd_get_fw_cmds(); default: return mlx5_fs_cmd_get_stub_cmds(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 5ecd33cdc0874e..220ec632d35a85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -97,6 +97,10 @@ struct mlx5_flow_cmds { int (*create_ns)(struct mlx5_flow_root_namespace *ns); int (*destroy_ns)(struct mlx5_flow_root_namespace *ns); + int (*create_match_definer)(struct mlx5_flow_root_namespace *ns, + u16 format_id, u32 *match_mask); + int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns, + int definer_id); }; int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fe501ba88bea9c..386ab9a2d490f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -99,6 +99,9 @@ #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 +#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1 +#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1 + #define BY_PASS_PRIO_NUM_LEVELS 1 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) @@ -206,34 +209,63 @@ static struct init_tree_node egress_root_fs = { } }; -#define RDMA_RX_BYPASS_PRIO 0 -#define RDMA_RX_KERNEL_PRIO 1 +enum { + RDMA_RX_COUNTERS_PRIO, + RDMA_RX_BYPASS_PRIO, + RDMA_RX_KERNEL_PRIO, +}; + +#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS +#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) +#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) + static struct init_tree_node rdma_rx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 2, + .ar_size = 3, .children = (struct init_tree_node[]) { + [RDMA_RX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS, + RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))), [RDMA_RX_BYPASS_PRIO] = - ADD_PRIO(0, 
MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, + ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), [RDMA_RX_KERNEL_PRIO] = - ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, + ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, ADD_MULTIPLE_PRIO(1, 1))), } }; +enum { + RDMA_TX_COUNTERS_PRIO, + RDMA_TX_BYPASS_PRIO, +}; + +#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS +#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) + static struct init_tree_node rdma_tx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 1, + .ar_size = 2, .children = (struct init_tree_node[]) { - ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + [RDMA_TX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS, + RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))), + [RDMA_TX_BYPASS_PRIO] = + ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS_RDMA_TX, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, - ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL, BY_PASS_PRIO_NUM_LEVELS))), } }; @@ -2191,6 +2223,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, if (steering->fdb_root_ns) return &steering->fdb_root_ns->ns; return NULL; + case MLX5_FLOW_NAMESPACE_PORT_SEL: + if (steering->port_sel_root_ns) + return &steering->port_sel_root_ns->ns; + return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; @@ -2215,6 +2251,12 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, prio = RDMA_RX_KERNEL_PRIO; } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { root_ns = steering->rdma_tx_root_ns; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_COUNTERS_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_COUNTERS_PRIO; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@ -2596,6 +2638,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) steering->fdb_root_ns = NULL; kfree(steering->fdb_sub_ns); steering->fdb_sub_ns = NULL; + cleanup_root_ns(steering->port_sel_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->rdma_rx_root_ns); @@ -2634,6 +2677,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) return PTR_ERR_OR_ZERO(prio); } +#define PORT_SEL_NUM_LEVELS 3 +static int init_port_sel_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL); + if (!steering->port_sel_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0, + PORT_SEL_NUM_LEVELS); + return PTR_ERR_OR_ZERO(prio); +} + static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) { int err; @@ -3020,6 +3078,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } + if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) { + err = init_port_sel_root_ns(steering); + if (err) + goto err; + } + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { err = 
init_rdma_rx_root_ns(steering); @@ -3224,6 +3288,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); +int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer) +{ + return definer->id; +} + +struct mlx5_flow_definer * +mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask) +{ + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_definer *definer; + int id; + + root = get_root_namespace(dev, ns_type); + if (!root) + return ERR_PTR(-EOPNOTSUPP); + + definer = kzalloc(sizeof(*definer), GFP_KERNEL); + if (!definer) + return ERR_PTR(-ENOMEM); + + definer->ns_type = ns_type; + id = root->cmds->create_match_definer(root, format_id, match_mask); + if (id < 0) { + mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id); + kfree(definer); + return ERR_PTR(id); + } + definer->id = id; + return definer; +} + +void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer) +{ + struct mlx5_flow_root_namespace *root; + + root = get_root_namespace(dev, definer->ns_type); + if (WARN_ON(!root)) + return; + + root->cmds->destroy_match_definer(root, definer->id); + kfree(definer); +} + int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 98240badc342cf..7711db245c6387 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -49,6 +49,11 @@ #define FDB_TC_MAX_PRIO 16 #define FDB_TC_LEVELS_PER_PRIO 2 +struct mlx5_flow_definer { + enum mlx5_flow_namespace_type ns_type; + u32 id; +}; + struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; union { @@ -97,7 +102,8 @@ enum fs_flow_table_type { FS_FT_SNIFFER_TX = 0X6, FS_FT_RDMA_RX = 0X7, FS_FT_RDMA_TX = 0X8, - FS_FT_MAX_TYPE = FS_FT_RDMA_TX, + FS_FT_PORT_SEL = 0X9, + FS_FT_MAX_TYPE = FS_FT_PORT_SEL, }; enum fs_flow_table_op_mod { @@ -129,6 +135,7 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *rdma_rx_root_ns; struct mlx5_flow_root_namespace *rdma_tx_root_ns; struct mlx5_flow_root_namespace *egress_root_ns; + struct mlx5_flow_root_namespace *port_sel_root_ns; int esw_egress_acl_vports; int esw_ingress_acl_vports; }; @@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node); (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \ - (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\ + (type == FS_FT_PORT_SEL) ? 
MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\ ) #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 18e5aec14641fd..31c99d53faf79c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -40,6 +40,7 @@ #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) +#define MLX5_SF_NUM_COUNTERS_BULK 6 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 @@ -146,8 +147,12 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, static int get_max_bulk_query_len(struct mlx5_core_dev *dev) { - return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, - (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); + int num_counters_bulk = mlx5_core_is_sf(dev) ? + MLX5_SF_NUM_COUNTERS_BULK : + MLX5_SW_MAX_COUNTERS_BULK; + + return min_t(int, num_counters_bulk, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); } static void update_counter_cache(int index, u32 *bulk_raw_data, @@ -296,7 +301,7 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) return mlx5_fc_single_alloc(dev); } -struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging) { struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; @@ -327,8 +332,6 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) goto err_out_alloc; llist_add(&counter->addlist, &fc_stats->addlist); - - mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); } return counter; @@ -337,6 +340,16 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) mlx5_fc_release(dev, counter); return ERR_PTR(err); } + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (aging) + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + return counter; +} EXPORT_SYMBOL(mlx5_fc_create); u32 mlx5_fc_id(struct mlx5_fc *counter) @@ -497,8 +510,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), - GFP_KERNEL); + bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); if (!bulk) goto err_alloc_bulk; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 016d26f809a593..2d8406fab84462 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,6 +35,7 @@ #include #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" +#include "lib/tout.h" #include "accel/tls.h" enum { @@ -148,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (err) return err; + if (MLX5_CAP_GEN(dev, port_selection_cap)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION); + if (err) + return err; + } + if (MLX5_CAP_GEN(dev, hca_cap_2)) { err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2); if (err) @@ -262,6 +269,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, shampo)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO); + if (err) + return err; + } + return 0; } @@ -317,10 +330,9 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) return 0; } -#define MLX5_FAST_TEARDOWN_WAIT_MS 3000 int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) { - unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS; + unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN); u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {}; u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {}; int state; @@ -618,17 +630,18 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) fwhandle, 0); } -#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */ static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status) { - unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT); struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; u32 out[MLX5_ST_SZ_DW(mirc_reg)]; u32 in[MLX5_ST_SZ_DW(mirc_reg)]; + unsigned long exp_time; int err; + exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE)); + if (!MLX5_CAP_MCAM_REG2(dev, mirc)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 106b50e42b464d..0b0234f9d694c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -3,6 +3,7 @@ #include "fw_reset.h" #include "diag/fw_tracer.h" +#include "lib/tout.h" enum { MLX5_FW_RESET_FLAGS_RESET_REQUESTED, @@ -228,8 +229,6 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. 
Device reset is expected.\n"); } -#define MLX5_PCI_LINK_UP_TIMEOUT 2000 - static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) { struct pci_bus *bridge_bus = dev->pdev->bus; @@ -286,7 +285,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) goto restore; } - timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT); + timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE)); do { err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16); if (err) @@ -299,8 +298,8 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) if (reg16 & PCI_EXP_LNKSTA_DLLLA) { mlx5_core_info(dev, "PCI Link up\n"); } else { - mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n", - reg16, MLX5_PCI_LINK_UP_TIMEOUT); + mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n", + reg16, mlx5_tout_ms(dev, PCI_TOGGLE)); err = -ETIMEDOUT; } @@ -395,16 +394,16 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti return NOTIFY_OK; } -#define MLX5_FW_RESET_TIMEOUT_MSEC 5000 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev) { - unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC); + unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE); + unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout); struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; int err; if (!wait_for_completion_timeout(&fw_reset->done, timeout)) { - mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n", - MLX5_FW_RESET_TIMEOUT_MSEC / 1000); + mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n", + pci_sync_update_timeout / 1000); err = -ETIMEDOUT; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 037e18dd4be0e1..64f1abc4dc367f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -36,14 +36,15 @@ #include #include #include +#include #include "mlx5_core.h" #include "lib/eq.h" #include "lib/mlx5.h" #include "lib/pci_vsc.h" +#include "lib/tout.h" #include "diag/fw_tracer.h" enum { - MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, MAX_MISSES = 3, }; @@ -74,6 +75,11 @@ enum { MLX5_SENSOR_FW_SYND_RFR = 5, }; +enum { + MLX5_SEVERITY_MASK = 0x7, + MLX5_SEVERITY_VALID_MASK = 0x8, +}; + u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7; @@ -98,12 +104,19 @@ static bool sensor_pci_not_working(struct mlx5_core_dev *dev) return (ioread32be(&h->fw_ver) == 0xffffffff); } +static int mlx5_health_get_rfr(u8 rfr_severity) +{ + return rfr_severity >> MLX5_RFR_BIT_OFFSET; +} + static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET; u8 synd = ioread8(&h->synd); + u8 rfr; + + rfr = mlx5_health_get_rfr(ioread8(&h->rfr_severity)); if (rfr && synd) mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd); @@ -219,11 +232,9 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mutex_unlock(&dev->intf_state_mutex); } -#define MLX5_CRDUMP_WAIT_MS 60000 -#define MLX5_FW_RESET_WAIT_MS 1000 void mlx5_error_sw_reset(struct mlx5_core_dev *dev) { - unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS; + unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE); int lock = -EBUSY; mutex_lock(&dev->intf_state_mutex); @@ -237,7 +248,7 @@ void
mlx5_error_sw_reset(struct mlx5_core_dev *dev) lock = lock_sem_sw_reset(dev, true); if (lock == -EBUSY) { - delay_ms = MLX5_CRDUMP_WAIT_MS; + delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP); goto recover_from_sw_reset; } /* Execute SW reset */ @@ -307,13 +318,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) mlx5_disable_device(dev); } -/* How much time to wait until health resetting the driver (in msecs) */ -#define MLX5_RECOVERY_WAIT_MSECS 60000 int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev) { unsigned long end; - end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS); + end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET)); while (sensor_pci_not_working(dev)) { if (time_after(jiffies, end)) return -ETIMEDOUT; @@ -370,35 +379,69 @@ static const char *hsynd_str(u8 synd) } } +static const char *mlx5_loglevel_str(int level) +{ + switch (level) { + case LOGLEVEL_EMERG: + return "EMERGENCY"; + case LOGLEVEL_ALERT: + return "ALERT"; + case LOGLEVEL_CRIT: + return "CRITICAL"; + case LOGLEVEL_ERR: + return "ERROR"; + case LOGLEVEL_WARNING: + return "WARNING"; + case LOGLEVEL_NOTICE: + return "NOTICE"; + case LOGLEVEL_INFO: + return "INFO"; + case LOGLEVEL_DEBUG: + return "DEBUG"; + } + return "Unknown log level"; +} + +static int mlx5_health_get_severity(u8 rfr_severity) +{ + return rfr_severity & MLX5_SEVERITY_VALID_MASK ? + rfr_severity & MLX5_SEVERITY_MASK : LOGLEVEL_ERR; +} + static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - char fw_str[18]; - u32 fw; + u8 rfr_severity; + int severity; int i; /* If the syndrome is 0, the device is OK and no need to print buffer */ if (!ioread8(&h->synd)) return; + rfr_severity = ioread8(&h->rfr_severity); + severity = mlx5_health_get_severity(rfr_severity); + mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n", + hsynd_str(ioread8(&h->synd)), severity, mlx5_loglevel_str(severity)); + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) - mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i, - ioread32be(h->assert_var + i)); - - mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n", - ioread32be(&h->assert_exit_ptr)); - mlx5_core_err(dev, "assert_callra 0x%08x\n", - ioread32be(&h->assert_callra)); - sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); - mlx5_core_err(dev, "fw_ver %s\n", fw_str); - mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); - mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index)); - mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd), - hsynd_str(ioread8(&h->synd))); - mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); - fw = ioread32be(&h->fw_ver); - mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw); + mlx5_log(dev, severity, "assert_var[%d] 0x%08x\n", i, + ioread32be(h->assert_var + i)); + + mlx5_log(dev, severity, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); + mlx5_log(dev, severity, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); + mlx5_log(dev, severity, "fw_ver %d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), + fw_rev_sub(dev)); + mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time)); + mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); + mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity)); + mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity)); + mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index)); + 
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd), + hsynd_str(ioread8(&h->synd))); + mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver)); } static int @@ -447,6 +490,7 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; + u8 rfr_severity; int err; int i; @@ -477,9 +521,19 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, return err; err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra", ioread32be(&h->assert_callra)); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time)); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); + if (err) + return err; + rfr_severity = ioread8(&h->rfr_severity); + err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity)); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity)); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index", @@ -674,13 +728,13 @@ static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev) devlink_health_reporter_destroy(health->fw_fatal_reporter); } -static unsigned long get_next_poll_jiffies(void) +static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; - next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL)); return next; } @@ -698,6 +752,31 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev) spin_unlock_irqrestore(&health->wq_lock, flags); } +#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60) +static void mlx5_health_log_ts_update(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + u32 out[MLX5_ST_SZ_DW(mrtc_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mrtc_reg)] = {}; + struct mlx5_core_health *health; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + u64 now_us; + + health = container_of(dwork, struct mlx5_core_health, update_fw_log_ts_work); + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + + now_us = ktime_to_us(ktime_get_real()); + + MLX5_SET(mrtc_reg, in, time_h, now_us >> 32); + MLX5_SET(mrtc_reg, in, time_l, now_us & 0xFFFFFFFF); + mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MRTC, 0, 1); + + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, + msecs_to_jiffies(MLX5_MSEC_PER_HOUR)); +} + static void poll_health(struct timer_list *t) { struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer); @@ -740,11 +819,12 @@ static void poll_health(struct timer_list *t) queue_work(health->wq, &health->report_work); out: - mod_timer(&health->timer, get_next_poll_jiffies()); + mod_timer(&health->timer, get_next_poll_jiffies(dev)); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) { + u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL); struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); @@ -753,7 +833,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; - health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); + health->timer.expires = 
jiffies + msecs_to_jiffies(poll_interval_ms); add_timer(&health->timer); } @@ -779,6 +859,7 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev) spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock_irqrestore(&health->wq_lock, flags); + cancel_delayed_work_sync(&health->update_fw_log_ts_work); cancel_work_sync(&health->report_work); cancel_work_sync(&health->fatal_report_work); } @@ -794,6 +875,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; + cancel_delayed_work_sync(&health->update_fw_log_ts_work); destroy_workqueue(health->wq); mlx5_fw_reporters_destroy(dev); } @@ -819,6 +901,9 @@ int mlx5_health_init(struct mlx5_core_dev *dev) spin_lock_init(&health->wq_lock); INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); + INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update); + if (mlx5_core_is_pf(dev)) + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 0c8594c7df21d0..962d41418ce716 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -217,6 +217,32 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev, return 0; } +#ifdef CONFIG_MLX5_EN_RXNFC +static u32 mlx5i_flow_type_mask(u32 flow_type) +{ + return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); +} + +static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + + if (mlx5i_flow_type_mask(fs->flow_type) == ETHER_FLOW) + return -EINVAL; + + return mlx5e_ethtool_set_rxnfc(priv, cmd); +} + +static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + + return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs); +} +#endif + const struct ethtool_ops mlx5i_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | @@ -233,6 +259,10 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .get_coalesce = mlx5i_get_coalesce, .set_coalesce = mlx5i_set_coalesce, .get_ts_info = mlx5i_get_ts_info, +#ifdef CONFIG_MLX5_EN_RXNFC + .get_rxnfc = mlx5i_get_rxnfc, + .set_rxnfc = mlx5i_set_rxnfc, +#endif .get_link_ksettings = mlx5i_get_link_ksettings, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 269ebb53eda676..84297cc1b50998 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -67,7 +67,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE; - params->lro_en = false; + params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; params->tunneled_offload_en = false; } @@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) { - unsigned char *dev_addr = priv->netdev->dev_addr; + const unsigned char *dev_addr = priv->netdev->dev_addr; u32 
out[MLX5_ST_SZ_DW(create_qp_out)] = {}; u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {}; struct mlx5i_priv *ipriv = priv->ppriv; @@ -336,6 +336,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) goto err_destroy_arfs_tables; } + mlx5e_ethtool_init_steering(priv); + return 0; err_destroy_arfs_tables: @@ -348,12 +350,12 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); + mlx5e_ethtool_cleanup_steering(priv); } static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_lro_param lro_param; int err; priv->rx_res = mlx5e_rx_res_alloc(); @@ -368,9 +370,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - lro_param = mlx5e_get_lro_param(&priv->channels.params); err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (err) goto err_close_drop_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c similarity index 92% rename from drivers/net/ethernet/mellanox/mlx5/core/lag.c rename to drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index d2105c1635c34f..48d2ea690d7ad9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -38,7 +38,7 @@ #include "mlx5_core.h" #include "eswitch.h" #include "lag.h" -#include "lag_mp.h" +#include "mp.h" /* General purpose, use for short periods of time. * Beware of lock dependencies (preferably, no locks should be acquired @@ -47,16 +47,21 @@ static DEFINE_SPINLOCK(lag_lock); static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, - u8 remap_port2, bool shared_fdb) + u8 remap_port2, bool shared_fdb, u8 flags) { u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); + if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) { + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + } else { + MLX5_SET(lagc, lag_ctx, port_select_mode, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT); + } return mlx5_cmd_exec_in(dev, create_lag, in); } @@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, *port1 = 2; } +static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2) +{ + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + + if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) + return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2); + return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); +} + void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { @@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] || v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) { + err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2); + if (err) { + mlx5_core_err(dev0, + "Failed to modify LAG (%d)\n", + err); + return; + } ldev->v2p_map[MLX5_LAG_P1] = v2p_port1; ldev->v2p_map[MLX5_LAG_P2] = v2p_port2; - mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d", 
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]); - - err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); - if (err) - mlx5_core_err(dev0, - "Failed to modify LAG (%d)\n", - err); } } +static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, + struct lag_tracker *tracker, u8 *flags) +{ + bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE); + struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; + + if (roce_lag || + !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) || + tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return; + *flags |= MLX5_LAG_FLAG_HASH_BASED; +} + +static char *get_str_port_sel_mode(u8 flags) +{ + if (flags & MLX5_LAG_FLAG_HASH_BASED) + return "hash"; + return "queue_affinity"; +} + static int mlx5_create_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - bool shared_fdb) + bool shared_fdb, u8 flags) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; - mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], - &ldev->v2p_map[MLX5_LAG_P2]); - - mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d", + mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s", ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2], - shared_fdb); + shared_fdb, get_str_port_sel_mode(flags)); err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2], shared_fdb); + ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags); if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", @@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; int err; - err = mlx5_create_lag(ldev, tracker, shared_fdb); + mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], + &ldev->v2p_map[MLX5_LAG_P2]); + mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); + if (flags & MLX5_LAG_FLAG_HASH_BASED) { + err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, + ldev->v2p_map[MLX5_LAG_P1], + ldev->v2p_map[MLX5_LAG_P2]); + if (err) { + mlx5_core_err(dev0, + "Failed to create LAG port selection(%d)\n", + err); + return err; + } + } + + err = mlx5_create_lag(ldev, tracker, shared_fdb, flags); if (err) { - if (roce_lag) { + if (flags & MLX5_LAG_FLAG_HASH_BASED) + mlx5_lag_port_sel_destroy(ldev); + if (roce_lag) mlx5_core_err(dev0, "Failed to activate RoCE LAG\n"); - } else { + else mlx5_core_err(dev0, "Failed to activate VF LAG\n" "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n"); - } return err; } @@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); + u8 flags = ldev->flags; int err; ldev->flags &= ~MLX5_LAG_MODE_FLAGS; @@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) "Failed to deactivate VF LAG; driver restart required\n" "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n"); } + } else if (flags & MLX5_LAG_FLAG_HASH_BASED) { + mlx5_lag_port_sel_destroy(ldev); } return err; @@ -592,8 +642,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, if (!(bond_status & 0x3)) return 0; - if (lag_upper_info) + if (lag_upper_info) { tracker->tx_type = lag_upper_info->tx_type; + tracker->hash_type = lag_upper_info->hash_type; + } /* Determine bonding status: * A device is considered bonded if both its 
physical ports are slaves @@ -692,7 +744,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev, struct mlx5_core_dev *dev, struct net_device *netdev) { - unsigned int fn = PCI_FUNC(dev->pdev->devfn); + unsigned int fn = mlx5_get_dev_index(dev); if (fn >= MLX5_MAX_PORTS) return; @@ -722,7 +774,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, struct mlx5_core_dev *dev) { - unsigned int fn = PCI_FUNC(dev->pdev->devfn); + unsigned int fn = mlx5_get_dev_index(dev); if (fn >= MLX5_MAX_PORTS) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h similarity index 89% rename from drivers/net/ethernet/mellanox/mlx5/core/lag.h rename to drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index d4bae528954e63..e5d231c31b544e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -5,7 +5,8 @@ #define __MLX5_LAG_H__ #include "mlx5_core.h" -#include "lag_mp.h" +#include "mp.h" +#include "port_sel.h" enum { MLX5_LAG_P1, @@ -17,10 +18,12 @@ enum { MLX5_LAG_FLAG_SRIOV = 1 << 1, MLX5_LAG_FLAG_MULTIPATH = 1 << 2, MLX5_LAG_FLAG_READY = 1 << 3, + MLX5_LAG_FLAG_HASH_BASED = 1 << 4, }; #define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\ - MLX5_LAG_FLAG_MULTIPATH) + MLX5_LAG_FLAG_MULTIPATH | \ + MLX5_LAG_FLAG_HASH_BASED) struct lag_func { struct mlx5_core_dev *dev; @@ -32,6 +35,7 @@ struct lag_tracker { enum netdev_lag_tx_type tx_type; struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; unsigned int is_bonded:1; + enum netdev_lag_hash hash_type; }; /* LAG data of a ConnectX card. @@ -49,6 +53,7 @@ struct mlx5_lag { struct delayed_work bond_work; struct notifier_block nb; struct lag_mp lag_mp; + struct mlx5_lag_port_sel port_sel; }; static inline struct mlx5_lag * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c similarity index 99% rename from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c rename to drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index 21fdaf708f1fe8..bf4d3cbefa633e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -3,8 +3,8 @@ #include #include -#include "lag.h" -#include "lag_mp.h" +#include "lag/lag.h" +#include "lag/mp.h" #include "mlx5_core.h" #include "eswitch.h" #include "lib/mlx5.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h similarity index 91% rename from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h rename to drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h index dea199e79beda5..57af962cad298c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h @@ -31,7 +31,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {}; static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; } static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {} -bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; } +static inline bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; } #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_LAG_MP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c new file mode 100644 index 
00000000000000..adc836b3d857ff --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */ + +#include +#include "lag.h" + +enum { + MLX5_LAG_FT_LEVEL_TTC, + MLX5_LAG_FT_LEVEL_INNER_TTC, + MLX5_LAG_FT_LEVEL_DEFINER, +}; + +static struct mlx5_flow_group * +mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, + struct mlx5_flow_definer *definer) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + u32 *in; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return ERR_PTR(-ENOMEM); + + MLX5_SET(create_flow_group_in, in, match_definer_id, + mlx5_get_match_definer_id(definer)); + MLX5_SET(create_flow_group_in, in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1); + MLX5_SET(create_flow_group_in, in, group_type, + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT); + + fg = mlx5_create_flow_group(ft, in); + kvfree(in); + return fg; +} + +static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, + struct mlx5_lag_definer *lag_definer, + u8 port1, u8 port2) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_namespace *ns; + int err, i; + + ft_attr.max_fte = MLX5_MAX_PORTS; + ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL); + if (!ns) { + mlx5_core_warn(dev, "Failed to get port selection namespace\n"); + return -EOPNOTSUPP; + } + + lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(lag_definer->ft)) { + mlx5_core_warn(dev, "Failed to create port selection table\n"); + return PTR_ERR(lag_definer->ft); + } + + lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft, + lag_definer->definer); + if (IS_ERR(lag_definer->fg)) { + err = PTR_ERR(lag_definer->fg); + goto destroy_ft; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + flow_act.flags |= FLOW_ACT_NO_APPEND; + for (i = 0; i < MLX5_MAX_PORTS; i++) { + u8 affinity = i == 0 ? 
port1 : port2; + + dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev, + vhca_id); + lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft, + NULL, &flow_act, + &dest, 1); + if (IS_ERR(lag_definer->rules[i])) { + err = PTR_ERR(lag_definer->rules[i]); + while (i--) + mlx5_del_flow_rules(lag_definer->rules[i]); + goto destroy_fg; + } + } + + return 0; + +destroy_fg: + mlx5_destroy_flow_group(lag_definer->fg); +destroy_ft: + mlx5_destroy_flow_table(lag_definer->ft); + return err; +} + +static int mlx5_lag_set_definer_inner(u32 *match_definer_mask, + enum mlx5_traffic_types tt) +{ + int format_id; + u8 *ipv6; + + switch (tt) { + case MLX5_TT_IPV4_UDP: + case MLX5_TT_IPV4_TCP: + format_id = 23; + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_l4_sport); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_l4_dport); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_ip_src_addr); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_ip_dest_addr); + break; + case MLX5_TT_IPV4: + format_id = 23; + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_l3_type); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_dmac_15_0); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_smac_15_0); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_ip_src_addr); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_ip_dest_addr); + break; + case MLX5_TT_IPV6_TCP: + case MLX5_TT_IPV6_UDP: + format_id = 31; + MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask, + inner_l4_sport); + MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask, + inner_l4_dport); + ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask, + inner_ip_dest_addr); + memset(ipv6, 0xff, 16); + ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask, + inner_ip_src_addr); + memset(ipv6, 0xff, 16); + break; + case MLX5_TT_IPV6: + format_id = 32; + ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask, + inner_ip_dest_addr); + memset(ipv6, 0xff, 16); + ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask, + inner_ip_src_addr); + memset(ipv6, 0xff, 16); + MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask, + inner_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask, + inner_dmac_15_0); + MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask, + inner_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask, + inner_smac_15_0); + break; + default: + format_id = 23; + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_l3_type); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_dmac_15_0); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask, + inner_smac_15_0); + break; + } + + return format_id; +} + +static int mlx5_lag_set_definer(u32 *match_definer_mask, + enum mlx5_traffic_types tt, bool tunnel, + enum netdev_lag_hash hash) +{ + int format_id; + u8 *ipv6; + + if (tunnel) + return mlx5_lag_set_definer_inner(match_definer_mask, tt); + + switch (tt) { + case 
MLX5_TT_IPV4_UDP: + case MLX5_TT_IPV4_TCP: + format_id = 22; + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_l4_sport); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_l4_dport); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_ip_src_addr); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_ip_dest_addr); + break; + case MLX5_TT_IPV4: + format_id = 22; + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_l3_type); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_dmac_15_0); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_smac_15_0); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_ip_src_addr); + MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask, + outer_ip_dest_addr); + break; + case MLX5_TT_IPV6_TCP: + case MLX5_TT_IPV6_UDP: + format_id = 29; + MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask, + outer_l4_sport); + MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask, + outer_l4_dport); + ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask, + outer_ip_dest_addr); + memset(ipv6, 0xff, 16); + ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask, + outer_ip_src_addr); + memset(ipv6, 0xff, 16); + break; + case MLX5_TT_IPV6: + format_id = 30; + ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask, + outer_ip_dest_addr); + memset(ipv6, 0xff, 16); + ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask, + outer_ip_src_addr); + memset(ipv6, 0xff, 16); + MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask, + outer_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask, + outer_dmac_15_0); + MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask, + outer_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask, + outer_smac_15_0); + break; + default: + format_id = 0; + MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask, + outer_smac_47_16); + MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask, + outer_smac_15_0); + + if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) { + MLX5_SET_TO_ONES(match_definer_format_0, + match_definer_mask, + outer_first_vlan_vid); + break; + } + + MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask, + outer_ethertype); + MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask, + outer_dmac_47_16); + MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask, + outer_dmac_15_0); + break; + } + + return format_id; +} + +static struct mlx5_lag_definer * +mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, + enum mlx5_traffic_types tt, bool tunnel, u8 port1, + u8 port2) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_lag_definer *lag_definer; + u32 *match_definer_mask; + int format_id, err; + + lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL); + if (!lag_definer) + return ERR_PTR(-ENOMEM); + + match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer, + match_mask), + GFP_KERNEL); + if (!match_definer_mask) { + err = -ENOMEM; + goto free_lag_definer; + } + + format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash); + lag_definer->definer = + mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL, +
format_id, match_definer_mask); + if (IS_ERR(lag_definer->definer)) { + err = PTR_ERR(lag_definer->definer); + goto free_mask; + } + + err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2); + if (err) + goto destroy_match_definer; + + kvfree(match_definer_mask); + + return lag_definer; + +destroy_match_definer: + mlx5_destroy_match_definer(dev, lag_definer->definer); +free_mask: + kvfree(match_definer_mask); +free_lag_definer: + kfree(lag_definer); + return ERR_PTR(err); +} + +static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, + struct mlx5_lag_definer *lag_definer) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + mlx5_del_flow_rules(lag_definer->rules[i]); + mlx5_destroy_flow_group(lag_definer->fg); + mlx5_destroy_flow_table(lag_definer->ft); + mlx5_destroy_match_definer(dev, lag_definer->definer); + kfree(lag_definer); +} + +static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + int tt; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + if (port_sel->outer.definers[tt]) + mlx5_lag_destroy_definer(ldev, + port_sel->outer.definers[tt]); + if (port_sel->inner.definers[tt]) + mlx5_lag_destroy_definer(ldev, + port_sel->inner.definers[tt]); + } +} + +static int mlx5_lag_create_definers(struct mlx5_lag *ldev, + enum netdev_lag_hash hash_type, + u8 port1, u8 port2) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct mlx5_lag_definer *lag_definer; + int tt, err; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt, + false, port1, port2); + if (IS_ERR(lag_definer)) { + err = PTR_ERR(lag_definer); + goto destroy_definers; + } + port_sel->outer.definers[tt] = lag_definer; + + if (!port_sel->tunnel) + continue; + + lag_definer = + mlx5_lag_create_definer(ldev, hash_type, tt, + true, port1, port2); + if (IS_ERR(lag_definer)) { + err = PTR_ERR(lag_definer); + goto destroy_definers; + } + port_sel->inner.definers[tt] = lag_definer; + } + + return 0; + +destroy_definers: + mlx5_lag_destroy_definers(ldev); + return err; +} + +static void set_tt_map(struct mlx5_lag_port_sel *port_sel, + enum netdev_lag_hash hash) +{ + port_sel->tunnel = false; + + switch (hash) { + case NETDEV_LAG_HASH_E34: + port_sel->tunnel = true; + fallthrough; + case NETDEV_LAG_HASH_L34: + set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map); + set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map); + set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map); + set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map); + set_bit(MLX5_TT_IPV4, port_sel->tt_map); + set_bit(MLX5_TT_IPV6, port_sel->tt_map); + set_bit(MLX5_TT_ANY, port_sel->tt_map); + break; + case NETDEV_LAG_HASH_E23: + port_sel->tunnel = true; + fallthrough; + case NETDEV_LAG_HASH_L23: + set_bit(MLX5_TT_IPV4, port_sel->tt_map); + set_bit(MLX5_TT_IPV6, port_sel->tt_map); + set_bit(MLX5_TT_ANY, port_sel->tt_map); + break; + default: + set_bit(MLX5_TT_ANY, port_sel->tt_map); + break; + } +} + +#define SET_IGNORE_DESTS_BITS(tt_map, dests) \ + do { \ + int idx; \ + \ + for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \ + set_bit(idx, dests); \ + } while (0) + +static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev, + struct ttc_params *ttc_params) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct mlx5_flow_table_attr *ft_attr; + int tt; + + ttc_params->ns = mlx5_get_flow_namespace(dev, + 
MLX5_FLOW_NAMESPACE_PORT_SEL); + ft_attr = &ttc_params->ft_attr; + ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + ttc_params->dests[tt].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft; + } + SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests); +} + +static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev, + struct ttc_params *ttc_params) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct mlx5_flow_table_attr *ft_attr; + int tt; + + ttc_params->ns = mlx5_get_flow_namespace(dev, + MLX5_FLOW_NAMESPACE_PORT_SEL); + ft_attr = &ttc_params->ft_attr; + ft_attr->level = MLX5_LAG_FT_LEVEL_TTC; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + ttc_params->dests[tt].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft; + } + SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests); + + ttc_params->inner_ttc = port_sel->tunnel; + if (!port_sel->tunnel) + return; + + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + ttc_params->tunnel_dests[tt].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ttc_params->tunnel_dests[tt].ft = + mlx5_get_ttc_flow_table(port_sel->inner.ttc); + } +} + +static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct ttc_params ttc_params = {}; + + mlx5_lag_set_outer_ttc_params(ldev, &ttc_params); + port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params); + if (IS_ERR(port_sel->outer.ttc)) + return PTR_ERR(port_sel->outer.ttc); + + return 0; +} + +static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct ttc_params ttc_params = {}; + + mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); + port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params); + if (IS_ERR(port_sel->inner.ttc)) + return PTR_ERR(port_sel->inner.ttc); + + return 0; +} + +int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + enum netdev_lag_hash hash_type, u8 port1, u8 port2) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + int err; + + set_tt_map(port_sel, hash_type); + err = mlx5_lag_create_definers(ldev, hash_type, port1, port2); + if (err) + return err; + + if (port_sel->tunnel) { + err = mlx5_lag_create_inner_ttc_table(ldev); + if (err) + goto destroy_definers; + } + + err = mlx5_lag_create_ttc_table(ldev); + if (err) + goto destroy_inner; + + return 0; + +destroy_inner: + if (port_sel->tunnel) + mlx5_destroy_ttc_table(port_sel->inner.ttc); +destroy_definers: + mlx5_lag_destroy_definers(ldev); + return err; +} + +static int +mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, + struct mlx5_lag_definer **definers, + u8 port1, u8 port2) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + struct mlx5_flow_destination dest = {}; + int err; + int tt; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + struct mlx5_flow_handle **rules = definers[tt]->rules; + + if (ldev->v2p_map[MLX5_LAG_P1] != port1) { + dest.vport.vhca_id = + MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id); + err = 
mlx5_modify_rule_destination(rules[MLX5_LAG_P1], + &dest, NULL); + if (err) + return err; + } + + if (ldev->v2p_map[MLX5_LAG_P2] != port2) { + dest.vport.vhca_id = + MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id); + err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2], + &dest, NULL); + if (err) + return err; + } + } + + return 0; +} + +int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + int err; + + err = mlx5_lag_modify_definers_destinations(ldev, + port_sel->outer.definers, + port1, port2); + if (err) + return err; + + if (!port_sel->tunnel) + return 0; + + return mlx5_lag_modify_definers_destinations(ldev, + port_sel->inner.definers, + port1, port2); +} + +void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + + mlx5_destroy_ttc_table(port_sel->outer.ttc); + if (port_sel->tunnel) + mlx5_destroy_ttc_table(port_sel->inner.ttc); + mlx5_lag_destroy_definers(ldev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h new file mode 100644 index 00000000000000..6d15b28a42fc2f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */ + +#ifndef __MLX5_LAG_FS_H__ +#define __MLX5_LAG_FS_H__ + +#include "lib/fs_ttc.h" + +struct mlx5_lag_definer { + struct mlx5_flow_definer *definer; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *rules[MLX5_MAX_PORTS]; +}; + +struct mlx5_lag_ttc { + struct mlx5_ttc_table *ttc; + struct mlx5_lag_definer *definers[MLX5_NUM_TT]; +}; + +struct mlx5_lag_port_sel { + DECLARE_BITMAP(tt_map, MLX5_NUM_TT); + bool tunnel; + struct mlx5_lag_ttc outer; + struct mlx5_lag_ttc inner; +}; + +#ifdef CONFIG_MLX5_ESWITCH + +int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2); +void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev); +int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + enum netdev_lag_hash hash_type, u8 port1, + u8 port2); + +#else /* CONFIG_MLX5_ESWITCH */ +static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + enum netdev_lag_hash hash_type, + u8 port1, u8 port2) +{ + return 0; +} + +static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, + u8 port2) +{ + return 0; +} + +static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {} +#endif /* CONFIG_MLX5_ESWITCH */ +#endif /* __MLX5_LAG_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index 749d17c0057d70..b63dec24747ab6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, for (tt = 0; tt < MLX5_NUM_TT; tt++) { struct mlx5_ttc_rule *rule = &rules[tt]; + if (test_bit(tt, params->ignore_dests)) + continue; rule->rule = mlx5_generate_ttc_rule(dev, ft, ¶ms->dests[tt], ttc_rules[tt].etype, ttc_rules[tt].proto); @@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, if (!mlx5_tunnel_proto_supported_rx(dev, ttc_tunnel_rules[tt].proto)) continue; + if (test_bit(tt, params->ignore_tunnel_dests)) + continue; trules[tt] = mlx5_generate_ttc_rule(dev, ft, ¶ms->tunnel_dests[tt], 
ttc_tunnel_rules[tt].etype, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h index ce95be8f8382d3..85fef0cd1c072d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -43,7 +43,9 @@ struct ttc_params { struct mlx5_flow_namespace *ns; struct mlx5_flow_table_attr ft_attr; struct mlx5_flow_destination dests[MLX5_NUM_TT]; + DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT); bool inner_ttc; + DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT); struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT]; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c new file mode 100644 index 00000000000000..0dd96a6b140ddd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include +#include "lib/tout.h" + +struct mlx5_timeouts { + u64 to[MAX_TIMEOUT_TYPES]; +}; + +static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = { + [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000, + [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000, + [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2, + [MLX5_TO_FW_INIT_MS] = 2000, + [MLX5_TO_CMD_MS] = 60000, + [MLX5_TO_PCI_TOGGLE_MS] = 2000, + [MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000, + [MLX5_TO_FULL_CRDUMP_MS] = 60000, + [MLX5_TO_FW_RESET_MS] = 60000, + [MLX5_TO_FLUSH_ON_ERROR_MS] = 2000, + [MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000, + [MLX5_TO_TEARDOWN_MS] = 3000, + [MLX5_TO_FSM_REACTIVATE_MS] = 5000, + [MLX5_TO_RECLAIM_PAGES_MS] = 5000, + [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000 +}; + +static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type) +{ + dev->timeouts->to[type] = val; +} + +static void tout_set_def_val(struct mlx5_core_dev *dev) +{ + int i; + + for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++) + tout_set(dev, tout_def_sw_val[i], i); +} + +int mlx5_tout_init(struct mlx5_core_dev *dev) +{ + dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL); + if (!dev->timeouts) + return -ENOMEM; + + tout_set_def_val(dev); + return 0; +} + +void mlx5_tout_cleanup(struct mlx5_core_dev *dev) +{ + kfree(dev->timeouts); +} + +/* Time register consists of two fields to_multiplier(time out multiplier) + * and to_value(time out value). to_value is the quantity of the time units and + * to_multiplier is the type and should be one off these four values. + * 0x0: millisecond + * 0x1: seconds + * 0x2: minutes + * 0x3: hours + * this function converts the time stored in the two register fields into + * millisecond. 
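+ * For example, a register field with to_multiplier 0x2 (minutes) and
+ * to_value 3 converts to 3 * 60 * 1000 = 180000 msec.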
+ */ +static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val) +{ + u64 msec = to_val; + + to_mul &= 0x3; + /* convert hours/minutes/seconds to miliseconds */ + if (to_mul) + msec *= 1000 * int_pow(60, to_mul - 1); + + return msec; +} + +static u64 tout_convert_iseg_to_ms(u32 iseg_to) +{ + return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff); +} + +static bool tout_is_supported(struct mlx5_core_dev *dev) +{ + return !!ioread32be(&dev->iseg->cmd_q_init_to); +} + +void mlx5_tout_query_iseg(struct mlx5_core_dev *dev) +{ + u32 to; + + if (!tout_is_supported(dev)) + return; + + to = ioread32be(&dev->iseg->cmd_q_init_to); + tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS); + + to = ioread32be(&dev->iseg->cmd_exec_to); + tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS); +} + +u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type) +{ + return dev->timeouts->to[type]; +} + +#define MLX5_TIMEOUT_QUERY(fld, reg_out) \ + ({ \ + struct mlx5_ifc_default_timeout_bits *time_field; \ + u32 to_multi, to_value; \ + u64 to_val_ms; \ + \ + time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \ + to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \ + to_value = MLX5_GET(default_timeout, time_field, to_value); \ + to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \ + to_val_ms; \ + }) + +#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \ + ({ \ + u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \ + tout_set(dev, fw_to + (to_extra), to_type); \ + fw_to; \ + }) + +static int tout_query_dtor(struct mlx5_core_dev *dev) +{ + u64 pcie_toggle_to_val, tear_down_to_val; + u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {}; + int err; + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0); + if (err) + return err; + + pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0); + MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val); + + tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0); + MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS, + tear_down_to_val); + + MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0); + MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0); + MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0); + MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0); + MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0); + MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0); + + return 0; +} + +int mlx5_tout_query_dtor(struct mlx5_core_dev *dev) +{ + if (tout_is_supported(dev)) + return tout_query_dtor(dev); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h new file mode 100644 index 00000000000000..31faa5c17aa91c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef MLX5_TIMEOUTS_H +#define MLX5_TIMEOUTS_H + +enum mlx5_timeouts_types { + /* pre init timeouts (not read from FW) */ + MLX5_TO_FW_PRE_INIT_TIMEOUT_MS, + MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS, + MLX5_TO_FW_PRE_INIT_WAIT_MS, + + /* init segment timeouts */ + MLX5_TO_FW_INIT_MS, + MLX5_TO_CMD_MS, + + /* DTOR timeouts */ + MLX5_TO_PCI_TOGGLE_MS, + MLX5_TO_HEALTH_POLL_INTERVAL_MS, + MLX5_TO_FULL_CRDUMP_MS, + MLX5_TO_FW_RESET_MS, + MLX5_TO_FLUSH_ON_ERROR_MS, + MLX5_TO_PCI_SYNC_UPDATE_MS, + MLX5_TO_TEARDOWN_MS, + MLX5_TO_FSM_REACTIVATE_MS, + MLX5_TO_RECLAIM_PAGES_MS, + MLX5_TO_RECLAIM_VFS_PAGES_MS, + + MAX_TIMEOUT_TYPES +}; + +struct mlx5_core_dev; +int mlx5_tout_init(struct mlx5_core_dev *dev); +void mlx5_tout_cleanup(struct mlx5_core_dev *dev); +void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); +int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); +u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); + +#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) + +# endif /* MLX5_TIMEOUTS_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 79482824c64ff2..a92a92a52346d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -60,6 +60,7 @@ #include "devlink.h" #include "fw_reset.h" #include "lib/mlx5.h" +#include "lib/tout.h" #include "fpga/core.h" #include "fpga/ipsec.h" #include "accel/ipsec.h" @@ -176,11 +177,6 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 -#define FW_PRE_INIT_TIMEOUT_MILI 120000 -#define FW_INIT_WARN_MESSAGE_INTERVAL 20000 - static int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; @@ -193,8 +189,6 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili); int err = 0; - BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL); - while (fw_initializing(dev)) { if (time_after(jiffies, end)) { err = -EBUSY; @@ -205,7 +199,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, jiffies_to_msecs(end - warn) / 1000); warn = jiffies + msecs_to_jiffies(warn_time_mili); } - msleep(FW_INIT_WAIT_MS); + msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT)); } return err; @@ -564,15 +558,38 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix, MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)); + if (MLX5_CAP_GEN(dev, roce_rw_supported)) + MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev)); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } +/* Cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the + * boot process. + * In case RoCE cap is writable in FW and user/devlink requested to change the + * cap, we are yet to query the final state of the above cap. + * Hence, the need for this function. + * + * Returns + * True: + * 1) RoCE cap is read only in FW and already disabled + * OR: + * 2) RoCE cap is writable in FW and user/devlink requested it off. + * + * In any other case, return False. 
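+ *
+ * In short: when the RoCE cap is writable, the user/devlink request is
+ * authoritative; otherwise the cached read-only FW cap is. Return true only
+ * when RoCE is guaranteed to end up disabled, so the RoCE caps query/set
+ * below can safely be skipped.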
+ */ +static bool is_roce_fw_disabled(struct mlx5_core_dev *dev) +{ + return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) || + (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce)); +} + static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx) { void *set_hca_cap; int err; - if (!MLX5_CAP_GEN(dev, roce)) + if (is_roce_fw_disabled(dev)) return 0; err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE); @@ -975,25 +992,34 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); + err = mlx5_tout_init(dev); + if (err) { + mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); + return err; + } + /* wait for firmware to accept initialization segments configurations */ - err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL); + err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT), + mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL)); if (err) { - mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n", - FW_PRE_INIT_TIMEOUT_MILI); - return err; + mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", + mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); + goto err_tout_cleanup; } err = mlx5_cmd_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); - return err; + goto err_tout_cleanup; } - err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0); + mlx5_tout_query_iseg(dev); + + err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0); if (err) { - mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n", - FW_INIT_TIMEOUT_MILI); + mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n", + mlx5_tout_ms(dev, FW_INIT)); goto err_cmd_cleanup; } @@ -1017,6 +1043,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) goto err_disable_hca; } + err = mlx5_tout_query_dtor(dev); + if (err) { + mlx5_core_err(dev, "failed to read dtor\n"); + goto reclaim_boot_pages; + } + err = set_hca_ctrl(dev); if (err) { mlx5_core_err(dev, "set_hca_ctrl failed\n"); @@ -1062,6 +1094,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); +err_tout_cleanup: + mlx5_tout_cleanup(dev); return err; } @@ -1080,6 +1114,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); + mlx5_tout_cleanup(dev); return 0; } @@ -1112,8 +1147,9 @@ static int mlx5_load(struct mlx5_core_dev *dev) err = mlx5_fw_tracer_init(dev->tracer); if (err) { - mlx5_core_err(dev, "Failed to init FW tracer\n"); - goto err_fw_tracer; + mlx5_core_err(dev, "Failed to init FW tracer %d\n", err); + mlx5_fw_tracer_destroy(dev->tracer); + dev->tracer = NULL; } mlx5_fw_reset_events_start(dev); @@ -1121,8 +1157,9 @@ static int mlx5_load(struct mlx5_core_dev *dev) err = mlx5_rsc_dump_init(dev); if (err) { - mlx5_core_err(dev, "Failed to init Resource dump\n"); - goto err_rsc_dump; + mlx5_core_err(dev, "Failed to init Resource dump %d\n", err); + mlx5_rsc_dump_destroy(dev); + dev->rsc_dump = NULL; } err = mlx5_fpga_device_start(dev); @@ -1192,11 +1229,9 @@ static int mlx5_load(struct mlx5_core_dev *dev) mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); -err_rsc_dump: mlx5_hv_vhca_cleanup(dev->hv_vhca); 
mlx5_fw_reset_events_stop(dev); mlx5_fw_tracer_cleanup(dev->tracer); -err_fw_tracer: mlx5_eq_table_destroy(dev); err_eq_table: mlx5_irq_table_destroy(dev); @@ -1381,6 +1416,8 @@ static const int types[] = { MLX5_CAP_TLS, MLX5_CAP_VDPA_EMULATION, MLX5_CAP_IPSEC, + MLX5_CAP_PORT_SELECTION, + MLX5_CAP_DEV_SHAMPO, }; static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) @@ -1537,8 +1574,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); - if (!mlx5_core_is_mp_slave(dev)) - devlink_reload_enable(devlink); + devlink_register(devlink); return 0; err_init_one: @@ -1558,7 +1594,7 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); - devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 230eab7e3bc91a..bb677329ea08d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -97,6 +97,30 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) +{ + struct device *device = dev->device; + struct va_format vaf; + va_list args; + + if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG, + "Level %d is out of range, set to default level\n", level)) + level = LOGLEVEL_DEFAULT; + + va_start(args, format); + vaf.fmt = format; + vaf.va = &args; + + dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device), + &vaf); + va_end(args); +} + +#define mlx5_log(__dev, level, format, ...) 
\ + mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev) { return &dev->pdev->dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index abd024173c42e3..8116815663a77b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -8,8 +8,6 @@ #define MLX5_COMP_EQS_PER_SF 8 -#define MLX5_IRQ_EQ_CTRL (0) - struct mlx5_irq; int mlx5_irq_table_init(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 174f71ed52800c..f099a087400eff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -35,13 +35,11 @@ #include #include "mlx5_core.h" -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen) +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen) { u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {}; u32 mkey_index; - void *mkc; int err; MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); @@ -50,38 +48,33 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, if (err) return err; - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index); - mkey->iova = MLX5_GET64(mkc, mkc, start_addr); - mkey->size = MLX5_GET64(mkc, mkc, len); - mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index); - mkey->pd = MLX5_GET(mkc, mkc, pd); - init_waitqueue_head(&mkey->wait); + *mkey = MLX5_GET(create_mkey_in, in, memory_key_mkey_entry.mkey_7_0) | + mlx5_idx_to_mkey(mkey_index); - mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key); + mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, *mkey); return 0; } EXPORT_SYMBOL(mlx5_core_create_mkey); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey) +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey) { u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {}; MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); - MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey)); return mlx5_cmd_exec_in(dev, destroy_mkey, in); } EXPORT_SYMBOL(mlx5_core_destroy_mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *out, int outlen) +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen) { u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {}; memset(out, 0, outlen); MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY); - MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey)); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 110c0837f95b94..f6b5451328fc70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -38,6 +38,7 @@ #include #include "mlx5_core.h" #include "lib/eq.h" +#include "lib/tout.h" enum { MLX5_PAGES_CANT_GIVE = 0, @@ -64,11 +65,6 @@ struct fw_page { unsigned int 
free_count; }; -enum { - MAX_RECLAIM_TIME_MSECS = 5000, - MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60, -}; - enum { MLX5_MAX_RECLAIM_TIME_MILI = 5000, MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, @@ -641,7 +637,8 @@ static int optimal_reclaimed_pages(void) static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, struct rb_root *root, u16 func_id) { - unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES)); + unsigned long end = jiffies + recl_pages_to_jiffies; while (!RB_EMPTY_ROOT(root)) { int nclaimed; @@ -656,7 +653,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, } if (nclaimed) - end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + end = jiffies + recl_pages_to_jiffies; if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); @@ -727,7 +724,8 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) { - unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES)); + unsigned long end = jiffies + recl_vf_pages_to_jiffies; int prev_pages = *pages; /* In case of internal error we will free the pages manually later */ @@ -743,7 +741,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) return -ETIMEDOUT; } if (*pages < prev_pages) { - end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); + end = jiffies + recl_vf_pages_to_jiffies; prev_pages = *pages; } msleep(50); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 763c83a0238091..830444f927d456 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx); } -static void irq_set_name(char *name, int vecidx) +static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) { - if (vecidx == 0) { + if (!pool->xa_num_irqs.max) { + /* in case we only have a single irq for the device */ + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx); + return; + } + + if (vecidx == pool->xa_num_irqs.max) { snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx); return; } - snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", - vecidx - MLX5_IRQ_VEC_COMP_BASE); + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx); +} + +static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) +{ + return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); } static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) @@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) if (!irq) return ERR_PTR(-ENOMEM); irq->irqn = pci_irq_vector(dev->pdev, i); - if (!pool->name[0]) - irq_set_name(name, i); + if (!irq_pool_is_sf_pool(pool)) + irq_set_name(pool, name, i); else irq_sf_set_name(pool, name, i); ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); @@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, if (IS_ERR(irq) || !affinity) goto unlock; cpumask_copy(irq->mask, affinity); + if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max && + cpumask_empty(irq->mask)) + cpumask_set_cpu(0, irq->mask); 
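+	/* The fallback above covers a non-SF pool that exposes only the single
+	 * combined vector (xa_num_irqs.max == 0): if the requested affinity
+	 * mask was empty, CPU 0 is used so the affinity hint applied next is
+	 * non-empty.
+	 */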
irq_set_affinity_hint(irq->irqn, irq->mask); unlock: mutex_unlock(&pool->lock); @@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, } pf_irq: pool = irq_table->pf_pool; + vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx; irq = irq_pool_request_vector(pool, vecidx, affinity); out: if (IS_ERR(irq)) @@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev) int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table) { + if (!table->pf_pool->xa_num_irqs.max) + return 1; return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min; } @@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) if (mlx5_core_is_sf(dev)) return 0; - pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + - MLX5_IRQ_VEC_COMP_BASE; + pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; pf_vec = min_t(int, pf_vec, num_eqs); - if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE) - return -ENOMEM; total_vec = pf_vec; if (mlx5_sf_max_functions(dev)) total_vec += MLX5_IRQ_CTRL_SF_MAX + MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev); - total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1, - total_vec, PCI_IRQ_MSIX); + total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX); if (total_vec < 0) return total_vec; pf_vec = min(pf_vec, total_vec); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 871c2fbe18d39f..f37db7cc32a658 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -9,6 +9,8 @@ #include "sf/sf.h" #include "sf/mlx5_ifc_vhca_event.h" #include "ecpf.h" +#define CREATE_TRACE_POINTS +#include "diag/dev_tracepoint.h" struct mlx5_sf_dev_table { struct xarray devices; @@ -66,13 +68,18 @@ static void mlx5_sf_dev_release(struct device *device) kfree(sf_dev); } -static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev) +static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev) { + int id; + + id = sf_dev->adev.id; + trace_mlx5_sf_dev_del(dev, sf_dev, id); + auxiliary_device_delete(&sf_dev->adev); auxiliary_device_uninit(&sf_dev->adev); } -static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum) +static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, u32 sfnum) { struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; struct mlx5_sf_dev *sf_dev; @@ -100,6 +107,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum) sf_dev->adev.dev.groups = sf_attr_groups; sf_dev->sfnum = sfnum; sf_dev->parent_mdev = dev; + sf_dev->fn_id = fn_id; if (!table->max_sfs) { mlx5_adev_idx_free(id); @@ -109,6 +117,8 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum) } sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); + trace_mlx5_sf_dev_add(dev, sf_dev, id); + err = auxiliary_device_init(&sf_dev->adev); if (err) { mlx5_adev_idx_free(id); @@ -128,7 +138,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum) return; xa_err: - mlx5_sf_dev_remove(sf_dev); + mlx5_sf_dev_remove(dev, sf_dev); add_err: mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n", sf_index, sfnum, err); @@ -139,7 +149,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; 
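+	/* The parent dev is passed through to mlx5_sf_dev_remove() solely so
+	 * the mlx5_sf_dev_del tracepoint can record which PF the SF auxiliary
+	 * device belonged to.
+	 */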
xa_erase(&table->devices, sf_index); - mlx5_sf_dev_remove(sf_dev); + mlx5_sf_dev_remove(dev, sf_dev); } static int @@ -178,7 +188,8 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ break; case MLX5_VHCA_STATE_ACTIVE: if (!sf_dev) - mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id); + mlx5_sf_dev_add(table->dev, sf_index, event->function_id, + event->sw_function_id); break; default: break; @@ -260,7 +271,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table) xa_for_each(&table->devices, index, sf_dev) { xa_erase(&table->devices, index); - mlx5_sf_dev_remove(sf_dev); + mlx5_sf_dev_remove(table->dev, sf_dev); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index 149fd9e698cf85..2a66a427ef15a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -16,6 +16,7 @@ struct mlx5_sf_dev { struct mlx5_core_dev *mdev; phys_addr_t bar_base_addr; u32 sfnum; + u16 fn_id; }; void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h new file mode 100644 index 00000000000000..7f7c9af5deed99 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_SF_DEV_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_SF_DEV_TP_ + +#include +#include +#include "../../dev/dev.h" + +DECLARE_EVENT_CLASS(mlx5_sf_dev_template, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_sf_dev *sfdev, + int aux_id), + TP_ARGS(dev, sfdev, aux_id), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(const struct mlx5_sf_dev*, sfdev) + __field(int, aux_id) + __field(u16, hw_fn_id) + __field(u32, sfnum) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->sfdev = sfdev; + __entry->aux_id = aux_id; + __entry->hw_fn_id = sfdev->fn_id; + __entry->sfnum = sfdev->sfnum; + ), + TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n", + __get_str(devname), __entry->sfdev, + __entry->aux_id, __entry->hw_fn_id, + __entry->sfnum) +); + +DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_add, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_sf_dev *sfdev, + int aux_id), + TP_ARGS(dev, sfdev, aux_id) + ); + +DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_del, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_sf_dev *sfdev, + int aux_id), + TP_ARGS(dev, sfdev, aux_id) + ); + +#endif /* _MLX5_SF_DEV_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH sf/dev/diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE dev_tracepoint +#include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 052f48068dc16f..7b4783ce213e2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; 
} - devlink_reload_enable(devlink); + devlink_register(devlink); return 0; init_one_err: @@ -61,10 +61,9 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia static void mlx5_sf_dev_remove(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - struct devlink *devlink; + struct devlink *devlink = priv_to_devlink(sf_dev->mdev); - devlink = priv_to_devlink(sf_dev->mdev); - devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 13891fdc607e8c..3be659cd91f118 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -8,6 +8,8 @@ #include "mlx5_ifc_vhca_event.h" #include "vhca_event.h" #include "ecpf.h" +#define CREATE_TRACE_POINTS +#include "diag/sf_tracepoint.h" struct mlx5_sf { struct devlink_port dl_port; @@ -112,6 +114,7 @@ static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) { mlx5_sf_id_erase(table, sf); mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id); + trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id); kfree(sf); } @@ -209,6 +212,7 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, return err; sf->hw_state = MLX5_VHCA_STATE_ACTIVE; + trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id); return 0; } @@ -224,6 +228,7 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf) return err; sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST; + trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id); return 0; } @@ -293,6 +298,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, if (err) goto esw_err; *new_port_index = sf->port_index; + trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum); return 0; esw_err: @@ -323,7 +329,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported"); return -EOPNOTSUPP; } - if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) { + if (new_attr->pfnum != mlx5_get_dev_index(dev)) { NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied"); return -EOPNOTSUPP; } @@ -442,6 +448,8 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v update = mlx5_sf_state_update_check(sf, event->new_vhca_state); if (update) sf->hw_state = event->new_vhca_state; + trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller, + sf->hw_fn_id, sf->hw_state); sf_err: mutex_unlock(&table->sf_state_lock); mlx5_sf_table_put(table); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h new file mode 100644 index 00000000000000..8bf1cd90930d3a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_SF_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_SF_TP_ + +#include +#include +#include "sf/vhca_event.h" + +TRACE_EVENT(mlx5_sf_add, + TP_PROTO(const struct mlx5_core_dev *dev, + unsigned int port_index, + u32 controller, + u16 hw_fn_id, + u32 sfnum), + TP_ARGS(dev, port_index, controller, hw_fn_id, sfnum), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(unsigned int, port_index) + __field(u32, controller) + __field(u16, hw_fn_id) + __field(u32, sfnum) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->port_index = port_index; + __entry->controller = controller; + __entry->hw_fn_id = hw_fn_id; + __entry->sfnum = sfnum; + ), + TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x sfnum=%u\n", + __get_str(devname), __entry->port_index, __entry->controller, + __entry->hw_fn_id, __entry->sfnum) +); + +TRACE_EVENT(mlx5_sf_free, + TP_PROTO(const struct mlx5_core_dev *dev, + unsigned int port_index, + u32 controller, + u16 hw_fn_id), + TP_ARGS(dev, port_index, controller, hw_fn_id), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(unsigned int, port_index) + __field(u32, controller) + __field(u16, hw_fn_id) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->port_index = port_index; + __entry->controller = controller; + __entry->hw_fn_id = hw_fn_id; + ), + TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x\n", + __get_str(devname), __entry->port_index, __entry->controller, + __entry->hw_fn_id) +); + +TRACE_EVENT(mlx5_sf_hwc_alloc, + TP_PROTO(const struct mlx5_core_dev *dev, + u32 controller, + u16 hw_fn_id, + u32 sfnum), + TP_ARGS(dev, controller, hw_fn_id, sfnum), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(u32, controller) + __field(u16, hw_fn_id) + __field(u32, sfnum) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->controller = controller; + __entry->hw_fn_id = hw_fn_id; + __entry->sfnum = sfnum; + ), + TP_printk("(%s) controller=%u hw_id=0x%x sfnum=%u\n", + __get_str(devname), __entry->controller, __entry->hw_fn_id, + __entry->sfnum) +); + +TRACE_EVENT(mlx5_sf_hwc_free, + TP_PROTO(const struct mlx5_core_dev *dev, + u16 hw_fn_id), + TP_ARGS(dev, hw_fn_id), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(u16, hw_fn_id) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->hw_fn_id = hw_fn_id; + ), + TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id) +); + +TRACE_EVENT(mlx5_sf_hwc_deferred_free, + TP_PROTO(const struct mlx5_core_dev *dev, + u16 hw_fn_id), + TP_ARGS(dev, hw_fn_id), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(u16, hw_fn_id) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->hw_fn_id = hw_fn_id; + ), + TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id) +); + +DECLARE_EVENT_CLASS(mlx5_sf_state_template, + TP_PROTO(const struct mlx5_core_dev *dev, + u32 port_index, + u32 controller, + u16 hw_fn_id), + TP_ARGS(dev, port_index, controller, hw_fn_id), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(unsigned int, port_index) + __field(u32, controller) + __field(u16, hw_fn_id)), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->port_index = port_index; + __entry->controller = controller; + __entry->hw_fn_id = hw_fn_id; + ), + TP_printk("(%s) port_index=%u 
controller=%u hw_id=0x%x\n", + __get_str(devname), __entry->port_index, __entry->controller, + __entry->hw_fn_id) +); + +DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_activate, + TP_PROTO(const struct mlx5_core_dev *dev, + u32 port_index, + u32 controller, + u16 hw_fn_id), + TP_ARGS(dev, port_index, controller, hw_fn_id) + ); + +DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_deactivate, + TP_PROTO(const struct mlx5_core_dev *dev, + u32 port_index, + u32 controller, + u16 hw_fn_id), + TP_ARGS(dev, port_index, controller, hw_fn_id) + ); + +TRACE_EVENT(mlx5_sf_update_state, + TP_PROTO(const struct mlx5_core_dev *dev, + unsigned int port_index, + u32 controller, + u16 hw_fn_id, + u8 state), + TP_ARGS(dev, port_index, controller, hw_fn_id, state), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(unsigned int, port_index) + __field(u32, controller) + __field(u16, hw_fn_id) + __field(u8, state) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->port_index = port_index; + __entry->controller = controller; + __entry->hw_fn_id = hw_fn_id; + __entry->state = state; + ), + TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x state=%u\n", + __get_str(devname), __entry->port_index, __entry->controller, + __entry->hw_fn_id, __entry->state) +); + +#endif /* _MLX5_SF_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH sf/diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE sf_tracepoint +#include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h new file mode 100644 index 00000000000000..fd814a190b8b1c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_SF_VHCA_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_SF_VHCA_TP_ + +#include +#include +#include "sf/vhca_event.h" + +TRACE_EVENT(mlx5_sf_vhca_event, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_vhca_state_event *event), + TP_ARGS(dev, event), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(u16, hw_fn_id) + __field(u32, sfnum) + __field(u8, vhca_state) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->hw_fn_id = event->function_id; + __entry->sfnum = event->sw_function_id; + __entry->vhca_state = event->new_vhca_state; + ), + TP_printk("(%s) hw_id=0x%x sfnum=%u vhca_state=%d\n", + __get_str(devname), __entry->hw_fn_id, + __entry->sfnum, __entry->vhca_state) +); + +#endif /* _MLX5_SF_VHCA_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH sf/diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE vhca_tracepoint +#include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index d9c69123c1abc4..252d6017387dcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -8,6 +8,7 @@ #include "ecpf.h" #include "mlx5_core.h" #include "eswitch.h" +#include "diag/sf_tracepoint.h" struct mlx5_sf_hw { u32 usr_sfnum; @@ -142,6 +143,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr goto vhca_err; } + trace_mlx5_sf_hwc_alloc(dev, controller, hw_fn_id, usr_sfnum); mutex_unlock(&table->table_lock); return sw_id; @@ -172,6 +174,7 @@ static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev, mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx); hwc->sfs[idx].allocated = false; hwc->sfs[idx].pending_delete = false; + trace_mlx5_sf_hwc_free(dev, hwc->start_fn_id + idx); } void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id) @@ -195,6 +198,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller hwc->sfs[id].allocated = false; } else { hwc->sfs[id].pending_delete = true; + trace_mlx5_sf_hwc_deferred_free(dev, hw_fn_id); } err: mutex_unlock(&table->table_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index 28b14b05086f63..d908fba968f083 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -6,6 +6,8 @@ #include "mlx5_core.h" #include "vhca_event.h" #include "ecpf.h" +#define CREATE_TRACE_POINTS +#include "diag/vhca_tracepoint.h" struct mlx5_vhca_state_notifier { struct mlx5_core_dev *dev; @@ -82,6 +84,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * vhca_state_context.vhca_state); mlx5_vhca_event_arm(dev, event->function_id); + trace_mlx5_sf_vhca_event(dev, event); blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index a5b9f65db23c64..07936841ce99b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = { [DR_ACTION_TYP_VPORT] = 
"DR_ACTION_TYP_VPORT", [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", + [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER", [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", @@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, /* If destination is vport we will get the FW flow table * that recalculates the CS and forwards to the vport. */ - ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn, - dest_action->vport->caps->num, - final_icm_addr); + ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn, + dest_action->vport->caps->num, + final_icm_addr); if (ret) { mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); return ret; @@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, return -EOPNOTSUPP; case DR_ACTION_TYP_CTR: attr.ctr_id = action->ctr->ctr_id + - action->ctr->offeset; + action->ctr->offset; break; case DR_ACTION_TYP_TAG: attr.flow_tag = action->flow_tag->flow_tag; @@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { - if (action->vport->caps->num == WIRE_PORT) { + if (action->vport->caps->num == MLX5_VPORT_UPLINK) { mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); return -EOPNOTSUPP; } @@ -853,6 +854,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action *action; bool reformat_req = false; u32 num_of_ref = 0; + u32 ref_act_cnt; int ret; int i; @@ -861,11 +863,14 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, return NULL; } - hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL); + hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL); if (!hw_dests) return NULL; - ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL); + if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt))) + goto free_hw_dests; + + ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL); if (!ref_actions) goto free_hw_dests; @@ -1747,7 +1752,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn, struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, - u32 vport, u8 vhca_id_valid, + u16 vport, u8 vhca_id_valid, u16 vhca_id) { struct mlx5dr_cmd_vport_cap *vport_cap; @@ -1767,9 +1772,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, return NULL; } - vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport); if (!vport_cap) { - mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport); + mlx5dr_err(dmn, + "Failed to get vport 0x%x caps - vport is disabled or invalid\n", + vport); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 56307283bf9b35..1d8febed0d76d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port); + caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev); + return 0; } @@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev 
*mdev, u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id) + u16 vport) { u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {}; void *in_flow_context; @@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); MLX5_SET(dest_format_struct, in_dests, destination_type, MLX5_FLOW_DESTINATION_TYPE_VPORT); - MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id); + MLX5_SET(dest_format_struct, in_dests, destination_id, vport); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 0fe159809ba155..49089cbe897c69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -9,48 +9,45 @@ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) -static int dr_domain_init_cache(struct mlx5dr_domain *dmn) +static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) { /* Per vport cached FW FT for checksum recalculation, this - * recalculation is needed due to a HW bug. + * recalculation is needed due to a HW bug in STEv0. */ - dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports, - sizeof(dmn->cache.recalc_cs_ft[0]), - GFP_KERNEL); - if (!dmn->cache.recalc_cs_ft) - return -ENOMEM; - - return 0; + xa_init(&dmn->csum_fts_xa); } -static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn) +static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn) { - int i; - - for (i = 0; i < dmn->info.caps.num_vports; i++) { - if (!dmn->cache.recalc_cs_ft[i]) - continue; + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + unsigned long i; - mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]); + xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) { + if (recalc_cs_ft) + mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft); } - kfree(dmn->cache.recalc_cs_ft); + xa_destroy(&dmn->csum_fts_xa); } -int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr) +int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr) { struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + int ret; - recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num]; + recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num); if (!recalc_cs_ft) { - /* Table not in cache, need to allocate a new one */ + /* Table hasn't been created yet */ recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num); if (!recalc_cs_ft) return -EINVAL; - dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft; + ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num, + recalc_cs_ft, GFP_KERNEL)); + if (ret) + return ret; } *rx_icm_addr = recalc_cs_ft->rx_icm_addr; @@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn) mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn); } +static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, + struct mlx5dr_cmd_vport_cap *uplink_vport) +{ + struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; + + uplink_vport->num = MLX5_VPORT_UPLINK; + uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; + uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; + uplink_vport->vport_gvmi = 0; + uplink_vport->vhca_gvmi = dmn->info.caps.gvmi; +} + static int dr_domain_query_vport(struct 
mlx5dr_domain *dmn, - bool other_vport, - u16 vport_number) + u16 vport_number, + struct mlx5dr_cmd_vport_cap *vport_caps) { - struct mlx5dr_cmd_vport_cap *vport_caps; + u16 cmd_vport = vport_number; + bool other_vport = true; int ret; - vport_caps = &dmn->info.caps.vports_caps[vport_number]; + if (vport_number == MLX5_VPORT_UPLINK) { + dr_domain_fill_uplink_caps(dmn, vport_caps); + return 0; + } + + if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { + other_vport = false; + cmd_vport = 0; + } ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport, - vport_number, + cmd_vport, &vport_caps->icm_address_rx, &vport_caps->icm_address_tx); if (ret) @@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, - vport_number, + cmd_vport, &vport_caps->vport_gvmi); if (ret) return ret; @@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, return 0; } -static int dr_domain_query_vports(struct mlx5dr_domain *dmn) +static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) { - struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; - struct mlx5dr_cmd_vport_cap *wire_vport; - int vport; + return dr_domain_query_vport(dmn, + dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, + &dmn->info.caps.vports.esw_manager_caps); +} + +static struct mlx5dr_cmd_vport_cap * +dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + struct mlx5dr_cmd_vport_cap *vport_caps; int ret; - /* Query vports (except wire vport) */ - for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) { - ret = dr_domain_query_vport(dmn, !!vport, vport); - if (ret) - return ret; + vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL); + if (!vport_caps) + return NULL; + + ret = dr_domain_query_vport(dmn, vport, vport_caps); + if (ret) { + kvfree(vport_caps); + return NULL; } - /* Last vport is the wire port */ - wire_vport = &dmn->info.caps.vports_caps[vport]; - wire_vport->num = WIRE_PORT; - wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; - wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; - wire_vport->vport_gvmi = 0; - wire_vport->vhca_gvmi = dmn->info.caps.gvmi; + ret = xa_insert(&caps->vports.vports_caps_xa, vport, + vport_caps, GFP_KERNEL); + if (ret) { + mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); + kvfree(vport_caps); + return ERR_PTR(ret); + } + + return vport_caps; +} + +struct mlx5dr_cmd_vport_cap * +mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + struct mlx5dr_cmd_vport_cap *vport_caps; + + if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || + (!caps->is_ecpf && vport == 0)) + return &caps->vports.esw_manager_caps; + +vport_load: + vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); + if (vport_caps) + return vport_caps; + + vport_caps = dr_domain_add_vport_cap(dmn, vport); + if (PTR_ERR(vport_caps) == -EBUSY) + /* caps were already stored by another thread */ + goto vport_load; + + return vport_caps; +} + +static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; + unsigned long i; + + xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) { + vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i); + kvfree(vport_caps); + } +} + +static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; 
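For reference, a minimal standalone sketch of the lookup-or-create pattern that the new mlx5dr_domain_get_vport_cap()/dr_domain_add_vport_cap() pair in the dr_domain.c hunk above follows: try xa_load() first, allocate and fill the entry on a miss, and retry the load when xa_insert() reports -EBUSY because a racing thread stored the entry first. The demo_* names and the trivial cap struct are illustrative stand-ins, not driver code.

/* Sketch only -- mirrors the lazy xarray caching flow used above. */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_vport_cap {
	u16 vport;
	u16 gvmi;
};

static DEFINE_XARRAY(demo_caps_xa);

static struct demo_vport_cap *demo_get_vport_cap(u16 vport)
{
	struct demo_vport_cap *cap;
	int ret;

again:
	cap = xa_load(&demo_caps_xa, vport);
	if (cap)
		return cap;			/* fast path: already cached */

	cap = kvzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return NULL;
	cap->vport = vport;			/* a real driver would query FW here */

	ret = xa_insert(&demo_caps_xa, vport, cap, GFP_KERNEL);
	if (ret) {
		kvfree(cap);
		if (ret == -EBUSY)		/* lost the race: entry now exists */
			goto again;
		return NULL;
	}

	return cap;
}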
+ + vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); + if (!vport_caps) + return -EINVAL; return 0; } @@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx; dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx; - dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports, - sizeof(dmn->info.caps.vports_caps[0]), - GFP_KERNEL); - if (!dmn->info.caps.vports_caps) - return -ENOMEM; + xa_init(&dmn->info.caps.vports.vports_caps_xa); - ret = dr_domain_query_vports(dmn); + /* Query eswitch manager and uplink vports only. Rest of the + * vports (vport 0, VFs and SFs) will be queried dynamically. + */ + + ret = dr_domain_query_esw_mngr(dmn); if (ret) { - mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret); - goto free_vports_caps; + mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret); + goto free_vports_caps_xa; } - dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1; + ret = dr_domain_query_uplink(dmn); + if (ret) { + mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); + goto free_vports_caps_xa; + } return 0; -free_vports_caps: - kfree(dmn->info.caps.vports_caps); - dmn->info.caps.vports_caps = NULL; +free_vports_caps_xa: + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); + return ret; } @@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -EOPNOTSUPP; } - dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev); - ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps); if (ret) return ret; @@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; - vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); - if (!vport_cap) { - mlx5dr_err(dmn, "Failed to get esw manager vport\n"); - return -ENOENT; - } + vport_cap = &dmn->info.caps.vports.esw_manager_caps; dmn->info.supp_sw_steering = true; dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx; @@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn) { - kfree(dmn->info.caps.vports_caps); + dr_domain_clear_vports(dmn); + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); } struct mlx5dr_domain * @@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) goto uninit_caps; } - ret = dr_domain_init_cache(dmn); - if (ret) { - mlx5dr_err(dmn, "Failed initialize domain cache\n"); - goto uninit_resourses; - } + dr_domain_init_csum_recalc_fts(dmn); return dmn; -uninit_resourses: - dr_domain_uninit_resources(dmn); uninit_caps: dr_domain_caps_uninit(dmn); free_domain: @@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) /* make sure resources are not used by the hardware */ mlx5dr_cmd_sync_steering(dmn->mdev); - dr_domain_uninit_cache(dmn); + dr_domain_uninit_csum_recalc_fts(dmn); dr_domain_uninit_resources(dmn); dr_domain_caps_uninit(dmn); mutex_destroy(&dmn->info.tx.mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 0d6f86eb248b9a..68a4c32d5f34c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -5,7 +5,7 @@ #include "dr_types.h" struct mlx5dr_fw_recalc_cs_ft * 
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) +mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num) { struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index 66c24767e3b00a..7f6fd9c5e371b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -24,7 +24,7 @@ struct mlx5dr_icm_dm { }; struct mlx5dr_icm_mr { - struct mlx5_core_mkey mkey; + u32 mkey; struct mlx5dr_icm_dm dm; struct mlx5dr_domain *dmn; size_t length; @@ -33,7 +33,7 @@ struct mlx5dr_icm_mr { static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev, u32 pd, u64 length, u64 start_addr, int mode, - struct mlx5_core_mkey *mkey) + u32 *mkey) { u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {}; @@ -116,7 +116,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) return icm_mr; free_mkey: - mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); + mlx5_core_destroy_mkey(mdev, icm_mr->mkey); free_dm: mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0, icm_mr->dm.addr, icm_mr->dm.obj_id); @@ -130,7 +130,7 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) struct mlx5_core_dev *mdev = icm_mr->dmn->mdev; struct mlx5dr_icm_dm *dm = &icm_mr->dm; - mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); + mlx5_core_destroy_mkey(mdev, icm_mr->mkey); mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0, dm->addr, dm->obj_id); kvfree(icm_mr); @@ -252,7 +252,7 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg; - chunk->rkey = buddy_mem_pool->icm_mr->mkey.key; + chunk->rkey = buddy_mem_pool->icm_mr->mkey; chunk->mr_addr = offset; chunk->icm_addr = (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index b5409cc021d330..75c775bee35105 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -875,9 +875,10 @@ static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher) static int dr_matcher_init(struct mlx5dr_matcher *matcher, struct mlx5dr_match_parameters *mask) { + struct mlx5dr_match_parameters consumed_mask; struct mlx5dr_table *tbl = matcher->tbl; struct mlx5dr_domain *dmn = tbl->dmn; - int ret; + int i, ret; if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) { mlx5dr_err(dmn, "Invalid match criteria attribute\n"); @@ -889,8 +890,16 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, mlx5dr_err(dmn, "Invalid match size attribute\n"); return -EINVAL; } + + consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL); + if (!consumed_mask.match_buf) + return -ENOMEM; + + consumed_mask.match_sz = mask->match_sz; + memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz); mlx5dr_ste_copy_param(matcher->match_criteria, - &matcher->mask, mask); + &matcher->mask, &consumed_mask, + true); } switch (dmn->type) { @@ -909,9 +918,22 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, break; default: WARN_ON(true); - return -EINVAL; + ret = -EINVAL; + goto free_consumed_mask; + } + + /* Check that all mask data was consumed */ + for (i = 
0; i < consumed_mask.match_sz; i++) { + if (consumed_mask.match_buf[i]) { + mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); + ret = -EOPNOTSUPP; + goto free_consumed_mask; + } } + ret = 0; +free_consumed_mask: + kfree(consumed_mask.match_buf); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index aca80efc28fa5e..6a390e981b0975 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -917,7 +917,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, return false; } - mlx5dr_ste_copy_param(matcher->match_criteria, param, value); + mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false); if (match_criteria & DR_MATCHER_CRITERIA_OUTER) { s_idx = offsetof(struct mlx5dr_match_param, outer); @@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain, return false; if (mask->misc.source_port) { - if (rx && value->misc.source_port != WIRE_PORT) + if (rx && value->misc.source_port != MLX5_VPORT_UPLINK) return true; - if (!rx && value->misc.source_port == WIRE_PORT) + if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK) return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index bfb14b4b190675..00aef47d7682d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -350,7 +350,7 @@ static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring, send_info->read.length = send_info->write.length; /* Read into the same write area */ send_info->read.addr = (uintptr_t)send_info->write.addr; - send_info->read.lkey = send_ring->mr->mkey.key; + send_info->read.lkey = send_ring->mr->mkey; if (send_ring->pending_wqe % send_ring->signal_th == 0) send_info->read.send_flags = IB_SEND_SIGNALED; @@ -388,7 +388,7 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, (void *)(uintptr_t)send_info->write.addr, send_info->write.length); send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset; - send_info->write.lkey = send_ring->mr->mkey.key; + send_info->write.lkey = send_ring->mr->mkey; } send_ring->tx_head++; @@ -848,8 +848,7 @@ static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq) kfree(cq); } -static int -dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) +static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey) { u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {}; void *mkc; @@ -908,7 +907,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev, static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr) { - mlx5_core_destroy_mkey(mdev, &mr->mkey); + mlx5_core_destroy_mkey(mdev, mr->mkey); dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size, DMA_BIDIRECTIONAL); kfree(mr); @@ -1039,7 +1038,7 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn) send_info.write.lkey = 0; /* Using the sync_mr in order to write/read */ send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr; - send_info.rkey = send_ring->sync_mr->mkey.key; + send_info.rkey = send_ring->sync_mr->mkey; for (i = 0; i < num_of_sends_req; i++) { ret = dr_postsend_icm_data(dmn, &send_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 1cdfe4fccc7a94..219a5474a8a46a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -668,101 +668,116 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, return 0; } -static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec) -{ - spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present); - spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present); - spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present); - spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port); - spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn); - - spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port); - spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask, - source_eswitch_owner_vhca_id); - - spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio); - spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi); - spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid); - spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio); - spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi); - spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid); +#define IFC_GET_CLR(typ, p, fld, clear) ({ \ + void *__p = (p); \ + u32 __t = MLX5_GET(typ, __p, fld); \ + if (clear) \ + MLX5_SET(typ, __p, fld, 0); \ + __t; \ +}) + +#define memcpy_and_clear(to, from, len, clear) ({ \ + void *__to = (to), *__from = (from); \ + size_t __len = (len); \ + memcpy(__to, __from, __len); \ + if (clear) \ + memset(__from, 0, __len); \ +}) + +static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr) +{ + spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr); + spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr); + spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr); + spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr); + spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr); + + spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr); + spec->source_eswitch_owner_vhca_id = + IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr); + + spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr); + spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr); + spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr); + spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr); + spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr); + spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr); spec->outer_second_cvlan_tag = - MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag); + IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr); spec->inner_second_cvlan_tag = - MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag); + IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr); spec->outer_second_svlan_tag = - MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag); + IFC_GET_CLR(fte_match_set_misc, mask, 
outer_second_svlan_tag, clr); spec->inner_second_svlan_tag = - MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag); - - spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol); + IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr); + spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr); - spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi); - spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo); + spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr); + spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr); - spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni); + spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr); - spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni); - spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam); + spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr); + spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr); spec->outer_ipv6_flow_label = - MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label); + IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr); spec->inner_ipv6_flow_label = - MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label); + IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr); - spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len); + spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr); spec->geneve_protocol_type = - MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type); + IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr); - spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp); + spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr); } -static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec) +static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr) { __be32 raw_ip[4]; - spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16); + spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr); - spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0); - spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype); + spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr); + spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr); - spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16); + spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr); - spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0); - spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio); - spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi); - spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid); + spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr); + spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr); + spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr); + spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr); - spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol); - spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp); - spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn); - spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag); - 
spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag); - spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag); - spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version); - spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags); - spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport); - spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport); + spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr); + spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr); + spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr); + spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr); + spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr); + spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr); + spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr); + spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr); + spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr); + spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr); - spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit); + spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr); - spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport); - spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport); + spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr); + spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr); - memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, - src_ipv4_src_ipv6.ipv6_layout.ipv6), - sizeof(raw_ip)); + memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(raw_ip), clr); spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]); spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]); spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]); spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]); - memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - sizeof(raw_ip)); + memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(raw_ip), clr); spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]); spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]); @@ -770,104 +785,105 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec) spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]); } -static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec) +static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr) { spec->outer_first_mpls_label = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr); spec->outer_first_mpls_exp = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr); spec->outer_first_mpls_s_bos = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr); spec->outer_first_mpls_ttl = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr); spec->inner_first_mpls_label = - MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label); + IFC_GET_CLR(fte_match_set_misc2, mask, 
inner_first_mpls.mpls_label, clr); spec->inner_first_mpls_exp = - MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp); + IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr); spec->inner_first_mpls_s_bos = - MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos); + IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr); spec->inner_first_mpls_ttl = - MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl); + IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr); spec->outer_first_mpls_over_gre_label = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr); spec->outer_first_mpls_over_gre_exp = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr); spec->outer_first_mpls_over_gre_s_bos = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr); spec->outer_first_mpls_over_gre_ttl = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr); spec->outer_first_mpls_over_udp_label = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr); spec->outer_first_mpls_over_udp_exp = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr); spec->outer_first_mpls_over_udp_s_bos = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr); spec->outer_first_mpls_over_udp_ttl = - MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl); - spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7); - spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6); - spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5); - spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4); - spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3); - spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2); - spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1); - spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0); - spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a); -} - -static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec) -{ - spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num); - spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num); - spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num); - spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num); + IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr); + spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr); + spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr); + spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, 
metadata_reg_c_5, clr); + spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr); + spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr); + spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr); + spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr); + spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr); + spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr); +} + +static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr) +{ + spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr); + spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr); + spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr); + spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr); spec->outer_vxlan_gpe_vni = - MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni); + IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr); spec->outer_vxlan_gpe_next_protocol = - MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol); + IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr); spec->outer_vxlan_gpe_flags = - MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags); - spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data); + IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr); + spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr); spec->icmpv6_header_data = - MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data); - spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type); - spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code); - spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type); - spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code); + IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr); + spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr); + spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr); + spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr); + spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr); spec->geneve_tlv_option_0_data = - MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data); - spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags); - spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type); - spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid); - spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0); - spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2); + IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr); + spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr); + spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr); + spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr); + spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr); + spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr); spec->gtpu_first_ext_dw_0 = - MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0); + IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr); } -static 
void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec) +static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr) { spec->prog_sample_field_id_0 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr); spec->prog_sample_field_value_0 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr); spec->prog_sample_field_id_1 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr); spec->prog_sample_field_value_1 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr); spec->prog_sample_field_id_2 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr); spec->prog_sample_field_value_2 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr); spec->prog_sample_field_id_3 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr); spec->prog_sample_field_value_3 = - MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3); + IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr); } void mlx5dr_ste_copy_param(u8 match_criteria, struct mlx5dr_match_param *set_param, - struct mlx5dr_match_parameters *mask) + struct mlx5dr_match_parameters *mask, + bool clr) { u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {}; u8 *data = (u8 *)mask->match_buf; @@ -881,7 +897,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = mask->match_buf; } - dr_ste_copy_mask_spec(buff, &set_param->outer); + dr_ste_copy_mask_spec(buff, &set_param->outer, clr); } param_location = sizeof(struct mlx5dr_match_spec); @@ -894,7 +910,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = data + param_location; } - dr_ste_copy_mask_misc(buff, &set_param->misc); + dr_ste_copy_mask_misc(buff, &set_param->misc, clr); } param_location += sizeof(struct mlx5dr_match_misc); @@ -907,7 +923,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = data + param_location; } - dr_ste_copy_mask_spec(buff, &set_param->inner); + dr_ste_copy_mask_spec(buff, &set_param->inner, clr); } param_location += sizeof(struct mlx5dr_match_spec); @@ -920,7 +936,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = data + param_location; } - dr_ste_copy_mask_misc2(buff, &set_param->misc2); + dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr); } param_location += sizeof(struct mlx5dr_match_misc2); @@ -934,7 +950,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = data + param_location; } - dr_ste_copy_mask_misc3(buff, &set_param->misc3); + dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr); } param_location += sizeof(struct mlx5dr_match_misc3); @@ -948,7 +964,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } else { buff = data + param_location; } - dr_ste_copy_mask_misc4(buff, &set_param->misc4); + dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 9c704bce3c1258..b0649c2877dd63 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_domain *dmn = sb->dmn; - struct mlx5dr_cmd_caps *caps; + struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; bool source_gvmi_set; @@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (sb->vhca_id_valid) { /* Find port GVMI based on the eswitch_owner_vhca_id */ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) - caps = &dmn->info.caps; + vport_dmn = dmn; else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == dmn->peer_dmn->info.caps.gvmi)) - caps = &dmn->peer_dmn->info.caps; + vport_dmn = dmn->peer_dmn; else return -EINVAL; misc->source_eswitch_owner_vhca_id = 0; } else { - caps = &dmn->info.caps; + vport_dmn = dmn; } source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); if (source_gvmi_set) { - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, + misc->source_port); if (!vport_cap) { - mlx5dr_err(dmn, "Vport 0x%x is invalid\n", + mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", misc->source_port); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index b2481c99da79a6..cb9cf67b0a02fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { u8 *d_action; - dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); - action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); - action_sz = DR_STE_ACTION_TRIPLE_SZ; + if (action_sz < DR_STE_ACTION_TRIPLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } d_action = action + DR_STE_ACTION_SINGLE_SZ; dr_ste_v1_set_encap_l3(last_ste, @@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_domain *dmn = sb->dmn; - struct mlx5dr_cmd_caps *caps; + struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); @@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (sb->vhca_id_valid) { /* Find port GVMI based on the eswitch_owner_vhca_id */ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) - caps = &dmn->info.caps; + vport_dmn = dmn; else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == dmn->peer_dmn->info.caps.gvmi)) - caps = &dmn->peer_dmn->info.caps; + vport_dmn = dmn->peer_dmn; else return -EINVAL; - misc->source_eswitch_owner_vhca_id = 0; + misc->source_eswitch_owner_vhca_id = 0; } else { - caps = &dmn->info.caps; + vport_dmn = dmn; } if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi)) return 0; - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port); if 
(!vport_cap) { mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", misc->source_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index b20e8aabb861b6..3028b776da00ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -4,7 +4,7 @@ #ifndef _DR_TYPES_ #define _DR_TYPES_ -#include +#include #include #include "fs_core.h" #include "wq.h" @@ -14,7 +14,6 @@ #define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 -#define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) @@ -752,9 +751,9 @@ struct mlx5dr_esw_caps { struct mlx5dr_cmd_vport_cap { u16 vport_gvmi; u16 vhca_gvmi; + u16 num; u64 icm_address_rx; u64 icm_address_tx; - u32 num; }; struct mlx5dr_roce_cap { @@ -763,6 +762,11 @@ struct mlx5dr_roce_cap { u8 fl_rc_qp_when_roce_enabled:1; }; +struct mlx5dr_vports { + struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct xarray vports_caps_xa; +}; + struct mlx5dr_cmd_caps { u16 gvmi; u64 nic_rx_drop_address; @@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps { u8 flex_parser_id_gtpu_first_ext_dw_0; u8 max_ft_level; u16 roce_min_src_udp; - u8 num_esw_ports; u8 sw_format_ver; bool eswitch_manager; bool rx_sw_owner; @@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps { u8 rx_sw_owner_v2:1; u8 tx_sw_owner_v2:1; u8 fdb_sw_owner_v2:1; - u32 num_vports; struct mlx5dr_esw_caps esw_caps; - struct mlx5dr_cmd_vport_cap *vports_caps; + struct mlx5dr_vports vports; bool prio_tag_required; struct mlx5dr_roce_cap roce_caps; + u8 is_ecpf:1; u8 isolate_vl_tc:1; }; @@ -826,10 +829,6 @@ struct mlx5dr_domain_info { struct mlx5dr_cmd_caps caps; }; -struct mlx5dr_domain_cache { - struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; -}; - struct mlx5dr_domain { struct mlx5dr_domain *peer_dmn; struct mlx5_core_dev *mdev; @@ -841,7 +840,7 @@ struct mlx5dr_domain { struct mlx5dr_icm_pool *action_icm_pool; struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; - struct mlx5dr_domain_cache cache; + struct xarray csum_fts_xa; struct mlx5dr_ste_ctx *ste_ctx; }; @@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl { struct mlx5dr_action_ctr { u32 ctr_id; - u32 offeset; + u32 offset; }; struct mlx5dr_action_vport { @@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl) return true; } -static inline struct mlx5dr_cmd_vport_cap * -mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) -{ - if (!caps->vports_caps || - (vport >= caps->num_vports && vport != WIRE_PORT)) - return NULL; - - if (vport == WIRE_PORT) - vport = caps->num_vports; - - return &caps->vports_caps[vport]; -} +struct mlx5dr_cmd_vport_cap * +mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport); struct mlx5dr_cmd_query_flow_table_details { u8 status; @@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id); + u16 vport_id); int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, u32 table_type, u32 table_id); @@ -1241,7 +1230,8 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_htbl_connect_info *connect_info); void mlx5dr_ste_copy_param(u8 match_criteria, struct mlx5dr_match_param *set_param, - struct mlx5dr_match_parameters *mask); + struct mlx5dr_match_parameters *mask, + bool clear); struct mlx5dr_qp { struct mlx5_core_dev *mdev; @@ 
-1275,7 +1265,7 @@ struct mlx5dr_cq { struct mlx5dr_mr { struct mlx5_core_dev *mdev; - struct mlx5_core_mkey mkey; + u32 mkey; dma_addr_t dma_addr; void *addr; size_t size; @@ -1372,12 +1362,12 @@ struct mlx5dr_fw_recalc_cs_ft { }; struct mlx5dr_fw_recalc_cs_ft * -mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); +mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num); void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); -int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr); +int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr); int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_cmd_flow_destination_hw_info *dest, int num_dest, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 7e58f4e594b748..2632d5ae9bc0e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } -#define MLX5_FLOW_CONTEXT_ACTION_MAX 20 +#define MLX5_FLOW_CONTEXT_ACTION_MAX 32 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *group, @@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n mlx5dr_action_destroy(modify_hdr->action.dr_action); } +static int +mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns, + int definer_id) +{ + return -EOPNOTSUPP; +} + +static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns, + u16 format_id, u32 *match_mask) +{ + return -EOPNOTSUPP; +} + static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) @@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = { .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc, .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc, .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc, + .create_match_definer = mlx5_cmd_dr_create_match_definer, + .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer, .set_peer = mlx5_cmd_dr_set_peer, .create_ns = mlx5_cmd_dr_create_ns, .destroy_ns = mlx5_cmd_dr_destroy_ns, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index c5a8b160199911..c7c93131b762b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain, struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, - u32 vport, u8 vhca_id_valid, + u16 vport, u8 vhca_id_valid, u16 vhca_id); struct mlx5dr_action * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index da481a7c12f4e6..01e9c412977cf3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -36,7 +36,7 @@ #include #include "mlx5_core.h" -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) +static int mlx5_cmd_alloc_uar(struct 
mlx5_core_dev *dev, u32 *uarn) { u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {}; @@ -44,13 +44,14 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out); - if (!err) - *uarn = MLX5_GET(alloc_uar_out, out, uar); - return err; + if (err) + return err; + + *uarn = MLX5_GET(alloc_uar_out, out, uar); + return 0; } -EXPORT_SYMBOL(mlx5_cmd_alloc_uar); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) +static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) { u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {}; @@ -58,7 +59,6 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) MLX5_SET(dealloc_uar_in, in, uar, uarn); return mlx5_cmd_exec_in(dev, dealloc_uar, in); } -EXPORT_SYMBOL(mlx5_cmd_free_uar); static int uars_per_sys_page(struct mlx5_core_dev *mdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 4c1440a95ad757..8846d30a380a00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -421,19 +421,21 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.system_image_guid); - +out: kvfree(out); - - return 0; + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); @@ -1133,19 +1135,20 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport); u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) { int port_type_cap = MLX5_CAP_GEN(mdev, port_type); - u64 tmp = 0; + u64 tmp; + int err; if (mdev->sys_image_guid) return mdev->sys_image_guid; if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH) - mlx5_query_nic_vport_system_image_guid(mdev, &tmp); + err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp); else - mlx5_query_hca_vport_system_image_guid(mdev, &tmp); + err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); - mdev->sys_image_guid = tmp; + mdev->sys_image_guid = err ? 0 : tmp; - return tmp; + return mdev->sys_image_guid; } EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 6704f5c1aa32e7..b990782c1eb1f6 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv) u64_to_ether_addr(local_mac, mac); if (is_valid_ether_addr(mac)) { - ether_addr_copy(priv->netdev->dev_addr, mac); + eth_hw_addr_set(priv->netdev, mac); } else { /* Provide a random MAC if for some reason the device has * not been configured with a valid MAC address already. 
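For reference, a minimal standalone sketch of the error-handling shape the vport.c hunk above moves to: the query output is consumed only when the command actually succeeded, and a failed query leaves the cached GUID at zero so a later call can retry instead of returning stack garbage. demo_query_guid() and the other demo_* names are hypothetical stand-ins, not the mlx5 API.

/* Sketch only -- query once, cache on success, return 0 on failure. */
#include <linux/types.h>

static u64 demo_cached_guid;

static int demo_query_guid(u64 *guid)
{
	*guid = 0x1234;				/* placeholder for a firmware query */
	return 0;
}

static u64 demo_get_system_image_guid(void)
{
	u64 tmp;
	int err;

	if (demo_cached_guid)
		return demo_cached_guid;	/* already resolved once */

	err = demo_query_guid(&tmp);
	demo_cached_guid = err ? 0 : tmp;	/* never cache an unset value */

	return demo_cached_guid;
}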
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h index 7654841a05c289..e6475ea77cd182 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h @@ -19,7 +19,7 @@ struct mlxfw_dev { static inline struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev) { - return mlxfw_dev->devlink->dev; + return devlink_to_dev(mlxfw_dev->devlink); } #define MLXFW_PRFX "mlxfw: " diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f080fab3de2b54..3fd3812b8f314e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -90,7 +90,6 @@ struct mlxsw_core { struct devlink_health_reporter *fw_fatal; } health; struct mlxsw_env *env; - bool is_initialized; /* Denotes if core was already initialized. */ unsigned long driver_priv[]; /* driver_priv has to be always the last item */ }; @@ -1974,12 +1973,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; - if (!reload) { - err = devlink_register(devlink); - if (err) - goto err_devlink_register; - } - if (!reload) { err = mlxsw_core_params_register(mlxsw_core); if (err) @@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_health_init; - if (mlxsw_driver->init) { - err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); - if (err) - goto err_driver_init; - } - err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); if (err) goto err_hwmon_init; @@ -2014,31 +2001,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_env_init; - mlxsw_core->is_initialized = true; - devlink_params_publish(devlink); - - if (!reload) - devlink_reload_enable(devlink); + if (mlxsw_driver->init) { + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); + if (err) + goto err_driver_init; + } + if (!reload) { + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); + } return 0; +err_driver_init: + mlxsw_env_fini(mlxsw_core->env); err_env_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: mlxsw_hwmon_fini(mlxsw_core->hwmon); err_hwmon_init: - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); -err_driver_init: mlxsw_core_health_fini(mlxsw_core); err_health_init: err_fw_rev_validate: if (!reload) mlxsw_core_params_unregister(mlxsw_core); err_register_params: - if (!reload) - devlink_unregister(devlink); -err_devlink_register: mlxsw_emad_fini(mlxsw_core); err_emad_init: kfree(mlxsw_core->lag.mapping); @@ -2088,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, struct devlink *devlink = priv_to_devlink(mlxsw_core); if (!reload) - devlink_reload_disable(devlink); + devlink_unregister(devlink); + if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the @@ -2099,18 +2087,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } - devlink_params_unpublish(devlink); - mlxsw_core->is_initialized = false; + if (mlxsw_core->driver->fini) + mlxsw_core->driver->fini(mlxsw_core); mlxsw_env_fini(mlxsw_core->env); mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); mlxsw_core_health_fini(mlxsw_core); if (!reload) 
mlxsw_core_params_unregister(mlxsw_core); - if (!reload) - devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); mlxsw_ports_fini(mlxsw_core, reload); @@ -2124,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, reload_fail_deinit: mlxsw_core_params_unregister(mlxsw_core); - devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } @@ -2939,49 +2922,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) return mlxsw_core->env; } -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core) -{ - return mlxsw_core->is_initialized; -} - -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module) -{ - enum mlxsw_reg_pmtm_module_type module_type; - char pmtm_pl[MLXSW_REG_PMTM_LEN]; - int err; - - mlxsw_reg_pmtm_pack(pmtm_pl, module); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl); - if (err) - return err; - mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type); - - /* Here we need to get the module width according to the module type. */ - - switch (module_type) { - case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_OSFP: - return 8; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP: - return 4; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_DSFP: - return 2; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP: - return 1; - default: - return -EINVAL; - } -} -EXPORT_SYMBOL(mlxsw_core_module_max_width); - static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 80712dc803d0f5..12023a550007be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u8 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core); -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 3713c45cfa1ed5..6dd4ae2f45f41e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "core.h" #include "core_env.h" @@ -14,12 +15,15 @@ struct mlxsw_env_module_info { u64 module_overheat_counter; bool is_overheat; + int num_ports_mapped; + int num_ports_up; + enum ethtool_module_power_mode_policy power_mode_policy; }; struct mlxsw_env { struct mlxsw_core *core; u8 module_count; - spinlock_t module_info_lock; /* Protects 'module_info'. */ + struct mutex module_info_lock; /* Protects 'module_info'. 
*/ struct mlxsw_env_module_info module_info[]; }; @@ -389,6 +393,205 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); +static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) +{ + char pmaos_pl[MLXSW_REG_PMAOS_LEN]; + + mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_rst_set(pmaos_pl, true); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); +} + +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, u32 *flags) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + u32 req = *flags; + int err; + + if (!(req & ETH_RESET_PHY) && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) + return 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].num_ports_up) { + netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n"); + err = -EINVAL; + goto out; + } + + if (mlxsw_env->module_info[module].num_ports_mapped > 1 && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) { + netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n"); + err = -EINVAL; + goto out; + } + + err = mlxsw_env_module_reset(mlxsw_core, module); + if (err) { + netdev_err(netdev, "Failed to reset module\n"); + goto out; + } + + *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)); + +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_reset_module); + +int +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + char mcion_pl[MLXSW_REG_MCION_LEN]; + u32 status_bits; + int err; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + params->policy = mlxsw_env->module_info[module].power_mode_policy; + + mlxsw_reg_mcion_pack(mcion_pl, module); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode"); + goto out; + } + + status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl); + if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK)) + goto out; + + if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK) + params->mode = ETHTOOL_MODULE_POWER_MODE_LOW; + else + params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH; + +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_get_module_power_mode); + +static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, + u8 module, bool enable) +{ + enum mlxsw_reg_pmaos_admin_status admin_status; + char pmaos_pl[MLXSW_REG_PMAOS_LEN]; + + mlxsw_reg_pmaos_pack(pmaos_pl, module); + admin_status = enable ? 
MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED : + MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED; + mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status); + mlxsw_reg_pmaos_ase_set(pmaos_pl, true); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); +} + +static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, + u8 module, bool low_power) +{ + u16 eeprom_override_mask, eeprom_override; + char pmmp_pl[MLXSW_REG_PMMP_LEN]; + + mlxsw_reg_pmmp_pack(pmmp_pl, module); + mlxsw_reg_pmmp_sticky_set(pmmp_pl, true); + /* Mask all the bits except low power mode. */ + eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK; + mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask); + eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK : + 0; + mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl); +} + +static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, + u8 module, bool low_power, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_env_module_enable_set(mlxsw_core, module, false); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to disable module"); + return err; + } + + err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode"); + goto err_module_low_power_set; + } + + err = mlxsw_env_module_enable_set(mlxsw_core, module, true); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to enable module"); + goto err_module_enable_set; + } + + return 0; + +err_module_enable_set: + mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power); +err_module_low_power_set: + mlxsw_env_module_enable_set(mlxsw_core, module, true); + return err; +} + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + bool low_power; + int err = 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && + policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); + return -EOPNOTSUPP; + } + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].power_mode_policy == policy) + goto out; + + /* If any ports are up, we are already in high power mode. 
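+ * Editor's note, not part of the patch: with the "auto" policy the module's
+ * power state simply follows the per-module port counters, roughly:
+ *   policy set to auto, no port up        -> forced to low power (below)
+ *   first port up (num_ports_up 0 -> 1)   -> mlxsw_env_module_port_up()
+ *                                            raises it to high power
+ *   last port down (num_ports_up 1 -> 0)  -> mlxsw_env_module_port_down()
+ *                                            returns it to low power
+ * while the "high" policy keeps the module in high power regardless.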
*/ + if (mlxsw_env->module_info[module].num_ports_up) + goto out_set_policy; + + low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO; + err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power, + extack); + if (err) + goto out; + +out_set_policy: + mlxsw_env->module_info[module].power_mode_policy = policy; +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); + static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, u8 module, bool *p_has_temp_sensor) @@ -482,22 +685,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, return 0; } -static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, - char *mtwe_pl, void *priv) +struct mlxsw_env_module_temp_warn_event { + struct mlxsw_env *mlxsw_env; + char mtwe_pl[MLXSW_REG_MTWE_LEN]; + struct work_struct work; +}; + +static void mlxsw_env_mtwe_event_work(struct work_struct *work) { - struct mlxsw_env *mlxsw_env = priv; + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env; int i, sensor_warning; bool is_overheat; + event = container_of(work, struct mlxsw_env_module_temp_warn_event, + work); + mlxsw_env = event->mlxsw_env; + for (i = 0; i < mlxsw_env->module_count; i++) { /* 64-127 of sensor_index are mapped to the port modules * sequentially (module 0 is mapped to sensor_index 64, * module 1 to sensor_index 65 and so on) */ sensor_warning = - mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl, + mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl, i + MLXSW_REG_MTMP_MODULE_INDEX_MIN); - spin_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); is_overheat = mlxsw_env->module_info[i].is_overheat; @@ -507,13 +720,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, * warning OR current state in "no warning" and MTWE * does not report warning. */ - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); continue; } else if (is_overheat && !sensor_warning) { /* MTWE reports "no warning", turn is_overheat off. 
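 * Editor's note, not part of the patch: only the "no warning" -> "warning"
 * edge below bumps module_overheat_counter, so the counter counts overheat
 * events rather than time spent overheated.  Taking a mutex here is safe
 * because the MTWE listener further down defers into a work item instead of
 * running in the trap's atomic context.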
*/ mlxsw_env->module_info[i].is_overheat = false; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } else { /* Current state is "no warning" and MTWE reports * "warning", increase the counter and turn is_overheat @@ -521,13 +734,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, */ mlxsw_env->module_info[i].is_overheat = true; mlxsw_env->module_info[i].module_overheat_counter++; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } } + + kfree(event); +} + +static void +mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl, + void *priv) +{ + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + + event->mlxsw_env = mlxsw_env; + memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN); + INIT_WORK(&event->work, mlxsw_env_mtwe_event_work); + mlxsw_core_schedule_work(&event->work); } static const struct mlxsw_listener mlxsw_env_temp_warn_listener = - MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE); + MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE); static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core) { @@ -568,9 +800,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[event->module].is_overheat = false; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module, &has_temp_sensor); @@ -652,8 +884,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, for (i = 0; i < module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, i, - MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_pack(pmaos_pl, i); + mlxsw_reg_pmaos_e_set(pmaos_pl, + MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_ee_set(pmaos_pl, true); err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); if (err) return err; @@ -667,29 +901,110 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - /* Prevent switch driver from accessing uninitialized data. 
*/ - if (!mlxsw_core_is_initialized(mlxsw_core)) { - *p_counter = 0; - return 0; - } - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) return -EINVAL; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); *p_counter = mlxsw_env->module_info[module].module_overheat_counter; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); return 0; } EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get); +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped++; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_map); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped--; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_unmap); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + int err = 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].power_mode_policy != + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) + goto out_inc; + + if (mlxsw_env->module_info[module].num_ports_up != 0) + goto out_inc; + + /* Transition to high power mode following first port using the module + * being put administratively up. + */ + err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false, + NULL); + if (err) + goto out_unlock; + +out_inc: + mlxsw_env->module_info[module].num_ports_up++; +out_unlock: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_module_port_up); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + + mlxsw_env->module_info[module].num_ports_up--; + + if (mlxsw_env->module_info[module].power_mode_policy != + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) + goto out_unlock; + + if (mlxsw_env->module_info[module].num_ports_up != 0) + goto out_unlock; + + /* Transition to low power mode following last port using the module + * being put administratively down. + */ + __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL); + +out_unlock: + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_down); + int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) { char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_env *env; u8 module_count; - int err; + int i, err; mlxsw_reg_mgpir_pack(mgpir_pl); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); @@ -702,7 +1017,14 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (!env) return -ENOMEM; - spin_lock_init(&env->module_info_lock); + /* Firmware defaults to high power mode policy where modules are + * transitioned to high power mode following plug-in. 
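+ * Editor's note, not part of the patch: the loop below initializes every
+ * entry to ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH so the policy reported to
+ * ethtool matches that firmware default until user space changes it.  With
+ * a recent enough ethtool that would look roughly like (hypothetical
+ * interface name):
+ *   ethtool --show-module swp1
+ *   ethtool --set-module swp1 power-mode-policy auto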
+ */ + for (i = 0; i < module_count; i++) + env->module_info[i].power_mode_policy = + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; + + mutex_init(&env->module_info_lock); env->core = mlxsw_core; env->module_count = module_count; *p_env = env; @@ -732,6 +1054,7 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: + mutex_destroy(&env->module_info_lock); kfree(env); return err; } @@ -742,5 +1065,6 @@ void mlxsw_env_fini(struct mlxsw_env *env) /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); + mutex_destroy(&env->module_info_lock); kfree(env); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index 0bf5bd0f8a7ee1..da121b1a84b4b7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -24,9 +24,32 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, + u32 *flags); + +int +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack); + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack); + int mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, u64 *p_counter); + +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module); + int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index e92cadc9812848..ab70a873a01a85 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u8 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \ { \ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 \ +static inline u8 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ 
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u8 val) \ { \ @@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u16 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \ { \ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u16 \ +static inline u16 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u16 val) \ { \ @@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u32 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \ { \ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u32 \ +static inline u32 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u32 val) \ { \ @@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u64 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \ { \ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u64 \ +static inline u64 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short 
index)\ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u64 val) \ { \ @@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \ { \ __mlxsw_item_memcpy_from(buf, dst, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline char * \ +static inline char * __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ { \ return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ @@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \ unsigned short index, \ char *dst) \ @@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \ __mlxsw_item_memcpy_from(buf, dst, \ &__ITEM_NAME(_type, _cname, _iname), index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ unsigned short index, \ const char *src) \ @@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), index); \ } \ -static inline char * \ +static inline char * __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ { \ return __mlxsw_item_data(buf, \ @@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 \ +static inline u8 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \ { \ return __mlxsw_item_bit_array_get(buf, \ &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \ { \ return __mlxsw_item_bit_array_set(buf, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index d9d56c44e994fd..5d4dfa5ddbb531 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m) return 0; } -static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) +static int mlxsw_m_port_open(struct net_device *dev) { + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module); +} + +static int mlxsw_m_port_stop(struct net_device *dev) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module); return 0; } @@ -70,8 
+82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev) } static const struct net_device_ops mlxsw_m_port_netdev_ops = { - .ndo_open = mlxsw_m_port_dummy_open_stop, - .ndo_stop = mlxsw_m_port_dummy_open_stop, + .ndo_open = mlxsw_m_port_open, + .ndo_stop = mlxsw_m_port_stop, .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port, }; @@ -124,11 +136,47 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, page, extack); } +static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module, + flags); +} + +static int +mlxsw_m_get_module_power_mode(struct net_device *netdev, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module, + params, extack); +} + +static int +mlxsw_m_set_module_power_mode(struct net_device *netdev, + const struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module, + params->policy, extack); +} + static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { .get_drvinfo = mlxsw_m_module_get_drvinfo, .get_module_info = mlxsw_m_get_module_info, .get_module_eeprom = mlxsw_m_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page, + .reset = mlxsw_m_reset, + .get_module_power_mode = mlxsw_m_get_module_power_mode, + .set_module_power_mode = mlxsw_m_set_module_power_mode, }; static int @@ -152,20 +200,16 @@ static int mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port) { struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - struct net_device *dev = mlxsw_m_port->dev; char ppad_pl[MLXSW_REG_PPAD_LEN]; + u8 addr[ETH_ALEN]; int err; mlxsw_reg_ppad_pack(ppad_pl, false, 0); err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl); if (err) return err; - mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr); - /* The last byte value in base mac address is guaranteed - * to be such it does not overflow when adding local_port - * value. 
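 * Editor's note, not part of the patch: the open-coded single-byte addition
 * removed here is what required that overflow guarantee; eth_hw_addr_gen()
 * adds the offset to the whole base address with carry across bytes, e.g.
 * base 00:11:22:33:44:ff with offset 2 becomes 00:11:22:33:45:01, so the
 * caveat is no longer needed.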
- */ - dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1; + mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr); + eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1); return 0; } @@ -266,6 +310,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, if (WARN_ON_ONCE(module >= max_ports)) return -EINVAL; + mlxsw_env_module_port_map(mlxsw_m->core, module); mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; return 0; @@ -274,6 +319,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) { mlxsw_m->module_to_port[module] = -1; + mlxsw_env_module_port_unmap(mlxsw_m->core, module); } static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6fbda6ebd59013..8d420eb8ade2b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_DISCARD_CNT = 0x6, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, - MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, + MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13, }; /* reg_ppcnt_grp @@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); -/* Ethernet Per Traffic Group Counters */ +/* Ethernet Per Traffic Class Counters */ /* reg_ppcnt_tc_transmit_queue * Contains the transmit queue depth in cells of traffic class @@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, MLXSW_ITEM64(reg, ppcnt, wred_discard, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); +/* reg_ppcnt_ecn_marked_tc + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@ -5681,6 +5687,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN); +/* reg_pmaos_rst + * Module reset toggle. + * Note: Setting reset while module is plugged-in will result in transition to + * "initializing" operational state. + * Access: OP + */ +MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1); + /* reg_pmaos_slot_index * Slot index. * Access: Index @@ -5693,6 +5707,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4); */ MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8); +enum mlxsw_reg_pmaos_admin_status { + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1, + MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2, + /* If the module is active and then unplugged, or experienced an error + * event, the operational status should go to "disabled" and can only + * be enabled upon explicit enable command. + */ + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3, +}; + +/* reg_pmaos_admin_status + * Module administrative state (the desired state of the module). + * Note: To disable a module, all ports associated with the port must be + * administatively down first. + * Access: RW + */ +MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4); + /* reg_pmaos_ase * Admin state update enable. * If this bit is set, admin state will be updated based on admin_state field. 
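 * Editor's note, not part of the patch: mlxsw_env_module_enable_set()
 * earlier in this patch always sets ase together with admin_status, so a
 * single PMAOS write both carries the desired administrative state and
 * tells the firmware to apply it.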
@@ -5721,13 +5753,10 @@ enum mlxsw_reg_pmaos_e { */ MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2); -static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module, - enum mlxsw_reg_pmaos_e e) +static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module) { MLXSW_REG_ZERO(pmaos, payload); mlxsw_reg_pmaos_module_set(payload, module); - mlxsw_reg_pmaos_e_set(payload, e); - mlxsw_reg_pmaos_ee_set(payload, true); } /* PPLR - Port Physical Loopback Register @@ -5766,6 +5795,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0); } +/* PMTDB - Port Module To local DataBase Register + * ---------------------------------------------- + * The PMTDB register allows to query the possible module<->local port + * mapping than can be used in PMLP. It does not represent the actual/current + * mapping of the local to module. Actual mapping is only defined by PMLP. + */ +#define MLXSW_REG_PMTDB_ID 0x501A +#define MLXSW_REG_PMTDB_LEN 0x40 + +MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN); + +/* reg_pmtdb_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4); + +/* reg_pmtdb_module + * Module number. + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8); + +/* reg_pmtdb_ports_width + * Port's width + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4); + +/* reg_pmtdb_num_ports + * Number of ports in a single module (split/breakout) + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4); + +enum mlxsw_reg_pmtdb_status { + MLXSW_REG_PMTDB_STATUS_SUCCESS, +}; + +/* reg_pmtdb_status + * Status + * Access: RO + */ +MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4); + +/* reg_pmtdb_port_num + * The local_port value which can be assigned to the module. + * In case of more than one port, port represent the / port of + * the module. + * Access: RO + */ +MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false); + +static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, + u8 ports_width, u8 num_ports) +{ + MLXSW_REG_ZERO(pmtdb, payload); + mlxsw_reg_pmtdb_slot_index_set(payload, slot_index); + mlxsw_reg_pmtdb_module_set(payload, module); + mlxsw_reg_pmtdb_ports_width_set(payload, ports_width); + mlxsw_reg_pmtdb_num_ports_set(payload, num_ports); +} + /* PMPE - Port Module Plug/Unplug Event Register * --------------------------------------------- * This register reports any operational status change of a module. @@ -5860,67 +5952,100 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, mlxsw_reg_pddr_page_select_set(payload, page_select); } -/* PMTM - Port Module Type Mapping Register - * ---------------------------------------- - * The PMTM allows query or configuration of module types. +/* PMMP - Port Module Memory Map Properties Register + * ------------------------------------------------- + * The PMMP register allows to override the module memory map advertisement. + * The register can only be set when the module is disabled by PMAOS register. */ -#define MLXSW_REG_PMTM_ID 0x5067 -#define MLXSW_REG_PMTM_LEN 0x10 +#define MLXSW_REG_PMMP_ID 0x5044 +#define MLXSW_REG_PMMP_LEN 0x2C -MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN); +MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN); -/* reg_pmtm_module +/* reg_pmmp_module * Module number. 
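 * Editor's illustrative sketch, not part of the patch: the
 * eeprom_override_mask field defined further down has negative polarity
 * (0 = allow write, 1 = ignore), so mlxsw_env_module_low_power_set()
 * earlier in this patch writes roughly
 *   mask     = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
 *   override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK : 0;
 * so that only the low-power override bit is touched.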
* Access: Index */ -MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8); +MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8); -enum mlxsw_reg_pmtm_module_type { - /* Backplane with 4 lanes */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_4X, - /* QSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP, - /* SFP */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP, - /* Backplane with single lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4, - /* Backplane with two lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8, - /* Chip2Chip4x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10, - /* Chip2Chip2x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C2X, - /* Chip2Chip1x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C1X, - /* QSFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14, - /* OSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_OSFP, - /* SFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD, - /* DSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_DSFP, - /* Chip2Chip8x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C8X, +/* reg_pmmp_sticky + * When set, will keep eeprom_override values after plug-out event. + * Access: OP + */ +MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1); + +/* reg_pmmp_eeprom_override_mask + * Write mask bit (negative polarity). + * 0 - Allow write + * 1 - Ignore write + * On write, indicates which of the bits from eeprom_override field are + * updated. + * Access: WO + */ +MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16); + +enum { + /* Set module to low power mode */ + MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8), }; -/* reg_pmtm_module_type - * Module type. +/* reg_pmmp_eeprom_override + * Override / ignore EEPROM advertisement properties bitmask * Access: RW */ -MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4); +MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16); -static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module) { - MLXSW_REG_ZERO(pmtm, payload); - mlxsw_reg_pmtm_module_set(payload, module); + MLXSW_REG_ZERO(pmmp, payload); + mlxsw_reg_pmmp_module_set(payload, module); } -static inline void -mlxsw_reg_pmtm_unpack(char *payload, - enum mlxsw_reg_pmtm_module_type *module_type) +/* PLLP - Port Local port to Label Port mapping Register + * ----------------------------------------------------- + * The PLLP register returns the mapping from Local Port into Label Port. + */ +#define MLXSW_REG_PLLP_ID 0x504A +#define MLXSW_REG_PLLP_LEN 0x10 + +MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN); + +/* reg_pllp_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8); + +/* reg_pllp_label_port + * Front panel label of the port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8); + +/* reg_pllp_split_num + * Label split mapping for local_port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4); + +/* reg_pllp_slot_index + * Slot index (0: Main board). 
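+ * Editor's note, not part of the patch: spectrum queries this register via
+ * mlxsw_sp_port_label_info_get() later in the patch and feeds the returned
+ * label port and split sub-number to mlxsw_core_port_init(), replacing the
+ * values previously derived from the module number and lane.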
+ * Access: RO + */ +MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4); + +static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port) { - *module_type = mlxsw_reg_pmtm_module_type_get(payload); + MLXSW_REG_ZERO(pllp, payload); + mlxsw_reg_pllp_local_port_set(payload, local_port); +} + +static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port, + u8 *split_num, u8 *slot_index) +{ + *label_port = mlxsw_reg_pllp_label_port_get(payload); + *split_num = mlxsw_reg_pllp_split_num_get(payload); + *slot_index = mlxsw_reg_pllp_slot_index_get(payload); } /* HTGT - Host Trap Group Table @@ -6401,6 +6526,12 @@ MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16); */ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); +/* reg_ritr_if_mac_profile_id + * MAC msb profile ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_mac_profile_id, 0x10, 16, 4); + /* reg_ritr_if_mac * Router interface MAC address. * In Spectrum, all MAC addresses must have the same 38 MSBits. @@ -6664,6 +6795,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } +static inline void +mlxsw_reg_ritr_loopback_ipip6_pack(char *payload, + enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, + enum mlxsw_reg_ritr_loopback_ipip_options options, + u16 uvr_id, u16 underlay_rif, + const struct in6_addr *usip, u32 gre_key) +{ + enum mlxsw_reg_ritr_loopback_protocol protocol = + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6; + + mlxsw_reg_ritr_loopback_protocol_set(payload, protocol); + mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options, + uvr_id, underlay_rif, gre_key); + mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload, + (const char *)usip); +} + /* RTAR - Router TCAM Allocation Register * -------------------------------------- * This register is used for allocation of regions in the TCAM table. 
@@ -6932,6 +7080,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip) mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip); } +static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr) +{ + mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6); + mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr); +} + static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index, bool counter_enable) { @@ -8117,19 +8271,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload, } static inline void -mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, - enum mlxsw_reg_rtdp_ipip_sip_check sip_check, - unsigned int type_check, bool gre_key_check, - u32 ipv4_usip, u32 expected_gre_key) +mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 expected_gre_key) { mlxsw_reg_rtdp_ipip_irif_set(payload, irif); mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check); mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check); mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check); - mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +static inline void +mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv4_usip, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); +} + +static inline void +mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv6_usip_ptr, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr); +} + +/* RIPS - Router IP version Six Register + * ------------------------------------- + * The RIPS register is used to store IPv6 addresses for use by the NVE and + * IPinIP + */ +#define MLXSW_REG_RIPS_ID 0x8021 +#define MLXSW_REG_RIPS_LEN 0x14 + +MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN); + +/* reg_rips_index + * Index to IPv6 address. + * For Spectrum, the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24); + +/* reg_rips_ipv6 + * IPv6 address + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16); + +static inline void mlxsw_reg_rips_pack(char *payload, u32 index, + const struct in6_addr *ipv6) +{ + MLXSW_REG_ZERO(rips, payload); + mlxsw_reg_rips_index_set(payload, index); + mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6); +} + /* RATRAD - Router Adjacency Table Activity Dump Register * ------------------------------------------------------ * The RATRAD register is used to dump and optionally clear activity bits of @@ -10208,6 +10414,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MCION - Management Cable IO and Notifications Register + * ------------------------------------------------------ + * The MCION register is used to query transceiver modules' IO pins and other + * notifications. 
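+ * Editor's note, not part of the patch: mlxsw_env_get_module_power_mode()
+ * earlier in this patch reads module_status_bits from this register to
+ * report whether a module is present and whether it is currently in low
+ * power mode, independently of the configured policy.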
+ */ +#define MLXSW_REG_MCION_ID 0x9052 +#define MLXSW_REG_MCION_LEN 0x18 + +MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN); + +/* reg_mcion_module + * Module number. + * Access: Index + */ +MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8); + +enum { + MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0), + MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8), +}; + +/* reg_mcion_module_status_bits + * Module IO status as defined by SFF. + * Access: RO + */ +MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16); + +static inline void mlxsw_reg_mcion_pack(char *payload, u8 module) +{ + MLXSW_REG_ZERO(mcion, payload); + mlxsw_reg_mcion_module_set(payload, module); +} + /* MTPPS - Management Pulse Per Second Register * -------------------------------------------- * This register provides the device PPS capabilities, configure the PPS in and @@ -12200,9 +12439,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pspa), MLXSW_REG(pmaos), MLXSW_REG(pplr), + MLXSW_REG(pmtdb), MLXSW_REG(pmpe), MLXSW_REG(pddr), - MLXSW_REG(pmtm), + MLXSW_REG(pmmp), + MLXSW_REG(pllp), MLXSW_REG(htgt), MLXSW_REG(hpkt), MLXSW_REG(rgcr), @@ -12210,6 +12451,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(rips), MLXSW_REG(ratrad), MLXSW_REG(rdpm), MLXSW_REG(ricnt), @@ -12249,6 +12491,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mgir), MLXSW_REG(mrsr), MLXSW_REG(mlcr), + MLXSW_REG(mcion), MLXSW_REG(mtpps), MLXSW_REG(mtutc), MLXSW_REG(mpsc), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index a56c9e19a3904d..c7fc650608ebee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -25,9 +25,6 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, - MLXSW_RES_ID_LOCAL_PORTS_IN_1X, - MLXSW_RES_ID_LOCAL_PORTS_IN_2X, - MLXSW_RES_ID_LOCAL_PORTS_IN_4X, MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER, MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_MAX_HEADROOM_SIZE, @@ -52,6 +49,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, + MLXSW_RES_ID_MAX_RIF_MAC_PROFILES, MLXSW_RES_ID_MAX_LPM_TREES, MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4, MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6, @@ -84,9 +82,6 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, - [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610, - [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611, - [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612, [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ @@ -111,6 +106,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, + [MLXSW_RES_ID_MAX_RIF_MAC_PROFILES] = 0x2C14, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02, [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 250c5a24264dcc..5925db386b1ba5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -47,7 +47,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 
#define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 2406 +#define MLXSW_SP1_FWREV_SUBMINOR 3326 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 2406 +#define MLXSW_SP2_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 #define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 2406 +#define MLXSW_SP3_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -316,11 +316,11 @@ static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - unsigned char *addr = mlxsw_sp_port->dev->dev_addr; - ether_addr_copy(addr, mlxsw_sp->base_mac); - addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; - return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); + eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac, + mlxsw_sp_port->local_port); + return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, + mlxsw_sp_port->dev->dev_addr); } static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu) @@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); } -static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) +static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 swid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pspa_pl[MLXSW_REG_PSPA_LEN]; - mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); + mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); } @@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, port_mapping->module = module; port_mapping->width = width; + port_mapping->module_width = width; port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); return 0; } -static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) +static int +mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_port_mapping *port_mapping) { - struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - int i; + int i, err; + + mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module); - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); for (i = 0; i < port_mapping->width; i++) { mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ } - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + goto err_pmlp_write; + return 0; + +err_pmlp_write: + mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module); + return err; } -static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port 
*mlxsw_sp_port) +static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 module) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_env_module_port_unmap(mlxsw_sp->core, module); } static int mlxsw_sp_port_open(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + err = mlxsw_env_module_port_up(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); if (err) return err; + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) + goto err_port_admin_status_set; netif_start_queue(dev); return 0; + +err_port_admin_status_set: + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return err; } static int mlxsw_sp_port_stop(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; netif_stop_queue(dev); - return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return 0; } static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, @@ -649,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); if (err) return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -799,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev, for (i = 0; i < TC_MAX_QUEUE; i++) { err = mlxsw_sp_port_get_stats_raw(dev, - MLXSW_REG_PPCNT_TC_CONG_TC, + MLXSW_REG_PPCNT_TC_CONG_CNT, i, ppcnt_pl); - if (!err) - xstats->wred_drop[i] = - mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + if (err) + goto tc_cnt; + xstats->wred_drop[i] = + mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl); + +tc_cnt: err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, i, ppcnt_pl); if (err) @@ -1010,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); + case FLOW_BLOCK_BINDER_TYPE_RED_MARK: + return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f); default: return -EOPNOTSUPP; } @@ -1442,29 +1473,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); } +static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *port_number, + u8 *split_port_subnumber, + u8 *slot_index) +{ + char pllp_pl[MLXSW_REG_PLLP_LEN]; + int err; + + mlxsw_reg_pllp_pack(pllp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); + if (err) + return err; + mlxsw_reg_pllp_unpack(pllp_pl, port_number, + split_port_subnumber, slot_index); + return 0; +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 
split_base_local_port, + bool split, struct mlxsw_sp_port_mapping *port_mapping) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - bool split = !!split_base_local_port; struct mlxsw_sp_port *mlxsw_sp_port; u32 lanes = port_mapping->width; + u8 split_port_subnumber; struct net_device *dev; + u8 port_number; + u8 slot_index; bool splittable; int err; + err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", + local_port); + return err; + } + + err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + local_port); + goto err_port_swid_set; + } + + err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, + &split_port_subnumber, &slot_index); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", + local_port); + goto err_port_label_info_get; + } + splittable = lanes > 1 && !split; err = mlxsw_core_port_init(mlxsw_sp->core, local_port, - port_mapping->module + 1, split, - port_mapping->lane / lanes, - splittable, lanes, - mlxsw_sp->base_mac, + port_number, split, split_port_subnumber, + splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", local_port); - return err; + goto err_core_port_init; } dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1480,7 +1550,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_sp_port->local_port = local_port; mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; mlxsw_sp_port->split = split; - mlxsw_sp_port->split_base_local_port = split_base_local_port; mlxsw_sp_port->mapping = *port_mapping; mlxsw_sp_port->link.autoneg = 1; INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); @@ -1498,20 +1567,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev->netdev_ops = &mlxsw_sp_port_netdev_ops; dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; - err = mlxsw_sp_port_module_map(mlxsw_sp_port); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", - mlxsw_sp_port->local_port); - goto err_port_module_map; - } - - err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", - mlxsw_sp_port->local_port); - goto err_port_swid_set; - } - err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", @@ -1712,21 +1767,24 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_port_speed_by_width_set: err_port_system_port_mapping_set: err_dev_addr_init: - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); -err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp_port); -err_port_module_map: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: free_netdev(dev); err_alloc_etherdev: mlxsw_core_port_fini(mlxsw_sp->core, local_port); +err_core_port_init: +err_port_label_info_get: + mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); return err; } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + u8 module = mlxsw_sp_port->mapping.module; 
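	/* Editor's note, not part of the patch: teardown now mirrors the new
	 * creation order - the module is mapped and the SWID set before
	 * mlxsw_core_port_init(), so they are undone here only after
	 * mlxsw_core_port_fini(), keeping the mlxsw_env_module_port_map() /
	 * _unmap() counts balanced for the power-mode logic in core_env.c.
	 */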
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); @@ -1742,12 +1800,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); mlxsw_sp_port_buffers_fini(mlxsw_sp_port); - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); - mlxsw_sp_port_module_unmap(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); free_netdev(mlxsw_sp_port->dev); mlxsw_core_port_fini(mlxsw_sp->core, local_port); + mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) @@ -1789,8 +1848,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp_port); } +static bool mlxsw_sp_local_port_valid(u8 local_port) +{ + return local_port != MLXSW_PORT_CPU_PORT; +} + static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) { + if (!mlxsw_sp_local_port_valid(local_port)) + return false; return mlxsw_sp->ports[local_port] != NULL; } @@ -1827,7 +1893,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) port_mapping = mlxsw_sp->port_mapping[i]; if (!port_mapping) continue; - err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); + err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); if (err) goto err_port_create; } @@ -1894,17 +1960,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->port_mapping); } -static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) -{ - u8 offset = (local_port - 1) % max_width; - - return local_port - offset; -} - static int -mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, +mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port_mapping *port_mapping, - unsigned int count, u8 offset) + unsigned int count, const char *pmtdb_pl) { struct mlxsw_sp_port_mapping split_port_mapping; int err, i; @@ -1912,8 +1971,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, split_port_mapping = *port_mapping; split_port_mapping.width /= count; for (i = 0; i < count; i++) { - err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, - base_port, &split_port_mapping); + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (!mlxsw_sp_local_port_valid(s_local_port)) + continue; + + err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, + true, &split_port_mapping); if (err) goto err_port_create; split_port_mapping.lane += split_port_mapping.width; @@ -1922,49 +1986,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, return 0; err_port_create: - for (i--; i >= 0; i--) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + for (i--; i >= 0; i--) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } return err; } static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, - u8 base_port, - unsigned int count, u8 offset) + unsigned int count, + const char *pmtdb_pl) { struct mlxsw_sp_port_mapping *port_mapping; int i; /* Go over original unsplit ports in the gap and recreate them. 
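 * Editor's illustrative sketch, not part of the patch: instead of the old
 * base_port + i * offset arithmetic, both the split and unsplit paths now
 * ask the firmware which local ports belong to the module, roughly:
 *   mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, module, module_width / count, count);
 *   mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
 *   for (i = 0; i < count; i++)
 *           local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);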
*/ - for (i = 0; i < count * offset; i++) { - port_mapping = mlxsw_sp->port_mapping[base_port + i]; - if (!port_mapping) + for (i = 0; i < count; i++) { + u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + port_mapping = mlxsw_sp->port_mapping[local_port]; + if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) continue; - mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); + mlxsw_sp_port_create(mlxsw_sp, local_port, + false, port_mapping); } } -static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, - unsigned int count, - unsigned int max_width) -{ - enum mlxsw_res_id local_ports_in_x_res_id; - int split_width = max_width / count; - - if (split_width == 1) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; - else if (split_width == 2) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; - else if (split_width == 4) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; - else - return -EINVAL; - - if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) - return -EINVAL; - return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); -} - static struct mlxsw_sp_port * mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) { @@ -1980,9 +2029,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port_mapping port_mapping; struct mlxsw_sp_port *mlxsw_sp_port; - int max_width; - u8 base_port; - int offset; + enum mlxsw_reg_pmtdb_status status; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; int i; int err; @@ -1994,57 +2042,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; + if (mlxsw_sp_port->split) { + NL_SET_ERR_MSG_MOD(extack, "Port is already split"); + return -EINVAL; } - /* Split port with non-max cannot be split. */ - if (mlxsw_sp_port->mapping.width != max_width) { - netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); - NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (offset < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); + status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); + if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); return -EINVAL; } - /* Only in case max split is being done, the local port and - * base port may differ. - */ - base_port = count == max_width ? - mlxsw_sp_cluster_base_port_get(local_port, max_width) : - local_port; + port_mapping = mlxsw_sp_port->mapping; - for (i = 0; i < count * offset; i++) { - /* Expect base port to exist and also the one in the middle in - * case of maximal split count. 
- */ - if (i == 0 || (count == max_width && i == count / 2)) - continue; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { - netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); - NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); - return -EINVAL; - } + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); } - port_mapping = mlxsw_sp_port->mapping; - - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); - - err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, - count, offset); + err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, + count, pmtdb_pl); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); goto err_port_split_create; @@ -2053,7 +2081,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; err_port_split_create: - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return err; } @@ -2062,11 +2090,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; unsigned int count; - int max_width; - u8 base_port; - int offset; int i; + int err; mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { @@ -2077,35 +2104,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; - } - - count = max_width / mlxsw_sp_port->mapping.width; + count = mlxsw_sp_port->mapping.module_width / + mlxsw_sp_port->mapping.width; - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (WARN_ON(offset < 0)) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - base_port = mlxsw_sp_port->split_base_local_port; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return 0; } @@ -3260,6 +3282,30 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) &span_size_params); } +static int 
+mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params size_params; + u8 max_rif_mac_profiles; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) + return -EIO; + + max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_RIF_MAC_PROFILES); + devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, + max_rif_mac_profiles, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + + return devlink_resource_register(devlink, + "rif_mac_profiles", + max_rif_mac_profiles, + MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &size_params); +} + static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) { int err; @@ -3278,10 +3324,16 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) err = mlxsw_sp_policer_resources_register(mlxsw_core); if (err) - goto err_resources_counter_register; + goto err_policer_resources_register; + + err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); + if (err) + goto err_resources_rif_mac_profile_register; return 0; +err_resources_rif_mac_profile_register: +err_policer_resources_register: err_resources_counter_register: err_resources_span_register: devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); @@ -3306,10 +3358,16 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) err = mlxsw_sp_policer_resources_register(mlxsw_core); if (err) - goto err_resources_counter_register; + goto err_policer_resources_register; + + err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); + if (err) + goto err_resources_rif_mac_profile_register; return 0; +err_resources_rif_mac_profile_register: +err_policer_resources_register: err_resources_counter_register: err_resources_span_register: devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3a43cba6d23c2f..32fdd37657ddae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -67,6 +67,7 @@ enum mlxsw_sp_resource_id { MLXSW_SP_RESOURCE_COUNTERS_RIF, MLXSW_SP_RESOURCE_GLOBAL_POLICERS, MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS, + MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, }; struct mlxsw_sp_port; @@ -144,7 +145,8 @@ struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; - u8 width; + u8 width; /* Number of lanes used by the port */ + u8 module_width; /* Number of lanes in the module (static) */ u8 lane; }; @@ -284,6 +286,7 @@ struct mlxsw_sp_port_vlan { /* No need an internal lock; At worse - miss a single periodic iteration */ struct mlxsw_sp_port_xstats { u64 ecn; + u64 tc_ecn[TC_MAX_QUEUE]; u64 wred_drop[TC_MAX_QUEUE]; u64 tail_drop[TC_MAX_QUEUE]; u64 backlog[TC_MAX_QUEUE]; @@ -345,7 +348,6 @@ struct mlxsw_sp_port { u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; } ptp; - u8 split_base_local_port; int max_mtu; u32 max_speed; struct mlxsw_sp_hdroom *hdroom; @@ -747,6 +749,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; @@ -758,6 +761,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: + case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS: 
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: default: return 1; @@ -1193,6 +1197,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_fifo_qopt_offload *p); int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f); +int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f); /* spectrum_fid.c */ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 3a73d654017fe5..10ae1115de6c84 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index ded4cf6586809b..4b713832fdd559 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -119,7 +119,6 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion) { struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; struct mlxsw_sp_acl_atcam_region_12kb *region_12kb; - size_t alloc_size; u64 max_lkey_id; int err; @@ -131,8 +130,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion) if (!region_12kb) return -ENOMEM; - alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long); - region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL); + region_12kb->used_lkey_id = bitmap_zalloc(max_lkey_id, GFP_KERNEL); if (!region_12kb->used_lkey_id) { err = -ENOMEM; goto err_used_lkey_id_alloc; @@ -149,7 +147,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion) return 0; err_rhashtable_init: - kfree(region_12kb->used_lkey_id); + bitmap_free(region_12kb->used_lkey_id); err_used_lkey_id_alloc: kfree(region_12kb); return err; @@ -161,7 +159,7 @@ mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion) { struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; rhashtable_destroy(&region_12kb->lkey_ht); - kfree(region_12kb->used_lkey_id); + bitmap_free(region_12kb->used_lkey_id); kfree(region_12kb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 7cccc41dd69c9c..31f7f4c3acc367 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -36,7 +36,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, u64 max_tcam_regions; u64 max_regions; u64 max_groups; - size_t alloc_size; int err; mutex_init(&tcam->lock); @@ -52,15 +51,13 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, if (max_tcam_regions < max_regions) max_regions = max_tcam_regions; - alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions); - tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL); + tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL); if (!tcam->used_regions) return -ENOMEM; tcam->max_regions = 
max_regions; max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS); - alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups); - tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL); + tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL); if (!tcam->used_groups) { err = -ENOMEM; goto err_alloc_used_groups; @@ -76,9 +73,9 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, return 0; err_tcam_init: - kfree(tcam->used_groups); + bitmap_free(tcam->used_groups); err_alloc_used_groups: - kfree(tcam->used_regions); + bitmap_free(tcam->used_regions); return err; } @@ -89,8 +86,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, mutex_destroy(&tcam->lock); ops->fini(mlxsw_sp, tcam->priv); - kfree(tcam->used_groups); - kfree(tcam->used_regions); + bitmap_free(tcam->used_groups); + bitmap_free(tcam->used_regions); } int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 9de160e740b280..d78cf5a7220a99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; - unsigned long cb_priv; + unsigned long cb_priv = 0; LIST_HEAD(bulk_list); char *sbsr_pl; u8 masked_count; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c index b65b93a2b9bc60..fc2257753b9b39 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -122,7 +122,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools); struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_counter_pool *pool; - unsigned int map_size; int err; pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count), @@ -143,9 +142,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS, mlxsw_sp_counter_pool_occ_get, pool); - map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long); - - pool->usage = kzalloc(map_size, GFP_KERNEL); + pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL); if (!pool->usage) { err = -ENOMEM; goto err_usage_alloc; @@ -158,7 +155,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) return 0; err_sub_pools_init: - kfree(pool->usage); + bitmap_free(pool->usage); err_usage_alloc: devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_COUNTERS); @@ -176,7 +173,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp) WARN_ON(find_first_bit(pool->usage, pool->pool_size) != pool->pool_size); WARN_ON(atomic_read(&pool->active_entries_count)); - kfree(pool->usage); + bitmap_free(pool->usage); devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_COUNTERS); kfree(pool); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 267590a0eee78c..84d4460f3dcded 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = { {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, {1030, 
ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, + + {1042, ETHTOOL_LINK_EXT_STATE_MODULE, + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY}, }; static void @@ -124,6 +127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m link_ext_state_info->cable_issue = link_ext_state_mapping.link_ext_substate; break; + case ETHTOOL_LINK_EXT_STATE_MODULE: + link_ext_state_info->module = + link_ext_state_mapping.link_ext_substate; + break; default: break; } @@ -1197,6 +1204,41 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev, *ranges = mlxsw_rmon_ranges; } +static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags); +} + +static int +mlxsw_sp_get_module_power_mode(struct net_device *dev, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params, + extack); +} + +static int +mlxsw_sp_set_module_power_mode(struct net_device *dev, + const struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module, + params->policy, extack); +} + const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .cap_link_lanes_supported = true, .get_drvinfo = mlxsw_sp_port_get_drvinfo, @@ -1218,6 +1260,9 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats, .get_rmon_stats = mlxsw_sp_get_rmon_stats, + .reset = mlxsw_sp_reset, + .get_module_power_mode = mlxsw_sp_get_module_power_mode, + .set_module_power_mode = mlxsw_sp_set_module_power_mode, }; struct mlxsw_sp1_port_link_mode { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 5facabd86882a5..ad3926de88f285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) { - return !!(parms.i_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) { - return !!(parms.o_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? 
- be32_to_cpu(parms.i_key) : 0; + be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_ikey(parms) ? + be32_to_cpu(parms->i_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? - be32_to_cpu(parms.o_key) : 0; + be32_to_cpu(parms->o_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_okey(parms) ? + be32_to_cpu(parms->o_key) : 0; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr }; } union mlxsw_sp_l3addr @@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_saddr(parms4); + return mlxsw_sp_ipip_parms4_saddr(&parms4); case MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_saddr(parms6); + return mlxsw_sp_ipip_parms6_saddr(&parms6); } WARN_ON(1); @@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4).addr4; + return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } static union mlxsw_sp_l3addr @@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4); + return mlxsw_sp_ipip_parms4_daddr(&parms4); case MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_daddr(parms6); + return mlxsw_sp_ipip_parms6_daddr(&parms6); } WARN_ON(1); @@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) return !memcmp(&addr, &naddr, sizeof(naddr)); } +static struct mlxsw_sp_ipip_parms +mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) +{ + struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV4, + .saddr = mlxsw_sp_ipip_parms4_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms4_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms4_ikey(&parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), + }; +} + 
static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, u32 ikey; parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms); - ikey = mlxsw_sp_ipip_parms4_ikey(parms); + has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms4_ikey(&parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); @@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; - lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ? + lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; return (struct mlxsw_sp_rif_ipip_lb_config){ .lb_ipipt = lb_ipipt, - .okey = mlxsw_sp_ipip_parms4_okey(parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, ol_dev), @@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_ipip_entry *ipip_entry, - struct netlink_ext_ack *extack) +mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + const struct mlxsw_sp_ipip_parms *new_parms, + struct netlink_ext_ack *extack) { - union mlxsw_sp_l3addr old_saddr, new_saddr; - union mlxsw_sp_l3addr old_daddr, new_daddr; - struct ip_tunnel_parm new_parms; + const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms; bool update_tunnel = false; bool update_decap = false; bool update_nhs = false; int err = 0; - new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - - new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms); - old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4); - new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms); - old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4); - - if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { + if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) { u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); /* Since the local address has changed, if there is another * tunnel with a matching saddr, both need to be demoted. 
*/ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, - MLXSW_SP_L3_PROTO_IPV4, - new_saddr, ul_tb_id, + new_parms->proto, + new_parms->saddr, + ul_tb_id, ipip_entry)) { mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); return 0; } update_tunnel = true; - } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_okey(new_parms)) || - ipip_entry->parms4.link != new_parms.link) { + } else if (old_parms->okey != new_parms->okey || + old_parms->link != new_parms->link) { update_tunnel = true; - } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { + } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) { update_nhs = true; - } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_ikey(new_parms)) { + } else if (old_parms->ikey != new_parms->ikey) { update_decap = true; } @@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, false, false, false, extack); + if (err) + return err; - ipip_entry->parms4 = new_parms; - return err; + ipip_entry->parms = *new_parms; + return 0; +} + +static int +mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + return 0; +} + +static void +mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ } static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .dev_type = ARPHRD_IPGRE, .ul_proto = MLXSW_SP_L3_PROTO_IPV4, + .inc_parsing_depth = false, + .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4, .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4, .decap_config = mlxsw_sp_ipip_decap_config_gre4, .can_offload = mlxsw_sp_ipip_can_offload_gre4, .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4, + .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4, + .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4, }; -const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = { +static struct mlxsw_sp_ipip_parms +mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_parms parms = {0}; + + WARN_ON_ONCE(1); + return parms; +} + +static int +mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + return false; +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct mlxsw_sp_rif_ipip_lb_config config = {0}; + + WARN_ON_ONCE(1); + return config; +} + +static int +mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) 
+{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static void +mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp1_ipip_decap_config_gre6, + .can_offload = mlxsw_sp1_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = { [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops, +}; + +static struct mlxsw_sp_ipip_parms +mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_parms6_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms6_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms6_ikey(&parms), + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + }; +} + +static int +mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + enum mlxsw_reg_ratr_op op; + + op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP, + adj_index, rif_index); + mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl, + ipip_entry->dip_kvdl_index); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + struct __ip6_tnl_parm parms; + unsigned int type_check; + bool has_ikey; + u32 ikey; + + parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms6_ikey(&parms); + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); + mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); + + type_check = has_ikey ? + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE; + + /* Linux demuxes tunnels based on packet SIP (which must match tunnel + * remote IP). Thus configure decap so that it filters out packets that + * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is + * generated for packets that fail this criterion. Linux then handles + * such packets in slow path and generates ICMP destination unreachable. 
+ */ + mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index, + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6, + type_check, has_ikey, + ipip_entry->dip_kvdl_index, ikey); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); +} + +static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); + bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; + bool inherit_ttl = tparm.hop_limit == 0; + __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + + return (tparm.i_flags & ~okflags) == 0 && + (tparm.o_flags & ~okflags) == 0 && + inherit_ttl && inherit_tos && + mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; + + lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ? + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; + return (struct mlxsw_sp_rif_ipip_lb_config){ + .lb_ipipt = lb_ipipt, + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + .ul_protocol = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6, + ol_dev), + }; +} + +static int +mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + char rips_pl[MLXSW_REG_RIPS_LEN]; + struct __ip6_tnl_parm parms6; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + &ipip_entry->dip_kvdl_index); + if (err) + return err; + + parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index, + &parms6.raddr); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); + if (err) + goto err_rips_write; + + return 0; + +err_rips_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); + return err; +} + +static void +mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp2_ipip_decap_config_gre6, + .can_offload = mlxsw_sp2_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = { + [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + 
[MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops, }; static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp, @@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp) return 0; } + +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) +{ + struct net *net = dev_net(ol_dev); + struct ip_tunnel *tun4; + struct ip6_tnl *tun6; + + switch (ol_dev->type) { + case ARPHRD_IPGRE: + tun4 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun4->parms.link); + case ARPHRD_IP6GRE: + tun6 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun6->parms.link); + default: + return NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index f0837b42d1d62e..8cc259dcc8d098 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -7,6 +7,7 @@ #include "spectrum_router.h" #include #include +#include struct ip_tunnel_parm mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); @@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr); enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, + MLXSW_SP_IPIP_TYPE_GRE6, MLXSW_SP_IPIP_TYPE_MAX, }; +struct mlxsw_sp_ipip_parms { + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr saddr; + union mlxsw_sp_l3addr daddr; + int link; + u32 ikey; + u32 okey; +}; + struct mlxsw_sp_ipip_entry { enum mlxsw_sp_ipip_type ipipt; struct net_device *ol_dev; /* Overlay. */ struct mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; - union { - struct ip_tunnel_parm parms4; - }; + struct mlxsw_sp_ipip_parms parms; + u32 dip_kvdl_index; }; struct mlxsw_sp_ipip_ops { int dev_type; enum mlxsw_sp_l3proto ul_proto; /* Underlay. 
*/ + bool inc_parsing_depth; + + struct mlxsw_sp_ipip_parms + (*parms_init)(const struct net_device *ol_dev); int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops { int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, struct netlink_ext_ack *extack); + int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry); + void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry); }; -extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[]; #endif /* _MLXSW_IPIP_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 9958d503bf0e9a..4243d3b883ff18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -50,12 +50,24 @@ struct mlxsw_sp_qdisc_ops { struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 parent); unsigned int num_classes; + + u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc *child); + int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc *child); +}; + +struct mlxsw_sp_qdisc_ets_band { + u8 prio_bitmap; + int tclass_num; +}; + +struct mlxsw_sp_qdisc_ets_data { + struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS]; }; struct mlxsw_sp_qdisc { u32 handle; - int tclass_num; - u8 prio_bitmap; union { struct red_stats red; } xstats_base; @@ -67,6 +79,10 @@ struct mlxsw_sp_qdisc { u64 backlog; } stats_base; + union { + struct mlxsw_sp_qdisc_ets_data *ets_data; + }; + struct mlxsw_sp_qdisc_ops *ops; struct mlxsw_sp_qdisc *parent; struct mlxsw_sp_qdisc *qdiscs; @@ -141,8 +157,7 @@ mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data) } static struct mlxsw_sp_qdisc * -mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent, - bool root_only) +mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent) { struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; @@ -150,8 +165,6 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent, return NULL; if (parent == TC_H_ROOT) return &qdisc_state->root_qdisc; - if (root_only) - return NULL; return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc, mlxsw_sp_qdisc_walk_cb_find, &parent); } @@ -187,6 +200,32 @@ mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog; } +static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent; + + if (!parent) + return 0xff; + if (!parent->ops->get_prio_bitmap) + return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent); + return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc); +} + +#define MLXSW_SP_PORT_DEFAULT_TCLASS 0 + +static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent; + + if (!parent) + return MLXSW_SP_PORT_DEFAULT_TCLASS; + if (!parent->ops->get_tclass_num) + return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent); + return 
parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc); +} + static int mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) @@ -194,6 +233,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc; int err_hdroom = 0; int err = 0; + int i; if (!mlxsw_sp_qdisc) return 0; @@ -211,6 +251,9 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, if (!mlxsw_sp_qdisc->ops) return 0; + for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, + &mlxsw_sp_qdisc->qdiscs[i]); mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc); if (mlxsw_sp_qdisc->ops->destroy) err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port, @@ -226,6 +269,87 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, return err_hdroom ?: err; } +struct mlxsw_sp_qdisc_tree_validate { + bool forbid_ets; + bool forbid_root_tbf; + bool forbid_tbf; + bool forbid_red; +}; + +static int +__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_tree_validate validate); + +static int +mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_tree_validate validate) +{ + unsigned int i; + int err; + + for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) { + err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i], + validate); + if (err) + return err; + } + + return 0; +} + +static int +__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_tree_validate validate) +{ + if (!mlxsw_sp_qdisc->ops) + return 0; + + switch (mlxsw_sp_qdisc->ops->type) { + case MLXSW_SP_QDISC_FIFO: + break; + case MLXSW_SP_QDISC_RED: + if (validate.forbid_red) + return -EINVAL; + validate.forbid_red = true; + validate.forbid_root_tbf = true; + validate.forbid_ets = true; + break; + case MLXSW_SP_QDISC_TBF: + if (validate.forbid_root_tbf) { + if (validate.forbid_tbf) + return -EINVAL; + /* This is a TC TBF. */ + validate.forbid_tbf = true; + validate.forbid_ets = true; + } else { + /* This is root TBF. 
*/ + validate.forbid_root_tbf = true; + } + break; + case MLXSW_SP_QDISC_PRIO: + case MLXSW_SP_QDISC_ETS: + if (validate.forbid_ets) + return -EINVAL; + validate.forbid_root_tbf = true; + validate.forbid_ets = true; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate); +} + +static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_qdisc_tree_validate validate = {}; + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + + mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc; + return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate); +} + static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, @@ -268,6 +392,10 @@ static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_qdisc->num_classes = ops->num_classes; mlxsw_sp_qdisc->ops = ops; mlxsw_sp_qdisc->handle = handle; + err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port); + if (err) + goto err_replace; + err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params); if (err) goto err_replace; @@ -406,13 +534,17 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port, u64 *p_tx_bytes, u64 *p_tx_packets, u64 *p_drops, u64 *p_backlog) { - int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; u64 tx_bytes, tx_packets; + u8 prio_bitmap; + int tclass_num; + prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, + mlxsw_sp_qdisc); + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - mlxsw_sp_qdisc_bstats_per_priority_get(xstats, - mlxsw_sp_qdisc->prio_bitmap, + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap, &tx_packets, &tx_bytes); *p_tx_packets += tx_packets; @@ -506,19 +638,24 @@ static void mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct red_stats *red_base; + u8 prio_bitmap; + int tclass_num; + prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, + mlxsw_sp_qdisc); + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats_base = &mlxsw_sp_qdisc->stats_base; red_base = &mlxsw_sp_qdisc->xstats_base.red; - mlxsw_sp_qdisc_bstats_per_priority_get(xstats, - mlxsw_sp_qdisc->prio_bitmap, + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap, &stats_base->tx_packets, &stats_base->tx_bytes); + red_base->prob_mark = xstats->tc_ecn[tclass_num]; red_base->prob_drop = xstats->wred_drop[tclass_num]; red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); @@ -532,8 +669,10 @@ static int mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, - mlxsw_sp_qdisc->tclass_num); + int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); + + return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); } static int @@ -563,6 +702,14 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int +mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle, unsigned int band, + struct mlxsw_sp_qdisc *child_qdisc); +static void 
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle); + static int mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, @@ -570,9 +717,19 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct tc_red_qopt_offload_params *p = params; - int tclass_num = mlxsw_sp_qdisc->tclass_num; + int tclass_num; u32 min, max; u64 prob; + int err; + + err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0, + &mlxsw_sp_qdisc->qdiscs[0]); + if (err) + return err; + mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC); + + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); /* calculate probability in percentage */ prob = p->probability; @@ -615,22 +772,27 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, void *xstats_ptr) { struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red; - int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; struct red_stats *res = xstats_ptr; - int early_drops, pdrops; + int early_drops, marks, pdrops; + int tclass_num; + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark; pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; + res->prob_mark += marks; xstats_base->pdrop += pdrops; xstats_base->prob_drop += early_drops; + xstats_base->prob_mark += marks; return 0; } @@ -639,16 +801,19 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, struct tc_qopt_offload_stats *stats_ptr) { - int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; u64 overlimits; + int tclass_num; + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats_base = &mlxsw_sp_qdisc->stats_base; mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr); - overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits; + overlimits = xstats->wred_drop[tclass_num] + + xstats->tc_ecn[tclass_num] - stats_base->overlimits; stats_ptr->qstats->overlimits += overlimits; stats_base->overlimits += overlimits; @@ -660,11 +825,12 @@ static struct mlxsw_sp_qdisc * mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 parent) { - return NULL; + /* RED and TBF are formally classful qdiscs, but all class references, + * including X:0, just refer to the same one class. 
+ */ + return &mlxsw_sp_qdisc->qdiscs[0]; } -#define MLXSW_SP_PORT_DEFAULT_TCLASS 0 - static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { .type = MLXSW_SP_QDISC_RED, .check_params = mlxsw_sp_qdisc_red_check_params, @@ -675,14 +841,19 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { .get_xstats = mlxsw_sp_qdisc_get_red_xstats, .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats, .find_class = mlxsw_sp_qdisc_leaf_find_class, + .num_classes = 1, }; +static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u8 band, u32 child_handle); + static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_red_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent); if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; @@ -704,6 +875,9 @@ static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, case TC_RED_STATS: return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); + case TC_RED_GRAFT: + return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0, + p->child_handle); default: return -EOPNOTSUPP; } @@ -740,13 +914,34 @@ mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_qdisc->stats_base.backlog = 0; } +static enum mlxsw_reg_qeec_hr +mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc) + return MLXSW_REG_QEEC_HR_PORT; + + /* Configure subgroup shaper, so that both UC and MC traffic is subject + * to shaping. That is unlike RED, however UC queue lengths are going to + * be different than MC ones due to different pool and quota + * configurations, so the configuration is not applicable. For shaper on + * the other hand, subjecting the overall stream to the configured + * shaper makes sense. Also note that that is what we do for + * ieee_setmaxrate(). + */ + return MLXSW_REG_QEEC_HR_SUBGROUP; +} + static int mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HR_SUBGROUP, - mlxsw_sp_qdisc->tclass_num, 0, + enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port, + mlxsw_sp_qdisc); + int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); + + return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0, MLXSW_REG_QEEC_MAS_DIS, 0); } @@ -828,27 +1023,29 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params) { + enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port, + mlxsw_sp_qdisc); struct tc_tbf_qopt_offload_replace_params *p = params; u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p); + int tclass_num; u8 burst_size; int err; + err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0, + &mlxsw_sp_qdisc->qdiscs[0]); + if (err) + return err; + mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC); + + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, + mlxsw_sp_qdisc); + err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size); if (WARN_ON_ONCE(err)) /* check_params above was supposed to reject this value. */ return -EINVAL; - /* Configure subgroup shaper, so that both UC and MC traffic is subject - * to shaping. 
That is unlike RED, however UC queue lengths are going to - * be different than MC ones due to different pool and quota - * configurations, so the configuration is not applicable. For shaper on - * the other hand, subjecting the overall stream to the configured - * shaper makes sense. Also note that that is what we do for - * ieee_setmaxrate(). - */ - return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, - MLXSW_REG_QEEC_HR_SUBGROUP, - mlxsw_sp_qdisc->tclass_num, 0, + return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0, rate_kbps, burst_size); } @@ -881,6 +1078,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = { .get_stats = mlxsw_sp_qdisc_get_tbf_stats, .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats, .find_class = mlxsw_sp_qdisc_leaf_find_class, + .num_classes = 1, }; static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, @@ -888,7 +1086,7 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent); if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; @@ -907,6 +1105,9 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, case TC_TBF_STATS: return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); + case TC_TBF_GRAFT: + return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0, + p->child_handle); default: return -EOPNOTSUPP; } @@ -957,6 +1158,32 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = { .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats, }; +static int +mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle, unsigned int band, + struct mlxsw_sp_qdisc *child_qdisc) +{ + struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; + + if (handle == qdisc_state->future_handle && + qdisc_state->future_fifos[band]) + return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC, + child_qdisc, + &mlxsw_sp_qdisc_ops_fifo, + NULL); + return 0; +} + +static void +mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle) +{ + struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; + + qdisc_state->future_handle = handle; + memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos)); +} + static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_fifo_qopt_offload *p) { @@ -965,16 +1192,15 @@ static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, unsigned int band; u32 parent_handle; - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent); if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) { parent_handle = TC_H_MAJ(p->parent); if (parent_handle != qdisc_state->future_handle) { /* This notifications is for a different Qdisc than * previously. Wipe the future cache. 
*/ - memset(qdisc_state->future_fifos, 0, - sizeof(qdisc_state->future_fifos)); - qdisc_state->future_handle = parent_handle; + mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, + parent_handle); } band = TC_H_MIN(p->parent) - 1; @@ -1033,11 +1259,10 @@ static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HR_SUBGROUP, i, 0, false, 0); - mlxsw_sp_qdisc_destroy(mlxsw_sp_port, - &mlxsw_sp_qdisc->qdiscs[i]); - mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0; } + kfree(mlxsw_sp_qdisc->ets_data); + mlxsw_sp_qdisc->ets_data = NULL; return 0; } @@ -1066,6 +1291,31 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, return __mlxsw_sp_qdisc_ets_check_params(p->bands); } +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *mlxsw_sp_port) +{ + u64 backlog; + + if (mlxsw_sp_qdisc->ops) { + backlog = mlxsw_sp_qdisc->stats_base.backlog; + if (mlxsw_sp_qdisc->ops->clean_stats) + mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, + mlxsw_sp_qdisc); + mlxsw_sp_qdisc->stats_base.backlog = backlog; + } + + return NULL; +} + +static void +mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats, + mlxsw_sp_port); +} + static int __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, @@ -1074,69 +1324,80 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, const unsigned int *weights, const u8 *priomap) { - struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc; + struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data; + struct mlxsw_sp_qdisc_ets_band *ets_band; struct mlxsw_sp_qdisc *child_qdisc; - int tclass, i, band, backlog; - u8 old_priomap; + u8 old_priomap, new_priomap; + int i, band; int err; + if (!ets_data) { + ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL); + if (!ets_data) + return -ENOMEM; + mlxsw_sp_qdisc->ets_data = ets_data; + + for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) { + int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + + ets_band = &ets_data->bands[band]; + ets_band->tclass_num = tclass_num; + } + } + for (band = 0; band < nbands; band++) { - tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + int tclass_num; + child_qdisc = &mlxsw_sp_qdisc->qdiscs[band]; - old_priomap = child_qdisc->prio_bitmap; - child_qdisc->prio_bitmap = 0; + ets_band = &ets_data->bands[band]; + + tclass_num = ets_band->tclass_num; + old_priomap = ets_band->prio_bitmap; + new_priomap = 0; err = mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HR_SUBGROUP, - tclass, 0, !!quanta[band], + tclass_num, 0, !!quanta[band], weights[band]); if (err) return err; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (priomap[i] == band) { - child_qdisc->prio_bitmap |= BIT(i); + new_priomap |= BIT(i); if (BIT(i) & old_priomap) continue; err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, - i, tclass); + i, tclass_num); if (err) return err; } } - child_qdisc->tclass_num = tclass; + ets_band->prio_bitmap = new_priomap; - if (old_priomap != child_qdisc->prio_bitmap && - child_qdisc->ops && child_qdisc->ops->clean_stats) { - backlog = child_qdisc->stats_base.backlog; - child_qdisc->ops->clean_stats(mlxsw_sp_port, - child_qdisc); - child_qdisc->stats_base.backlog = backlog; - } + if (old_priomap != new_priomap) + mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port, + 
child_qdisc); - if (handle == qdisc_state->future_handle && - qdisc_state->future_fifos[band]) { - err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC, - child_qdisc, - &mlxsw_sp_qdisc_ops_fifo, - NULL); - if (err) - return err; - } + err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, + band, child_qdisc); + if (err) + return err; } for (; band < IEEE_8021QAZ_MAX_TCS; band++) { - tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + ets_band = &ets_data->bands[band]; + ets_band->prio_bitmap = 0; + child_qdisc = &mlxsw_sp_qdisc->qdiscs[band]; - child_qdisc->prio_bitmap = 0; mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc); + mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HR_SUBGROUP, - tclass, 0, false, 0); + ets_band->tclass_num, 0, false, 0); } - qdisc_state->future_handle = TC_H_UNSPEC; - memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos)); + mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC); return 0; } @@ -1238,6 +1499,31 @@ mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, return &mlxsw_sp_qdisc->qdiscs[band]; } +static struct mlxsw_sp_qdisc_ets_band * +mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc *child) +{ + unsigned int band = child - mlxsw_sp_qdisc->qdiscs; + + if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS)) + band = 0; + return &mlxsw_sp_qdisc->ets_data->bands[band]; +} + +static u8 +mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc *child) +{ + return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap; +} + +static int +mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc *child) +{ + return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num; +} + static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .type = MLXSW_SP_QDISC_PRIO, .check_params = mlxsw_sp_qdisc_prio_check_params, @@ -1248,6 +1534,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, .find_class = mlxsw_sp_qdisc_prio_find_class, .num_classes = IEEE_8021QAZ_MAX_TCS, + .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap, + .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num, }; static int @@ -1299,6 +1587,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = { .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, .find_class = mlxsw_sp_qdisc_prio_find_class, .num_classes = IEEE_8021QAZ_MAX_TCS, + .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap, + .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num, }; /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting @@ -1326,10 +1616,9 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = { * grafted corresponds to the parent handle. If the two don't match, we * unoffload the child. 
*/ -static int -__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - u8 band, u32 child_handle) +static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + u8 band, u32 child_handle) { struct mlxsw_sp_qdisc *old_qdisc; u32 parent; @@ -1362,21 +1651,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } -static int -mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - struct tc_prio_qopt_offload_graft_params *p) -{ - return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc, - p->band, p->child_handle); -} - static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true); + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent); if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; @@ -1396,8 +1676,9 @@ static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); case TC_PRIO_GRAFT: - return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc, - &p->graft_params); + return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + p->graft_params.band, + p->graft_params.child_handle); default: return -EOPNOTSUPP; } @@ -1420,7 +1701,7 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true); + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent); if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; @@ -1440,9 +1721,9 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); case TC_ETS_GRAFT: - return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc, - p->graft_params.band, - p->graft_params.child_handle); + return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + p->graft_params.band, + p->graft_params.child_handle); default: return -EOPNOTSUPP; } @@ -1472,6 +1753,7 @@ struct mlxsw_sp_qevent_binding { u32 handle; int tclass_num; enum mlxsw_sp_span_trigger span_trigger; + unsigned int action_mask; }; static LIST_HEAD(mlxsw_sp_qevent_block_cb_list); @@ -1482,8 +1764,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_span_agent_parms *agent_parms, int *p_span_id) { + enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger; struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + bool ingress; int span_id; int err; @@ -1491,18 +1775,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true); + ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger); + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress); if (err) goto err_analyzed_port_get; trigger_parms.span_id = span_id; trigger_parms.probability_rate = 1; - err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); if (err) goto err_agent_bind; - err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger, + err = 
mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger, qevent_binding->tclass_num); if (err) goto err_trigger_enable; @@ -1511,10 +1796,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, return 0; err_trigger_enable: - mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); err_agent_bind: - mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); err_analyzed_port_get: mlxsw_sp_span_agent_put(mlxsw_sp, span_id); return err; @@ -1524,16 +1809,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_qevent_binding *qevent_binding, int span_id) { + enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger; struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = { .span_id = span_id, }; + bool ingress; + + ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger); - mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger, + mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger, qevent_binding->tclass_num); - mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); - mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); mlxsw_sp_span_agent_put(mlxsw_sp, span_id); } @@ -1583,10 +1872,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id); } -static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mall_entry *mall_entry, - struct mlxsw_sp_qevent_binding *qevent_binding) +static int +mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding, + struct netlink_ext_ack *extack) { + if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) { + NL_SET_ERR_MSG(extack, "Action not supported at this qevent"); + return -EOPNOTSUPP; + } + switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding); @@ -1614,15 +1910,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp, } } -static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, - struct mlxsw_sp_qevent_binding *qevent_binding) +static int +mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, + struct mlxsw_sp_qevent_binding *qevent_binding, + struct netlink_ext_ack *extack) { struct mlxsw_sp_mall_entry *mall_entry; int err; list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) { err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry, - qevent_binding); + qevent_binding, extack); if (err) goto err_entry_configure; } @@ -1646,13 +1944,17 @@ static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe qevent_binding); } -static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block) +static int +mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block, + struct netlink_ext_ack *extack) { struct mlxsw_sp_qevent_binding *qevent_binding; int err; list_for_each_entry(qevent_binding, 
&qevent_block->binding_list, list) { - err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + err = mlxsw_sp_qevent_binding_configure(qevent_block, + qevent_binding, + extack); if (err) goto err_binding_configure; } @@ -1737,7 +2039,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp, list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list); - err = mlxsw_sp_qevent_block_configure(qevent_block); + err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack); if (err) goto err_block_configure; @@ -1825,7 +2127,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv) static struct mlxsw_sp_qevent_binding * mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num, - enum mlxsw_sp_span_trigger span_trigger) + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { struct mlxsw_sp_qevent_binding *binding; @@ -1837,6 +2140,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, binding->handle = handle; binding->tclass_num = tclass_num; binding->span_trigger = span_trigger; + binding->action_mask = action_mask; return binding; } @@ -1862,9 +2166,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block, return NULL; } -static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f, - enum mlxsw_sp_span_trigger span_trigger) +static int +mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_qevent_binding *qevent_binding; @@ -1872,6 +2178,7 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po struct flow_block_cb *block_cb; struct mlxsw_sp_qdisc *qdisc; bool register_block = false; + int tclass_num; int err; block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp); @@ -1904,14 +2211,19 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po goto err_binding_exists; } - qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle, - qdisc->tclass_num, span_trigger); + tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc); + qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, + f->sch->handle, + tclass_num, + span_trigger, + action_mask); if (IS_ERR(qevent_binding)) { err = PTR_ERR(qevent_binding); goto err_binding_create; } - err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding, + f->extack); if (err) goto err_binding_configure; @@ -1963,15 +2275,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp } } -static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f, - enum mlxsw_sp_span_trigger span_trigger) +static int +mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { f->driver_block_list = &mlxsw_sp_qevent_block_cb_list; switch (f->command) { case FLOW_BLOCK_BIND: - return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger); + return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, + span_trigger, + action_mask); case FLOW_BLOCK_UNBIND: 
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger); return 0; @@ -1983,7 +2299,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f) { - return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP); + unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) | + BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP); + + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, + MLXSW_SP_SPAN_TRIGGER_EARLY_DROP, + action_mask); +} + +int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f) +{ + unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR); + + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, + MLXSW_SP_SPAN_TRIGGER_ECN, + action_mask); } int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -1995,8 +2326,6 @@ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) return -ENOMEM; mutex_init(&qdisc_state->lock); - qdisc_state->root_qdisc.prio_bitmap = 0xff; - qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; mlxsw_sp_port->qdisc = qdisc_state; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 19bb3ca0515e21..217e3b351dfe60 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -57,6 +57,7 @@ struct mlxsw_sp_rif { unsigned char addr[ETH_ALEN]; int mtu; u16 rif_index; + u8 mac_profile_id; u16 vr_id; const struct mlxsw_sp_rif_ops *ops; struct mlxsw_sp *mlxsw_sp; @@ -106,15 +107,23 @@ struct mlxsw_sp_rif_ops { void (*setup)(struct mlxsw_sp_rif *rif, const struct mlxsw_sp_rif_params *params); - int (*configure)(struct mlxsw_sp_rif *rif); + int (*configure)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); void (*deconfigure)(struct mlxsw_sp_rif *rif); struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack); void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; +struct mlxsw_sp_rif_mac_profile { + unsigned char mac_prefix[ETH_ALEN]; + refcount_t ref_count; + u8 id; +}; + struct mlxsw_sp_router_ops { int (*init)(struct mlxsw_sp *mlxsw_sp); + int (*ipips_init)(struct mlxsw_sp *mlxsw_sp); }; static struct mlxsw_sp_rif * @@ -1055,22 +1064,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->vrs); } -static struct net_device * -__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) -{ - struct ip_tunnel *tun = netdev_priv(ol_dev); - struct net *net = dev_net(ol_dev); - - return dev_get_by_index_rcu(net, tun->parms.link); -} - u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) { struct net_device *d; u32 tb_id; rcu_read_lock(); - d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); if (d) tb_id = l3mdev_fib_table(d) ? 
: RT_TABLE_MAIN; else @@ -1116,6 +1116,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL; + int err; ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); @@ -1131,26 +1132,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; + ipip_entry->parms = ipip_ops->parms_init(ol_dev); - switch (ipip_ops->ul_proto) { - case MLXSW_SP_L3_PROTO_IPV4: - ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - break; - case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON(1); - break; + err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry); + if (err) { + ret = ERR_PTR(err); + goto err_rem_ip_addr_set; } return ipip_entry; +err_rem_ip_addr_set: + mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); err_ol_ipip_lb_create: kfree(ipip_entry); return ret; } -static void -mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) +static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) { + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + + ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry); mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); kfree(ipip_entry); } @@ -1174,6 +1179,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_l3addr_eq(&tun_saddr, &saddr); } +static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; + + /* Not all tunnels require to increase the default parsing depth + * (96 bytes). + */ + if (ipip_ops->inc_parsing_depth) + return mlxsw_sp_parsing_depth_inc(mlxsw_sp); + + return 0; +} + +static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipipt]; + + if (ipip_ops->inc_parsing_depth) + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +} + static int mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, @@ -1187,18 +1218,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp, + ipip_entry->ipipt); + if (err) + goto err_parsing_depth_inc; + ipip_entry->decap_fib_entry = fib_entry; fib_entry->decap.ipip_entry = ipip_entry; fib_entry->decap.tunnel_index = tunnel_index; + return 0; + +err_parsing_depth_inc: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + fib_entry->decap.tunnel_index); + return err; } static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { + enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt; + /* Unlink this node from the IPIP entry that it's the decap entry of. 
*/ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; + mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, fib_entry->decap.tunnel_index); } @@ -1309,6 +1354,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, saddr_len = 4; saddr_prefix_len = 32; break; + case MLXSW_SP_L3_PROTO_IPV6: + saddrp = &saddr.addr6; + saddr_len = 16; + saddr_prefix_len = 128; + break; default: WARN_ON(1); return NULL; @@ -1345,7 +1395,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { list_del(&ipip_entry->ipip_list_node); - mlxsw_sp_ipip_entry_dealloc(ipip_entry); + mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry); } static bool @@ -1450,7 +1500,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) @@ -1536,23 +1586,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id, u16 ul_rif_id, bool enable) { struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + enum mlxsw_reg_ritr_loopback_ipip_options ipip_options; struct mlxsw_sp_rif *rif = &lb_rif->common; struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; char ritr_pl[MLXSW_REG_RITR_LEN]; + struct in6_addr *saddr6; u32 saddr4; + ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET; switch (lb_cf.ul_protocol) { case MLXSW_SP_L3_PROTO_IPV4: saddr4 = be32_to_cpu(lb_cf.saddr.addr4); mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, - MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr_id, ul_rif_id, saddr4, lb_cf.okey); + ipip_options, ul_vr_id, + ul_rif_id, saddr4, + lb_cf.okey); break; case MLXSW_SP_L3_PROTO_IPV6: - return -EAFNOSUPPORT; + saddr6 = &lb_cf.saddr.addr6; + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt, + ipip_options, ul_vr_id, + ul_rif_id, saddr6, + lb_cf.okey); + break; } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); @@ -1827,7 +1888,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); @@ -4152,7 +4213,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) bool is_up; rcu_read_lock(); - ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); is_up = ul_dev ? 
(ul_dev->flags & IFF_UP) : true; rcu_read_unlock(); @@ -4376,6 +4437,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ratr_trap_action trap_action; + char ratr_pl[MLXSW_REG_RATR_LEN]; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + &mlxsw_sp->router->adj_trap_index); + if (err) + return err; + + trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, + MLXSW_REG_RATR_TYPE_ETHERNET, + mlxsw_sp->router->adj_trap_index, + mlxsw_sp->router->lb_rif_index); + mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); + if (err) + goto err_ratr_write; + + return 0; + +err_ratr_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); + return err; +} + +static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); +} + +static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups)) + return 0; + + err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp); + if (err) + return err; + + refcount_set(&mlxsw_sp->router->num_groups, 1); + + return 0; +} + +static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp) +{ + if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups)) + return; + + mlxsw_sp_adj_trap_entry_fini(mlxsw_sp); +} + static void mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nexthop_group *nh_grp, @@ -4790,6 +4911,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_obj_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) { NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device"); @@ -4808,6 +4932,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop_obj_init: for (i--; i >= 0; i--) { @@ -4832,6 +4958,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, cancel_delayed_work(&router->nh_grp_activity_dw); } + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5223,6 +5350,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop4_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -5230,6 +5360,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop4_init: for (i--; i >= 0; i--) { @@ -5247,6 +5379,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5725,41 +5858,6 @@ static int mlxsw_sp_fib_entry_commit(struct 
mlxsw_sp *mlxsw_sp, return err; } -static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp) -{ - enum mlxsw_reg_ratr_trap_action trap_action; - char ratr_pl[MLXSW_REG_RATR_LEN]; - int err; - - if (mlxsw_sp->router->adj_discard_index_valid) - return 0; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - &mlxsw_sp->router->adj_discard_index); - if (err) - return err; - - trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, - MLXSW_REG_RATR_TYPE_ETHERNET, - mlxsw_sp->router->adj_discard_index, - mlxsw_sp->router->lb_rif_index); - mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); - mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); - if (err) - goto err_ratr_write; - - mlxsw_sp->router->adj_discard_index_valid = true; - - return 0; - -err_ratr_write: - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - return err; -} - static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, @@ -5772,7 +5870,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, u16 trap_id = 0; u32 adjacency_index = 0; u16 ecmp_size = 0; - int err; /* In case the nexthop group adjacency index is valid, use it * with provided ECMP size. Otherwise, setup trap and pass @@ -5783,11 +5880,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, adjacency_index = nhgi->adj_index; ecmp_size = nhgi->ecmp_size; } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) { - err = mlxsw_sp_adj_discard_write(mlxsw_sp); - if (err) - return err; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; - adjacency_index = mlxsw_sp->router->adj_discard_index; + adjacency_index = mlxsw_sp->router->adj_trap_index; ecmp_size = 1; } else { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; @@ -6036,8 +6130,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } static void -mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: @@ -6048,6 +6142,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, } } +static void +mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib4_entry *fib4_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common); +} + static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, @@ -6108,7 +6209,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; fib_info_put(fib4_entry->fi); - mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry); mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); @@ -6641,6 +6742,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); } nh_grp->nhgi = nhgi; + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -6648,6 
+6752,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop6_init: for (i--; i >= 0; i--) { @@ -6665,6 +6771,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -6888,11 +6995,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); } -static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - const struct fib6_info *rt) +static int +mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) { - if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) + struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; + union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr }; + int ifindex = nhgi->nexthops[0].ifindex; + struct mlxsw_sp_ipip_entry *ipip_entry; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex, + MLXSW_SP_L3_PROTO_IPV6, + dip); + + if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; + return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry, + ipip_entry); + } + + return 0; +} + +static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) +{ + if (rt->fib6_flags & RTF_LOCAL) + return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry, + rt); + if (rt->fib6_flags & RTF_ANYCAST) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; else if (rt->fib6_type == RTN_BLACKHOLE) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; @@ -6902,6 +7036,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + + return 0; } static void @@ -6959,12 +7095,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_group_vr_link; - mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + if (err) + goto err_fib6_entry_type_set; fib_entry->fib_node = fib_node; return fib6_entry; +err_fib6_entry_type_set: + mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib); err_nexthop_group_vr_link: mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry); err_nexthop6_group_get: @@ -6983,11 +7123,19 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(err); } +static void +mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common); +} + static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry) { struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry); mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); @@ -7340,16 +7488,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, 
MLXSW_SP_L3_PROTO_IPV6); } - - /* After flushing all the routes, it is not possible anyone is still - * using the adjacency index that is discarding packets, so free it in - * case it was allocated. - */ - if (!mlxsw_sp->router->adj_discard_index_valid) - return; - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - mlxsw_sp->router->adj_discard_index_valid = false; } struct mlxsw_sp_fib6_event { @@ -8056,7 +8194,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, if (ops->setup) ops->setup(rif, params); - err = ops->configure(rif); + err = ops->configure(rif, extack); if (err) goto err_configure; @@ -8175,6 +8313,200 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_destroy(rif); } +static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif_mac_profile *profile, + struct netlink_ext_ack *extack) +{ + u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile; + struct mlxsw_sp_router *router = mlxsw_sp->router; + int id; + + id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0, + max_rif_mac_profiles, GFP_KERNEL); + + if (id >= 0) { + profile->id = id; + return 0; + } + + if (id == -ENOSPC) + NL_SET_ERR_MSG_MOD(extack, + "Exceeded number of supported router interface MAC profiles"); + + return id; +} + +static struct mlxsw_sp_rif_mac_profile * +mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile) +{ + struct mlxsw_sp_rif_mac_profile *profile; + + profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr, + mac_profile); + WARN_ON(!profile); + return profile; +} + +static struct mlxsw_sp_rif_mac_profile * +mlxsw_sp_rif_mac_profile_alloc(const char *mac) +{ + struct mlxsw_sp_rif_mac_profile *profile; + + profile = kzalloc(sizeof(*profile), GFP_KERNEL); + if (!profile) + return NULL; + + ether_addr_copy(profile->mac_prefix, mac); + refcount_set(&profile->ref_count, 1); + return profile; +} + +static struct mlxsw_sp_rif_mac_profile * +mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + struct mlxsw_sp_rif_mac_profile *profile; + int id; + + idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) { + if (!profile) + continue; + + if (ether_addr_equal_masked(profile->mac_prefix, mac, + mlxsw_sp->mac_mask)) + return profile; + } + + return NULL; +} + +static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv) +{ + const struct mlxsw_sp *mlxsw_sp = priv; + + return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count); +} + +static struct mlxsw_sp_rif_mac_profile * +mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif_mac_profile *profile; + int err; + + profile = mlxsw_sp_rif_mac_profile_alloc(mac); + if (!profile) + return ERR_PTR(-ENOMEM); + + err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack); + if (err) + goto profile_index_alloc_err; + + atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count); + return profile; + +profile_index_alloc_err: + kfree(profile); + return ERR_PTR(err); +} + +static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp, + u8 mac_profile) +{ + struct mlxsw_sp_rif_mac_profile *profile; + + atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count); + profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile); + kfree(profile); +} + +static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp, + const char 
*mac, u8 *p_mac_profile, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif_mac_profile *profile; + + profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac); + if (profile) { + refcount_inc(&profile->ref_count); + goto out; + } + + profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack); + if (IS_ERR(profile)) + return PTR_ERR(profile); + +out: + *p_mac_profile = profile->id; + return 0; +} + +static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp, + u8 mac_profile) +{ + struct mlxsw_sp_rif_mac_profile *profile; + + profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr, + mac_profile); + if (WARN_ON(!profile)) + return; + + if (!refcount_dec_and_test(&profile->ref_count)) + return; + + mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile); +} + +static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_rif_mac_profile *profile; + + profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr, + rif->mac_profile_id); + if (WARN_ON(!profile)) + return false; + + return refcount_read(&profile->ref_count) > 1; +} + +static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif, + const char *new_mac) +{ + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_rif_mac_profile *profile; + + profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr, + rif->mac_profile_id); + if (WARN_ON(!profile)) + return -EINVAL; + + ether_addr_copy(profile->mac_prefix, new_mac); + return 0; +} + +static int +mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + const char *new_mac, + struct netlink_ext_ack *extack) +{ + u8 mac_profile; + int err; + + if (!mlxsw_sp_rif_mac_profile_is_shared(rif)) + return mlxsw_sp_rif_mac_profile_edit(rif, new_mac); + + err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac, + &mac_profile, extack); + if (err) + return err; + + mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id); + rif->mac_profile_id = mac_profile; + return 0; +} + static int __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, struct net_device *l3_dev, @@ -8523,36 +8855,6 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, - const unsigned char *dev_addr, - struct netlink_ext_ack *extack) -{ - struct mlxsw_sp_rif *rif; - int i; - - /* A RIF is not created for macvlan netdevs. 
Their MAC is used to - * populate the FDB - */ - if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) - return 0; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { - rif = mlxsw_sp->router->rifs[i]; - if (rif && rif->ops && - rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) - continue; - if (rif && rif->dev && rif->dev != dev && - !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, - mlxsw_sp->mac_mask)) { - NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix"); - return -EINVAL; - } - } - - return 0; -} - static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, unsigned long event, @@ -8618,11 +8920,6 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, - ivi->extack); - if (err) - goto out; - err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack); out: mutex_unlock(&mlxsw_sp->router->lock); @@ -8706,11 +9003,6 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, - i6vi->extack); - if (err) - goto out; - err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); out: mutex_unlock(&mlxsw_sp->router->lock); @@ -8718,7 +9010,7 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, } static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, - const char *mac, int mtu) + const char *mac, int mtu, u8 mac_profile) { char ritr_pl[MLXSW_REG_RITR_LEN]; int err; @@ -8730,15 +9022,18 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); + mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile); mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } static int mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *rif) + struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { struct net_device *dev = rif->dev; + u8 old_mac_profile; u16 fid_index; int err; @@ -8748,8 +9043,14 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, if (err) return err; + old_mac_profile = rif->mac_profile_id; + err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr, + extack); + if (err) + goto err_rif_mac_profile_replace; + err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr, - dev->mtu); + dev->mtu, rif->mac_profile_id); if (err) goto err_rif_edit; @@ -8779,8 +9080,11 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, return 0; err_rif_fdb_op: - mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu); + mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu, + old_mac_profile); err_rif_edit: + mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack); +err_rif_mac_profile_replace: mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true); return err; } @@ -8788,16 +9092,34 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, struct netdev_notifier_pre_changeaddr_info *info) { + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_rif_mac_profile *profile; struct netlink_ext_ack *extack; + u8 
max_rif_mac_profiles; + u64 occ; extack = netdev_notifier_info_to_extack(&info->info); - return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev, - info->dev_addr, extack); + + profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr); + if (profile) + return 0; + + max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile; + occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp); + if (occ < max_rif_mac_profiles) + return 0; + + if (!mlxsw_sp_rif_mac_profile_is_shared(rif)) + return 0; + + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles"); + return -ENOBUFS; } int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_rif *rif; int err = 0; @@ -8814,7 +9136,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, switch (event) { case NETDEV_CHANGEMTU: case NETDEV_CHANGEADDR: - err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif); + err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack); break; case NETDEV_PRE_CHANGEADDR: err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); @@ -8937,6 +9259,7 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); + mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id); mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag, rif_subport->lag ? rif_subport->lag_id : rif_subport->system_port, @@ -8945,13 +9268,21 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } -static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif) +static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { + u8 mac_profile; int err; - err = mlxsw_sp_rif_subport_op(rif, true); + err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr, + &mac_profile, extack); if (err) return err; + rif->mac_profile_id = mac_profile; + + err = mlxsw_sp_rif_subport_op(rif, true); + if (err) + goto err_rif_subport_op; err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(rif->fid), true); @@ -8963,6 +9294,8 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif) err_rif_fdb_op: mlxsw_sp_rif_subport_op(rif, false); +err_rif_subport_op: + mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile); return err; } @@ -8975,6 +9308,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_index(fid), false); mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_rif_subport_op(rif, false); + mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id); } static struct mlxsw_sp_fid * @@ -9003,6 +9337,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); + mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id); mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); @@ -9013,16 +9348,24 @@ u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) return mlxsw_core_max_ports(mlxsw_sp->core) + 1; } -static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) 
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; u16 fid_index = mlxsw_sp_fid_index(rif->fid); + u8 mac_profile; int err; + err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr, + &mac_profile, extack); + if (err) + return err; + rif->mac_profile_id = mac_profile; + err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, true); if (err) - return err; + goto err_rif_vlan_fid_op; err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, mlxsw_sp_router_port(mlxsw_sp), true); @@ -9050,6 +9393,8 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) mlxsw_sp_router_port(mlxsw_sp), false); err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); +err_rif_vlan_fid_op: + mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile); return err; } @@ -9068,6 +9413,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); + mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id); } static struct mlxsw_sp_fid * @@ -9172,7 +9518,8 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, } static int -mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) +mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); @@ -9359,7 +9706,8 @@ void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index) } static int -mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) +mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); @@ -9414,6 +9762,13 @@ static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp) { u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_core *core = mlxsw_sp->core; + + if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES)) + return -EIO; + mlxsw_sp->router->max_rif_mac_profile = + MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES); mlxsw_sp->router->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *), @@ -9421,16 +9776,28 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp->router->rifs) return -ENOMEM; + idr_init(&mlxsw_sp->router->rif_mac_profiles_idr); + atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, + mlxsw_sp_rif_mac_profiles_occ_get, + mlxsw_sp); + return 0; } static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp) { + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); int i; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) WARN_ON_ONCE(mlxsw_sp->router->rifs[i]); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_RIF_MAC_PROFILES); + WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr)); + idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr); kfree(mlxsw_sp->router->rifs); } @@ -9447,7 +9814,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) { int err; - 
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp); @@ -9460,6 +9826,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); } +static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + +static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) { WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list)); @@ -9874,6 +10252,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = { .init = mlxsw_sp1_router_init, + .ipips_init = mlxsw_sp1_ipips_init, }; static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) @@ -9889,6 +10268,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = { .init = mlxsw_sp2_router_init, + .ipips_init = mlxsw_sp2_ipips_init, }; int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, @@ -9934,7 +10314,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rifs_init; - err = mlxsw_sp_ipips_init(mlxsw_sp); + err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp); if (err) goto err_ipips_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 25d3eae6350133..99e8371a82a591 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -39,6 +39,9 @@ mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx) struct mlxsw_sp_router { struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_rif **rifs; + struct idr rif_mac_profiles_idr; + atomic_t rif_mac_profiles_count; + u8 max_rif_mac_profile; struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; struct rhashtable nexthop_group_ht; @@ -65,8 +68,6 @@ struct mlxsw_sp_router { struct notifier_block inet6addr_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; - u32 adj_discard_index; - bool adj_discard_index_valid; struct mlxsw_sp_router_nve_decap nve_decap_config; struct mutex lock; /* Protects shared router resources */ struct work_struct fib_event_work; @@ -82,6 +83,8 @@ struct mlxsw_sp_router { struct delayed_work nh_grp_activity_dw; struct list_head nh_res_grp_list; bool inc_parsing_depth; + refcount_t num_groups; + u32 adj_trap_index; }; struct mlxsw_sp_fib_entry_priv { @@ -226,6 +229,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp); +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev); extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3398cc01e5ec4c..f5f819aa9a653c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1650,6 +1650,22 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc); } +bool 
mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger) +{ + switch (trigger) { + case MLXSW_SP_SPAN_TRIGGER_INGRESS: + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + return true; + case MLXSW_SP_SPAN_TRIGGER_EGRESS: + case MLXSW_SP_SPAN_TRIGGER_ECN: + return false; + } + + WARN_ON_ONCE(1); + return false; +} + static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index efaefd1ae8636d..82e711afb02bb4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_sp_span_trigger trigger, u8 tc); void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_sp_span_trigger trigger, u8 tc); +bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger); extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops; extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 22fede5cb32c22..81c7e8a7fcf5df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1635,16 +1635,13 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, u16 fid) { struct mlxsw_sp_mid *mid; - size_t alloc_size; mid = kzalloc(sizeof(*mid), GFP_KERNEL); if (!mid) return NULL; - alloc_size = sizeof(unsigned long) * - BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); - - mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); + mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core), + GFP_KERNEL); if (!mid->ports_in_mid) goto err_ports_in_mid_alloc; @@ -1663,7 +1660,7 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, return mid; err_write_mdb_entry: - kfree(mid->ports_in_mid); + bitmap_free(mid->ports_in_mid); err_ports_in_mid_alloc: kfree(mid); return NULL; @@ -1680,7 +1677,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_core_max_ports(mlxsw_sp->core))) { err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); list_del(&mid->list); - kfree(mid->ports_in_mid); + bitmap_free(mid->ports_in_mid); kfree(mid); } return err; diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index b27713906d3a65..c11b118dc4158d 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -348,13 +348,15 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter) ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); } -static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) +static void ks8842_init_mac_addr(struct ks8842_adapter *adapter) { + u8 addr[ETH_ALEN]; int i; u16 mac; for (i = 0; i < ETH_ALEN; i++) - dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); + addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); + eth_hw_addr_set(adapter->netdev, addr); if (adapter->conf_flags & MICREL_KS884X) { /* @@ -380,7 +382,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) } } -static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) +static void ks8842_write_mac_addr(struct ks8842_adapter 
*adapter, const u8 *mac) { unsigned long flags; unsigned i; @@ -1064,7 +1066,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, mac, netdev->addr_len); + eth_hw_addr_set(netdev, mac); ks8842_write_mac_addr(adapter, mac); return 0; @@ -1191,12 +1193,11 @@ static int ks8842_probe(struct platform_device *pdev) if (i < netdev->addr_len) /* an address was passed, use it */ - memcpy(netdev->dev_addr, pdata->macaddr, - netdev->addr_len); + eth_hw_addr_set(netdev, pdata->macaddr); } if (i == netdev->addr_len) { - ks8842_read_mac_addr(adapter, netdev->dev_addr); + ks8842_init_mac_addr(adapter); if (!is_valid_ether_addr(netdev->dev_addr)) eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index e2eb0caeac8216..6f34a61739b662 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -427,7 +427,7 @@ struct ks8851_net { int ks8851_probe_common(struct net_device *netdev, struct device *dev, int msg_en); -int ks8851_remove_common(struct device *dev); +void ks8851_remove_common(struct device *dev); int ks8851_suspend(struct device *dev); int ks8851_resume(struct device *dev); diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index a6db1a8156e1a8..691206f19ea7bc 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -165,6 +165,7 @@ static void ks8851_read_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); unsigned long flags; + u8 addr[ETH_ALEN]; u16 reg; int i; @@ -172,9 +173,10 @@ static void ks8851_read_mac_addr(struct net_device *dev) for (i = 0; i < ETH_ALEN; i += 2) { reg = ks8851_rdreg16(ks, KS_MAR(i)); - dev->dev_addr[i] = reg >> 8; - dev->dev_addr[i + 1] = reg & 0xff; + addr[i] = reg >> 8; + addr[i + 1] = reg & 0xff; } + eth_hw_addr_set(dev, addr); ks8851_unlock(ks, &flags); } @@ -195,7 +197,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) struct net_device *dev = ks->netdev; int ret; - ret = of_get_mac_address(np, dev->dev_addr); + ret = of_get_ethdev_address(np, dev); if (!ret) { ks8851_write_mac_addr(dev); return; @@ -672,7 +674,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); return ks8851_write_mac_addr(dev); } @@ -1247,7 +1249,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, } EXPORT_SYMBOL_GPL(ks8851_probe_common); -int ks8851_remove_common(struct device *dev) +void ks8851_remove_common(struct device *dev) { struct ks8851_net *priv = dev_get_drvdata(dev); @@ -1261,8 +1263,6 @@ int ks8851_remove_common(struct device *dev) gpio_set_value(priv->gpio, 0); regulator_disable(priv->vdd_reg); regulator_disable(priv->vdd_io); - - return 0; } EXPORT_SYMBOL_GPL(ks8851_remove_common); diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c index 2e8fcce50f9d17..2e25798c610eed 100644 --- a/drivers/net/ethernet/micrel/ks8851_par.c +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -327,7 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev) static int ks8851_remove_par(struct platform_device *pdev) { - return ks8851_remove_common(&pdev->dev); + ks8851_remove_common(&pdev->dev); + + 
return 0; } static const struct of_device_id ks8851_match_table[] = { diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 479406ecbaa300..0303e727e99f96 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -454,7 +454,9 @@ static int ks8851_probe_spi(struct spi_device *spi) static int ks8851_remove_spi(struct spi_device *spi) { - return ks8851_remove_common(&spi->dev); + ks8851_remove_common(&spi->dev); + + return 0; } static const struct of_device_id ks8851_match_table[] = { diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index a0ee155f9f5167..99c0c1491af20a 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw) } } -static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) +static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr) { int i; int j = ADDITIONAL_ENTRIES; @@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) return -1; } -static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) +static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr) { int i; @@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); } - memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, mac->sa_data); interrupt = hw_block_intr(hw); @@ -7005,12 +7005,14 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) dev->mem_end = dev->mem_start + reg_len - 1; dev->irq = pdev->irq; if (MAIN_PORT == i) - memcpy(dev->dev_addr, hw_priv->hw.override_addr, - ETH_ALEN); + eth_hw_addr_set(dev, hw_priv->hw.override_addr); else { - memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); + u8 addr[ETH_ALEN]; + + ether_addr_copy(addr, sw->other_addr); if (ether_addr_equal(sw->other_addr, hw->override_addr)) - dev->dev_addr[5] += port->first_port; + addr[5] += port->first_port; + eth_hw_addr_set(dev, addr); } dev->netdev_ops = &netdev_ops; diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 09cdc2f2e7ffba..634ac7649c43e1 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - ether_addr_copy(dev->dev_addr, address->sa_data); + eth_hw_addr_set(dev, address->sa_data); return enc28j60_set_hw_macaddr(dev); } @@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = { static int enc28j60_probe(struct spi_device *spi) { - unsigned char macaddr[ETH_ALEN]; struct net_device *dev; struct enc28j60_net *priv; int ret = 0; @@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi) goto error_irq; } - if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr))) - ether_addr_copy(dev->dev_addr, macaddr); - else + if (device_get_ethdev_address(&spi->dev, dev)) eth_hw_addr_random(dev); enc28j60_set_hw_macaddr(dev); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 0bc6b3176fbf0a..b90efc80fb59fa 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr) if 
(!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + eth_hw_addr_set(dev, address->sa_data); return encx24j600_set_hw_macaddr(dev); } @@ -1001,6 +1001,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) struct net_device *ndev; struct encx24j600_priv *priv; u16 eidled; + u8 addr[ETH_ALEN]; ndev = alloc_etherdev(sizeof(struct encx24j600_priv)); @@ -1056,7 +1057,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) } /* Get the MAC address from the chip */ - encx24j600_hw_get_macaddr(priv, ndev->dev_addr); + encx24j600_hw_get_macaddr(priv, addr); + eth_hw_addr_set(ndev, addr); ndev->ethtool_ops = &encx24j600_ethtool_ops; @@ -1125,4 +1127,3 @@ module_spi_driver(encx24j600_spi_net_driver); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Jon Ringle "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4d5a5d6595b3bb..4fc97823bc84fb 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) eth_random_addr(adapter->mac_address); } lan743x_mac_set_address(adapter, adapter->mac_address); - ether_addr_copy(netdev->dev_addr, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address); return 0; } @@ -2670,7 +2670,7 @@ static int lan743x_netdev_set_mac_address(struct net_device *netdev, ret = eth_prepare_mac_addr_change(netdev, sock_addr); if (ret) return ret; - ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + eth_hw_addr_set(netdev, sock_addr->sa_data); lan743x_mac_set_address(adapter, sock_addr->sa_data); lan743x_rfe_update_mac_address(adapter); return 0; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 6080028c1df2c7..aaf7aaeaba0c03 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -279,6 +279,7 @@ #define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3) #define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4) #define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6) #define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \ (((value) & 0x7) << (1 + ((channel) << 2))) #define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2)) @@ -830,7 +831,7 @@ struct lan743x_rx_buffer_info { unsigned int buffer_length; }; -#define LAN743X_RX_RING_SIZE (65) +#define LAN743X_RX_RING_SIZE (128) #define RX_PROCESS_RESULT_NOTHING_TO_DO (0) #define RX_PROCESS_RESULT_BUFFER_RECEIVED (1) diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index ab6d719d40f0c4..9380e396f64878 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -491,9 +491,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, int perout_pin = 0; unsigned int index = perout_request->index; struct lan743x_ptp_perout *perout = &ptp->perout[index]; + int ret = 0; /* Reject requests with unsupported flags */ - if (perout_request->flags) + if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE) return -EOPNOTSUPP; if (on) { @@ -518,6 +519,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, netif_warn(adapter, drv, adapter->netdev, "Failed to reserve event channel %d for 
PEROUT\n", index); + ret = -EBUSY; goto failed; } @@ -529,6 +531,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, netif_warn(adapter, drv, adapter->netdev, "Failed to reserve gpio %d for PEROUT\n", perout_pin); + ret = -EBUSY; goto failed; } @@ -540,27 +543,93 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, period_sec += perout_request->period.nsec / 1000000000; period_nsec = perout_request->period.nsec % 1000000000; - if (period_sec == 0) { - if (period_nsec >= 400000000) { + if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on, ts_period; + s64 wf_high, period64, half; + s32 reminder; + + ts_on.tv_sec = perout_request->on.sec; + ts_on.tv_nsec = perout_request->on.nsec; + wf_high = timespec64_to_ns(&ts_on); + ts_period.tv_sec = perout_request->period.sec; + ts_period.tv_nsec = perout_request->period.nsec; + period64 = timespec64_to_ns(&ts_period); + + if (period64 < 200) { + netif_warn(adapter, drv, adapter->netdev, + "perout period too small, minimum is 200nS\n"); + ret = -EOPNOTSUPP; + goto failed; + } + if (wf_high >= period64) { + netif_warn(adapter, drv, adapter->netdev, + "pulse width must be smaller than period\n"); + ret = -EINVAL; + goto failed; + } + + /* Check if we can do 50% toggle on an even value of period. + * If the period number is odd, then check if the requested + * pulse width is the same as one of pre-defined width values. + * Otherwise, return failure. + */ + half = div_s64_rem(period64, 2, &reminder); + if (!reminder) { + if (half == wf_high) { + /* It's 50% match. Use the toggle option */ + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_; + /* In this case, divide period value by 2 */ + ts_period = ns_to_timespec64(div_s64(period64, 2)); + period_sec = ts_period.tv_sec; + period_nsec = ts_period.tv_nsec; + + goto program; + } + } + /* if we can't do toggle, then the width option needs to be the exact match */ + if (wf_high == 200000000) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; - } else if (period_nsec >= 20000000) { + } else if (wf_high == 10000000) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; - } else if (period_nsec >= 2000000) { + } else if (wf_high == 1000000) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; - } else if (period_nsec >= 200000) { + } else if (wf_high == 100000) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; - } else if (period_nsec >= 20000) { + } else if (wf_high == 10000) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; - } else if (period_nsec >= 200) { + } else if (wf_high == 100) { pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; } else { netif_warn(adapter, drv, adapter->netdev, - "perout period too small, minimum is 200nS\n"); + "duty cycle specified is not supported\n"); + ret = -EOPNOTSUPP; goto failed; } } else { - pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + if (period_sec == 0) { + if (period_nsec >= 400000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } else if (period_nsec >= 20000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; + } else if (period_nsec >= 2000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; + } else if (period_nsec >= 200000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; + } else if (period_nsec >= 20000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; + } else if (period_nsec >= 200) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; } else { + netif_warn(adapter, drv, adapter->netdev, + "perout period too
small, minimum is 200nS\n"); + ret = -EOPNOTSUPP; + goto failed; + } + } else { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } } +program: /* turn off by setting target far in future */ lan743x_csr_write(adapter, @@ -599,7 +668,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, failed: lan743x_ptp_perout_off(adapter, index); - return -ENODEV; + return ret; } static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 5030dfca38798d..4625d4fb4cde48 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5) } iomem[idx] = devm_ioremap(sparx5->dev, iores[idx]->start, - iores[idx]->end - iores[idx]->start - + 1); + resource_size(iores[idx])); if (!iomem[idx]) { dev_err(sparx5->dev, "Unable to get switch registers: %s\n", iores[idx]->name); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index cb68eaaac88118..e042f117dc7a40 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p) sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid); /* Record the address */ - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -200,7 +200,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno) { struct sparx5_port *spx5_port; struct net_device *ndev; - u64 val; ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port)); if (!ndev) @@ -216,8 +215,7 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno) ndev->netdev_ops = &sparx5_port_netdev_ops; ndev->ethtool_ops = &sparx5_ethtool_ops; - val = ether_addr_to_u64(sparx5->base_mac) + portno + 1; - u64_to_ether_addr(val, ndev->dev_addr); + eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1); return ndev; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c index af70e279512542..fb74752de0ca25 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c @@ -92,12 +92,11 @@ static void sparx5_phylink_validate(struct phylink_config *config, } break; default: - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void sparx5_phylink_mac_config(struct phylink_config *config, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index cee75b561f59d0..c96ac81212f74c 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -3,6 +3,8 @@ #include #include +#include +#include #include "mana.h" @@ -848,6 +850,15 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3; req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4; + 
req.drv_ver = 0; /* Unused*/ + req.os_type = 0x10; /* Linux */ + req.os_ver_major = LINUX_VERSION_MAJOR; + req.os_ver_minor = LINUX_VERSION_PATCHLEVEL; + req.os_ver_build = LINUX_VERSION_SUBLEVEL; + strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1)); + strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2)); + strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3)); + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", @@ -1247,6 +1258,52 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev) gc->irq_contexts = NULL; } +static int mana_gd_setup(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + int err; + + mana_gd_init_registers(pdev); + mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); + + err = mana_gd_setup_irqs(pdev); + if (err) + return err; + + err = mana_hwc_create_channel(gc); + if (err) + goto remove_irq; + + err = mana_gd_verify_vf_version(pdev); + if (err) + goto destroy_hwc; + + err = mana_gd_query_max_resources(pdev); + if (err) + goto destroy_hwc; + + err = mana_gd_detect_devices(pdev); + if (err) + goto destroy_hwc; + + return 0; + +destroy_hwc: + mana_hwc_destroy_channel(gc); +remove_irq: + mana_gd_remove_irqs(pdev); + return err; +} + +static void mana_gd_cleanup(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + mana_hwc_destroy_channel(gc); + + mana_gd_remove_irqs(pdev); +} + static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct gdma_context *gc; @@ -1276,6 +1333,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!gc) goto release_region; + mutex_init(&gc->eq_test_event_mutex); + pci_set_drvdata(pdev, gc); + bar0_va = pci_iomap(pdev, bar, 0); if (!bar0_va) goto free_gc; @@ -1283,49 +1343,23 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) gc->bar0_va = bar0_va; gc->dev = &pdev->dev; - pci_set_drvdata(pdev, gc); - mana_gd_init_registers(pdev); - - mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); - - err = mana_gd_setup_irqs(pdev); + err = mana_gd_setup(pdev); if (err) goto unmap_bar; - mutex_init(&gc->eq_test_event_mutex); - - err = mana_hwc_create_channel(gc); - if (err) - goto remove_irq; - - err = mana_gd_verify_vf_version(pdev); - if (err) - goto remove_irq; - - err = mana_gd_query_max_resources(pdev); - if (err) - goto remove_irq; - - err = mana_gd_detect_devices(pdev); + err = mana_probe(&gc->mana, false); if (err) - goto remove_irq; - - err = mana_probe(&gc->mana); - if (err) - goto clean_up_gdma; + goto cleanup_gd; return 0; -clean_up_gdma: - mana_hwc_destroy_channel(gc); - vfree(gc->cq_table); - gc->cq_table = NULL; -remove_irq: - mana_gd_remove_irqs(pdev); +cleanup_gd: + mana_gd_cleanup(pdev); unmap_bar: pci_iounmap(pdev, bar0_va); free_gc: + pci_set_drvdata(pdev, NULL); vfree(gc); release_region: pci_release_regions(pdev); @@ -1340,13 +1374,9 @@ static void mana_gd_remove(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); - mana_remove(&gc->mana); + mana_remove(&gc->mana, false); - mana_hwc_destroy_channel(gc); - vfree(gc->cq_table); - gc->cq_table = NULL; - - mana_gd_remove_irqs(pdev); + mana_gd_cleanup(pdev); pci_iounmap(pdev, gc->bar0_va); @@ -1357,6 +1387,52 @@ static void mana_gd_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +/* The 'state' parameter is not used. 
*/ +static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + mana_remove(&gc->mana, true); + + mana_gd_cleanup(pdev); + + return 0; +} + +/* In case the NIC hardware stops working, the suspend and resume callbacks will + * fail -- if this happens, it's safer to just report an error than try to undo + * what has been done. + */ +static int mana_gd_resume(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + int err; + + err = mana_gd_setup(pdev); + if (err) + return err; + + err = mana_probe(&gc->mana, true); + if (err) + return err; + + return 0; +} + +/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */ +static void mana_gd_shutdown(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "Shutdown was called\n"); + + mana_remove(&gc->mana, true); + + mana_gd_cleanup(pdev); + + pci_disable_device(pdev); +} + #ifndef PCI_VENDOR_ID_MICROSOFT #define PCI_VENDOR_ID_MICROSOFT 0x1414 #endif @@ -1371,6 +1447,9 @@ static struct pci_driver mana_driver = { .id_table = mana_id_table, .probe = mana_gd_probe, .remove = mana_gd_remove, + .suspend = mana_gd_suspend, + .resume = mana_gd_resume, + .shutdown = mana_gd_shutdown, }; module_pci_driver(mana_driver); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index d5c485a6d2845d..34b971ff8ef8ba 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -309,9 +309,6 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self) static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq) { - if (!hwc_cq) - return; - kfree(hwc_cq->comp_buf); if (hwc_cq->gdma_cq) @@ -363,7 +360,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, } hwc_cq->gdma_cq = cq; - comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL); + comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL); if (!comp_buf) { err = -ENOMEM; goto out; @@ -446,9 +443,6 @@ static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc, static void mana_hwc_destroy_wq(struct hw_channel_context *hwc, struct hwc_wq *hwc_wq) { - if (!hwc_wq) - return; - mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf); if (hwc_wq->gdma_wq) @@ -580,7 +574,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, return err; } - ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL); + ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -621,6 +615,7 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, *max_req_msg_size = hwc->hwc_init_max_req_msg_size; *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size; + /* Both were set in mana_hwc_init_event_handler().
*/ if (WARN_ON(cq->id >= gc->max_num_cqs)) return -EPROTO; @@ -636,9 +631,6 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, u32 max_req_msg_size, u32 max_resp_msg_size) { - struct hwc_wq *hwc_rxq = NULL; - struct hwc_wq *hwc_txq = NULL; - struct hwc_cq *hwc_cq = NULL; int err; err = mana_hwc_init_inflight_msg(hwc, q_depth); @@ -651,44 +643,32 @@ static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, err = mana_hwc_create_cq(hwc, q_depth * 2, mana_hwc_init_event_handler, hwc, mana_hwc_rx_event_handler, hwc, - mana_hwc_tx_event_handler, hwc, &hwc_cq); + mana_hwc_tx_event_handler, hwc, &hwc->cq); if (err) { dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err); goto out; } - hwc->cq = hwc_cq; err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size, - hwc_cq, &hwc_rxq); + hwc->cq, &hwc->rxq); if (err) { dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err); goto out; } - hwc->rxq = hwc_rxq; err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size, - hwc_cq, &hwc_txq); + hwc->cq, &hwc->txq); if (err) { dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err); goto out; } - hwc->txq = hwc_txq; hwc->num_inflight_msg = q_depth; hwc->max_req_msg_size = max_req_msg_size; return 0; out: - if (hwc_txq) - mana_hwc_destroy_wq(hwc, hwc_txq); - - if (hwc_rxq) - mana_hwc_destroy_wq(hwc, hwc_rxq); - - if (hwc_cq) - mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq); - - mana_gd_free_res_map(&hwc->inflight_msg_res); + /* mana_hwc_create_channel() will do the cleanup.*/ return err; } @@ -716,6 +696,9 @@ int mana_hwc_create_channel(struct gdma_context *gc) gd->pdid = INVALID_PDID; gd->doorbell = INVALID_DOORBELL; + /* mana_hwc_init_queues() only creates the required data structures, + * and doesn't touch the HWC device. + */ err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH, HW_CHANNEL_MAX_REQUEST_SIZE, HW_CHANNEL_MAX_RESPONSE_SIZE); @@ -741,42 +724,50 @@ int mana_hwc_create_channel(struct gdma_context *gc) return 0; out: - kfree(hwc); + mana_hwc_destroy_channel(gc); return err; } void mana_hwc_destroy_channel(struct gdma_context *gc) { struct hw_channel_context *hwc = gc->hwc.driver_data; - struct hwc_caller_ctx *ctx; - mana_smc_teardown_hwc(&gc->shm_channel, false); + if (!hwc) + return; + + /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's + * non-zero, the HWC worked and we should tear down the HWC here. 
+ */ + if (gc->max_num_cqs > 0) { + mana_smc_teardown_hwc(&gc->shm_channel, false); + gc->max_num_cqs = 0; + } - ctx = hwc->caller_ctx; - kfree(ctx); + kfree(hwc->caller_ctx); hwc->caller_ctx = NULL; - mana_hwc_destroy_wq(hwc, hwc->txq); - hwc->txq = NULL; + if (hwc->txq) + mana_hwc_destroy_wq(hwc, hwc->txq); - mana_hwc_destroy_wq(hwc, hwc->rxq); - hwc->rxq = NULL; + if (hwc->rxq) + mana_hwc_destroy_wq(hwc, hwc->rxq); - mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); - hwc->cq = NULL; + if (hwc->cq) + mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); mana_gd_free_res_map(&hwc->inflight_msg_res); hwc->num_inflight_msg = 0; - if (hwc->gdma_dev->pdid != INVALID_PDID) { - hwc->gdma_dev->doorbell = INVALID_DOORBELL; - hwc->gdma_dev->pdid = INVALID_PDID; - } + hwc->gdma_dev->doorbell = INVALID_DOORBELL; + hwc->gdma_dev->pdid = INVALID_PDID; kfree(hwc); gc->hwc.driver_data = NULL; gc->hwc.gdma_context = NULL; + + vfree(gc->cq_table); + gc->cq_table = NULL; } int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index fc98a5ba5ed070..d047ee876f12b2 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -374,8 +374,8 @@ int mana_alloc_queues(struct net_device *ndev); int mana_attach(struct net_device *ndev); int mana_detach(struct net_device *ndev, bool from_close); -int mana_probe(struct gdma_dev *gd); -void mana_remove(struct gdma_dev *gd); +int mana_probe(struct gdma_dev *gd, bool resuming); +void mana_remove(struct gdma_dev *gd, bool suspending); extern const struct ethtool_ops mana_ethtool_ops; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 030ae89f3a337a..72cbf45c42d88f 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1599,7 +1599,8 @@ static int mana_init_port(struct net_device *ndev) err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, &num_indirect_entries); if (err) { - netdev_err(ndev, "Failed to query info for vPort 0\n"); + netdev_err(ndev, "Failed to query info for vPort %d\n", + port_idx); goto reset_apc; } @@ -1610,7 +1611,7 @@ static int mana_init_port(struct net_device *ndev) if (apc->num_queues > apc->max_queues) apc->num_queues = apc->max_queues; - ether_addr_copy(ndev->dev_addr, apc->mac_addr); + eth_hw_addr_set(ndev, apc->mac_addr); return 0; @@ -1667,24 +1668,23 @@ int mana_attach(struct net_device *ndev) if (err) return err; - err = mana_alloc_queues(ndev); - if (err) { - kfree(apc->rxqs); - apc->rxqs = NULL; - return err; + if (apc->port_st_save) { + err = mana_alloc_queues(ndev); + if (err) { + mana_cleanup_port_context(apc); + return err; + } } - netif_device_attach(ndev); - apc->port_is_up = apc->port_st_save; /* Ensure port state updated before txq state */ smp_wmb(); - if (apc->port_is_up) { + if (apc->port_is_up) netif_carrier_on(ndev); - netif_tx_wake_all_queues(ndev); - } + + netif_device_attach(ndev); return 0; } @@ -1828,11 +1828,12 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, return err; } -int mana_probe(struct gdma_dev *gd) +int mana_probe(struct gdma_dev *gd, bool resuming) { struct gdma_context *gc = gd->gdma_context; + struct mana_context *ac = gd->driver_data; struct device *dev = gc->dev; - struct mana_context *ac; + u16 num_ports = 0; int err; int i; @@ -1844,44 +1845,70 @@ int mana_probe(struct gdma_dev 
*gd) if (err) return err; - ac = kzalloc(sizeof(*ac), GFP_KERNEL); - if (!ac) - return -ENOMEM; + if (!resuming) { + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + if (!ac) + return -ENOMEM; - ac->gdma_dev = gd; - ac->num_ports = 1; - gd->driver_data = ac; + ac->gdma_dev = gd; + gd->driver_data = ac; + } err = mana_create_eq(ac); if (err) goto out; err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, - MANA_MICRO_VERSION, &ac->num_ports); + MANA_MICRO_VERSION, &num_ports); if (err) goto out; + if (!resuming) { + ac->num_ports = num_ports; + } else { + if (ac->num_ports != num_ports) { + dev_err(dev, "The number of vPorts changed: %d->%d\n", + ac->num_ports, num_ports); + err = -EPROTO; + goto out; + } + } + + if (ac->num_ports == 0) + dev_err(dev, "Failed to detect any vPort\n"); + if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) ac->num_ports = MAX_PORTS_IN_MANA_DEV; - for (i = 0; i < ac->num_ports; i++) { - err = mana_probe_port(ac, i, &ac->ports[i]); - if (err) - break; + if (!resuming) { + for (i = 0; i < ac->num_ports; i++) { + err = mana_probe_port(ac, i, &ac->ports[i]); + if (err) + break; + } + } else { + for (i = 0; i < ac->num_ports; i++) { + rtnl_lock(); + err = mana_attach(ac->ports[i]); + rtnl_unlock(); + if (err) + break; + } } out: if (err) - mana_remove(gd); + mana_remove(gd, false); return err; } -void mana_remove(struct gdma_dev *gd) +void mana_remove(struct gdma_dev *gd, bool suspending) { struct gdma_context *gc = gd->gdma_context; struct mana_context *ac = gd->driver_data; struct device *dev = gc->dev; struct net_device *ndev; + int err; int i; for (i = 0; i < ac->num_ports; i++) { @@ -1897,7 +1924,16 @@ void mana_remove(struct gdma_dev *gd) */ rtnl_lock(); - mana_detach(ndev, false); + err = mana_detach(ndev, false); + if (err) + netdev_err(ndev, "Failed to detach vPort %d: %d\n", + i, err); + + if (suspending) { + /* No need to unregister the ndev. 
*/ + rtnl_unlock(); + continue; + } unregister_netdevice(ndev); @@ -1910,6 +1946,10 @@ void mana_remove(struct gdma_dev *gd) out: mana_gd_deregister_device(gd); + + if (suspending) + return; + gd->driver_data = NULL; gd->gdma_context = NULL; kfree(ac); diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index 7e74339f39ae35..c3c81ae3fafd34 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -211,9 +211,6 @@ static int mana_set_channels(struct net_device *ndev, unsigned int old_count = apc->num_queues; int err, err2; - if (!apc->port_is_up) - return -EOPNOTSUPP; - err = mana_detach(ndev, false); if (err) { netdev_err(ndev, "mana_detach failed: %d\n", err); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 49def6934cad1b..15179b9529e137 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, address->sa_data); moxart_update_mac_address(ndev); return 0; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index b6a73d151dec11..8dd8c7f425d2c9 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM - depends on OF_NET + depends on OF select MSCC_OCELOT_SWITCH_LIB select GENERIC_PHY help diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a08e4f530c1c11..e6c18b598d5c5e 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -20,11 +20,13 @@ struct ocelot_mact_entry { enum macaccess_entry_type type; }; +/* Caller must hold &ocelot->mact_lock */ static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot) { return ocelot_read(ocelot, ANA_TABLES_MACACCESS); } +/* Caller must hold &ocelot->mact_lock */ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) { u32 val; @@ -36,6 +38,7 @@ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); } +/* Caller must hold &ocelot->mact_lock */ static void ocelot_mact_select(struct ocelot *ocelot, const unsigned char mac[ETH_ALEN], unsigned int vid) @@ -67,6 +70,7 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, ANA_TABLES_MACACCESS_ENTRYTYPE(type) | ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN); unsigned int mc_ports; + int err; /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */ if (type == ENTRYTYPE_MACv4) @@ -79,18 +83,28 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, if (mc_ports & BIT(ocelot->num_phys_ports)) cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; + mutex_lock(&ocelot->mact_lock); + ocelot_mact_select(ocelot, mac, vid); /* Issue a write command */ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS); - return ocelot_mact_wait_for_completion(ocelot); + err = ocelot_mact_wait_for_completion(ocelot); + + mutex_unlock(&ocelot->mact_lock); + + return err; } EXPORT_SYMBOL(ocelot_mact_learn); int ocelot_mact_forget(struct ocelot *ocelot, const unsigned char mac[ETH_ALEN], unsigned int vid) { + int err; + + 
mutex_lock(&ocelot->mact_lock); + ocelot_mact_select(ocelot, mac, vid); /* Issue a forget command */ @@ -98,7 +112,11 @@ int ocelot_mact_forget(struct ocelot *ocelot, ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), ANA_TABLES_MACACCESS); - return ocelot_mact_wait_for_completion(ocelot); + err = ocelot_mact_wait_for_completion(ocelot); + + mutex_unlock(&ocelot->mact_lock); + + return err; } EXPORT_SYMBOL(ocelot_mact_forget); @@ -114,7 +132,9 @@ static void ocelot_mact_init(struct ocelot *ocelot) | ANA_AGENCTRL_LEARN_IGNORE_VLAN, ANA_AGENCTRL); - /* Clear the MAC table */ + /* Clear the MAC table. We are not concurrent with anyone, so + * holding &ocelot->mact_lock is pointless. + */ ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); } @@ -162,48 +182,117 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) return ocelot_vlant_wait_for_completion(ocelot); } -static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, - struct ocelot_vlan native_vlan) +static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - u32 val = 0; + struct ocelot_bridge_vlan *vlan; + int num_untagged = 0; - ocelot_port->native_vlan = native_vlan; + list_for_each_entry(vlan, &ocelot->vlans, list) { + if (!(vlan->portmask & BIT(port))) + continue; - ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid), - REW_PORT_VLAN_CFG_PORT_VID_M, - REW_PORT_VLAN_CFG, port); + if (vlan->untagged & BIT(port)) + num_untagged++; + } + + return num_untagged; +} + +static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port) +{ + struct ocelot_bridge_vlan *vlan; + int num_tagged = 0; + + list_for_each_entry(vlan, &ocelot->vlans, list) { + if (!(vlan->portmask & BIT(port))) + continue; + + if (!(vlan->untagged & BIT(port))) + num_tagged++; + } + + return num_tagged; +} + +/* We use native VLAN when we have to mix egress-tagged VLANs with exactly + * _one_ egress-untagged VLAN (_the_ native VLAN) + */ +static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port) +{ + return ocelot_port_num_tagged_vlans(ocelot, port) && + ocelot_port_num_untagged_vlans(ocelot, port) == 1; +} + +static struct ocelot_bridge_vlan * +ocelot_port_find_native_vlan(struct ocelot *ocelot, int port) +{ + struct ocelot_bridge_vlan *vlan; + + list_for_each_entry(vlan, &ocelot->vlans, list) + if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port)) + return vlan; + + return NULL; +} + +/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable, + * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness + * state of the port. + */ +static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + enum ocelot_port_tag_config tag_cfg; + bool uses_native_vlan = false; if (ocelot_port->vlan_aware) { - if (native_vlan.valid) - /* Tag all frames except when VID == DEFAULT_VLAN */ - val = REW_TAG_CFG_TAG_CFG(1); + uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port); + + if (uses_native_vlan) + tag_cfg = OCELOT_PORT_TAG_NATIVE; + else if (ocelot_port_num_untagged_vlans(ocelot, port)) + tag_cfg = OCELOT_PORT_TAG_DISABLED; else - /* Tag all frames */ - val = REW_TAG_CFG_TAG_CFG(3); + tag_cfg = OCELOT_PORT_TAG_TRUNK; } else { - /* Port tagging disabled. 
*/ - val = REW_TAG_CFG_TAG_CFG(0); + tag_cfg = OCELOT_PORT_TAG_DISABLED; } - ocelot_rmw_gix(ocelot, val, + + ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg), REW_TAG_CFG_TAG_CFG_M, REW_TAG_CFG, port); + + if (uses_native_vlan) { + struct ocelot_bridge_vlan *native_vlan; + + /* Not having a native VLAN is impossible, because + * ocelot_port_num_untagged_vlans has returned 1. + * So there is no use in checking for NULL here. + */ + native_vlan = ocelot_port_find_native_vlan(ocelot, port); + + ocelot_rmw_gix(ocelot, + REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port); + } } /* Default vlan to clasify for untagged frames (may be zero) */ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, - struct ocelot_vlan pvid_vlan) + const struct ocelot_bridge_vlan *pvid_vlan) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + u16 pvid = OCELOT_VLAN_UNAWARE_PVID; u32 val = 0; ocelot_port->pvid_vlan = pvid_vlan; - if (!ocelot_port->vlan_aware) - pvid_vlan.vid = 0; + if (ocelot_port->vlan_aware && pvid_vlan) + pvid = pvid_vlan->vid; ocelot_rmw_gix(ocelot, - ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid), + ANA_PORT_VLAN_CFG_VLAN_VID(pvid), ANA_PORT_VLAN_CFG_VLAN_VID_M, ANA_PORT_VLAN_CFG, port); @@ -212,7 +301,7 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, * classified to VLAN 0, but that is always in our RX filter, so it * would get accepted were it not for this setting. */ - if (!pvid_vlan.valid && ocelot_port->vlan_aware) + if (!pvid_vlan && ocelot_port->vlan_aware) val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; @@ -222,31 +311,90 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, ANA_PORT_DROP_CFG, port); } -static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid) +static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, + u16 vid) { + struct ocelot_bridge_vlan *vlan; + + list_for_each_entry(vlan, &ocelot->vlans, list) + if (vlan->vid == vid) + return vlan; + + return NULL; +} + +static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid, + bool untagged) +{ + struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); + unsigned long portmask; int err; - err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask); - if (err) + if (vlan) { + portmask = vlan->portmask | BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) + return err; + + vlan->portmask = portmask; + /* Bridge VLANs can be overwritten with a different + * egress-tagging setting, so make sure to override an untagged + * with a tagged VID if that's going on. 
+ */ + if (untagged) + vlan->untagged |= BIT(port); + else + vlan->untagged &= ~BIT(port); + + return 0; + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + portmask = BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) { + kfree(vlan); return err; + } - ocelot->vlan_mask[vid] = vlan_mask; + vlan->vid = vid; + vlan->portmask = portmask; + if (untagged) + vlan->untagged = BIT(port); + INIT_LIST_HEAD(&vlan->list); + list_add_tail(&vlan->list, &ocelot->vlans); return 0; } -static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid) -{ - return ocelot_vlan_member_set(ocelot, - ocelot->vlan_mask[vid] | BIT(port), - vid); -} - static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) { - return ocelot_vlan_member_set(ocelot, - ocelot->vlan_mask[vid] & ~BIT(port), - vid); + struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); + unsigned long portmask; + int err; + + if (!vlan) + return 0; + + portmask = vlan->portmask & ~BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) + return err; + + vlan->portmask = portmask; + if (vlan->portmask) + return 0; + + list_del(&vlan->list); + kfree(vlan); + + return 0; } int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, @@ -279,7 +427,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, ANA_PORT_VLAN_CFG, port); ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); - ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan); + ocelot_port_manage_port_tag(ocelot, port); return 0; } @@ -288,14 +436,20 @@ EXPORT_SYMBOL(ocelot_port_vlan_filtering); int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged, struct netlink_ext_ack *extack) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - - /* Deny changing the native VLAN, but always permit deleting it */ - if (untagged && ocelot_port->native_vlan.vid != vid && - ocelot_port->native_vlan.valid) { - NL_SET_ERR_MSG_MOD(extack, - "Port already has a native VLAN"); - return -EBUSY; + if (untagged) { + /* We are adding an egress-untagged VLAN */ + if (ocelot_port_uses_native_vlan(ocelot, port)) { + NL_SET_ERR_MSG_MOD(extack, + "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); + return -EBUSY; + } + } else { + /* We are adding an egress-tagged VLAN */ + if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) { + NL_SET_ERR_MSG_MOD(extack, + "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); + return -EBUSY; + } } return 0; @@ -307,27 +461,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, { int err; - err = ocelot_vlan_member_add(ocelot, port, vid); + err = ocelot_vlan_member_add(ocelot, port, vid, untagged); if (err) return err; /* Default ingress vlan classification */ - if (pvid) { - struct ocelot_vlan pvid_vlan; - - pvid_vlan.vid = vid; - pvid_vlan.valid = true; - ocelot_port_set_pvid(ocelot, port, pvid_vlan); - } + if (pvid) + ocelot_port_set_pvid(ocelot, port, + ocelot_bridge_vlan_find(ocelot, vid)); /* Untagged egress vlan clasification */ - if (untagged) { - struct ocelot_vlan native_vlan; - - native_vlan.vid = vid; - native_vlan.valid = true; - ocelot_port_set_native_vlan(ocelot, port, native_vlan); - } + ocelot_port_manage_port_tag(ocelot, port); return 0; } @@ -343,18 +487,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) return err; /* Ingress */ - if
(ocelot_port->pvid_vlan.vid == vid) { - struct ocelot_vlan pvid_vlan = {0}; - - ocelot_port_set_pvid(ocelot, port, pvid_vlan); - } + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + ocelot_port_set_pvid(ocelot, port, NULL); /* Egress */ - if (ocelot_port->native_vlan.vid == vid) { - struct ocelot_vlan native_vlan = {0}; - - ocelot_port_set_native_vlan(ocelot, port, native_vlan); - } + ocelot_port_manage_port_tag(ocelot, port); return 0; } @@ -372,13 +509,13 @@ static void ocelot_vlan_init(struct ocelot *ocelot) /* Configure the port VLAN memberships */ for (vid = 1; vid < VLAN_N_VID; vid++) - ocelot_vlan_member_set(ocelot, 0, vid); + ocelot_vlant_set_mask(ocelot, vid, 0); /* Because VLAN filtering is enabled, we need VID 0 to get untagged * traffic. It is added automatically if 8021q module is loaded, but * we can't rely on it since module may be not loaded. */ - ocelot_vlan_member_set(ocelot, all_ports, 0); + ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports); /* Set vlan ingress filter mask to all ports but the CPU port by * default. @@ -951,7 +1088,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, ocelot_ifh_set_bypass(ifh, 1); ocelot_ifh_set_dest(ifh, BIT_ULL(port)); ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); - ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); ocelot_ifh_set_rew_op(ifh, rew_op); for (i = 0; i < OCELOT_TAG_LEN / 4; i++) @@ -1053,6 +1190,7 @@ int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, } EXPORT_SYMBOL(ocelot_port_fdb_do_dump); +/* Caller must hold &ocelot->mact_lock */ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, struct ocelot_mact_entry *entry) { @@ -1103,33 +1241,40 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, int ocelot_fdb_dump(struct ocelot *ocelot, int port, dsa_fdb_dump_cb_t *cb, void *data) { + int err = 0; int i, j; + /* We could take the lock just around ocelot_mact_read, but doing so + * thousands of times in a row seems rather pointless and inefficient. + */ + mutex_lock(&ocelot->mact_lock); + /* Loop through all the mac tables entries. */ for (i = 0; i < ocelot->num_mact_rows; i++) { for (j = 0; j < 4; j++) { struct ocelot_mact_entry entry; bool is_static; - int ret; - ret = ocelot_mact_read(ocelot, port, i, j, &entry); + err = ocelot_mact_read(ocelot, port, i, j, &entry); /* If the entry is invalid (wrong port, invalid...), * skip it. 
*/ - if (ret == -EINVAL) + if (err == -EINVAL) continue; - else if (ret) - return ret; + else if (err) + break; is_static = (entry.type == ENTRYTYPE_LOCKED); - ret = cb(entry.mac, entry.vid, is_static, data); - if (ret) - return ret; + err = cb(entry.mac, entry.vid, is_static, data); + if (err) + break; } } - return 0; + mutex_unlock(&ocelot->mact_lock); + + return err; } EXPORT_SYMBOL(ocelot_fdb_dump); @@ -1680,12 +1825,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, struct net_device *bridge) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_vlan pvid = {0}, native_vlan = {0}; ocelot_port->bridge = NULL; - ocelot_port_set_pvid(ocelot, port, pvid); - ocelot_port_set_native_vlan(ocelot, port, native_vlan); + ocelot_port_set_pvid(ocelot, port, NULL); + ocelot_port_manage_port_tag(ocelot, port); ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_port_bridge_leave); @@ -2071,9 +2215,10 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot) OCELOT_TAG_PREFIX_NONE); /* Configure the CPU port to be VLAN aware */ - ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | - ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | - ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ocelot_write_gix(ocelot, + ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), ANA_PORT_VLAN_CFG, cpu); } @@ -2114,6 +2259,7 @@ int ocelot_init(struct ocelot *ocelot) mutex_init(&ocelot->stats_lock); mutex_init(&ocelot->ptp_lock); + mutex_init(&ocelot->mact_lock); spin_lock_init(&ocelot->ptp_clock_lock); spin_lock_init(&ocelot->ts_id_lock); snprintf(queue_name, sizeof(queue_name), "%s-stats", @@ -2130,6 +2276,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); INIT_LIST_HEAD(&ocelot->pgids); + INIT_LIST_HEAD(&ocelot->vlans); ocelot_detect_features(ocelot); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 1952d6a1b98ab3..e43da09b8f9110 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -25,6 +25,7 @@ #include "ocelot_rew.h" #include "ocelot_qs.h" +#define OCELOT_VLAN_UNAWARE_PVID 0 #define OCELOT_BUFFER_CELL_SZ 60 #define OCELOT_STATS_CHECK_DELAY (2 * HZ) diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 8b843d3c9189a4..769a8159373e01 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain) return NULL; } +static int +ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port, + struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + + if (!ocelot_port->vlan_aware) { + NL_SET_ERR_MSG_MOD(extack, + "Can only modify VLAN under VLAN aware bridge"); + return -EOPNOTSUPP; + } + + filter->action.vid_replace_ena = true; + filter->action.pcp_dei_ena = true; + filter->action.vid = a->vlan.vid; + filter->action.pcp = a->vlan.prio; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + +static int +ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct 
netlink_ext_ack *extack) +{ + enum ocelot_tag_tpid_sel tpid; + + switch (ntohs(a->vlan.proto)) { + case ETH_P_8021Q: + tpid = OCELOT_TAG_TPID_SEL_8021Q; + break; + case ETH_P_8021AD: + tpid = OCELOT_TAG_TPID_SEL_8021AD; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify custom TPID"); + return -EOPNOTSUPP; + } + + filter->action.tag_a_tpid_sel = tpid; + filter->action.push_outer_tag = OCELOT_ES0_TAG; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID; + filter->action.vid_a_val = a->vlan.vid; + filter->action.pcp_a_val = a->vlan.prio; + filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, bool ingress, struct flow_cls_offload *f, struct ocelot_vcap_filter *filter) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; struct netlink_ext_ack *extack = f->common.extack; bool allow_missing_goto_target = false; const struct flow_action_entry *a; enum ocelot_tag_tpid_sel tpid; int i, chain, egress_port; u64 rate; + int err; if (!flow_action_basic_hw_stats_check(&f->rule->action, f->common.extack)) @@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; case FLOW_ACTION_VLAN_MANGLE: - if (filter->block_id != VCAP_IS1) { - NL_SET_ERR_MSG_MOD(extack, - "VLAN modify action can only be offloaded to VCAP IS1"); - return -EOPNOTSUPP; - } - if (filter->goto_target != -1) { + if (filter->block_id == VCAP_IS1) { + err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port, + filter, a, + extack); + } else if (filter->block_id == VCAP_ES0) { + err = ocelot_flower_parse_egress_vlan_modify(filter, a, + extack); + } else { NL_SET_ERR_MSG_MOD(extack, - "Last action must be GOTO"); - return -EOPNOTSUPP; + "VLAN modify action can only be offloaded to VCAP IS1 or ES0"); + err = -EOPNOTSUPP; } - if (!ocelot_port->vlan_aware) { - NL_SET_ERR_MSG_MOD(extack, - "Can only modify VLAN under VLAN aware bridge"); - return -EOPNOTSUPP; - } - filter->action.vid_replace_ena = true; - filter->action.pcp_dei_ena = true; - filter->action.vid = a->vlan.vid; - filter->action.pcp = a->vlan.prio; - filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + if (err) + return err; break; case FLOW_ACTION_PRIORITY: if (filter->block_id != VCAP_IS1) { @@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, } filter->action.tag_a_tpid_sel = tpid; filter->action.push_outer_tag = OCELOT_ES0_TAG; - filter->action.tag_a_vid_sel = 1; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID; filter->action.vid_a_val = a->vlan.vid; filter->action.pcp_a_val = a->vlan.prio; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot, return 0; } +/* If we have an egress VLAN modification rule, we need to actually write the + * delta between the input VLAN (from the key) and the output VLAN (from the + * action), but the action was parsed first. So we need to patch the delta into + * the action here. 
+ */ +static int +ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + if (filter->block_id != VCAP_ES0 || + filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID) + return 0; + + if (filter->vlan.vid.mask != VLAN_VID_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 VLAN rewriting needs a full VLAN in the key"); + return -EOPNOTSUPP; + } + + filter->action.vid_a_val -= filter->vlan.vid.value; + filter->action.vid_a_val &= VLAN_VID_MASK; + + return 0; +} + int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { @@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, return ret; } + ret = ocelot_flower_patch_es0_vlan_modify(filter, extack); + if (ret) { + kfree(filter); + return ret; + } + /* The non-optional GOTOs for the TCAM skeleton don't need * to be actually offloaded. */ diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c index 4b0941f09f718a..1fa58546abdce7 100644 --- a/drivers/net/ethernet/mscc/ocelot_mrp.c +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -116,16 +116,16 @@ static void ocelot_mrp_save_mac(struct ocelot *ocelot, struct ocelot_port *port) { ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac, - port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac, - port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); } static void ocelot_mrp_del_mac(struct ocelot *ocelot, struct ocelot_port *port) { - ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid); - ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid); + ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID); + ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID); } int ocelot_mrp_add(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 2545727fd5b2f3..eaeba60b1bba5e 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -418,7 +418,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) * with VLAN filtering feature. We need to keep it to receive * untagged traffic. */ - if (vid == 0) + if (vid == OCELOT_VLAN_UNAWARE_PVID) return 0; ret = ocelot_vlan_del(ocelot, port, vid); @@ -553,7 +553,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) struct ocelot_mact_work_ctx w; ether_addr_copy(w.forget.addr, addr); - w.forget.vid = ocelot_port->pvid_vlan.vid; + w.forget.vid = OCELOT_VLAN_UNAWARE_PVID; w.type = OCELOT_MACT_FORGET; return ocelot_enqueue_mact_action(ocelot, &w); @@ -567,7 +567,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) struct ocelot_mact_work_ctx w; ether_addr_copy(w.learn.addr, addr); - w.learn.vid = ocelot_port->pvid_vlan.vid; + w.learn.vid = OCELOT_VLAN_UNAWARE_PVID; w.learn.pgid = PGID_CPU; w.learn.entry_type = ENTRYTYPE_LOCKED; w.type = OCELOT_MACT_LEARN; @@ -602,11 +602,11 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p) /* Learn the new net device MAC address in the mac table. */ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, - ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); /* Then forget the previous one. 
*/ - ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid); + ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID); - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -1509,7 +1509,7 @@ static void vsc7514_phylink_validate(struct phylink_config *config, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != ocelot_port->phy_mode) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } @@ -1528,9 +1528,8 @@ static void vsc7514_phylink_validate(struct phylink_config *config, phylink_set(mask, 2500baseT_Full); phylink_set(mask, 2500baseX_Full); - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void vsc7514_phylink_mac_config(struct phylink_config *config, @@ -1705,10 +1704,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, NETIF_F_HW_TC; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; - memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); - dev->dev_addr[ETH_ALEN - 1] += port; + eth_hw_addr_gen(dev, ocelot->base_mac, port); ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, - ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED); + OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); ocelot_init_port(ocelot, port); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index d51f799e4e8613..38103b0255b0c1 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -1135,10 +1135,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_put_ports; - err = devlink_register(devlink); - if (err) - goto out_ocelot_deinit; - err = mscc_ocelot_init_ports(pdev, ports); if (err) goto out_ocelot_devlink_unregister; @@ -1161,6 +1157,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); of_node_put(ports); + devlink_register(devlink); dev_info(&pdev->dev, "Ocelot switch probed\n"); @@ -1170,8 +1167,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); out_ocelot_devlink_unregister: - devlink_unregister(devlink); -out_ocelot_deinit: ocelot_deinit(ocelot); out_put_ports: of_node_put(ports); @@ -1184,11 +1179,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); + devlink_unregister(ocelot->devlink); ocelot_deinit_timestamp(ocelot); ocelot_devlink_sb_unregister(ocelot); mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); - devlink_unregister(ocelot->devlink); ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_switchdev_notifier(&ocelot_switchdev_nb); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index c1a75b08ced7e6..5736fcdafd7a02 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt) return status; } -static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * 
addr) +static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, + const u8 * addr) { struct myri10ge_cmd cmd; int status; @@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) } /* change the dev structure */ - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); return 0; } @@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct myri10ge_priv *mgp; struct device *dev = &pdev->dev; - int i; int status = -ENXIO; int dac_enabled; unsigned hdr_offset, ss_offset; @@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (status) goto abort_with_ioremap; - for (i = 0; i < ETH_ALEN; i++) - netdev->dev_addr[i] = mgp->mac_addr[i]; + eth_hw_addr_set(netdev, mgp->mac_addr); myri10ge_select_firmware(mgp); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 3f982033944bc6..82a22711ce4589 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -809,6 +809,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) unsigned long iosize; void __iomem *ioaddr; const int pcibar = 1; /* PCI base address register */ + u8 addr[ETH_ALEN]; int prev_eedata; u32 tmp; @@ -859,10 +860,11 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) prev_eedata = eeprom_read(ioaddr, 6); for (i = 0; i < 3; i++) { int eedata = eeprom_read(ioaddr, i + 7); - dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15); - dev->dev_addr[i*2+1] = eedata >> 7; + addr[i*2] = (eedata << 1) + (prev_eedata >> 15); + addr[i*2+1] = eedata >> 7; prev_eedata = eedata; } + eth_hw_addr_set(dev, addr); np = netdev_priv(dev); np->ioaddr = ioaddr; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 72794d15887118..49ea130c906712 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1649,9 +1649,11 @@ static int ns83820_open(struct net_device *ndev) return ret; } -static void ns83820_getmac(struct ns83820 *dev, u8 *mac) +static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev) { + u8 mac[ETH_ALEN]; unsigned i; + for (i=0; i<3; i++) { u32 data; @@ -1661,9 +1663,10 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac) writel(i*2, dev->base + RFCR); data = readl(dev->base + RFDR); - *mac++ = data; - *mac++ = data >> 8; + mac[i * 2] = data; + mac[i * 2 + 1] = data >> 8; } + eth_hw_addr_set(ndev, mac); } static void ns83820_set_multicast(struct net_device *ndev) @@ -2136,7 +2139,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev, /* Disable Wake On Lan */ writel(0, dev->base + WCSR); - ns83820_getmac(dev, ndev->dev_addr); + ns83820_getmac(dev, ndev); /* Yes, we support dumb IP checksum on transmit */ ndev->features |= NETIF_F_SG; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 3b6b2e61139e65..d1c32c65db054e 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* store the MAC address in CAM */ return do_s2io_prog_unicast(dev, dev->dev_addr); @@ -5217,7 +5217,7 @@ 
static int s2io_set_mac_addr(struct net_device *dev, void *p) * as defined in errno.h file on failure. */ -static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) +static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr) { struct s2io_nic *sp = netdev_priv(dev); register u64 mac_addr = 0, perm_addr = 0; @@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Set the factory defined MAC address initially */ dev->addr_len = ETH_ALEN; - memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr); /* initialize number of multicast & unicast MAC entries variables */ if (sp->device_type == XFRAME_I_DEVICE) { diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 5a6032212c19d2..a4266d1544abb4 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp); static int s2io_poll_msix(struct napi_struct *napi, int budget); static int s2io_poll_inta(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); -static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); +static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr); static void s2io_alarm_handle(struct timer_list *t); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index df4a3f3da83a9b..1969009a91e79b 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) } if (unlikely(!is_vxge_card_up(vdev))) { - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return VXGE_HW_OK; } @@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) return -EINVAL; } - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return status; } @@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* Store the fw version for ethttool option */ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); - memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); + eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr); /* Copy the station mac address to the list */ for (i = 0; i < vdev->no_of_vpath; i++) { diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 605a1617b195ed..5d3df28c648ffc 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, return; } - ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + eth_hw_addr_set(nn->dp.netdev, mac_addr); ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c index 2473fb5f75e5e5..2a5cc64227e9f0 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c +++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c @@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle, static void nfp_abm_stats_calculate(struct nfp_alink_stats *new, struct nfp_alink_stats 
*old, - struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_sync *bstats, struct gnet_stats_queue *qstats) { _bstats_update(bstats, new->tx_bytes - old->tx_bytes, diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c index 36491835ac6553..db297ee4d7ad10 100644 --- a/drivers/net/ethernet/netronome/nfp/devlink_param.c +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf) if (err <= 0) return err; - err = devlink_params_register(devlink, nfp_devlink_params, - ARRAY_SIZE(nfp_devlink_params)); - if (err) - return err; - - devlink_params_publish(devlink); - return 0; + return devlink_params_register(devlink, nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); } void nfp_devlink_params_unregister(struct nfp_pf *pf) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 2a432de11858da..a3242b36e216ec 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -272,7 +272,8 @@ nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx) for (act_idx = start_idx + 1; act_idx < num_act; act_idx++) if (act[act_idx].id == FLOW_ACTION_REDIRECT || act[act_idx].id == FLOW_ACTION_MIRRED) - return netif_is_gretap(act[act_idx].dev); + return netif_is_gretap(act[act_idx].dev) || + netif_is_ip6gretap(act[act_idx].dev); return false; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index a2926b1b3cff4a..784292b1629070 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -703,7 +703,7 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev, { if (netif_is_vxlan(netdev)) return tun_type == NFP_FL_TUNNEL_VXLAN; - if (netif_is_gretap(netdev)) + if (netif_is_gretap(netdev) || netif_is_ip6gretap(netdev)) return tun_type == NFP_FL_TUNNEL_GRE; if (netif_is_geneve(netdev)) return tun_type == NFP_FL_TUNNEL_GENEVE; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 64c0ef57ad4263..224089d04d9831 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -360,7 +360,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { /* check if GRE, which has no enc_ports */ - if (!netif_is_gretap(netdev)) { + if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index ab70179728f636..dfb4468fe287ab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry) } static int -__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) +__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del) { struct nfp_tun_mac_addr_offload payload; @@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx) } static struct 
nfp_tun_offloaded_mac * -nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac) +nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac) { struct nfp_flower_priv *priv = app->priv; @@ -1005,7 +1005,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, static int nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, - u8 *mac, bool mod) + const u8 *mac, bool mod) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_repr_priv *repr_priv; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5bfa22accf2c99..850bfdf83d0a43 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2067,7 +2067,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) if (napi_complete_done(napi, pkts_polled)) nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); - if (r_vec->nfp_net->rx_coalesce_adapt_on) { + if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) { struct dim_sample dim_sample = {}; unsigned int start; u64 pkts, bytes; @@ -2082,7 +2082,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) net_dim(&r_vec->rx_dim, dim_sample); } - if (r_vec->nfp_net->tx_coalesce_adapt_on) { + if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) { struct dim_sample dim_sample = {}; unsigned int start; u64 pkts, bytes; @@ -3016,10 +3016,8 @@ static void nfp_net_rx_dim_work(struct work_struct *work) /* copy RX interrupt coalesce parameters */ value = (moder.pkts << 16) | (factor * moder.usec); - rtnl_lock(); nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value); (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); - rtnl_unlock(); dim->state = DIM_START_MEASURE; } @@ -3047,10 +3045,8 @@ static void nfp_net_tx_dim_work(struct work_struct *work) /* copy TX interrupt coalesce parameters */ value = (moder.pkts << 16) | (factor * moder.usec); - rtnl_lock(); nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value); (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); - rtnl_unlock(); dim->state = DIM_START_MEASURE; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index d10a938013445f..751f76cd4f7988 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, return; } - ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); + eth_hw_addr_set(netdev, eth_port->mac_addr); ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); } @@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; - err = devlink_register(devlink); - if (err) - goto err_app_clean; - err = nfp_shared_buf_register(pf); if (err) goto err_devlink_unreg; @@ -734,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) goto err_stop_app; mutex_unlock(&pf->lock); + devlink_register(devlink); return 0; @@ -751,8 +748,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) nfp_shared_buf_unregister(pf); err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); - devlink_unregister(devlink); -err_app_clean: nfp_net_pf_app_clean(pf); err_unmap: nfp_net_pci_unmap_mem(pf); @@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) { struct nfp_net *nn, *next; + devlink_unregister(priv_to_devlink(pf)); mutex_lock(&pf->lock); list_for_each_entry_safe(nn, next, 
&pf->vnics, vnic_list) { if (!nfp_net_is_data_vnic(nn)) @@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf) nfp_devlink_params_unregister(pf); nfp_shared_buf_unregister(pf); - devlink_unregister(priv_to_devlink(pf)); nfp_net_pf_free_irqs(pf); nfp_net_pf_app_clean(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 3b8e675087dea1..369f6ae700c7d8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) { struct nfp_reprs *reprs; - reprs = kzalloc(sizeof(*reprs) + - num_reprs * sizeof(struct net_device *), GFP_KERNEL); + reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL); if (!reprs) return NULL; reprs->num_reprs = num_reprs; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index c0e2f4394aef84..87f2268b16d6e4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn) return; } - ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + eth_hw_addr_set(nn->dp.netdev, mac_addr); ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 346145d3180eb2..cfeb7620ae20b6 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev) mac_addr = nixge_get_nvmem_address(&pdev->dev); if (mac_addr && is_valid_ether_addr(mac_addr)) { - ether_addr_copy(ndev->dev_addr, mac_addr); + eth_hw_addr_set(ndev, mac_addr); kfree(mac_addr); } else { eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ef3fb4cc90af67..9b530d7509a42a 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) return -EADDRNOTAVAIL; /* synchronized against open : rtnl_lock() held by caller */ - memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, macaddr->sa_data); if (netif_running(dev)) { netif_tx_lock_bh(dev); @@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) u32 phystate_orig = 0, phystate; int phyinitialized = 0; static int printed_version; + u8 mac[ETH_ALEN]; if (!printed_version++) pr_info("Reverse Engineered nForce ethernet driver. 
Version %s.\n", @@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) txreg = readl(base + NvRegTransmitPoll); if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { /* mac address is already in correct order */ - dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; - dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + mac[0] = (np->orig_mac[0] >> 0) & 0xff; + mac[1] = (np->orig_mac[0] >> 8) & 0xff; + mac[2] = (np->orig_mac[0] >> 16) & 0xff; + mac[3] = (np->orig_mac[0] >> 24) & 0xff; + mac[4] = (np->orig_mac[1] >> 0) & 0xff; + mac[5] = (np->orig_mac[1] >> 8) & 0xff; } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { /* mac address is already in correct order */ - dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; - dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + mac[0] = (np->orig_mac[0] >> 0) & 0xff; + mac[1] = (np->orig_mac[0] >> 8) & 0xff; + mac[2] = (np->orig_mac[0] >> 16) & 0xff; + mac[3] = (np->orig_mac[0] >> 24) & 0xff; + mac[4] = (np->orig_mac[1] >> 0) & 0xff; + mac[5] = (np->orig_mac[1] >> 8) & 0xff; /* * Set orig mac address back to the reversed version. * This flag will be cleared during low power transition. * Therefore, we should always put back the reversed address. */ - np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + - (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); - np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); + np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) + + (mac[3] << 16) + (mac[2] << 24); + np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8); } else { /* need to reverse mac address to correct order */ - dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; - dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; + mac[0] = (np->orig_mac[1] >> 8) & 0xff; + mac[1] = (np->orig_mac[1] >> 0) & 0xff; + mac[2] = (np->orig_mac[0] >> 24) & 0xff; + mac[3] = (np->orig_mac[0] >> 16) & 0xff; + mac[4] = (np->orig_mac[0] >> 8) & 0xff; + mac[5] = (np->orig_mac[0] >> 0) & 0xff; writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); dev_dbg(&pci_dev->dev, "%s: set workaround bit for reversed mac addr\n", __func__); } - if (!is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(mac)) { + eth_hw_addr_set(dev, mac); + } else { /* * Bad mac address. 
At least one bios sets the mac address * to 01:23:45:67:89:ab */ dev_err(&pci_dev->dev, "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n", - dev->dev_addr); + mac); eth_hw_addr_random(dev); dev_err(&pci_dev->dev, "Using random MAC address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index c910fa2f40a4bc..bc39558fe82be1 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -419,7 +419,7 @@ struct netdata_local { /* * MAC support functions */ -static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) +static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp; @@ -1092,7 +1092,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&pldat->lock, flags); @@ -1231,6 +1231,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) struct net_device *ndev; dma_addr_t dma_handle; struct resource *res; + u8 addr[ETH_ALEN]; int irq, ret; /* Setup network interface for RMII or MII mode */ @@ -1346,10 +1347,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Get MAC address from current HW setting (POR state is all zeros) */ - __lpc_get_mac(pldat, ndev->dev_addr); + __lpc_get_mac(pldat, addr); + eth_hw_addr_set(ndev, addr); if (!is_valid_ether_addr(ndev->dev_addr)) { - of_get_mac_address(np, ndev->dev_addr); + of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ec3e558f890eeb..71d234291fc576 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(skaddr->sa_data)) { ret_val = -EADDRNOTAVAIL; } else { - memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, skaddr->sa_data); memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len); pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); ret_val = 0; @@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, goto err_free_adapter; } - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { /* * If the MAC is invalid (or just missing), display a warning diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 1a6336a56d3da9..9c408328be0db4 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -592,6 +592,7 @@ static int hamachi_init_one(struct pci_dev *pdev, void *ring_space; dma_addr_t ring_dma; int ret = -ENOMEM; + u8 addr[ETH_ALEN]; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -628,8 +629,8 @@ static int hamachi_init_one(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); for (i = 0; i < 6; i++) - dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i) - : readb(ioaddr + StationAddr + i); + addr[i] = read_eeprom(ioaddr, 4 + i); + eth_hw_addr_set(dev, addr); #if ! 
defined(final_version) if (hamachi_debug > 4) diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index f5cd8f51be7c46..12105f62cbddb7 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev, #else int bar = 1; #endif + u8 addr[ETH_ALEN]; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev, if (drv_flags & DontUseEeprom) for (i = 0; i < 6; i++) - dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i); + addr[i] = ioread8(ioaddr + StnAddr + i); else { int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0); for (i = 0; i < 6; i++) - dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i); + addr[i] = read_eeprom(ioaddr, ee_offset + i); } + eth_hw_addr_set(dev, addr); /* Reset the chip. */ iowrite32(0x80000000, ioaddr + DMACtrl); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 7e096b2888b926..f0ace3a0e85cb7 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); adr0 = dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | @@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENODEV; goto out; } - memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); + eth_hw_addr_set(dev, mac->mac_addr); ret = mac_to_intf(mac); if (ret < 0) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 66204106f83e3d..5e25411ff02fb6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -19,6 +19,7 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 #define DEVCMD_TIMEOUT 10 +#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) #define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */ #define NORMAL_PPB 1000000000 /* one billion parts per billion */ @@ -69,8 +70,13 @@ struct ionic_admin_ctx { }; int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); -int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err); +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg); int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err); + int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_set_dma_mask(struct ionic *ionic); int ionic_setup(struct ionic *ionic); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index 39f59849720d66..c5821702756434 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index); 
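[Editor's note] The do_msg flag added to ionic_adminq_wait() gives callers that expect certain failures a way to keep the generic netdev_err() quiet and report in their own terms; the rx_filter code later in this patch uses the _nomsg variant and treats -ENOSPC as a soft condition. A sketch of that calling pattern; example_post() is a hypothetical wrapper, while the ionic symbols are the ones declared in the ionic.h hunk above.

/* Sketch only: a caller that tolerates -ENOSPC quietly and reports
 * every other failure itself, mirroring the rx_filter add path below.
 */
static int example_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
        int err;

        err = ionic_adminq_post_wait_nomsg(lif, ctx);   /* no netdev_err() on failure */
        if (err == -ENOSPC)
                return 0;       /* expected: retry after something is deleted */
        if (err)
                ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
                                              ctx->comp.comp.status, err);
        return err;
}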
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type); debugfs_create_u64("drop", 0400, q_dentry, &q->drop); - debugfs_create_u64("stop", 0400, q_dentry, &q->stop); - debugfs_create_u64("wake", 0400, q_dentry, &q->wake); debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops); debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops); @@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(netdev); +static int lif_filters_show(struct seq_file *seq, void *v) +{ + struct ionic_lif *lif = seq->private; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + seq_puts(seq, "id flow state type filter\n"); + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + switch (le16_to_cpu(f->cmd.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan)); + break; + case IONIC_RX_FILTER_MATCH_MAC: + seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n", + f->filter_id, f->flow_id, f->state, + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan), + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_STEER_PKTCLASS: + seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n", + f->filter_id, f->flow_id, f->state, + le64_to_cpu(f->cmd.pkt_class)); + break; + } + } + } + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_filters); + void ionic_debugfs_add_lif(struct ionic_lif *lif) { struct dentry *lif_dentry; @@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif) debugfs_create_file("netdev", 0400, lif->dentry, lif->netdev, &netdev_fops); + debugfs_create_file("filters", 0400, lif->dentry, + lif, &lif_filters_fops); } void ionic_debugfs_del_lif(struct ionic_lif *lif) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 0d6858ab511c6e..d57e80d44c9df8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, cq->done_color = !cq->done_color; cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); cq_info = &cq->info[cq->tail_idx]; - DEBUG_STATS_CQE_CNT(cq); if (++work_done >= work_to_do) break; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 8311086fb1f490..e5acf3bd62b2d5 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -220,9 +220,6 @@ struct ionic_queue { unsigned int num_descs; unsigned int max_sg_elems; u64 features; - u64 dbell_count; - u64 stop; - u64 wake; u64 drop; struct ionic_dev *idev; unsigned int type; @@ -269,7 +266,6 @@ struct ionic_cq { bool done_color; unsigned int num_descs; unsigned int desc_size; - u64 compl_count; void *base; dma_addr_t base_pa; } ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index c7d0e195d17607..4297ed9024c01a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ 
b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -82,22 +82,16 @@ int ionic_devlink_register(struct ionic *ionic) struct devlink_port_attrs attrs = {}; int err; - err = devlink_register(dl); - if (err) { - dev_warn(ionic->dev, "devlink_register failed: %d\n", err); - return err; - } - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); if (err) { dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); - devlink_unregister(dl); return err; } devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + devlink_register(dl); return 0; } @@ -105,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); - devlink_port_unregister(&ionic->dl_port); devlink_unregister(dl); + devlink_port_unregister(&ionic->dl_port); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 3de1a03839e250..c54d735b9e2e83 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -11,13 +11,6 @@ #include "ionic_ethtool.h" #include "ionic_stats.h" -static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IONIC_PRIV_F_SW_DBG_STATS BIT(0) - "sw-dbg-stats", -}; - -#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings) - static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) { u32 i; @@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_STATS: count = ionic_get_stats_count(lif); break; - case ETH_SS_PRIV_FLAGS: - count = IONIC_PRIV_FLAGS_COUNT; - break; } return count; } @@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev, case ETH_SS_STATS: ionic_get_stats_strings(lif, buf); break; - case ETH_SS_PRIV_FLAGS: - memcpy(buf, ionic_priv_flags_strings, - IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN); - break; } } @@ -228,8 +214,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev, break; } - bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(ks->link_modes.advertising, ks->link_modes.supported); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); @@ -691,28 +676,6 @@ static int ionic_set_channels(struct net_device *netdev, return err; } -static u32 ionic_get_priv_flags(struct net_device *netdev) -{ - struct ionic_lif *lif = netdev_priv(netdev); - u32 priv_flags = 0; - - if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - priv_flags |= IONIC_PRIV_F_SW_DBG_STATS; - - return priv_flags; -} - -static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct ionic_lif *lif = netdev_priv(netdev); - - clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); - if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS) - set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); - - return 0; -} - static int ionic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { @@ -1013,8 +976,6 @@ static const struct ethtool_ops ionic_ethtool_ops = { .get_strings = ionic_get_strings, .get_ethtool_stats = ionic_get_stats, .get_sset_count = ionic_get_sset_count, - .get_priv_flags = ionic_get_priv_flags, - .set_priv_flags = ionic_set_priv_flags, .get_rxnfc = ionic_get_rxnfc, .get_rxfh_indir_size = ionic_get_rxfh_indir_size, .get_rxfh_key_size = ionic_get_rxfh_key_size, 
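[Editor's note] ocelot, nfp and ionic all make the same devlink change in this section: devlink_register() moves to the very end of probe, its return value is no longer checked, and devlink_unregister() becomes the first step of remove, so user space only ever sees a fully constructed instance. A sketch of the resulting ordering; struct foo, foo_setup() and foo_teardown() are placeholders for the driver-specific work.

#include <net/devlink.h>

struct foo { int dummy; };                              /* hypothetical driver priv */

static int foo_setup(struct foo *priv) { return 0; }   /* placeholder */
static void foo_teardown(struct foo *priv) { }         /* placeholder */

static int foo_probe(struct foo *priv)
{
        struct devlink *devlink = priv_to_devlink(priv);
        int err;

        err = foo_setup(priv);
        if (err)
                return err;

        /* Last step: only a fully set up instance becomes visible. */
        devlink_register(devlink);
        return 0;
}

static void foo_remove(struct foo *priv)
{
        /* First step: hide the instance before tearing anything down. */
        devlink_unregister(priv_to_devlink(priv));
        foo_teardown(priv);
}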
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7f3322ce044c7c..63f8a8163b5f10 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) return ionic_adminq_post_wait(lif, &ctx); } -static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) +static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err) { struct ionic_queue *q; - struct ionic_lif *lif; - int err = 0; struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) }, }; - if (!qcq) + if (!qcq) { + netdev_err(lif->netdev, "%s: bad qcq\n", __func__); return -ENXIO; + } q = &qcq->q; - lif = q->lif; if (qcq->flags & IONIC_QCQ_F_INTR) { struct ionic_dev *idev = &lif->ionic->idev; @@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) napi_disable(&qcq->napi); } - if (send_to_hw) { - ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); - ctx.cmd.q_control.type = q->type; - ctx.cmd.q_control.index = cpu_to_le32(q->index); - dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", - ctx.cmd.q_control.index, ctx.cmd.q_control.type); + /* If there was a previous fw communcation error, don't bother with + * sending the adminq command and just return the same error value. + */ + if (fw_err == -ETIMEDOUT || fw_err == -ENXIO) + return fw_err; - err = ionic_adminq_post_wait(lif, &ctx); - } + ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); + ctx.cmd.q_control.type = q->type; + ctx.cmd.q_control.index = cpu_to_le32(q->index); + dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); - return err; + return ionic_adminq_post_wait(lif, &ctx); } static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) @@ -1241,137 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev, ns->tx_errors = ns->tx_aborted_errors; } -int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) -{ - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_add = { - .opcode = IONIC_CMD_RX_FILTER_ADD, - .lif_index = cpu_to_le16(lif->index), - .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), - }, - }; - int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - bool mc = is_multicast_ether_addr(addr); - struct ionic_rx_filter *f; - int err = 0; - - memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - - spin_lock_bh(&lif->rx_filters.lock); - f = ionic_rx_filter_by_addr(lif, addr); - if (f) { - /* don't bother if we already have it and it is sync'd */ - if (f->state == IONIC_FILTER_STATE_SYNCED) { - spin_unlock_bh(&lif->rx_filters.lock); - return 0; - } - - /* mark preemptively as sync'd to block any parallel attempts */ - f->state = IONIC_FILTER_STATE_SYNCED; - } else { - /* save as SYNCED to catch any DEL requests while processing */ - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - } - spin_unlock_bh(&lif->rx_filters.lock); - if (err) - return err; - - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); - - /* Don't bother with the write to FW if we know there's no room, - * we can try again on the next sync attempt. 
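[Editor's note] With the new ionic_qcq_disable() prototype, the caller passes in the error from the previous disable attempt; after a fatal firmware error (-ETIMEDOUT or -ENXIO) the function returns that error without posting another adminq command. Condensed from the ionic_txrx_disable() changes below, as a sketch of the chaining:

static void txrx_disable_sketch(struct ionic_lif *lif)
{
        unsigned int i;
        int err = 0;

        /* Each call sees the error from the one before it; once a fatal
         * firmware error is seen, ionic_qcq_disable() stops talking to
         * the adminq and simply propagates that same error.
         */
        for (i = 0; i < lif->nxqs; i++)
                err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
        for (i = 0; i < lif->nxqs; i++)
                err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
}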
- */ - if ((lif->nucast + lif->nmcast) >= nfilters) - err = -ENOSPC; - else - err = ionic_adminq_post_wait(lif, &ctx); - - spin_lock_bh(&lif->rx_filters.lock); - if (err && err != -EEXIST) { - /* set the state back to NEW so we can try again later */ - f = ionic_rx_filter_by_addr(lif, addr); - if (f && f->state == IONIC_FILTER_STATE_SYNCED) { - f->state = IONIC_FILTER_STATE_NEW; - set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); - } - - spin_unlock_bh(&lif->rx_filters.lock); - - if (err == -ENOSPC) - return 0; - else - return err; - } - - if (mc) - lif->nmcast++; - else - lif->nucast++; - - f = ionic_rx_filter_by_addr(lif, addr); - if (f && f->state == IONIC_FILTER_STATE_OLD) { - /* Someone requested a delete while we were adding - * so update the filter info with the results from the add - * and the data will be there for the delete on the next - * sync cycle. - */ - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_OLD); - } else { - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - } - - spin_unlock_bh(&lif->rx_filters.lock); - - return err; -} - -int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) -{ - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_del = { - .opcode = IONIC_CMD_RX_FILTER_DEL, - .lif_index = cpu_to_le16(lif->index), - }, - }; - struct ionic_rx_filter *f; - int state; - int err; - - spin_lock_bh(&lif->rx_filters.lock); - f = ionic_rx_filter_by_addr(lif, addr); - if (!f) { - spin_unlock_bh(&lif->rx_filters.lock); - return -ENOENT; - } - - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", - addr, f->filter_id); - - state = f->state; - ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); - ionic_rx_filter_free(lif, f); - - if (is_multicast_ether_addr(addr) && lif->nmcast) - lif->nmcast--; - else if (!is_multicast_ether_addr(addr) && lif->nucast) - lif->nucast--; - - spin_unlock_bh(&lif->rx_filters.lock); - - if (state != IONIC_FILTER_STATE_NEW) { - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; - } - - return 0; -} - static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); @@ -1407,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif) rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; rx_mode |= (nd_flags & IFF_ALLMULTI) ? 
IONIC_RX_MODE_F_ALLMULTI : 0; - /* sync the mac filters */ + /* sync the filters */ ionic_rx_filter_sync(lif); /* check for overflow state @@ -1417,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif) * to see if we can disable NIC PROMISC */ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - if ((lif->nucast + lif->nmcast) >= nfilters) { + + if (((lif->nucast + lif->nmcast) >= nfilters) || + (lif->max_vlans && lif->nvlans >= lif->max_vlans)) { rx_mode |= IONIC_RX_MODE_F_PROMISC; rx_mode |= IONIC_RX_MODE_F_ALLMULTI; - lif->uc_overflow = true; - lif->mc_overflow = true; - } else if (lif->uc_overflow) { - lif->uc_overflow = false; - lif->mc_overflow = false; + } else { if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; if (!(nd_flags & IFF_ALLMULTI)) @@ -1809,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_add = { - .opcode = IONIC_CMD_RX_FILTER_ADD, - .lif_index = cpu_to_le16(lif->index), - .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), - .vlan.vlan = cpu_to_le16(vid), - }, - }; int err; - netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); - err = ionic_adminq_post_wait(lif, &ctx); + err = ionic_lif_vlan_add(lif, vid); if (err) return err; - spin_lock_bh(&lif->rx_filters.lock); - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - spin_unlock_bh(&lif->rx_filters.lock); + ionic_lif_rx_mode(lif); - return err; + return 0; } static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_del = { - .opcode = IONIC_CMD_RX_FILTER_DEL, - .lif_index = cpu_to_le16(lif->index), - }, - }; - struct ionic_rx_filter *f; - - spin_lock_bh(&lif->rx_filters.lock); - - f = ionic_rx_filter_by_vlan(lif, vid); - if (!f) { - spin_unlock_bh(&lif->rx_filters.lock); - return -ENOENT; - } + int err; - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", - vid, f->filter_id); + err = ionic_lif_vlan_del(lif, vid); + if (err) + return err; - ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); - ionic_rx_filter_free(lif, f); - spin_unlock_bh(&lif->rx_filters.lock); + ionic_lif_rx_mode(lif); - return ionic_adminq_post_wait(lif, &ctx); + return 0; } int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, @@ -1953,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif) if (lif->txqcqs) { for (i = 0; i < lif->nxqs; i++) - err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->txqcqs[i], err); } if (lif->hwstamp_txq) - err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); if (lif->rxqcqs) { for (i = 0; i < lif->nxqs; i++) - err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); } if (lif->hwstamp_rxq) - err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); ionic_lif_quiesce(lif); } @@ -2165,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) err = ionic_qcq_enable(lif->txqcqs[i]); if (err) { - derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], 
err); goto err_out; } } @@ -2187,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif) err_out_hwstamp_tx: if (lif->hwstamp_rxq) - derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); err_out_hwstamp_rx: i = lif->nxqs; err_out: while (i--) { - derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); - derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); } return err; @@ -2896,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic) snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); + mutex_init(&lif->queue_lock); + mutex_init(&lif->config_lock); + spin_lock_init(&lif->adminq_lock); spin_lock_init(&lif->deferred.lock); @@ -2909,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic) if (!lif->info) { dev_err(dev, "Failed to allocate lif info, aborting\n"); err = -ENOMEM; - goto err_out_free_netdev; + goto err_out_free_mutex; } ionic_debugfs_add_lif(lif); @@ -2944,6 +2786,9 @@ int ionic_lif_alloc(struct ionic *ionic) dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); lif->info = NULL; lif->info_pa = 0; +err_out_free_mutex: + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); err_out_free_netdev: free_netdev(lif->netdev); lif = NULL; @@ -2974,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) netif_device_detach(lif->netdev); + mutex_lock(&lif->queue_lock); if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); - mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); - mutex_unlock(&lif->queue_lock); } if (netif_running(lif->netdev)) { @@ -2989,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) ionic_reset(ionic); ionic_qcqs_free(lif); + mutex_unlock(&lif->queue_lock); + dev_info(ionic->dev, "FW Down: LIFs stopped\n"); } @@ -3012,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) err = ionic_port_init(ionic); if (err) goto err_out; + + mutex_lock(&lif->queue_lock); + err = ionic_qcqs_alloc(lif); if (err) - goto err_out; + goto err_unlock; err = ionic_lif_init(lif); if (err) @@ -3035,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) goto err_txrx_free; } + mutex_unlock(&lif->queue_lock); + clear_bit(IONIC_LIF_F_FW_RESET, lif->state); ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); @@ -3051,6 +2902,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) ionic_lif_deinit(lif); err_qcqs_free: ionic_qcqs_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); err_out: dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); } @@ -3084,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif) kfree(lif->dbid_inuse); lif->dbid_inuse = NULL; + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); + /* free netdev & lif */ ionic_debugfs_del_lif(lif); free_netdev(lif->netdev); @@ -3106,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif) ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); - mutex_destroy(&lif->config_lock); - mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -3273,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif) return err; lif->hw_index = le16_to_cpu(comp.hw_index); - mutex_init(&lif->queue_lock); - mutex_init(&lif->config_lock); /* now that we have the hw_index we can figure out our 
doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 4915184f3efbe9..9f7ab2f17f93c0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -14,9 +14,6 @@ #define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ #define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ -#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) -#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) - #define ADD_ADDR true #define DEL_ADDR false #define CAN_SLEEP true @@ -37,7 +34,6 @@ struct ionic_tx_stats { u64 clean; u64 linearize; u64 crc32_csum; - u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR]; u64 dma_map_err; u64 hwstamp_valid; u64 hwstamp_invalid; @@ -48,7 +44,6 @@ struct ionic_rx_stats { u64 bytes; u64 csum_none; u64 csum_complete; - u64 buffers_posted; u64 dropped; u64 vlan_stripped; u64 csum_error; @@ -65,11 +60,6 @@ struct ionic_rx_stats { #define IONIC_QCQ_F_RX_STATS BIT(4) #define IONIC_QCQ_F_NOTIFYQ BIT(5) -struct ionic_napi_stats { - u64 poll_count; - u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR]; -}; - struct ionic_qcq { void *q_base; dma_addr_t q_base_pa; @@ -85,7 +75,6 @@ struct ionic_qcq { struct ionic_cq cq; struct ionic_intr_info intr; struct napi_struct napi; - struct ionic_napi_stats napi_stats; unsigned int flags; struct dentry *dentry; }; @@ -142,7 +131,6 @@ struct ionic_lif_sw_stats { enum ionic_lif_state_flags { IONIC_LIF_F_INITED, - IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FILTER_SYNC_NEEDED, @@ -201,11 +189,11 @@ struct ionic_lif { u16 rx_mode; u64 hw_features; bool registered; - bool mc_overflow; - bool uc_overflow; u16 lif_type; unsigned int nmcast; unsigned int nucast; + unsigned int nvlans; + unsigned int max_vlans; char name[IONIC_LIF_NAME_MAX_SZ]; union ionic_lif_identity *identity; @@ -350,37 +338,4 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, void ionic_lif_rx_mode(struct ionic_lif *lif); int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam); - -static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell) -{ - struct ionic_txq_desc *desc = &q->txq[q->head_idx]; - u8 num_sg_elems; - - q->dbell_count += dbell; - - num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) - & IONIC_TXQ_DESC_NSGE_MASK); - if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1)) - num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1; - - q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++; -} - -static inline void debug_stats_napi_poll(struct ionic_qcq *qcq, - unsigned int work_done) -{ - qcq->napi_stats.poll_count++; - - if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1)) - work_done = IONIC_MAX_NUM_NAPI_CNTR - 1; - - qcq->napi_stats.work_done_cntr[work_done]++; -} - -#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) -#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++) -#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell) -#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \ - debug_stats_napi_poll(qcq, work_done) - #endif /* _IONIC_LIF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 6f07bf509efedb..875f4ec42efee1 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -7,6 +7,7 @@ #include #include #include 
+#include #include "ionic.h" #include "ionic_bus.h" @@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif) spin_unlock_irqrestore(&lif->adminq_lock, irqflags); } +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err) +{ + netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(status), err); +} + static int ionic_adminq_check_err(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, - bool timeout) + const bool timeout, + const bool do_msg) { - struct net_device *netdev = lif->netdev; - const char *opcode_str; - const char *status_str; int err = 0; if (ctx->comp.comp.status || timeout) { - opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode); - status_str = ionic_error_to_str(ctx->comp.comp.status); err = timeout ? -ETIMEDOUT : ionic_error_to_errno(ctx->comp.comp.status); - netdev_err(netdev, "%s (%d) failed: %s (%d)\n", - opcode_str, ctx->cmd.cmd.opcode, - timeout ? "TIMEOUT" : status_str, err); + if (do_msg) + ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode, + ctx->comp.comp.status, err); if (timeout) ionic_adminq_flush(lif); @@ -297,24 +302,52 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) return err; } -int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err) +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg) { struct net_device *netdev = lif->netdev; + unsigned long time_limit; + unsigned long time_start; + unsigned long time_done; unsigned long remaining; const char *name; + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + if (err) { - if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { - name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) netdev_err(netdev, "Posting of %s (%d) failed: %d\n", name, ctx->cmd.cmd.opcode, err); - } return err; } - remaining = wait_for_completion_timeout(&ctx->work, - HZ * (ulong)DEVCMD_TIMEOUT); - return ionic_adminq_check_err(lif, ctx, (remaining == 0)); + time_start = jiffies; + time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT; + do { + remaining = wait_for_completion_timeout(&ctx->work, + IONIC_ADMINQ_TIME_SLICE); + + /* check for done */ + if (remaining) + break; + + /* interrupt the wait if FW stopped */ + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + if (do_msg) + netdev_err(netdev, "%s (%d) interrupted, FW in reset\n", + name, ctx->cmd.cmd.opcode); + return -ENXIO; + } + + } while (time_before(jiffies, time_limit)); + time_done = jiffies; + + dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n", + __func__, jiffies_to_msecs(time_done - time_start)); + + return ionic_adminq_check_err(lif, ctx, + time_after_eq(time_done, time_limit), + do_msg); } int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) @@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) err = ionic_adminq_post(lif, ctx); - return ionic_adminq_wait(lif, ctx, err); + return ionic_adminq_wait(lif, ctx, err, true); +} + +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + int err; + + err = ionic_adminq_post(lif, ctx); + + return ionic_adminq_wait(lif, ctx, err, false); } static void ionic_dev_cmd_clean(struct ionic *ionic) @@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic) } mutex_unlock(&ionic->dev_cmd_lock); - dev_info(ionic->dev, "FW: %s\n", 
idev->dev_info.fw_version); - if (err) { - dev_err(ionic->dev, "Cannot identify ionic: %dn", err); + dev_err(ionic->dev, "Cannot identify ionic: %d\n", err); goto err_out; } + if (isprint(idev->dev_info.fw_version[0]) && + isascii(idev->dev_info.fw_version[0])) + dev_info(ionic->dev, "FW: %.*s\n", + (int)(sizeof(idev->dev_info.fw_version) - 1), + idev->dev_info.fw_version); + else + dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n", + (u8)idev->dev_info.fw_version[0], + (u8)idev->dev_info.fw_version[1], + (u8)idev->dev_info.fw_version[2], + (u8)idev->dev_info.fw_version[3]); + err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC, &ionic->ident.lif); if (err) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index eed2db69d7083f..887046838b3b18 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm) spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) @@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_settime64(struct ptp_clock_info *info, @@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info, spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_gettimex64(struct ptp_clock_info *info, @@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info) spin_unlock_irqrestore(&phc->lock, irqflags); - ionic_adminq_wait(phc->lif, &ctx, err); + ionic_adminq_wait(phc->lif, &ctx, err, true); return phc->aux_work_delay; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 69728f9013cbbf..f6e785f949f9f9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) return NULL; } +static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); + case IONIC_RX_FILTER_MATCH_MAC: + return ionic_rx_filter_by_addr(lif, ac->mac.addr); + default: + netdev_err(lif->netdev, "unsupported filter match %d", + le16_to_cpu(ac->match)); + return NULL; + } +} + int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) { struct ionic_rx_filter *f; @@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) return 0; } +static int ionic_lif_filter_add(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + }; + struct ionic_rx_filter *f; + int nfilters; + int err = 0; + + ctx.cmd.rx_filter_add = *ac; + ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD, + 
ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index), + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + spin_unlock_bh(&lif->rx_filters.lock); + if (err) + return err; + + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. + * Since the FW doesn't have a way to tell us the vlan limit, + * we start max_vlans at 0 until we hit the ENOSPC error. + */ + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n", + __func__, ctx.cmd.rx_filter_add.vlan.vlan); + if (lif->max_vlans && lif->nvlans >= lif->max_vlans) + err = -ENOSPC; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n", + __func__, ctx.cmd.rx_filter_add.mac.addr); + nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + break; + } + + if (err != -ENOSPC) + err = ionic_adminq_post_wait_nomsg(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) { + f->state = IONIC_FILTER_STATE_NEW; + + /* If -ENOSPC we won't waste time trying to sync again + * until there is a delete that might make room + */ + if (err != -ENOSPC) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + if (err == -ENOSPC) { + if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN) + lif->max_vlans = lif->nvlans; + return 0; + } + + ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode, + ctx.comp.comp.status, err); + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n", + ctx.cmd.rx_filter_add.vlan.vlan); + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n", + ctx.cmd.rx_filter_add.mac.addr); + break; + } + + return err; + } + + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + lif->nvlans++; + break; + case IONIC_RX_FILTER_MATCH_MAC: + if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr)) + lif->nmcast++; + else + lif->nucast++; + break; + } + + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. 
+ */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; +} + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_add(lif, &ac); +} + +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_add(lif, &ac); +} + +static int ionic_lif_filter_del(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + int state; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, ac); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n", + __func__, ac->vlan.vlan, f->filter_id); + lif->nvlans--; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n", + __func__, ac->mac.addr, f->filter_id); + if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast) + lif->nucast--; + break; + } + + state = f->state; + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, f); + + spin_unlock_bh(&lif->rx_filters.lock); + + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; + } + + return 0; +} + +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_del(lif, &ac); +} + +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_del(lif, &ac); +} + struct sync_item { struct list_head list; struct ionic_rx_filter f; @@ -340,14 +577,14 @@ void ionic_rx_filter_sync(struct ionic_lif *lif) * they can clear room for some new filters */ list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { - (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr); + (void)ionic_lif_filter_del(lif, &sync_item->f.cmd); list_del(&sync_item->list); devm_kfree(dev, sync_item); } list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { - (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); + (void)ionic_lif_filter_add(lif, &sync_item->f.cmd); list_del(&sync_item->list); devm_kfree(dev, sync_item); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index a66e35f0833bb4..87b2666f248b0f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -44,5 +44,7 
@@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); void ionic_rx_filter_sync(struct ionic_lif *lif); int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); int ionic_rx_filters_need_sync(struct ionic_lif *lif); +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid); +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index c14de5fcedea37..fd6806b4a1b98d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = { IONIC_RX_STAT_DESC(vlan_stripped), }; -static const struct ionic_stat_desc ionic_txq_stats_desc[] = { - IONIC_TX_Q_STAT_DESC(stop), - IONIC_TX_Q_STAT_DESC(wake), - IONIC_TX_Q_STAT_DESC(drop), - IONIC_TX_Q_STAT_DESC(dbell_count), -}; - -static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = { - IONIC_CQ_STAT_DESC(compl_count), -}; - -static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = { - IONIC_INTR_STAT_DESC(rearm_count), -}; - -static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { - IONIC_NAPI_STAT_DESC(poll_count), -}; #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) #define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc) #define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) #define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc) -#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc) -#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc) -#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc) -#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc) #define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) @@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) total += tx_queues * IONIC_NUM_TX_STATS; total += rx_queues * IONIC_NUM_RX_STATS; - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - /* tx debug stats */ - total += tx_queues * (IONIC_NUM_DBG_CQ_STATS + - IONIC_NUM_TX_Q_STATS + - IONIC_NUM_DBG_INTR_STATS + - IONIC_MAX_NUM_SG_CNTR); - - /* rx debug stats */ - total += rx_queues * (IONIC_NUM_DBG_CQ_STATS + - IONIC_NUM_DBG_INTR_STATS + - IONIC_NUM_DBG_NAPI_STATS + - IONIC_MAX_NUM_NAPI_CNTR); - } - return total; } @@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf, for (i = 0; i < IONIC_NUM_TX_STATS; i++) ethtool_sprintf(buf, "tx_%d_%s", q_num, ionic_tx_stats_desc[i].name); - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) - ethtool_sprintf(buf, "txq_%d_%s", q_num, - ionic_txq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) - ethtool_sprintf(buf, "txq_%d_cq_%s", q_num, - ionic_dbg_cq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) - ethtool_sprintf(buf, "txq_%d_intr_%s", q_num, - ionic_dbg_intr_stats_desc[i].name); - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) - ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i); } static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, @@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, for (i = 0; i < IONIC_NUM_RX_STATS; i++) ethtool_sprintf(buf, "rx_%d_%s", 
q_num, ionic_rx_stats_desc[i].name); - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num, - ionic_dbg_cq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num, - ionic_dbg_intr_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num, - ionic_dbg_napi_stats_desc[i].name); - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) - ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i); } static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) @@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, int q_num) { struct ionic_tx_stats *txstats; - struct ionic_qcq *txqcq; int i; txstats = &lif->txqstats[q_num]; @@ -359,38 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]); (*buf)++; } - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - txqcq = lif->txqcqs[q_num]; - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->q, - &ionic_txq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - **buf = txstats->sg_cntr[i]; - (*buf)++; - } } static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, int q_num) { struct ionic_rx_stats *rxstats; - struct ionic_qcq *rxqcq; int i; rxstats = &lif->rxqstats[q_num]; @@ -399,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]); (*buf)++; } - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - rxqcq = lif->rxqcqs[q_num]; - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, - &ionic_dbg_napi_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { - **buf = rxqcq->napi_stats.work_done_cntr[i]; - (*buf)++; - } } static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 37c39581b65999..94384f5d2a22d2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -14,8 +14,6 @@ static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { - DEBUG_STATS_TXQ_POST(q, ring_dbell); - ionic_q_post(q, ring_dbell, cb_func, cb_arg); } @@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { ionic_q_post(q, ring_dbell, cb_func, cb_arg); - - 
DEBUG_STATS_RX_BUFF_CNT(q); } static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) @@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); - return work_done; } @@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); - return work_done; } @@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done + rx_work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); - DEBUG_STATS_NAPI_POLL(qcq, tx_work_done); - return rx_work_done; } @@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q, } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) { netif_wake_subqueue(q->lif->netdev, qi); - q->wake++; } desc_info->bytes = skb->len; @@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) if (unlikely(!ionic_q_has_space(q, ndescs))) { netif_stop_subqueue(q->lif->netdev, q->index); - q->stop++; stopped = 1; /* Might race with ionic_tx_clean, check again */ @@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; err_out_drop: - q->stop++; q->drop++; dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 344ea11434549f..07dd3c3b177112 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) u64 mac_addr; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + u8 addr[ETH_ALEN]; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) @@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) p = (unsigned char *)&mac_addr; for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); + addr[i] = *(p + 5 - i); + eth_hw_addr_set(netdev, addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); @@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p) } memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); adapter->macaddr_set(adapter, addr->sa_data); if (netif_running(netdev)) { @@ -842,7 +844,7 @@ netxen_check_options(struct netxen_adapter *adapter) adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); /* Get FW Mini Coredump template and store it */ - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (adapter->mdump.md_template == NULL || adapter->fw_version > prev_fw_version) { kfree(adapter->mdump.md_template); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d58e021614cd0e..d613095b78e09a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -23,6 +23,8 @@ #include #include "qed_debug.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" +#include "qed_mfw_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; @@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) } #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ - ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \ + 
((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \ ~((1 << (p_hwfn->cdev->cache_shift)) - 1)) -#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++) +#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++) #define D_TRINE(val, cond1, cond2, true1, true2, def) \ - (val == (cond1) ? true1 : \ - (val == (cond2) ? true2 : def)) + ((val) == (cond1) ? true1 : \ + ((val) == (cond2) ? true2 : def)) /* forward */ struct qed_ptt_pool; @@ -510,7 +512,7 @@ enum qed_hsi_def_type { struct qed_simd_fp_handler { void *token; - void (*func)(void *); + void (*func)(void *cookie); }; enum qed_slowpath_wq_flag { @@ -703,8 +705,6 @@ struct qed_dev { #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev)) #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) #define QED_IS_K2(dev) QED_IS_AH(dev) -#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) -#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5) u16 vendor_id; @@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); #define NUM_OF_BTB_BLOCKS(dev) \ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS) - /** - * @brief qed_concrete_to_sw_fid - get the sw function id from - * the concrete value. + * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. * - * @param concrete_fid + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. * - * @return inline u8 + * Return: inline u8. */ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) @@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, } #define PKT_LB_TC 9 -#define MAX_NUM_VOQS_E4 20 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, @@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); -#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0]) #define QED_IS_CMT(dev) ((dev)->num_hwfns > 1) /* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */ #define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin]) @@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb, #define PQ_FLAGS_LLT (BIT(7)) #define PQ_FLAGS_MTC (BIT(8)) -/* physical queue index for cm context intialization */ +/* physical queue index for cm context initialization */ u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); @@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); +#define GET_GTT_REG_ADDR(__base, __offset, __idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx))) + +#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx))) + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) -#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\ - (cdev->regview) + \ - (offset)) +#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\ + ((cdev)->regview) + \ + (offset))) #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset)) #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset)) @@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct 
qed_hwfn *p_hwfn); #define DOORBELL(cdev, db_addr, val) \ writel((u32)val, (void __iomem *)((u8 __iomem *)\ - (cdev->doorbells) + (db_addr))) + ((cdev)->doorbells) + (db_addr))) #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ qed_device_num_ports((_p_hwfn)->cdev)) @@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port); void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port); void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port); void qed_llh_clear_all_filters(struct qed_dev *cdev); +unsigned long qed_get_epoch_time(void); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index cb0f2a3a1ac98a..452494f8c298ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -54,22 +54,22 @@ /* connection context union */ union conn_context { - struct e4_core_conn_context core_ctx; - struct e4_eth_conn_context eth_ctx; - struct e4_iscsi_conn_context iscsi_ctx; - struct e4_fcoe_conn_context fcoe_ctx; - struct e4_roce_conn_context roce_ctx; + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; + struct roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { - struct e4_iscsi_task_context iscsi_ctx; - struct e4_fcoe_task_context fcoe_ctx; + struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { - struct e4_rdma_task_context roce_ctx; + struct rdma_task_context roce_ctx; }; struct src_ent { diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8adb7ed0c12db5..168ce2c503859e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -28,24 +28,23 @@ struct qed_tid_mem { }; /** - * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * qed_cxt_get_cid_info(): Returns the context info for a specific cid. * + * @p_hwfn: HW device data. + * @p_info: In/out. * - * @param p_hwfn - * @param p_info in/out - * - * @return int + * Return: Int. */ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); /** - * @brief qed_cxt_get_tid_mem_info + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. * - * @param p_hwfn - * @param p_info + * @p_hwfn: HW device data. + * @p_info: in/out. * - * @return int + * Return: int. */ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info); @@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid); /** - * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. * - * @param p_hwfn - * @param rdma_tasks - requested maximum - * @return int + * Return: int. */ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** - * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. * - * @param p_hwfn - * @param last_line + * @p_hwfn: HW device data. + * @last_line: Last_line. 
* - * @return int + * Return: Int */ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); /** - * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. * - * @param p_hwfn - * @param used_lines + * Return: Int. */ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** - * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_free + * qed_cxt_mngr_free() - Context manager free. * - * @param p_hwfn + * @p_hwfn: HW device data. * + * Return: Void. */ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * qed_cxt_mngr_setup(): Reset the acquired CIDs. * - * @param p_hwfn + * @p_hwfn: HW device data. */ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. - * + * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * qed_qm_init_pf(): Initialize the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. * - * @param p_hwfn - * @param p_ptt - * @param is_pf_loading + * Return: Void. */ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading); /** - * @brief Reconfigures QM pf on the fly + * qed_qm_reconf(): Reconfigures QM pf on the fly. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_CXT_PF_CID (0xff) /** - * @brief qed_cxt_release - Release a cid + * qed_cxt_release_cid(): Release a cid. * - * @param p_hwfn - * @param cid + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); /** - * @brief qed_cxt_release - Release a cid belonging to a vf-queue + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @param p_hwfn - * @param cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * Return: Void. 
*/ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); /** - * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. * - * @param p_hwfn - * @param type - * @param p_cid + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. * - * @return int + * Return: Int. */ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid); /** - * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type - * for a vf-queue + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. * - * @param p_hwfn - * @param type - * @param p_cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @return int + * Return: Int. */ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid); @@ -334,7 +346,10 @@ struct qed_cxt_mngr { /* Maximal number of L2 steering filters */ u32 arfs_count; - u8 task_type_id; + u16 iscsi_task_pages; + u16 fcoe_task_pages; + u16 roce_task_pages; + u16 eth_task_pages; u16 task_ctx_size; u16 conn_ctx_size; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h new file mode 100644 index 00000000000000..9d5a0c9e1ca050 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -0,0 +1,1491 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ +#ifndef _QED_DBG_HSI_H +#define _QED_DBG_HSI_H + +#include +#include +#include +#include +#include +#include +#include + +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_MSTAT, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, + BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, + MAX_BLOCK_ID +}; + +/* binary debug buffer types */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE, + BIN_BUF_DBG_DUMP_REG, + BIN_BUF_DBG_DUMP_MEM, + BIN_BUF_DBG_IDLE_CHK_REGS, + 
BIN_BUF_DBG_IDLE_CHK_IMMS, + BIN_BUF_DBG_IDLE_CHK_RULES, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, + BIN_BUF_DBG_ATTN_BLOCKS, + BIN_BUF_DBG_ATTN_REGS, + BIN_BUF_DBG_ATTN_INDEXES, + BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BLOCKS, + BIN_BUF_DBG_BLOCKS_CHIP_DATA, + BIN_BUF_DBG_BUS_LINES, + BIN_BUF_DBG_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, + BIN_BUF_DBG_RESET_REGS, + BIN_BUF_DBG_PARSING_STRINGS, + MAX_BIN_DBG_BUFFER_TYPE +}; + +/* Attention bit mapping */ +struct dbg_attn_bit_mapping { + u16 data; +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 +}; + +/* Attention block per-type data */ +struct dbg_attn_block_type_data { + u16 names_offset; + u16 reserved1; + u8 num_regs; + u8 reserved2; + u16 regs_offset; + +}; + +/* Block attentions */ +struct dbg_attn_block { + struct dbg_attn_block_type_data per_type_data[2]; +}; + +/* Attention register result */ +struct dbg_attn_reg_result { + u32 data; +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 + u16 block_attn_offset; + u16 reserved; + u32 sts_val; + u32 mask_val; +}; + +/* Attention block result */ +struct dbg_attn_block_result { + u8 block_id; + u8 data; +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 + u16 names_offset; + struct dbg_attn_reg_result reg_results[15]; +}; + +/* Mode header */ +struct dbg_mode_hdr { + u16 data; +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* Attention register */ +struct dbg_attn_reg { + struct dbg_mode_hdr mode; + u16 block_attn_offset; + u32 data; +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 + u32 sts_clr_address; + u32 mask_address; +}; + +/* Attention types */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + +/* Block debug data */ +struct dbg_block { + u8 name[15]; + u8 associated_storm_letter; +}; + +/* Chip-specific block debug data */ +struct dbg_block_chip { + u8 flags; +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 + u8 dbg_client_id; + u8 reset_reg_id; + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode; + u16 reserved1; + u8 reserved2; + u8 num_of_dbg_bus_lines; + u16 dbg_bus_lines_offset; + u32 dbg_select_reg_addr; + u32 dbg_dword_enable_reg_addr; + u32 dbg_shift_reg_addr; + u32 dbg_force_valid_reg_addr; + u32 dbg_force_frame_reg_addr; +}; + +/* Chip-specific block user 
debug data */ +struct dbg_block_chip_user { + u8 num_of_dbg_bus_lines; + u8 has_latency_events; + u16 names_offset; +}; + +/* Block user debug data */ +struct dbg_block_user { + u8 name[16]; +}; + +/* Block Debug line data */ +struct dbg_bus_line { + u8 data; +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + +/* Condition header for registers dump */ +struct dbg_dump_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u8 block_id; /* block ID */ + u8 data_size; /* size in dwords of the data following this header */ +}; + +/* Memory data for registers dump */ +struct dbg_dump_mem { + u32 dword0; +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 + u32 dword1; +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +}; + +/* Register data for registers dump */ +struct dbg_dump_reg { + u32 data; +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 +}; + +/* Split header for registers dump */ +struct dbg_dump_split_hdr { + u32 hdr; +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +}; + +/* Condition header for idle check */ +struct dbg_idle_chk_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u16 data_size; /* size in dwords of the data following this header */ +}; + +/* Idle Check condition register */ +struct dbg_idle_chk_cond_reg { + u32 data; +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 + u16 num_entries; + u8 entry_size; + u8 start_entry; +}; + +/* Idle Check info register */ +struct dbg_idle_chk_info_reg { + u32 data; +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 + u16 size; /* register size in dwords */ + struct dbg_mode_hdr mode; /* Mode header */ +}; + +/* Idle Check register */ +union dbg_idle_chk_reg { + struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ + struct dbg_idle_chk_info_reg info_reg; /* info register */ +}; + +/* Idle Check result header */ +struct dbg_idle_chk_result_hdr { + u16 rule_id; /* Failing rule index */ + u16 mem_entry_id; /* Failing memory entry index */ + u8 num_dumped_cond_regs; /* number of dumped condition registers */ + u8 num_dumped_info_regs; /* number of dumped condition registers */ + u8 severity; /* from dbg_idle_chk_severity_types enum */ + u8 reserved; +}; + +/* Idle Check result 
register header */ +struct dbg_idle_chk_result_reg_hdr { + u8 data; +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 + u8 start_entry; /* index of the first checked entry */ + u16 size; /* register size in dwords */ +}; + +/* Idle Check rule */ +struct dbg_idle_chk_rule { + u16 rule_id; /* Idle Check rule ID */ + u8 severity; /* value from dbg_idle_chk_severity_types enum */ + u8 cond_id; /* Condition ID */ + u8 num_cond_regs; /* number of condition registers */ + u8 num_info_regs; /* number of info registers */ + u8 num_imms; /* number of immediates in the condition */ + u8 reserved1; + u16 reg_offset; /* offset of this rules registers in the idle check + * register array (in dbg_idle_chk_reg units). + */ + u16 imm_offset; /* offset of this rules immediate values in the + * immediate values array (in dwords). + */ +}; + +/* Idle Check rule parsing data */ +struct dbg_idle_chk_rule_parsing_data { + u32 data; +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +}; + +/* Idle check severity types */ +enum dbg_idle_chk_severity_types { + /* idle check failure should cause an error */ + IDLE_CHK_SEVERITY_ERROR, + /* idle check failure should cause an error only if theres no traffic */ + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, + /* idle check failure should cause a warning */ + IDLE_CHK_SEVERITY_WARNING, + MAX_DBG_IDLE_CHK_SEVERITY_TYPES +}; + +/* Reset register */ +struct dbg_reset_reg { + u32 data; +#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF +#define DBG_RESET_REG_ADDR_SHIFT 0 +#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 +#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 +#define DBG_RESET_REG_RESERVED_MASK 0x7F +#define DBG_RESET_REG_RESERVED_SHIFT 25 +}; + +/* Debug Bus block data */ +struct dbg_bus_block_data { + u8 enable_mask; + u8 right_shift; + u8 force_valid_mask; + u8 force_frame_mask; + u8 dword_mask; + u8 line_num; + u8 hw_id; + u8 flags; +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 +}; + +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + +/* Debug Bus constraint operation types */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ, + DBG_BUS_CONSTRAINT_OP_NE, + DBG_BUS_CONSTRAINT_OP_LT, + DBG_BUS_CONSTRAINT_OP_LTC, + DBG_BUS_CONSTRAINT_OP_LE, + DBG_BUS_CONSTRAINT_OP_LEC, + DBG_BUS_CONSTRAINT_OP_GT, + DBG_BUS_CONSTRAINT_OP_GTC, + DBG_BUS_CONSTRAINT_OP_GE, + DBG_BUS_CONSTRAINT_OP_GEC, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + +/* Debug Bus trigger state data */ +struct dbg_bus_trigger_state_data { + u8 msg_len; + u8 constraint_dword_mask; + u8 storm_id; + u8 reserved; +}; + +/* Debug Bus memory address */ +struct dbg_bus_mem_addr { + u32 lo; + u32 hi; +}; + +/* 
Debug Bus PCI buffer data */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ + struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ + u32 size; /* PCI buffer size in bytes */ +}; + +/* Debug Bus Storm EID range filter params */ +struct dbg_bus_storm_eid_range_params { + u8 min; /* Minimal event ID to filter on */ + u8 max; /* Maximal event ID to filter on */ +}; + +/* Debug Bus Storm EID mask filter params */ +struct dbg_bus_storm_eid_mask_params { + u8 val; /* Event ID value */ + u8 mask; /* Event ID mask. 1s in the mask = dont care bits. */ +}; + +/* Debug Bus Storm EID filter params */ +union dbg_bus_storm_eid_params { + struct dbg_bus_storm_eid_range_params range; + struct dbg_bus_storm_eid_mask_params mask; +}; + +/* Debug Bus Storm data */ +struct dbg_bus_storm_data { + u8 enabled; + u8 mode; + u8 hw_id; + u8 eid_filter_en; + u8 eid_range_not_mask; + u8 cid_filter_en; + union dbg_bus_storm_eid_params eid_filter_params; + u32 cid; +}; + +/* Debug Bus data */ +struct dbg_bus_data { + u32 app_version; + u8 state; + u8 mode_256b_en; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; + u8 trigger_en; + u8 filter_constraint_dword_mask; + u8 next_trigger_state; + u8 next_constraint_id; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_msg_len; + u8 rcv_from_other_engine; + u8 blocks_dword_mask; + u8 blocks_dword_overlap; + u32 hw_id_mask; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[132]; + struct dbg_bus_storm_data storms[6]; +}; + +/* Debug bus states */ +enum dbg_bus_states { + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, + MAX_DBG_BUS_STATES +}; + +/* Debug Bus Storm modes */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF, + DBG_BUS_STORM_MODE_PRAM_ADDR, + DBG_BUS_STORM_MODE_DRA_RW, + DBG_BUS_STORM_MODE_DRA_W, + DBG_BUS_STORM_MODE_LD_ST_ADDR, + DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_FAST_DBGMUX, + DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_RH_WITH_STORE, + DBG_BUS_STORM_MODE_FOC, + DBG_BUS_STORM_MODE_EXT_STORE, + MAX_DBG_BUS_STORM_MODES +}; + +/* Debug bus target IDs */ +enum dbg_bus_targets { + DBG_BUS_TARGET_ID_INT_BUF, + DBG_BUS_TARGET_ID_NIG, + DBG_BUS_TARGET_ID_PCI, + MAX_DBG_BUS_TARGETS +}; + +/* GRC Dump data */ +struct dbg_grc_data { + u8 params_initialized; + u8 reserved1; + u16 reserved2; + u32 param_val[48]; +}; + +/* Debug GRC params */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_DUMP_DORQ, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_RESERVD1, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_RESERVED2, + 
DBG_GRC_PARAM_MCP_TRACE_META_SIZE, + DBG_GRC_PARAM_EXCLUDE_ALL, + DBG_GRC_PARAM_CRASH, + DBG_GRC_PARAM_PARITY_SAFE, + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, + DBG_GRC_PARAM_NO_MCP, + DBG_GRC_PARAM_NO_FW_VER, + DBG_GRC_PARAM_RESERVED3, + DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, + DBG_GRC_PARAM_DUMP_ILT_CDUC, + DBG_GRC_PARAM_DUMP_ILT_CDUT, + DBG_GRC_PARAM_DUMP_CAU_EXT, + MAX_DBG_GRC_PARAMS +}; + +/* Debug status codes */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_256B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_RESERVED0, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_RESERVED1, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INSUFFICIENT_HW_IDS, + DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, + MAX_DBG_STATUS +}; + +/* Debug Storms IDs */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + +/* Idle Check data */ +struct idle_chk_data { + u32 buf_size; + u8 buf_size_set; + u8 reserved1; + u16 reserved2; +}; + +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[132]; + u8 chip_id; + u8 hw_type; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; + u8 initialized; + u8 use_dmae; + u8 reserved; + struct pretend_params pretend; + u32 num_regs_read; +}; + +/* ILT Clients */ +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + MAX_ILT_CLIENTS +}; + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_set_bin_ptr(): Sets a pointer to 
the binary data with debug + * arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. + */ +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. + * + * Return: Void. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * qed_dbg_grc_config(): Sets the value of a GRC parameter. + * + * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val); + +/** + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); +/** + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. + * + * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. 
+ */ +enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. 
+ */ +enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status +qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * @return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); +/** + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. 
+ * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_read_attn(): Reads the attention registers of the specified + * block and type, and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results); + +/** + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); + +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_OFFSET 24 + + char *format_str; +}; + +/* MCP Trace Meta data structure */ +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + +/******************************** Constants **********************************/ + +#define MAX_NAME_LEN 16 + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr); + +/** + * qed_dbg_get_status_str(): Returns a string for the specified status. + * + * @status: A debug status code. 
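/* Illustrative sketch (not part of this patch): the mcp_trace_format.data
 * dword above is a packed field; individual values are recovered with the
 * MASK/OFFSET pairs defined next to the structure. The helper below is a
 * hypothetical decoder written only to show the bit layout.
 */
static void example_decode_trace_format(const struct mcp_trace_format *fmt,
					u32 *module_idx, u32 *level,
					u32 *p1_size, u32 *p2_size,
					u32 *p3_size, u32 *len)
{
	*module_idx = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
		      MCP_TRACE_FORMAT_MODULE_OFFSET;
	*level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
		 MCP_TRACE_FORMAT_LEVEL_OFFSET;
	*p1_size = (fmt->data & MCP_TRACE_FORMAT_P1_SIZE_MASK) >>
		   MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
	*p2_size = (fmt->data & MCP_TRACE_FORMAT_P2_SIZE_MASK) >>
		   MCP_TRACE_FORMAT_P2_SIZE_OFFSET;
	*p3_size = (fmt->data & MCP_TRACE_FORMAT_P3_SIZE_MASK) >>
		   MCP_TRACE_FORMAT_P3_SIZE_OFFSET;
	*len = (fmt->data & MCP_TRACE_FORMAT_LEN_MASK) >>
	       MCP_TRACE_FORMAT_LEN_OFFSET;
}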
+ * + * Return: A string for the specified status. + */ +const char *qed_dbg_get_status_str(enum dbg_status status); + +/** + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); +/** + * qed_print_idle_chk_results(): Prints idle check results + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the idle check results. + * @num_errors: (OUT) number of errors found in idle check. + * @num_warnings: (OUT) number of warnings found in idle check. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, + u32 *num_warnings); + +/** + * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data. + * + * @p_hwfn: HW device data. + * @meta_buf: Meta buffer. + * + * Return: Void. + * + * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to + * no NVRAM access). + */ +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); + +/** + * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size + * for MCP Trace results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: MCP Trace dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Rrror if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_mcp_trace_results(): Prints MCP Trace results + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_dwords: Member of dwords that were dumped. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @p_hwfn: HW device data. + * @dump_buf: MVP trace dump buffer, starting from the header. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** + * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_bytes: Number of bytes that were dumped. 
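/* Illustrative sketch (not part of this patch): turning a raw idle-check dump
 * into text follows the same size-then-parse pattern as the dump itself. The
 * example_ helper, its vzalloc() allocation and the assumed
 * DBG_STATUS_VIRT_MEM_ALLOC_FAILED status are illustration only.
 */
static enum dbg_status example_parse_idle_chk(struct qed_hwfn *p_hwfn,
					      u32 *dump_buf,
					      u32 num_dumped_dwords)
{
	u32 results_size, num_errors, num_warnings;
	enum dbg_status rc;
	char *results;

	rc = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
					       num_dumped_dwords,
					       &results_size);
	if (rc != DBG_STATUS_OK)
		return rc;

	results = vzalloc(results_size);
	if (!results)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* assumed status */

	rc = qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					results, &num_errors, &num_warnings);
	if (rc == DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, "idle check: %u errors, %u warnings\n",
			  num_errors, num_warnings);

	vfree(results);
	return rc;
}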
+ * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf); + +/** + * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_reg_fifo_results(): Prints reg fifo results. + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_igu_fifo_results(): Prints IGU fifo results + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_protection_override_results(): Prints protection override + * results. + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. 
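/* Illustrative sketch (not part of this patch): when the MCP trace dump does
 * not embed its meta data (e.g. no NVRAM access), the caller supplies it with
 * qed_dbg_mcp_trace_set_meta_data() and may parse successive dumps with
 * qed_print_mcp_trace_results_cont(), releasing the meta data once done.
 * The meta_buf source and the example_ wrapper are assumptions.
 */
static enum dbg_status example_parse_mcp_trace_stream(struct qed_hwfn *p_hwfn,
						      const u32 *meta_buf,
						      u32 *dump_buf,
						      char *results_buf)
{
	enum dbg_status rc;

	qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_buf);

	/* May be called repeatedly; the meta data stays allocated between
	 * calls to support continuous parsing.
	 */
	rc = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);

	/* Once continuous parsing is done, release the meta data */
	qed_mcp_trace_free_meta_data(p_hwfn);

	return rc;
}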
+ */ +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_fw_asserts_results(): Prints FW Asserts results. + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index e1798925b44425..ea839e60557795 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data { extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; #ifdef CONFIG_DCB -int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); +int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_set *params); -int qed_dcbx_config_params(struct qed_hwfn *, - struct qed_ptt *, struct qed_dcbx_set *, bool); +int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_set *params, bool hw_commit); #endif /* QED local interface routines */ int -qed_dcbx_mib_update_event(struct qed_hwfn *, - struct qed_ptt *, enum qed_mib_read_type); +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_mib_read_type type); int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 6ab3e60d4928cb..e3edca187ddfaf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
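/* Illustrative sketch (not part of this patch): qed_dbg_read_attn() and
 * qed_dbg_parse_attn() (declared above) are typically used back to back to
 * report the attention bits of one block. BLOCK_MISCS is an arbitrary,
 * assumed block_id value; substitute any valid block.
 */
static enum dbg_status example_report_attn(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt)
{
	struct dbg_attn_block_result results = {};
	enum dbg_status rc;

	rc = qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_MISCS,
			       ATTN_TYPE_INTERRUPT, false, &results);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Pretty-prints each asserted bit via DP_NOTICE() */
	return qed_dbg_parse_attn(p_hwfn, &results);
}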
*/ #include @@ -10,6 +10,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm) return (r[0] & ~r[1]) != imm[0]; } +static u32 cond14(const u32 *r, const u32 *imm) +{ + return (r[0] | imm[0]) != imm[1]; +} + static u32 cond1(const u32 *r, const u32 *imm) { return r[0] != imm[0]; @@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond11, cond12, cond13, + cond14, }; #define NUM_PHYS_BLOCKS 84 @@ -208,10 +215,61 @@ enum dbg_bus_frame_modes { DBG_BUS_NUM_FRAME_MODES }; +/* Debug bus SEMI frame modes */ +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */ + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */ + DBG_BUS_SEMI_NUM_FRAME_MODES +}; + +/* Debug bus filter types */ +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */ + DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */ + DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */ + DBG_BUS_FILTER_TYPE_ON /* Filter always on */ +}; + +/* Debug bus pre-trigger recording types */ +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */ + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */ + DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */ +}; + +/* Debug bus post-trigger recording types */ +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */ + DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */ +}; + +/* Debug bus other engine mode */ +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX +}; + +/* DBG block Framing mode definitions */ +struct framing_mode_defs { + u8 id; + u8 blocks_dword_mask; + u8 storms_dword_mask; + u8 semi_framing_mode_id; + u8 full_buf_thr; +}; + /* Chip constant definitions */ struct chip_defs { const char *name; + u8 dwords_per_cycle; + u8 num_framing_modes; u32 num_ilt_pages; + struct framing_mode_defs *framing_modes; }; /* HW type constant definitions */ @@ -334,7 +392,7 @@ struct split_type_defs { #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE #define FIELD_DWORD_OFFSET(type, field) \ - (int)(FIELD_BIT_OFFSET(type, field) / 32) + ((int)(FIELD_BIT_OFFSET(type, field) / 32)) #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32) #define FIELD_BIT_MASK(type, field) \ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ @@ -431,11 +489,13 @@ struct split_type_defs { #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_COMMON_GLOBAL_PARAMS 9 +#define NUM_COMMON_GLOBAL_PARAMS 11 #define MAX_RECURSION_DEPTH 10 +#define FW_IMG_KUKU 0 #define FW_IMG_MAIN 1 +#define FW_IMG_L2B 2 #define REG_FIFO_ELEMENT_DWORDS 2 #define REG_FIFO_DEPTH_ELEMENTS 32 @@ -464,10 +524,25 @@ struct split_type_defs { /***************************** Constant Arrays *******************************/ +/* DBG block framing mode definitions, in descending preference order */ +static struct framing_mode_defs s_framing_mode_defs[4] = { + {DBG_BUS_FRAME_MODE_4ST, 
0x0, 0xf, + DBG_BUS_SEMI_FRAME_MODE_4FAST, + 10}, + {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW, + 10}, + {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc, + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10}, + {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8, + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10} +}; + /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2}, - {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2} + {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2, + s_framing_mode_defs}, + {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2, + s_framing_mode_defs} }; /* Storm constant definitions array */ @@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, - TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, + TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, + TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT, TCM_REG_CTX_RBC_ACCS, {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX, @@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, - MSEM_REG_DBG_FRAME_MODE_BB_K2, - MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - MSEM_REG_SLOW_DBG_MODE_BB_K2, - MSEM_REG_DBG_MODE1_CFG_BB_K2, + MSEM_REG_DBG_FRAME_MODE, + MSEM_REG_SLOW_DBG_ACTIVE, + MSEM_REG_SLOW_DBG_MODE, + MSEM_REG_DBG_MODE1_CFG, MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_DBG_GPRE_VECT, MCM_REG_CTX_RBC_ACCS, @@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, - USEM_REG_DBG_FRAME_MODE_BB_K2, - USEM_REG_SLOW_DBG_ACTIVE_BB_K2, - USEM_REG_SLOW_DBG_MODE_BB_K2, - USEM_REG_DBG_MODE1_CFG_BB_K2, + USEM_REG_DBG_FRAME_MODE, + USEM_REG_SLOW_DBG_ACTIVE, + USEM_REG_SLOW_DBG_MODE, + USEM_REG_DBG_MODE1_CFG, USEM_REG_SYNC_DBG_EMPTY, USEM_REG_DBG_GPRE_VECT, UCM_REG_CTX_RBC_ACCS, @@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, - XSEM_REG_DBG_FRAME_MODE_BB_K2, - XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - XSEM_REG_SLOW_DBG_MODE_BB_K2, - XSEM_REG_DBG_MODE1_CFG_BB_K2, + XSEM_REG_DBG_FRAME_MODE, + XSEM_REG_SLOW_DBG_ACTIVE, + XSEM_REG_SLOW_DBG_MODE, + XSEM_REG_DBG_MODE1_CFG, XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_DBG_GPRE_VECT, XCM_REG_CTX_RBC_ACCS, @@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, - YSEM_REG_DBG_FRAME_MODE_BB_K2, - YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - YSEM_REG_SLOW_DBG_MODE_BB_K2, - YSEM_REG_DBG_MODE1_CFG_BB_K2, + YSEM_REG_DBG_FRAME_MODE, + YSEM_REG_SLOW_DBG_ACTIVE, + YSEM_REG_SLOW_DBG_MODE, + YSEM_REG_DBG_MODE1_CFG, YSEM_REG_SYNC_DBG_EMPTY, YSEM_REG_DBG_GPRE_VECT, YCM_REG_CTX_RBC_ACCS, @@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, - PSEM_REG_DBG_FRAME_MODE_BB_K2, - PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - PSEM_REG_SLOW_DBG_MODE_BB_K2, - PSEM_REG_DBG_MODE1_CFG_BB_K2, + PSEM_REG_DBG_FRAME_MODE, + PSEM_REG_SLOW_DBG_ACTIVE, + PSEM_REG_SLOW_DBG_MODE, + PSEM_REG_DBG_MODE1_CFG, PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_DBG_GPRE_VECT, PCM_REG_CTX_RBC_ACCS, @@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = { {"asic", 1, 256, 32768}, 
{"reserved", 0, 0, 0}, {"reserved2", 0, 0, 0}, - {"reserved3", 0, 0, 0} + {"reserved3", 0, 0, 0}, + {"reserved4", 0, 0, 0} }; static struct grc_param_defs s_grc_param_defs[] = { @@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = { static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5}, - {"sgmii_phy", MS_REG_MS_CMU_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, + {"sgmii_phy", MS_REG_MS_CMU_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, }; static struct split_type_defs s_split_type_defs[] = { @@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = { {"vf"} }; +/******************************** Variables **********************************/ + +/* The version of the calling app */ +static u32 s_app_ver; + /**************************** Private Functions ******************************/ +static void qed_static_asserts(void) +{ +} + /* Reads and returns a single dword from the specified unaligned buffer */ static u32 qed_read_unaligned_dword(u8 *buf) { @@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) if (dev_data->initialized) return DBG_STATUS_OK; + if (!s_app_ver) + return DBG_STATUS_APP_VERSION_NOT_SET; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; @@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - /* qed_rq() fetches data in CPU byteorder. Swap it back to - * the device's to get right structure layout. - */ - cpu_to_le32_array(dest, size); - /* Read FW version info from Storm RAM */ size = le32_to_cpu(fw_info_location.size); if (!size || size > sizeof(*fw_info)) @@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - - cpu_to_le32_array(dest, size); } /* Dumps the specified string to the specified buffer. 
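/* Illustrative note (not part of this patch): with the s_app_ver gate added
 * to qed_dbg_dev_init() above, a debug client is expected to register its
 * tools version before requesting any dump; until then every feature init
 * fails with DBG_STATUS_APP_VERSION_NOT_SET. A hypothetical call order,
 * assuming the static qed_dbg_set_app_ver() helper introduced later in this
 * patch is reachable from the caller's context:
 */
static enum dbg_status example_register_dbg_version(struct qed_hwfn *p_hwfn)
{
	enum dbg_status rc;

	/* Rejects versions older than TOOLS_VERSION */
	rc = qed_dbg_set_app_ver(TOOLS_VERSION);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Dumps may now be requested; qed_dbg_dev_init() will no longer
	 * return DBG_STATUS_APP_VERSION_NOT_SET.
	 */
	return DBG_STATUS_OK;
}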
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unexpected debug error: invalid FW version string\n"); switch (fw_info.ver.image_id) { + case FW_IMG_KUKU: + strcpy(fw_img_str, "kuku"); + break; case FW_IMG_MAIN: strcpy(fw_img_str, "main"); break; + case FW_IMG_L2B: + strcpy(fw_img_str, "l2b"); + break; default: strcpy(fw_img_str, "unknown"); break; @@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, s_hw_type_defs[dev_data->hw_type].name); offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); + offset += qed_dump_num_param(dump_buf + offset, + dump, "epoch", qed_get_epoch_time()); if (dev_data->chip_id == CHIP_BB) offset += qed_dump_num_param(dump_buf + offset, dump, "path", QED_PATH_ID(p_hwfn)); @@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, continue; reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STALL_0_BB_K2; + SEM_FAST_REG_STALL_0; qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } @@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; const struct dbg_attn_reg *attn_reg_arr; + u32 block_id, sts_clr_address; u8 reg_idx, num_attn_regs; - u32 block_id; for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id]) @@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); + sts_clr_address = reg_data->sts_clr_address; /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) qed_rd(p_hwfn, p_ptt, - DWORDS_TO_BYTES(reg_data-> - sts_clr_address)); + DWORDS_TO_BYTES(sts_clr_address)); } } } +/* Finds the meta data image in NVRAM */ +static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 image_type, + u32 *nvram_offset_bytes, + u32 *nvram_size_bytes) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; + struct mcp_file_att file_att; + int nvm_result; + + /* Call NVRAM get file command */ + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, + (u32 *)&file_att, false); + + /* Check response */ + if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + + /* Update return values */ + *nvram_offset_bytes = file_att.nvm_start_addr; + *nvram_size_bytes = file_att.len; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", + image_type, *nvram_offset_bytes, *nvram_size_bytes); + + /* Check alignment */ + if (*nvram_size_bytes & 0x3) + return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + + return DBG_STATUS_OK; +} + +/* Reads data from NVRAM */ +static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_bytes, + u32 nvram_size_bytes, u32 *ret_buf) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; + s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0, param = 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "nvram_read: reading image of size %d bytes from NVRAM\n", + nvram_size_bytes); + + do { + bytes_to_copy = + (bytes_left > + MCP_DRV_NVM_BUF_LEN) ? 
MCP_DRV_NVM_BUF_LEN : bytes_left; + + /* Call NVRAM read command */ + SET_MFW_FIELD(param, + DRV_MB_PARAM_NVM_OFFSET, + nvram_offset_bytes + read_offset); + SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); + if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, param, + &ret_mcp_resp, + &ret_mcp_param, &ret_read_size, + (u32 *)((u8 *)ret_buf + read_offset), + false)) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Check response */ + if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Update read offset */ + read_offset += ret_read_size; + bytes_left -= ret_read_size; + } while (bytes_left > 0); + + return DBG_STATUS_OK; +} + /* Dumps GRC registers section header. Returns the dumped size in dwords. * the following parameters are dumped: * - count: no. of dumped entries @@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, return offset; } -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes); - -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf); - /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, has_dbg_bus = GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS); - /* read+clear for NWS parity is not working, skip NWS block */ - if (block_id == BLOCK_NWS) - continue; - if (!is_removed && has_dbg_bus && GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_EVAL_MODE) > 0) { @@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 dwords_read, offset = 0; bool parities_masked = false; + u32 dwords_read, offset = 0; u8 i; *num_dumped_dwords = 0; @@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, */ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 * - dump_buf, + u32 *dump_buf, bool dump, u16 rule_id, const struct dbg_idle_chk_rule *rule, @@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, return offset; } -/* Finds the meta data image in NVRAM */ -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; - struct mcp_file_att file_att; - int nvm_result; - - /* Call NVRAM get file command */ - nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, - p_ptt, - DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, - &ret_mcp_resp, - &ret_mcp_param, - &ret_txn_size, (u32 *)&file_att); - - /* Check response */ - if (nvm_result || - (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; - - /* Update return values */ - *nvram_offset_bytes = file_att.nvm_start_addr; - *nvram_size_bytes = file_att.len; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", - image_type, *nvram_offset_bytes, *nvram_size_bytes); - - /* Check alignment */ - if (*nvram_size_bytes & 0x3) - return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; - - 
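/* Illustrative sketch (not part of this patch): qed_find_nvram_image() and
 * qed_nvram_read() (moved above) are used together - locate an image by type,
 * then read it into a caller-provided dword buffer. The image_type argument
 * and the example_ wrapper are placeholders.
 */
static enum dbg_status example_read_nvram_image(struct qed_hwfn *p_hwfn,
						struct qed_ptt *p_ptt,
						u32 image_type,
						u32 *buf, u32 buf_size_bytes)
{
	u32 offset_bytes, size_bytes;
	enum dbg_status rc;

	rc = qed_find_nvram_image(p_hwfn, p_ptt, image_type,
				  &offset_bytes, &size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (size_bytes > buf_size_bytes)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	return qed_nvram_read(p_hwfn, p_ptt, offset_bytes, size_bytes, buf);
}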
return DBG_STATUS_OK; -} - -/* Reads data from NVRAM */ -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; - s32 bytes_left = nvram_size_bytes; - u32 read_offset = 0, param = 0; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "nvram_read: reading image of size %d bytes from NVRAM\n", - nvram_size_bytes); - - do { - bytes_to_copy = - (bytes_left > - MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; - - /* Call NVRAM read command */ - SET_MFW_FIELD(param, - DRV_MB_PARAM_NVM_OFFSET, - nvram_offset_bytes + read_offset); - SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); - if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NVM_READ_NVRAM, param, - &ret_mcp_resp, - &ret_mcp_param, &ret_read_size, - (u32 *)((u8 *)ret_buf + read_offset))) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Check response */ - if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Update read offset */ - read_offset += ret_read_size; - bytes_left -= ret_read_size; - } while (bytes_left > 0); - - return DBG_STATUS_OK; -} - /* Get info on the MCP Trace data in the scratchpad: * - trace_data_grc_addr (OUT): trace data GRC address in bytes * - trace_data_size (OUT): trace data size in bytes (without the header) @@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, /* Dumps the specified ILT pages to the specified buffer. * Returns the dumped size in dwords. */ -static u32 qed_ilt_dump_pages_range(u32 *dump_buf, - bool dump, - u32 start_page_id, +static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset, + bool *dump, u32 start_page_id, u32 num_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { - u32 page_id, end_page_id, offset = 0; + u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; + u32 page_id, end_page_id, offset = *given_offset; + struct phys_mem_desc *mem_desc = NULL; + bool continue_dump = *dump; + u32 partial_page_size = 0; if (num_pages == 0) return offset; @@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, end_page_id = start_page_id + num_pages - 1; for (page_id = start_page_id; page_id <= end_page_id; page_id++) { - struct phys_mem_desc *mem_desc = &ilt_pages[page_id]; - - /** - * - * if (page_id >= ->p_cxt_mngr->ilt_shadow_size) - * break; - */ - + mem_desc = &ilt_pages[page_id]; if (!ilt_pages[page_id].virt_addr) continue; if (dump_page_ids) { - /* Copy page ID to dump buffer */ - if (dump) + /* Copy page ID to dump buffer + * (if dump is needed and buffer is not full) + */ + if ((continue_dump) && + (offset + 1 > buf_size_in_dwords)) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + if (continue_dump) *(dump_buf + offset) = page_id; offset++; } else { /* Copy page memory to dump buffer */ - if (dump) + if ((continue_dump) && + (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords)) { + if (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords) { + partial_page_size = + buf_size_in_dwords - offset; + memcpy(dump_buf + offset, + mem_desc->virt_addr, + partial_page_size); + continue_dump = false; + actual_dump_size_in_dwords = + offset + partial_page_size; + } + } + + if (continue_dump) memcpy(dump_buf + offset, mem_desc->virt_addr, mem_desc->size); offset += 
BYTES_TO_DWORDS(mem_desc->size); } } + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; + return offset; } @@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, */ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, u32 *dump_buf, - bool dump, + u32 *given_offset, + bool *dump, u32 valid_conn_pf_pages, u32 valid_conn_vf_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, + u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 pf_start_line, start_page_id, offset = 0; + u32 pf_start_line, start_page_id, offset = *given_offset; u32 cdut_pf_init_pages, cdut_vf_init_pages; u32 cdut_pf_work_pages, cdut_vf_work_pages; u32 base_data_offset, size_param_offset; + u32 src_pages; + u32 section_header_and_param_size; u32 cdut_pf_pages, cdut_vf_pages; + u32 actual_dump_size_in_dwords; + bool continue_dump = *dump; + bool update_size = *dump; const char *section_name; - u8 i; + u32 i; + actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem"; cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); @@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; + section_header_and_param_size = qed_dump_section_hdr(NULL, + false, + section_name, + 1) + + qed_dump_num_param(NULL, false, "size", 0); + + if ((continue_dump) && + (offset + section_header_and_param_size > buf_size_in_dwords)) { + continue_dump = false; + update_size = false; + actual_dump_size_in_dwords = offset; + } - offset += - qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1); + offset += qed_dump_section_hdr(dump_buf + offset, + continue_dump, section_name, 1); /* Dump size parameter (0 for now, overwritten with real size later) */ size_param_offset = offset; - offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + offset += qed_dump_num_param(dump_buf + offset, + continue_dump, "size", 0); base_data_offset = offset; /* CDUC pages are ordered as follows: @@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { /* Dump connection PF pages */ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_pf_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, valid_conn_pf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump connection VF pages */ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_vf_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + valid_conn_vf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* CDUT pages are ordered as follows: @@ -4599,63 +4720,84 @@ static u32 
qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, /* Dump task PF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_init_pages - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_pf_work_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, cdut_pf_work_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump task VF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += cdut_vf_pages) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_vf_work_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + cdut_vf_work_pages, ilt_pages, + dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /*Dump Searcher pages */ + if (clients[ILT_CLI_SRC].active) { + start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line; + src_pages = clients[ILT_CLI_SRC].last.val - + clients[ILT_CLI_SRC].first.val + 1; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, src_pages, ilt_pages, + dump_page_ids, buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* Overwrite size param */ - if (dump) - qed_dump_num_param(dump_buf + size_param_offset, - dump, "size", offset - base_data_offset); + if (update_size) { + u32 section_size = (*dump == continue_dump) ? + offset - base_data_offset : + actual_dump_size_in_dwords - base_data_offset; + if (section_size > 0) + qed_dump_num_param(dump_buf + size_param_offset, + *dump, "size", section_size); + else if ((section_size == 0) && (*dump != continue_dump)) + actual_dump_size_in_dwords -= + section_header_and_param_size; + } + + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; return offset; } -/* Performs ILT Dump to the specified buffer. +/* Dumps a section containing the global parameters. + * Part of ilt dump process * Returns the dumped size in dwords. 
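/* Illustrative sketch (not part of this patch): the page-range dump above
 * copies whole ILT pages while they fit and falls back to a partial copy of
 * the last page when the buffer runs out, remembering the actual dump size
 * while the running offset keeps tracking the full (untruncated) size. The
 * helper below shows that bounded-copy pattern in isolation; names are
 * placeholders.
 */
static u32 example_bounded_copy(u32 *dump_buf, u32 offset,
				u32 buf_size_in_dwords,
				const u32 *src, u32 src_size_in_dwords,
				bool *continue_dump, u32 *actual_size)
{
	u32 room = buf_size_in_dwords - offset;

	if (!*continue_dump)
		return offset + src_size_in_dwords;

	if (src_size_in_dwords <= room) {
		memcpy(dump_buf + offset, src,
		       DWORDS_TO_BYTES(src_size_in_dwords));
	} else {
		/* Copy only what fits, then stop dumping */
		memcpy(dump_buf + offset, src, DWORDS_TO_BYTES(room));
		*continue_dump = false;
		*actual_size = offset + room;
	}

	return offset + src_size_in_dwords;
}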
*/ -static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +static u32 +qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 cduc_page_size, + u32 conn_ctx_size, + u32 cdut_page_size, + u32 *full_dump_size_param_offset, + u32 *actual_dump_size_param_offset) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0; - u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages; - u32 num_cids_per_page, conn_ctx_size; - u32 cduc_page_size, cdut_page_size; - struct phys_mem_desc *ilt_pages; - u8 conn_type; - - cduc_page_size = 1 << - (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - cdut_page_size = 1 << - (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; - num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); - ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 offset = 0; - /* Dump global params - 22 must match number of params below */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, - dump_buf + offset, dump, 22); + dump_buf + offset, + dump, 30); offset += qed_dump_str_param(dump_buf + offset, - dump, "dump-type", "ilt-dump"); + dump, + "dump-type", "ilt-dump"); offset += qed_dump_num_param(dump_buf + offset, dump, - "cduc-page-size", cduc_page_size); + "cduc-page-size", + cduc_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-first-page-id", @@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-pf-pages", - clients - [ILT_CLI_CDUC].pf_total_lines); + clients[ILT_CLI_CDUC].pf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-vf-pages", - clients - [ILT_CLI_CDUC].vf_total_lines); + clients[ILT_CLI_CDUC].vf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "max-conn-ctx-size", conn_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, - "cdut-page-size", cdut_page_size); + "cdut-page-size", + cdut_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-first-page-id", @@ -4709,21 +4850,18 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, dump, "max-task-ctx-size", p_hwfn->p_cxt_mngr->task_ctx_size); - offset += qed_dump_num_param(dump_buf + offset, - dump, - "task-type-id", - p_hwfn->p_cxt_mngr->task_type_id); offset += qed_dump_num_param(dump_buf + offset, dump, "first-vf-id-in-pf", p_hwfn->p_cxt_mngr->first_vf_in_pf); - offset += /* 18 */ qed_dump_num_param(dump_buf + offset, - dump, - "num-vfs-in-pf", - p_hwfn->p_cxt_mngr->vf_count); offset += qed_dump_num_param(dump_buf + offset, dump, - "ptr-size-bytes", sizeof(void *)); + "num-vfs-in-pf", + p_hwfn->p_cxt_mngr->vf_count); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ptr-size-bytes", + sizeof(void *)); offset += qed_dump_num_param(dump_buf + offset, dump, "pf-start-line", @@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, dump, "ilt-shadow-size", p_hwfn->p_cxt_mngr->ilt_shadow_size); + + *full_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, "dump-size-full", 0); + + *actual_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, + "dump-size-actual", 0); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "iscsi_task_pages", + 
p_hwfn->p_cxt_mngr->iscsi_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "fcoe_task_pages", + p_hwfn->p_cxt_mngr->fcoe_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "roce_task_pages", + p_hwfn->p_cxt_mngr->roce_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "eth_task_pages", + p_hwfn->p_cxt_mngr->eth_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-first-page-id", + clients[ILT_CLI_SRC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-last-page-id", + clients[ILT_CLI_SRC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-is-active", + clients[ILT_CLI_SRC].active); + /* Additional/Less parameters require matching of number in call to * dump_common_global_params() */ - /* Dump section containing number of PF CIDs per connection type */ + return offset; +} + +/* Dump section containing number of PF CIDs per connection type. + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_pf_cids) +{ + u32 num_pf_cids = 0; + u32 offset = 0; + u8 conn_type; + offset += qed_dump_section_hdr(dump_buf + offset, dump, "num_pf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_pf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_pf_cids = - p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; - + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_pf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; if (dump) *(dump_buf + offset) = num_pf_cids; - valid_conn_pf_cids += num_pf_cids; + *valid_conn_pf_cids += num_pf_cids; } - /* Dump section containing number of VF CIDs per connection type */ - offset += qed_dump_section_hdr(dump_buf + offset, - dump, "num_vf_cids_per_conn_type", 1); + return offset; +} + +/* Dump section containing number of VF CIDs per connection type + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_vf_cids) +{ + u32 num_vf_cids = 0; + u32 offset = 0; + u8 conn_type; + + offset += qed_dump_section_hdr(dump_buf + offset, dump, + "num_vf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_vf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_vf_cids = + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_vf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_vf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; - if (dump) *(dump_buf + offset) = num_vf_cids; - valid_conn_vf_cids += num_vf_cids; + *valid_conn_vf_cids += num_vf_cids; + } + + return offset; +} + +/* Performs ILT Dump to the specified buffer. + * buf_size_in_dwords - The dumped buffer size. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, u32 buf_size_in_dwords, bool dump) +{ +#if ((!defined VMWARE) && (!defined UEFI)) + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; +#endif + u32 valid_conn_vf_cids = 0, + valid_conn_vf_pages, offset = 0, real_dumped_size = 0; + u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages; + u32 num_cids_per_page, conn_ctx_size; + u32 cduc_page_size, cdut_page_size; + u32 actual_dump_size_in_dwords = 0; + struct phys_mem_desc *ilt_pages; + u32 actul_dump_off = 0; + u32 last_section_size; + u32 full_dump_off = 0; + u32 section_size = 0; + bool continue_dump; + u32 page_id; + + last_section_size = qed_dump_last_section(NULL, 0, false); + cduc_page_size = 1 << + (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + cdut_page_size = 1 << + (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; + num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); + ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + continue_dump = dump; + + /* if need to dump then save memory for the last section + * (last section calculates CRC of dumped data) + */ + if (dump) { + if (buf_size_in_dwords >= last_section_size) { + buf_size_in_dwords -= last_section_size; + } else { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } } - /* Dump section containing physical memory descs for each ILT page */ + /* Dump global params */ + + /* if need to dump then first check that there is enough memory + * in dumped buffer for this section calculate the size of this + * section without dumping. if there is not enough memory - then + * stop the dumping. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + NULL, + false, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, + continue_dump, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + + /* Dump section containing number of PF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_pf_cids(p_hwfn, + NULL, + false, + &valid_conn_pf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_pf_cids); + + /* Dump section containing number of VF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_vf_cids(p_hwfn, + NULL, + false, + &valid_conn_vf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_vf_cids); + + /* Dump section containing physical memory descriptors for each + * ILT page. 
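/* Illustrative sketch (not part of this patch): each ILT dump section above
 * is emitted behind the same guard - compute the section size with a NULL
 * buffer and dump=false, stop dumping if it would overflow, then emit it for
 * real. A generic form of that guard, mirroring the driver's own
 * NULL-buffer sizing convention (names are placeholders):
 */
static u32 example_guarded_section(struct qed_hwfn *p_hwfn,
				   u32 *dump_buf, u32 offset,
				   u32 buf_size_in_dwords,
				   bool *continue_dump, u32 *actual_size,
				   u32 (*emit)(struct qed_hwfn *p_hwfn,
					       u32 *buf, bool dump))
{
	if (*continue_dump) {
		/* Dry run: same emitter, no buffer writes */
		u32 section_size = emit(p_hwfn, NULL, false);

		if (offset + section_size > buf_size_in_dwords) {
			*continue_dump = false;
			*actual_size = offset;
		}
	}

	/* Real emission is skipped internally once continue_dump is false */
	return offset + emit(p_hwfn, dump_buf + offset, *continue_dump);
}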
+ */ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; + + /* If need to dump then first check that there is enough memory + * in dumped buffer for the section header. + */ + if (continue_dump) { + section_size = qed_dump_section_hdr(NULL, + false, + "ilt_page_desc", + 1) + + qed_dump_num_param(NULL, + false, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + offset += qed_dump_section_hdr(dump_buf + offset, - dump, "ilt_page_desc", 1); + continue_dump, "ilt_page_desc", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, + continue_dump, "size", num_pages * PAGE_MEM_DESC_SIZE_DWORDS); - /* Copy memory descriptors to dump buffer */ - if (dump) { - u32 page_id; - + /* Copy memory descriptors to dump buffer + * If need to dump then dump till the dump buffer size + */ + if (continue_dump) { for (page_id = 0; page_id < num_pages; - page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) - memcpy(dump_buf + offset, - &ilt_pages[page_id], - DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS)); + page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) { + if (continue_dump && + (offset + PAGE_MEM_DESC_SIZE_DWORDS <= + buf_size_in_dwords)) { + memcpy(dump_buf + offset, + &ilt_pages[page_id], + DWORDS_TO_BYTES + (PAGE_MEM_DESC_SIZE_DWORDS)); + } else { + if (continue_dump) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + } } else { offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; } @@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, num_cids_per_page); /* Dump ILT pages IDs */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, true); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, true, buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump ILT pages memory */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, false); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, false, buf_size_in_dwords, + &actual_dump_size_in_dwords); + + real_dumped_size = + (continue_dump == dump) ? 
offset : actual_dump_size_in_dwords; + qed_dump_num_param(dump_buf + full_dump_off, dump, + "full-dump-size", offset + last_section_size); + qed_dump_num_param(dump_buf + actul_dump_off, + dump, + "actual-dump-size", + real_dumped_size + last_section_size); /* Dump last section */ - offset += qed_dump_last_section(dump_buf, offset, dump); + real_dumped_size += qed_dump_last_section(dump_buf, + real_dumped_size, dump); - return offset; + return real_dumped_size; } /***************************** Public Functions *******************************/ @@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, return DBG_STATUS_OK; } +static enum dbg_status qed_dbg_set_app_ver(u32 ver) +{ + if (ver < TOOLS_VERSION) + return DBG_STATUS_UNSUPPORTED_APP_VERSION; + + s_app_ver = ver; + + return DBG_STATUS_OK; +} + bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info) { @@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; + /* Doesn't do anything, needed for compile time asserts */ + qed_static_asserts(); + /* GRC Dump */ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); @@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, if (status != DBG_STATUS_OK) return status; - *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false); + *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false); return DBG_STATUS_OK; } @@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; - enum dbg_status status; - - *num_dumped_dwords = 0; - - status = qed_dbg_ilt_get_dump_buf_size(p_hwfn, - p_ptt, - &needed_buf_size_in_dwords); - if (status != DBG_STATUS_OK) - return status; - - if (buf_size_in_dwords < needed_buf_size_in_dwords) - return DBG_STATUS_DUMP_BUF_TOO_SMALL; - - *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true); + *num_dumped_dwords = qed_ilt_dump(p_hwfn, + p_ptt, + dump_buf, buf_size_in_dwords, true); /* Reveret GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); @@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = { "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ - "When triggering on Storm data, the Storm to trigger on must be specified" + "When triggering on Storm data, the Storm to trigger on must be specified", + + /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */ + "Failed to request MDUMP2 Offsize", + + /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */ + "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data", + + /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */ + "Invalid Signature found at start of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */ + "Invalid Log Size of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */ + "Invalid Log Header of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */ + "Invalid Log Data of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */ + "Could not extract number of ports from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */ + "Could not extract MFW (link) status from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */ + "Could not 
display linkdump of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */ + "Could not read PHY CFG of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */ + "Could not read PLL Mode of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */ + "Could not read TSCF/TSCE Lane Regs of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */ + "Could not allocate MDUMP2 reg-val internal buffer" }; /* Idle check severity names array */ @@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN]; /**************************** Private Functions ******************************/ +static void qed_user_static_asserts(void) +{ +} + static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { return (a + b) % size; @@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, /* Skip register names until the required reg_id is * reached. */ - for (; reg_id > curr_reg_id; - curr_reg_id++, - parsing_str += strlen(parsing_str) + 1); + for (; reg_id > curr_reg_id; curr_reg_id++) + parsing_str += strlen(parsing_str) + 1; results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *num_errors, u32 *num_warnings) { + u32 num_section_params = 0, num_rules, num_rules_not_dumped; const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; - u32 num_section_params = 0, num_rules; /* Offset in results_buf in bytes */ u32 results_offset = 0; @@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, num_section_params, results_buf, &results_offset); - /* Read idle_chk section */ + /* Read idle_chk section + * There may be 1 or 2 idle_chk section parameters: + * - 1st is "num_rules" + * - 2nd is "num_rules_not_dumped" (optional) + */ + dump_buf += qed_read_section_hdr(dump_buf, §ion_name, &num_section_params); - if (strcmp(section_name, "idle_chk") || num_section_params != 1) + if (strcmp(section_name, "idle_chk") || + (num_section_params != 2 && num_section_params != 1)) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; dump_buf += qed_read_param(dump_buf, ¶m_name, ¶m_str_val, &num_rules); if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + if (num_section_params > 1) { + dump_buf += qed_read_param(dump_buf, + ¶m_name, + ¶m_str_val, + &num_rules_not_dumped); + if (strcmp(param_name, "num_rules_not_dumped")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + } else { + num_rules_not_dumped = 0; + } if (num_rules) { u32 rules_print_size; @@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "\nIdle Check completed successfully\n"); + if (num_rules_not_dumped) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n", + num_rules_not_dumped); + /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; @@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; + /* Doesn't do anything, needed for compile time asserts */ + qed_user_static_asserts(); + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, &parsed_buf_size, true); @@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, reg_result->block_attn_offset; /* Go over attention status bits */ - for (j = 0; j < num_reg_attn; j++, bit_idx++) { + for (j = 0; j < num_reg_attn; j++) { u16 
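The idle_chk section header is now allowed to carry either one or two named parameters, with "num_rules_not_dumped" optional and defaulting to zero. A standalone sketch of that optional-trailing-parameter parse, using an invented parameter representation, is shown below.

    #include <stdint.h>
    #include <string.h>

    struct dump_param {
            const char *name;
            uint32_t val;
    };

    /* Accept either the mandatory "num_rules" alone, or "num_rules" followed
     * by the optional "num_rules_not_dumped"; the optional one defaults to 0.
     */
    int parse_idle_chk_params(const struct dump_param *p, uint32_t nparams,
                              uint32_t *num_rules, uint32_t *num_not_dumped)
    {
            if (nparams != 1 && nparams != 2)
                    return -1;
            if (strcmp(p[0].name, "num_rules"))
                    return -1;
            *num_rules = p[0].val;

            *num_not_dumped = 0;
            if (nparams > 1) {
                    if (strcmp(p[1].name, "num_rules_not_dumped"))
                            return -1;
                    *num_not_dumped = p[1].val;
            }
            return 0;
    }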
attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; @@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Check current bit index */ - if (!(reg_result->sts_val & BIT(bit_idx))) - continue; + if (reg_result->sts_val & BIT(bit_idx)) { + /* An attention bit with value=1 was found + * Find attention name + */ + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; + attn_name = attn_name_base + attn_name_offset; + attn_type_str = + (attn_type == + ATTN_TYPE_INTERRUPT ? "Interrupt" : + "Parity"); + masked_str = reg_result->mask_val & + BIT(bit_idx) ? + " [masked]" : ""; + sts_addr = + GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS); + DP_NOTICE(p_hwfn, + "%s (%s) : %s [address 0x%08x, bit %d]%s\n", + block_name, attn_type_str, attn_name, + sts_addr * 4, bit_idx, masked_str); + } - /* An attention bit with value=1 was found - * Find attention name - */ - attn_name_offset = - block_attn_name_offsets[attn_idx_val]; - attn_name = attn_name_base + attn_name_offset; - attn_type_str = - (attn_type == - ATTN_TYPE_INTERRUPT ? "Interrupt" : - "Parity"); - masked_str = reg_result->mask_val & BIT(bit_idx) ? - " [masked]" : ""; - sts_addr = GET_FIELD(reg_result->data, - DBG_ATTN_REG_RESULT_STS_ADDRESS); - DP_NOTICE(p_hwfn, - "%s (%s) : %s [address 0x%08x, bit %d]%s\n", - block_name, attn_type_str, attn_name, - sts_addr * 4, bit_idx, masked_str); + bit_idx++; } } return DBG_STATUS_OK; } -static DEFINE_MUTEX(qed_dbg_lock); - /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, @@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, &num_warnnings); } +static DEFINE_MUTEX(qed_dbg_lock); + +#define MAX_PHY_RESULT_BUFFER 9000 + +/******************************** Feature Meta data section ******************/ + +#define GRC_NUM_STR_FUNCS 2 +#define IDLE_CHK_NUM_STR_FUNCS 1 +#define MCP_TRACE_NUM_STR_FUNCS 1 +#define REG_FIFO_NUM_STR_FUNCS 1 +#define IGU_FIFO_NUM_STR_FUNCS 1 +#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1 +#define FW_ASSERTS_NUM_STR_FUNCS 1 +#define ILT_NUM_STR_FUNCS 1 +#define PHY_NUM_STR_FUNCS 20 + /* Feature meta data lookup table */ static struct { char *name; + u32 num_funcs; enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *size); enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, @@ -7411,40 +7865,46 @@ static struct { u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + const struct qed_func_lookup *hsi_func_lookup; } qed_features_lookup[] = { { - "grc", qed_dbg_grc_get_dump_buf_size, - qed_dbg_grc_dump, NULL, NULL}, { - "idle_chk", + "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size, + qed_dbg_grc_dump, NULL, NULL, NULL}, { + "idle_chk", IDLE_CHK_NUM_STR_FUNCS, qed_dbg_idle_chk_get_dump_buf_size, qed_dbg_idle_chk_dump, qed_print_idle_chk_results_wrapper, - qed_get_idle_chk_results_buf_size}, { - "mcp_trace", + qed_get_idle_chk_results_buf_size, + NULL}, { + "mcp_trace", MCP_TRACE_NUM_STR_FUNCS, qed_dbg_mcp_trace_get_dump_buf_size, qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, - qed_get_mcp_trace_results_buf_size}, { - "reg_fifo", + qed_get_mcp_trace_results_buf_size, + NULL}, { + "reg_fifo", REG_FIFO_NUM_STR_FUNCS, qed_dbg_reg_fifo_get_dump_buf_size, qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, - qed_get_reg_fifo_results_buf_size}, { - "igu_fifo", + qed_get_reg_fifo_results_buf_size, + 
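The feature meta-data table gains a per-feature helper count and an HSI lookup pointer while keeping its role of mapping a feature name to its size, dump and print callbacks. A self-contained sketch of that table shape follows; the rows, callback signatures and helper function are illustrative only.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef int (*get_size_fn)(uint32_t *size);
    typedef int (*perform_dump_fn)(uint32_t *buf, uint32_t buf_size, uint32_t *dumped);
    typedef int (*print_results_fn)(const uint32_t *buf, uint32_t dumped, char *out);

    struct feature_meta {
            const char *name;
            uint32_t num_funcs;                /* per-feature string helper count */
            get_size_fn get_size;
            perform_dump_fn perform_dump;
            print_results_fn print_results;    /* NULL: feature stays raw binary */
    };

    static const struct feature_meta features[] = {
            { "grc",      2, NULL, NULL, NULL },
            { "idle_chk", 1, NULL, NULL, NULL },
            /* ... one row per debug feature ... */
    };

    const struct feature_meta *find_feature(const char *name)
    {
            size_t i;

            for (i = 0; i < sizeof(features) / sizeof(features[0]); i++)
                    if (!strcmp(features[i].name, name))
                            return &features[i];
            return NULL;
    }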
NULL}, { + "igu_fifo", IGU_FIFO_NUM_STR_FUNCS, qed_dbg_igu_fifo_get_dump_buf_size, qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, - qed_get_igu_fifo_results_buf_size}, { - "protection_override", + qed_get_igu_fifo_results_buf_size, + NULL}, { + "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS, qed_dbg_protection_override_get_dump_buf_size, qed_dbg_protection_override_dump, qed_print_protection_override_results, - qed_get_protection_override_results_buf_size}, { - "fw_asserts", + qed_get_protection_override_results_buf_size, + NULL}, { + "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS, qed_dbg_fw_asserts_get_dump_buf_size, qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results, - qed_get_fw_asserts_results_buf_size}, { - "ilt", - qed_dbg_ilt_get_dump_buf_size, - qed_dbg_ilt_dump, NULL, NULL},}; + qed_get_fw_asserts_results_buf_size, + NULL}, { + "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size, + qed_dbg_ilt_dump, NULL, NULL, NULL},}; static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) { @@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 text_size_bytes, null_char_pos, i; + u32 txt_size_bytes, null_char_pos, i; + u32 *dbuf, dwords; enum dbg_status rc; char *text_buf; @@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (!qed_features_lookup[feature_idx].results_buf_size) return DBG_STATUS_OK; + dbuf = (u32 *)feature->dump_buf; + dwords = feature->dumped_dwords; + /* Obtain size of formatted output */ - rc = qed_features_lookup[feature_idx]. - results_buf_size(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, &text_size_bytes); + rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn, + dbuf, + dwords, + &txt_size_bytes); if (rc != DBG_STATUS_OK) return rc; - /* Make sure that the allocated size is a multiple of dword (4 bytes) */ - null_char_pos = text_size_bytes - 1; - text_size_bytes = (text_size_bytes + 3) & ~0x3; + /* Make sure that the allocated size is a multiple of dword + * (4 bytes). + */ + null_char_pos = txt_size_bytes - 1; + txt_size_bytes = (txt_size_bytes + 3) & ~0x3; - if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { + if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { DP_NOTICE(p_hwfn->cdev, "formatted size of feature was too small %d. Aborting\n", - text_size_bytes); + txt_size_bytes); return DBG_STATUS_INVALID_ARGS; } - /* Allocate temp text buf */ - text_buf = vzalloc(text_size_bytes); - if (!text_buf) + /* allocate temp text buf */ + text_buf = vzalloc(txt_size_bytes); + if (!text_buf) { + DP_NOTICE(p_hwfn->cdev, + "failed to allocate text buffer. Aborting\n"); return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } /* Decode feature opcodes to string on temp buf */ - rc = qed_features_lookup[feature_idx]. - print_results(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, text_buf); + rc = qed_features_lookup[feature_idx].print_results(p_hwfn, + dbuf, + dwords, + text_buf); if (rc != DBG_STATUS_OK) { vfree(text_buf); return rc; @@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, * The bytes that were added as a result of the dword alignment are also * padded with '\n' characters. 
*/ - for (i = null_char_pos; i < text_size_bytes; i++) + for (i = null_char_pos; i < txt_size_bytes; i++) text_buf[i] = '\n'; /* Dump printable feature to log */ if (p_hwfn->cdev->print_dbg_data) - qed_dbg_print_feature(text_buf, text_size_bytes); + qed_dbg_print_feature(text_buf, txt_size_bytes); - /* Just return the original binary buffer if requested */ + /* Dump binary data as is to the output file */ if (p_hwfn->cdev->dbg_bin_dump) { vfree(text_buf); - return DBG_STATUS_OK; + return rc; } - /* Free the old dump_buf and point the dump_buf to the newly allocagted + /* Free the old dump_buf and point the dump_buf to the newly allocated * and formatted text buffer. */ vfree(feature->dump_buf); feature->dump_buf = text_buf; - feature->buf_size = text_size_bytes; - feature->dumped_dwords = text_size_bytes / 4; + feature->buf_size = txt_size_bytes; + feature->dumped_dwords = txt_size_bytes / 4; + return rc; } @@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 buf_size_dwords; + u32 buf_size_dwords, *dbuf, *dwords; enum dbg_status rc; DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", @@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, if (!feature->dump_buf) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; - rc = qed_features_lookup[feature_idx]. - perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf, - feature->buf_size / sizeof(u32), - &feature->dumped_dwords); + dbuf = (u32 *)feature->dump_buf; + dwords = &feature->dumped_dwords; + rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, + dbuf, + feature->buf_size / + sizeof(u32), + dwords); /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. - * In this case the buffer holds valid binary data, but we wont able + * In this case the buffer holds valid binary data, but we won't able * to parse it (since parsing relies on data in NVRAM which is only * accessible when MFW is responsive). skip the formatting but return * success so that binary data is provided. 
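The formatted-text handling above rounds the text size up to a whole number of dwords and overwrites the tail, starting at the original NUL position, with '\n' so the buffer stays printable after the resize. A small worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char text[16] = "hello\n";                    /* formatted output + NUL  */
            uint32_t size = (uint32_t)strlen(text) + 1;   /* 7 bytes including NUL   */
            uint32_t null_pos = size - 1;                 /* where the NUL sits      */

            size = (size + 3) & ~0x3u;                    /* round up: 7 -> 8 bytes  */

            for (uint32_t i = null_pos; i < size; i++)    /* pad NUL and filler      */
                    text[i] = '\n';

            printf("padded size = %u bytes (%u dwords)\n", size, size / 4);
            return 0;
    }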
@@ -7777,7 +8252,8 @@ enum debug_print_features { static u32 qed_calc_regdump_header(struct qed_dev *cdev, enum debug_print_features feature, - int engine, u32 feature_size, u8 omit_engine) + int engine, u32 feature_size, + u8 omit_engine, u8 dbg_bin_dump) { u32 res = 0; @@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); - SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - int grc_params[MAX_DBG_GRC_PARAMS], i; + int grc_params[MAX_DBG_GRC_PARAMS], rc, i; u32 offset = 0, feature_size; - int rc; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) grc_params[i] = dev_data->grc.param_val[i]; @@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!QED_IS_CMT(cdev)) omit_engine = 1; + cdev->dbg_bin_dump = 1; mutex_lock(&qed_dbg_lock); - cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, REG_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, REG_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc); @@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IGU_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_igu_fifo failed. 
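qed_calc_regdump_header packs the feature id, size, engine, omit-engine and the new binary-dump flag into one header dword with SET_FIELD. The sketch below shows the same packing style with invented field positions (they do not match the real REGDUMP_HEADER_* layout) and a simplified OR-in variant of SET_FIELD.

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE_MASK     0xffffffu   /* bits  0..23: feature size (dwords) */
    #define HDR_SIZE_SHIFT    0
    #define HDR_FEATURE_MASK  0x3fu       /* bits 24..29: feature id            */
    #define HDR_FEATURE_SHIFT 24
    #define HDR_ENGINE_MASK   0x1u        /* bit  30: engine                    */
    #define HDR_ENGINE_SHIFT  30
    #define HDR_BINARY_MASK   0x1u        /* bit  31: binary (1) vs text (0)    */
    #define HDR_BINARY_SHIFT  31

    #define SET_FIELD(val, f, x) \
            ((val) |= (((uint32_t)(x) & f##_MASK) << f##_SHIFT))

    int main(void)
    {
            uint32_t hdr = 0;

            SET_FIELD(hdr, HDR_SIZE,    0x1234);
            SET_FIELD(hdr, HDR_FEATURE, 3);
            SET_FIELD(hdr, HDR_ENGINE,  1);
            SET_FIELD(hdr, HDR_BINARY,  1);

            printf("header = 0x%08x\n", hdr);   /* 0xc3001234 with these fields */
            return 0;
    }

The real macro derives the mask and shift from the field name in the same token-pasting way, which is why the header fields can be renamed (as with the E4_ prefix removals later in this patch) without touching the call sites' structure.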
rc = %d", rc); @@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE, + qed_calc_regdump_header(cdev, + PROTECTION_OVERRIDE, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, @@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, FW_ASSERTS, - cur_engine, feature_size, - omit_engine); + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", @@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) } feature_size = qed_dbg_ilt_size(cdev); - if (!cdev->disable_ilt_dump && - feature_size < ILT_DUMP_MAX_SIZE) { + if (!cdev->disable_ilt_dump && feature_size < + ILT_DUMP_MAX_SIZE) { rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { @@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) qed_calc_regdump_header(cdev, ILT_DUMP, cur_engine, feature_size, - omit_engine); - offset += feature_size + REGDUMP_HEADER_SIZE; + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", rc); } } - /* GRC dump - must be last because when mcp stuck it will + /* Grc dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) @@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, GRC_DUMP, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); @@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
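Each successfully collected feature is appended as a one-header-plus-payload blob, with the offset advanced by feature_size + REGDUMP_HEADER_SIZE. A reader can therefore hop from blob to blob by decoding the size field in each header, as in this sketch (header width and field layout are assumptions, reusing the invented layout from the packing sketch above):

    #include <stdint.h>
    #include <stdio.h>

    #define HDR_DWORDS 1    /* assumed one-dword header per feature blob */

    void walk_regdump(const uint32_t *buf, uint32_t total_dwords)
    {
            uint32_t off = 0;

            while (off + HDR_DWORDS <= total_dwords) {
                    uint32_t hdr = buf[off];
                    uint32_t feature_size = hdr & 0xffffffu;  /* size field */

                    printf("blob at %u: header 0x%08x, %u payload dwords\n",
                           off, hdr, feature_size);
                    off += HDR_DWORDS + feature_size;
            }
    }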
rc = %d\n", rc); } - /* Re-populate nvm attribute info */ - qed_mcp_nvm_info_free(p_hwfn); - qed_mcp_nvm_info_populate(p_hwfn); - /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", + rc); } - /* nvm default */ + /* nvm default */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_DEFAULT_CFG); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, DEFAULT_CFG, + cur_engine, feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", - rc); + QED_NVM_IMAGE_DEFAULT_CFG, + "QED_NVM_IMAGE_DEFAULT_CFG", rc); } /* nvm meta */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_NVM_META); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_META); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, NVM_META, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, NVM_META, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", + rc); } /* nvm mdump */ @@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, MDUMP, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, MDUMP, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, @@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } - cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); + cdev->dbg_bin_dump = 0; return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; u8 cur_engine, org_engine; cdev->disable_ilt_dump = false; @@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) "calculating idle_chk and grcdump register length for current engine\n"); qed_set_debug_engine(cdev, cur_engine); regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + - 
REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + - qed_dbg_protection_override_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); - + REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + + qed_dbg_protection_override_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); if (ilt_len < ILT_DUMP_MAX_SIZE) { total_ilt_len += ilt_len; @@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) qed_set_debug_engine(cdev, org_engine); /* Engine common */ - regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev); qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; @@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) int qed_dbg_feature(struct qed_dev *cdev, void *buffer, enum qed_dbg_features feature, u32 *num_dumped_bytes) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; - struct qed_dbg_feature *qed_feature = - &cdev->dbg_features[feature]; + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; enum dbg_status dbg_rc; struct qed_ptt *p_ptt; int rc = 0; @@ -8119,9 +8616,8 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer, int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 buf_size_dwords; enum dbg_status rc; @@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) return qed_feature->buf_size; } +int qed_dbg_phy_size(struct qed_dev *cdev) +{ + /* return max size of phy info and + * phy mac_stat multiplied by the number of ports + */ + return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev)); +} + u8 qed_get_debug_engine(struct qed_dev *cdev) { return cdev->engine_for_debug; @@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev) const u8 *dbg_values = NULL; int i; + /* Sync ver with debugbus qed code */ + qed_dbg_set_app_ver(TOOLS_VERSION); + /* Debug values are after init values. * The offset is the first dword of the file. */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index e71af82d320002..b0d4b937cf4aad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
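qed_dbg_phy_size reserves one maximum-sized block for the PHY info plus one per port for the mac_stat output. A trivial worked example, with the port count chosen arbitrarily for illustration:

    #include <stdio.h>

    #define MAX_PHY_RESULT_BUFFER 9000

    int main(void)
    {
            int num_ports = 4;                                   /* example device   */
            int size = MAX_PHY_RESULT_BUFFER * (1 + num_ports);  /* info + per-port  */

            printf("reserve %d bytes for the phy feature\n", size);  /* 45000 */
            return 0;
    }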
*/ -#ifndef _QED_DEBUGFS_H -#define _QED_DEBUGFS_H +#ifndef _QED_DEBUG_H +#define _QED_DEBUG_H enum qed_dbg_features { DBG_FEATURE_GRC, @@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev); int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_mcp_trace_size(struct qed_dev *cdev); +int qed_dbg_phy_size(struct qed_dev *cdev); int qed_dbg_all_data(struct qed_dev *cdev, void *buffer); int qed_dbg_all_data_size(struct qed_dev *cdev); u8 qed_get_debug_engine(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0410c3604abdbc..cc4ec2bb36db01 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -25,6 +25,7 @@ #include "qed_dev_api.h" #include "qed_fcoe.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn, } int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]) + u8 ppfid, const u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); @@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev) qed_rdma_info_free(p_hwfn); } + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), - RESC_NUM(p_hwfn, QED_VPORT)); + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) @@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) bool four_port; /* pq and vport bases for this PF */ - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ qm_info->vport_rl_en = true; @@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) */ /* flags for pq init */ -#define PQ_INIT_SHARE_VPORT (1 << 0) -#define PQ_INIT_PF_RL (1 << 1) -#define PQ_INIT_VF_RL (1 << 2) +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 @@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_no_mem; } - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); if (rc) goto alloc_err; @@ -2375,6 +2377,49 @@ int qed_resc_alloc(struct qed_dev *cdev) return rc; } +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == 
ERR_SCOPE_FUNC && + le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + void qed_resc_setup(struct qed_dev *cdev) { int i; @@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev) qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); if (is_vf) id += 0x10; @@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, cache_line_size); } - if (L1_CACHE_BYTES > wr_mbs) + if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); @@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct qed_qm_common_rt_init_params params; + struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + qed_init_cau_rt_data(cdev); /* Program GTT windows */ @@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qm_info->pf_wfq_en = true; } - memset(¶ms, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.pf_rl_en = qm_info->pf_rl_en; - params.pf_wfq_en = qm_info->pf_wfq_en; - params.global_rl_en = qm_info->vport_rl_en; - params.vport_wfq_en = qm_info->vport_wfq_en; - params.port_params = qm_info->qm_port_params; + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; - qed_qm_common_rt_init(p_hwfn, ¶ms); + qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); @@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) - return rc; + goto out; 
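qed_common_eqe_event is a single per-protocol callback that fans completion-queue events out by opcode, forwarding VF channel/FLR events to the SR-IOV code and the new FW_ERROR opcode to qed_fw_err_handler. The sketch below shows the dispatch shape only; the opcodes and handlers are placeholders, not the firmware interface.

    #include <stdint.h>
    #include <stdio.h>

    enum common_event { EVT_VF_PF_CHANNEL, EVT_VF_FLR, EVT_FW_ERROR };

    static int handle_vf_event(uint16_t echo)
    {
            printf("vf event, echo %u\n", echo);
            return 0;
    }

    static int handle_fw_error(uint16_t echo)
    {
            printf("fw error, echo %u\n", echo);
            return 0;
    }

    int common_eqe_event(uint8_t opcode, uint16_t echo)
    {
            switch (opcode) {
            case EVT_VF_PF_CHANNEL:
            case EVT_VF_FLR:
                    return handle_vf_event(echo);
            case EVT_FW_ERROR:
                    return handle_fw_error(echo);
            default:
                    printf("unknown eqe event 0x%02x, echo 0x%x\n", opcode, echo);
                    return -1;
            }
    }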
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); @@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); +out: + kfree(params); + return rc; } @@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } - p_hwfn->wid_count = (u16) n_cpus; + p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", @@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, - PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) return qed_hsi_def_val[type][chip_id]; } + static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: @@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * resources allocation queries should be atomic. Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not - * supported - skip the the max values setting, release the lock if + * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. 
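The qed_hw_init_common change above moves the large QM parameter struct off the stack and releases it through a single exit path. A user-space sketch of that pattern, with an invented struct and calloc standing in for kzalloc:

    #include <stdlib.h>

    struct common_init_params {
            unsigned int max_ports_per_engine;
            unsigned int max_phys_tcs_per_port;
            unsigned char port_table[4096];   /* what makes stack allocation unattractive */
    };

    int hw_init_common(unsigned int ports, unsigned int tcs)
    {
            struct common_init_params *params;
            int rc = 0;

            params = calloc(1, sizeof(*params));   /* user-space stand-in for kzalloc() */
            if (!params)
                    return -1;                     /* would be -ENOMEM in the kernel */

            params->max_ports_per_engine = ports;
            params->max_phys_tcs_per_port = tcs;
            if (!params->max_ports_per_engine)
                    rc = -1;                       /* example error taken to the exit path */

            free(params);                          /* single release point, like 'out:' above */
            return rc;
    }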
*/ @@ -3934,7 +3992,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } else if (rc == -EINVAL) { DP_INFO(p_hwfn, "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); - } else if (!rc && !resc_lock_params.b_granted) { + } else if (!resc_lock_params.b_granted) { DP_NOTICE(p_hwfn, "Failed to acquire the resource lock for the resource allocation commands\n"); return -EBUSY; @@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); @@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_XSDM_RAM + - XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d3c1f3879be870..f8682356d0cf4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -15,44 +15,52 @@ #include "qed_int.h" /** - * @brief qed_init_dp - initialize the debug level + * qed_init_dp(): Initialize the debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. */ void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level); /** - * @brief qed_init_struct - initialize the device structure to - * its defaults + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_struct(struct qed_dev *cdev); /** - * @brief qed_resc_free - + * qed_resc_free: Free device resources. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_resc_free(struct qed_dev *cdev); /** - * @brief qed_resc_alloc - + * qed_resc_alloc(): Alloc device resources. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_resc_alloc(struct qed_dev *cdev); /** - * @brief qed_resc_setup - + * qed_resc_setup(): Setup device resources. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_resc_setup(struct qed_dev *cdev); @@ -105,94 +113,96 @@ struct qed_hw_init_params { }; /** - * @brief qed_hw_init - + * qed_hw_init(): Init Qed hardware. * - * @param cdev - * @param p_params + * @cdev: Qed dev pointer. + * @p_params: Pointers to params. * - * @return int + * Return: Int. */ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** - * @brief qed_hw_timers_stop_all - stop the timers HW block + * qed_hw_timers_stop_all(): Stop the timers HW block. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return void + * Return: void. 
*/ void qed_hw_timers_stop_all(struct qed_dev *cdev); /** - * @brief qed_hw_stop - + * qed_hw_stop(): Stop Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_hw_stop(struct qed_dev *cdev); /** - * @brief qed_hw_stop_fastpath -should be called incase - * slowpath is still required for the device, - * but fastpath is not. + * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_hw_stop_fastpath(struct qed_dev *cdev); /** - * @brief qed_hw_start_fastpath -restart fastpath traffic, - * only if hw_stop_fastpath was called + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); - /** - * @brief qed_hw_prepare - + * qed_hw_prepare(): Prepare Qed hardware. * - * @param cdev - * @param personality - personality to initialize + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. * - * @return int + * Return: Int. */ int qed_hw_prepare(struct qed_dev *cdev, int personality); /** - * @brief qed_hw_remove - + * qed_hw_remove(): Remove Qed hardware. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_hw_remove(struct qed_dev *cdev); /** - * @brief qed_ptt_acquire - Allocate a PTT window + * qed_ptt_acquire(): Allocate a PTT window. * - * Should be called at the entry point to the driver (at the beginning of an - * exported function) + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: struct qed_ptt. * - * @return struct qed_ptt + * Should be called at the entry point to the driver (at the beginning of an + * exported function). */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_release - Release PTT Window + * qed_ptt_release(): Release PTT Window. * - * Should be called at the end of a flow - at the end of the function that - * acquired the PTT. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * + * Return: Void. * - * @param p_hwfn - * @param p_ptt + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -205,15 +215,17 @@ enum qed_dmae_address_type_t { }; /** - * @brief qed_dmae_host2grc - copy data from source addr to - * dmae registers using the given ptt + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param grc_addr (dmae_data_offset) - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, @@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_grc2host - Read data from dmae data offset - * to source address using the given ptt + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). 
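The comment rewrites in this header convert the old @brief/@param style into kernel-doc. As a minimal illustration of the target format (the function, parameters and types below are invented, only the comment layout matters):

    /**
     * example_fn(): One-line summary of what the function does.
     *
     * @dev: Device pointer.
     * @len: Length in bytes.
     *
     * Return: 0 on success, negative error code otherwise.
     */
    int example_fn(void *dev, unsigned int len);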
+ * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_ptt - * @param grc_addr (dmae_data_offset) - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_host2host - copy data from to source address - * to a destination adress (for SRIOV) using the given ptt + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. */ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** - * @@brief qed_fw_l2_queue - Get absolute L2 queue ID + * qed_fw_l2_queue(): Get absolute L2 queue ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id); /** - * @@brief qed_fw_vport - Get absolute vport ID + * qed_fw_vport(): Get absolute vport ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @@brief qed_fw_rss_eng - Get absolute RSS engine ID + * qed_fw_rss_eng(): Get absolute RSS engine ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter - * banks that are allocated to the PF. + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return u8 - Number of LLH filter banks + * Return: u8 Number of LLH filter banks. */ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); @@ -314,45 +331,50 @@ enum qed_eng { }; /** - * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given - * LLH filter bank. + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param eng + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. * - * @return int + * Return: Int. 
*/ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng); /** - * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. * - * @param cdev - * @param eng + * @cdev: Qed dev pointer. + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); /** - * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter - * bank. + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param mac_addr - MAC to add + * Return: Int. */ int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]); + u8 ppfid, const u8 mac_addr[ETH_ALEN]); /** - * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given - * filter bank. + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove * - * @param p_ptt - * @param p_filter - MAC to remove + * Return: Void. */ void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); @@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t { }; /** - * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the - * given filter bank. + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add - * @param type - type of filters and comparing + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. + * + * Return: Int. */ int qed_llh_add_protocol_filter(struct qed_dev *cdev, @@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from - * the given filter bank. + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. */ void qed_llh_remove_protocol_filter(struct qed_dev *cdev, @@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * *@brief Cleanup of previous driver remains prior to load + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. 
* - * @param p_hwfn - * @param p_ptt - * @param id - For PF, engine-relative. For VF, PF-relative. - * @param is_vf - true iff cleanup is made for a VF. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. * - * @return int + * Return: Int. */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. * - * @param p_hwfn - * @param p_coal - store coalesce value read from the hardware. - * @param p_handle + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. * - * @return int + * Return: Int. **/ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and * Tx queue. The fact that we can configure coalescing to up to 511, but on * varying accuracy [the bigger the value the less accurate] up to a mistake * of 3usec for the highest values. @@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. * - * @param rx_coal - Rx Coalesce value in micro seconds. - * @param tx_coal - TX Coalesce value in micro seconds. - * @param p_handle - * - * @return int + * Return: Int. **/ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** - * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. * - * @param p_hwfn - * @param p_ptt - * @param b_enable - true/false + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. * - * @return int + * Return: Int. */ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_width - doorbell is 32b pr 64b - * @param db_space - doorbell recovery addresses are user or kernel space + * Return: Int. */ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, @@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev, enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * qed_db_recovery_del() - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. 
+ * @db_data: address where db_data is stored. Serves as key for the * entry to delete. + * + * Return: Int. */ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); - const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index 78070682f2dff3..6bb4e165b59206 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl); - if (rc) - goto err_free; - rc = devlink_params_register(dl, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); if (rc) @@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) QED_DEVLINK_PARAM_ID_IWARP_CMT, value); - devlink_params_publish(dl); cdev->iwarp_cmt = false; qed_fw_reporters_create(dl); - + devlink_register(dl); return dl; err_unregister: - devlink_unregister(dl); - -err_free: devlink_free(dl); return ERR_PTR(rc); @@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink) if (!devlink) return; + devlink_unregister(devlink); qed_fw_reporters_destroy(devlink); devlink_params_unregister(devlink, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); - devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index b768f0698170e4..3764190b948eb9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -30,6 +30,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; - struct e4_fcoe_conn_context *p_cxt = NULL; + struct fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; @@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, memset(p_cxt, 0, sizeof(*p_cxt)); SET_FIELD(p_cxt->tstorm_ag_context.flags3, - E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; @@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void 
qed_fcoe_setup(struct qed_hwfn *p_hwfn) { - struct e4_fcoe_task_context *p_task_ctx = NULL; + struct fcoe_task_context *p_task_ctx = NULL; u32 i, lc; int rc; @@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) if (rc) continue; - memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); lc = 0; SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); @@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, - E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index fb1baa2da2d0df..f2cedbd9489c0d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. */ #ifndef _QED_HSI_H @@ -38,7 +38,7 @@ enum common_event_opcode { COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_VF_FLR, COMMON_EVENT_PF_UPDATE, - COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_FW_ERROR, COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE @@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode { MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* LL2 SP error code */ +enum core_ll2_error_code { + LL2_OK = 0, + LL2_ERROR, + MAX_CORE_LL2_ERROR_CODE +}; + /* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; @@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +struct core_ll2_rx_per_queue_stat { + struct core_ll2_tstorm_per_queue_stat tstorm_stat; + struct core_ll2_ustorm_per_queue_stat ustorm_stat; +}; + +struct core_ll2_tx_per_queue_stat { + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + /* Structure for doorbell data, in PWM mode, for RX producers update. */ struct core_pwm_prod_update_data { __le16 icid; /* internal CID */ @@ -135,6 +151,15 @@ struct core_pwm_prod_update_data { struct core_ll2_rx_prod prod; /* Producers */ }; +/* Ramrod data for rx/tx queue statistics query ramrod */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat; + u8 tx_stat; + __le16 reserved[3]; + struct regpair rx_stat_addr; + struct regpair tx_stat_addr; +}; + /* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, @@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe { __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; struct parsing_err_flags err_flags; - __le16 reserved0; + u8 packet_source; + u8 reserved0; __le32 reserved1[3]; }; @@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe { __le16 qp_id; __le32 src_qp; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved; + u8 packet_source; + u8 reserved[3]; }; /* Core RX CQE for Light L2 */ @@ -245,6 +272,15 @@ union core_rx_cqe_union { struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* RX packet source. 
*/ +enum core_rx_pkt_source { + CORE_RX_PKT_SOURCE_NETWORK = 0, + CORE_RX_PKT_SOURCE_LB, + CORE_RX_PKT_SOURCE_TX, + CORE_RX_PKT_SOURCE_LL2_TX, + MAX_CORE_RX_PKT_SOURCE +}; + /* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; @@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data { u8 update_qm_pq_id_flg; u8 reserved0; __le16 qm_pq_id; - __le32 reserved1; + __le32 reserved1[1]; }; /* Enum flag for what type of dcb data to update */ @@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; + struct regpair spq_base_addr; + __le32 reserved0[2]; __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; + __le16 reserved1[111]; }; -struct e4_xstorm_core_conn_ag_ctx { +struct xstorm_core_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define 
XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 consolid_prod; @@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 word15; }; -struct e4_tstorm_core_conn_ag_ctx { +struct tstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define 
E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define 
TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define 
TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_core_conn_ag_ctx { +struct ustorm_core_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define 
USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx { }; /* core connection context */ -struct e4_core_conn_context { +struct core_conn_context { struct ystorm_core_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_core_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; - struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context; - struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; @@ -930,12 +964,12 @@ struct eth_rx_rate_limit { /* Update RSS indirection table entry command */ struct eth_tstorm_rss_update_data { - u8 valid; u8 vport_id; u8 ind_table_index; - u8 reserved; __le16 ind_table_value; __le16 reserved1; + u8 reserved; + u8 valid; }; struct eth_ustorm_per_pf_stat { @@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; -/* Event Ring malicious VF data */ -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - /* Event Ring initial cleanup data */ struct initial_cleanup_eqe_data { u8 vf_id; u8 reserved[7]; }; +/* FW error data */ +struct fw_err_data { + u8 recovery_scope; + u8 err_id; + __le16 entity_id; + u8 reserved[4]; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; @@ -987,8 +1022,8 @@ union event_ring_data { struct iscsi_eqe_data iscsi_info; 
struct iscsi_connect_done_results iscsi_conn_done_info; union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; + struct fw_err_data err_data; }; /* Event Ring Entry */ @@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +/* Integration Phase */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3, + INTEG_PHASE_BB_B0_NO_MCP = 10, + INTEG_PHASE_BB_B0_WITH_MCP = 11, + MAX_INTEG_PHASE +}; + +/* Ports mode */ enum iwarp_ll2_tx_queues { IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, @@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues { MAX_IWARP_LL2_TX_QUEUES }; -/* Malicious VF error ID */ -enum malicious_vf_error_id { - MALICIOUS_VF_NO_ERROR, +/* Function error ID */ +enum func_err_id { + FUNC_NO_ERROR, VF_PF_CHANNEL_NOT_READY, VF_ZONE_MSG_NOT_VALID, VF_ZONE_FUNC_NOT_ENABLED, @@ -1087,13 +1131,33 @@ enum malicious_vf_error_id { CORE_PACKET_SIZE_TOO_LARGE, CORE_ILLEGAL_BD_FLAGS, CORE_GSI_PACKET_VIOLATION, - MAX_MALICIOUS_VF_ERROR_ID, + MAX_FUNC_ERR_ID +}; + +/* FW error handling mode */ +enum fw_err_mode { + FW_ERR_FATAL_ASSERT, + FW_ERR_DRV_REPORT, + MAX_FW_ERR_MODE +}; + +/* FW error recovery scope */ +enum fw_err_recovery_scope { + ERR_SCOPE_INVALID, + ERR_SCOPE_TX_Q, + ERR_SCOPE_RX_Q, + ERR_SCOPE_QP, + ERR_SCOPE_VPORT, + ERR_SCOPE_FUNC, + ERR_SCOPE_PORT, + ERR_SCOPE_ENGINE, + MAX_FW_ERR_RECOVERY_SCOPE }; /* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; - struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD]; }; /* Mstorm VF zone */ @@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config { /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; + struct regpair consolid_q_pbl_base_addr; struct pf_start_tunnel_config tunnel_config; __le16 event_ring_sb_id; u8 base_vf_id; @@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data { u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; struct outer_tag_config_struct outer_tag_config; + u8 pf_fp_err_mode; + u8 consolid_q_num_pages; + u8 reserved[6]; }; /* Data for port update ramrod */ @@ -1230,6 +1297,13 @@ enum ports_mode { MAX_PORTS_MODE }; +/* Protocol-common error code */ +enum protocol_common_error_code { + COMMON_ERR_CODE_OK = 0, + COMMON_ERR_CODE_ERROR, + MAX_PROTOCOL_COMMON_ERROR_CODE +}; + /* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ enum protocol_version_array_key { ETH_VER_KEY = 0, @@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; -struct e4_mstorm_core_conn_ag_ctx { +struct mstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define 
MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_ystorm_core_conn_ag_ctx { +struct ystorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define 
E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -1704,6 +1778,7 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; + /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map_e4 { +struct qm_rf_pq_map { __le32 reg; -#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF -#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F -#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23 -#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26 +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; /* Completion params for aggregated interrupt completion */ @@ -1831,769 +1906,6 @@ struct virt_mem_desc { u32 size; /* In bytes */ }; -/****************************************/ -/* Debug Tools HSI constants and macros */ -/****************************************/ - -enum block_id { - BLOCK_GRC, - BLOCK_MISCS, - BLOCK_MISC, - BLOCK_DBU, - BLOCK_PGLUE_B, - BLOCK_CNIG, - BLOCK_CPMU, - BLOCK_NCSI, - BLOCK_OPTE, - BLOCK_BMB, - BLOCK_PCIE, - BLOCK_MCP, - BLOCK_MCP2, - BLOCK_PSWHST, - BLOCK_PSWHST2, - BLOCK_PSWRD, - BLOCK_PSWRD2, - BLOCK_PSWWR, - BLOCK_PSWWR2, - BLOCK_PSWRQ, - BLOCK_PSWRQ2, - BLOCK_PGLCS, - BLOCK_DMAE, - BLOCK_PTU, - BLOCK_TCM, - BLOCK_MCM, - BLOCK_UCM, - BLOCK_XCM, - BLOCK_YCM, - BLOCK_PCM, - BLOCK_QM, - BLOCK_TM, - BLOCK_DORQ, - BLOCK_BRB, - BLOCK_SRC, - BLOCK_PRS, - BLOCK_TSDM, - BLOCK_MSDM, - BLOCK_USDM, - BLOCK_XSDM, - BLOCK_YSDM, - BLOCK_PSDM, - BLOCK_TSEM, - BLOCK_MSEM, - BLOCK_USEM, - 
BLOCK_XSEM, - BLOCK_YSEM, - BLOCK_PSEM, - BLOCK_RSS, - BLOCK_TMLD, - BLOCK_MULD, - BLOCK_YULD, - BLOCK_XYLD, - BLOCK_PRM, - BLOCK_PBF_PB1, - BLOCK_PBF_PB2, - BLOCK_RPB, - BLOCK_BTB, - BLOCK_PBF, - BLOCK_RDIF, - BLOCK_TDIF, - BLOCK_CDU, - BLOCK_CCFC, - BLOCK_TCFC, - BLOCK_IGU, - BLOCK_CAU, - BLOCK_UMAC, - BLOCK_XMAC, - BLOCK_MSTAT, - BLOCK_DBG, - BLOCK_NIG, - BLOCK_WOL, - BLOCK_BMBN, - BLOCK_IPC, - BLOCK_NWM, - BLOCK_NWS, - BLOCK_MS, - BLOCK_PHY_PCIE, - BLOCK_LED, - BLOCK_AVS_WRAP, - BLOCK_PXPREQBUS, - BLOCK_BAR0_MAP, - BLOCK_MCP_FIO, - BLOCK_LAST_INIT, - BLOCK_PRS_FC, - BLOCK_PBF_FC, - BLOCK_NIG_LB_FC, - BLOCK_NIG_LB_FC_PLLH, - BLOCK_NIG_TX_FC_PLLH, - BLOCK_NIG_TX_FC, - BLOCK_NIG_RX_FC_PLLH, - BLOCK_NIG_RX_FC, - MAX_BLOCK_ID -}; - -/* binary debug buffer types */ -enum bin_dbg_buffer_type { - BIN_BUF_DBG_MODE_TREE, - BIN_BUF_DBG_DUMP_REG, - BIN_BUF_DBG_DUMP_MEM, - BIN_BUF_DBG_IDLE_CHK_REGS, - BIN_BUF_DBG_IDLE_CHK_IMMS, - BIN_BUF_DBG_IDLE_CHK_RULES, - BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, - BIN_BUF_DBG_ATTN_BLOCKS, - BIN_BUF_DBG_ATTN_REGS, - BIN_BUF_DBG_ATTN_INDEXES, - BIN_BUF_DBG_ATTN_NAME_OFFSETS, - BIN_BUF_DBG_BLOCKS, - BIN_BUF_DBG_BLOCKS_CHIP_DATA, - BIN_BUF_DBG_BUS_LINES, - BIN_BUF_DBG_BLOCKS_USER_DATA, - BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, - BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, - BIN_BUF_DBG_RESET_REGS, - BIN_BUF_DBG_PARSING_STRINGS, - MAX_BIN_DBG_BUFFER_TYPE -}; - - -/* Attention bit mapping */ -struct dbg_attn_bit_mapping { - u16 data; -#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF -#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 -}; - -/* Attention block per-type data */ -struct dbg_attn_block_type_data { - u16 names_offset; - u16 reserved1; - u8 num_regs; - u8 reserved2; - u16 regs_offset; - -}; - -/* Block attentions */ -struct dbg_attn_block { - struct dbg_attn_block_type_data per_type_data[2]; -}; - -/* Attention register result */ -struct dbg_attn_reg_result { - u32 data; -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 - u16 block_attn_offset; - u16 reserved; - u32 sts_val; - u32 mask_val; -}; - -/* Attention block result */ -struct dbg_attn_block_result { - u8 block_id; - u8 data; -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 - u16 names_offset; - struct dbg_attn_reg_result reg_results[15]; -}; - -/* Mode header */ -struct dbg_mode_hdr { - u16 data; -#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 -#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 -#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF -#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 -}; - -/* Attention register */ -struct dbg_attn_reg { - struct dbg_mode_hdr mode; - u16 block_attn_offset; - u32 data; -#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 - u32 sts_clr_address; - u32 mask_address; -}; - -/* Attention types */ -enum dbg_attn_type { - ATTN_TYPE_INTERRUPT, - ATTN_TYPE_PARITY, - MAX_DBG_ATTN_TYPE -}; - -/* Block debug data */ -struct dbg_block { - u8 name[15]; - u8 associated_storm_letter; -}; - -/* Chip-specific block debug data */ -struct dbg_block_chip { - u8 flags; -#define 
DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 -#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 -#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 -#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 - u8 dbg_client_id; - u8 reset_reg_id; - u8 reset_reg_bit_offset; - struct dbg_mode_hdr dbg_bus_mode; - u16 reserved1; - u8 reserved2; - u8 num_of_dbg_bus_lines; - u16 dbg_bus_lines_offset; - u32 dbg_select_reg_addr; - u32 dbg_dword_enable_reg_addr; - u32 dbg_shift_reg_addr; - u32 dbg_force_valid_reg_addr; - u32 dbg_force_frame_reg_addr; -}; - -/* Chip-specific block user debug data */ -struct dbg_block_chip_user { - u8 num_of_dbg_bus_lines; - u8 has_latency_events; - u16 names_offset; -}; - -/* Block user debug data */ -struct dbg_block_user { - u8 name[16]; -}; - -/* Block Debug line data */ -struct dbg_bus_line { - u8 data; -#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF -#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 -#define DBG_BUS_LINE_IS_256B_MASK 0x1 -#define DBG_BUS_LINE_IS_256B_SHIFT 4 -#define DBG_BUS_LINE_RESERVED_MASK 0x7 -#define DBG_BUS_LINE_RESERVED_SHIFT 5 - u8 group_sizes; -}; - -/* Condition header for registers dump */ -struct dbg_dump_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u8 block_id; /* block ID */ - u8 data_size; /* size in dwords of the data following this header */ -}; - -/* Memory data for registers dump */ -struct dbg_dump_mem { - u32 dword0; -#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF -#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 -#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF -#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - u32 dword1; -#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF -#define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 -#define DBG_DUMP_MEM_RESERVED_MASK 0x7F -#define DBG_DUMP_MEM_RESERVED_SHIFT 25 -}; - -/* Register data for registers dump */ -struct dbg_dump_reg { - u32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF -#define DBG_DUMP_REG_LENGTH_SHIFT 24 -}; - -/* Split header for registers dump */ -struct dbg_dump_split_hdr { - u32 hdr; -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 -}; - -/* Condition header for idle check */ -struct dbg_idle_chk_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u16 data_size; /* size in dwords of the data following this header */ -}; - -/* Idle Check condition register */ -struct dbg_idle_chk_cond_reg { - u32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - u16 num_entries; - u8 entry_size; - u8 start_entry; -}; - -/* Idle Check info register */ -struct dbg_idle_chk_info_reg { - u32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 
0x7FFFFF -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - u16 size; /* register size in dwords */ - struct dbg_mode_hdr mode; /* Mode header */ -}; - -/* Idle Check register */ -union dbg_idle_chk_reg { - struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ - struct dbg_idle_chk_info_reg info_reg; /* info register */ -}; - -/* Idle Check result header */ -struct dbg_idle_chk_result_hdr { - u16 rule_id; /* Failing rule index */ - u16 mem_entry_id; /* Failing memory entry index */ - u8 num_dumped_cond_regs; /* number of dumped condition registers */ - u8 num_dumped_info_regs; /* number of dumped condition registers */ - u8 severity; /* from dbg_idle_chk_severity_types enum */ - u8 reserved; -}; - -/* Idle Check result register header */ -struct dbg_idle_chk_result_reg_hdr { - u8 data; -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 - u8 start_entry; /* index of the first checked entry */ - u16 size; /* register size in dwords */ -}; - -/* Idle Check rule */ -struct dbg_idle_chk_rule { - u16 rule_id; /* Idle Check rule ID */ - u8 severity; /* value from dbg_idle_chk_severity_types enum */ - u8 cond_id; /* Condition ID */ - u8 num_cond_regs; /* number of condition registers */ - u8 num_info_regs; /* number of info registers */ - u8 num_imms; /* number of immediates in the condition */ - u8 reserved1; - u16 reg_offset; /* offset of this rules registers in the idle check - * register array (in dbg_idle_chk_reg units). - */ - u16 imm_offset; /* offset of this rules immediate values in the - * immediate values array (in dwords). 
- */ -}; - -/* Idle Check rule parsing data */ -struct dbg_idle_chk_rule_parsing_data { - u32 data; -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 -}; - -/* Idle check severity types */ -enum dbg_idle_chk_severity_types { - /* idle check failure should cause an error */ - IDLE_CHK_SEVERITY_ERROR, - /* idle check failure should cause an error only if theres no traffic */ - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - /* idle check failure should cause a warning */ - IDLE_CHK_SEVERITY_WARNING, - MAX_DBG_IDLE_CHK_SEVERITY_TYPES -}; - -/* Reset register */ -struct dbg_reset_reg { - u32 data; -#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF -#define DBG_RESET_REG_ADDR_SHIFT 0 -#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 -#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 -#define DBG_RESET_REG_RESERVED_MASK 0x7F -#define DBG_RESET_REG_RESERVED_SHIFT 25 -}; - -/* Debug Bus block data */ -struct dbg_bus_block_data { - u8 enable_mask; - u8 right_shift; - u8 force_valid_mask; - u8 force_frame_mask; - u8 dword_mask; - u8 line_num; - u8 hw_id; - u8 flags; -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F -#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 -}; - -enum dbg_bus_clients { - DBG_BUS_CLIENT_RBCN, - DBG_BUS_CLIENT_RBCP, - DBG_BUS_CLIENT_RBCR, - DBG_BUS_CLIENT_RBCT, - DBG_BUS_CLIENT_RBCU, - DBG_BUS_CLIENT_RBCF, - DBG_BUS_CLIENT_RBCX, - DBG_BUS_CLIENT_RBCS, - DBG_BUS_CLIENT_RBCH, - DBG_BUS_CLIENT_RBCZ, - DBG_BUS_CLIENT_OTHER_ENGINE, - DBG_BUS_CLIENT_TIMESTAMP, - DBG_BUS_CLIENT_CPU, - DBG_BUS_CLIENT_RBCY, - DBG_BUS_CLIENT_RBCQ, - DBG_BUS_CLIENT_RBCM, - DBG_BUS_CLIENT_RBCB, - DBG_BUS_CLIENT_RBCW, - DBG_BUS_CLIENT_RBCV, - MAX_DBG_BUS_CLIENTS -}; - -/* Debug Bus constraint operation types */ -enum dbg_bus_constraint_ops { - DBG_BUS_CONSTRAINT_OP_EQ, - DBG_BUS_CONSTRAINT_OP_NE, - DBG_BUS_CONSTRAINT_OP_LT, - DBG_BUS_CONSTRAINT_OP_LTC, - DBG_BUS_CONSTRAINT_OP_LE, - DBG_BUS_CONSTRAINT_OP_LEC, - DBG_BUS_CONSTRAINT_OP_GT, - DBG_BUS_CONSTRAINT_OP_GTC, - DBG_BUS_CONSTRAINT_OP_GE, - DBG_BUS_CONSTRAINT_OP_GEC, - MAX_DBG_BUS_CONSTRAINT_OPS -}; - -/* Debug Bus trigger state data */ -struct dbg_bus_trigger_state_data { - u8 msg_len; - u8 constraint_dword_mask; - u8 storm_id; - u8 reserved; -}; - -/* Debug Bus memory address */ -struct dbg_bus_mem_addr { - u32 lo; - u32 hi; -}; - -/* Debug Bus PCI buffer data */ -struct dbg_bus_pci_buf_data { - struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ - struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ - u32 size; /* PCI buffer size in bytes */ -}; - -/* Debug Bus Storm EID range filter params */ -struct dbg_bus_storm_eid_range_params { - u8 min; /* Minimal event ID to filter on */ - u8 max; /* Maximal event ID to filter on */ -}; - -/* Debug Bus Storm EID mask filter params */ -struct dbg_bus_storm_eid_mask_params { - u8 val; /* Event ID value */ - u8 mask; /* Event ID mask. 1s in the mask = dont care bits. 
*/ -}; - -/* Debug Bus Storm EID filter params */ -union dbg_bus_storm_eid_params { - struct dbg_bus_storm_eid_range_params range; - struct dbg_bus_storm_eid_mask_params mask; -}; - -/* Debug Bus Storm data */ -struct dbg_bus_storm_data { - u8 enabled; - u8 mode; - u8 hw_id; - u8 eid_filter_en; - u8 eid_range_not_mask; - u8 cid_filter_en; - union dbg_bus_storm_eid_params eid_filter_params; - u32 cid; -}; - -/* Debug Bus data */ -struct dbg_bus_data { - u32 app_version; - u8 state; - u8 mode_256b_en; - u8 num_enabled_blocks; - u8 num_enabled_storms; - u8 target; - u8 one_shot_en; - u8 grc_input_en; - u8 timestamp_input_en; - u8 filter_en; - u8 adding_filter; - u8 filter_pre_trigger; - u8 filter_post_trigger; - u8 trigger_en; - u8 filter_constraint_dword_mask; - u8 next_trigger_state; - u8 next_constraint_id; - struct dbg_bus_trigger_state_data trigger_states[3]; - u8 filter_msg_len; - u8 rcv_from_other_engine; - u8 blocks_dword_mask; - u8 blocks_dword_overlap; - u32 hw_id_mask; - struct dbg_bus_pci_buf_data pci_buf; - struct dbg_bus_block_data blocks[132]; - struct dbg_bus_storm_data storms[6]; -}; - -/* Debug bus states */ -enum dbg_bus_states { - DBG_BUS_STATE_IDLE, - DBG_BUS_STATE_READY, - DBG_BUS_STATE_RECORDING, - DBG_BUS_STATE_STOPPED, - MAX_DBG_BUS_STATES -}; - -/* Debug Bus Storm modes */ -enum dbg_bus_storm_modes { - DBG_BUS_STORM_MODE_PRINTF, - DBG_BUS_STORM_MODE_PRAM_ADDR, - DBG_BUS_STORM_MODE_DRA_RW, - DBG_BUS_STORM_MODE_DRA_W, - DBG_BUS_STORM_MODE_LD_ST_ADDR, - DBG_BUS_STORM_MODE_DRA_FSM, - DBG_BUS_STORM_MODE_FAST_DBGMUX, - DBG_BUS_STORM_MODE_RH, - DBG_BUS_STORM_MODE_RH_WITH_STORE, - DBG_BUS_STORM_MODE_FOC, - DBG_BUS_STORM_MODE_EXT_STORE, - MAX_DBG_BUS_STORM_MODES -}; - -/* Debug bus target IDs */ -enum dbg_bus_targets { - DBG_BUS_TARGET_ID_INT_BUF, - DBG_BUS_TARGET_ID_NIG, - DBG_BUS_TARGET_ID_PCI, - MAX_DBG_BUS_TARGETS -}; - -/* GRC Dump data */ -struct dbg_grc_data { - u8 params_initialized; - u8 reserved1; - u16 reserved2; - u32 param_val[48]; -}; - -/* Debug GRC params */ -enum dbg_grc_params { - DBG_GRC_PARAM_DUMP_TSTORM, - DBG_GRC_PARAM_DUMP_MSTORM, - DBG_GRC_PARAM_DUMP_USTORM, - DBG_GRC_PARAM_DUMP_XSTORM, - DBG_GRC_PARAM_DUMP_YSTORM, - DBG_GRC_PARAM_DUMP_PSTORM, - DBG_GRC_PARAM_DUMP_REGS, - DBG_GRC_PARAM_DUMP_RAM, - DBG_GRC_PARAM_DUMP_PBUF, - DBG_GRC_PARAM_DUMP_IOR, - DBG_GRC_PARAM_DUMP_VFC, - DBG_GRC_PARAM_DUMP_CM_CTX, - DBG_GRC_PARAM_DUMP_PXP, - DBG_GRC_PARAM_DUMP_RSS, - DBG_GRC_PARAM_DUMP_CAU, - DBG_GRC_PARAM_DUMP_QM, - DBG_GRC_PARAM_DUMP_MCP, - DBG_GRC_PARAM_DUMP_DORQ, - DBG_GRC_PARAM_DUMP_CFC, - DBG_GRC_PARAM_DUMP_IGU, - DBG_GRC_PARAM_DUMP_BRB, - DBG_GRC_PARAM_DUMP_BTB, - DBG_GRC_PARAM_DUMP_BMB, - DBG_GRC_PARAM_RESERVD1, - DBG_GRC_PARAM_DUMP_MULD, - DBG_GRC_PARAM_DUMP_PRS, - DBG_GRC_PARAM_DUMP_DMAE, - DBG_GRC_PARAM_DUMP_TM, - DBG_GRC_PARAM_DUMP_SDM, - DBG_GRC_PARAM_DUMP_DIF, - DBG_GRC_PARAM_DUMP_STATIC, - DBG_GRC_PARAM_UNSTALL, - DBG_GRC_PARAM_RESERVED2, - DBG_GRC_PARAM_MCP_TRACE_META_SIZE, - DBG_GRC_PARAM_EXCLUDE_ALL, - DBG_GRC_PARAM_CRASH, - DBG_GRC_PARAM_PARITY_SAFE, - DBG_GRC_PARAM_DUMP_CM, - DBG_GRC_PARAM_DUMP_PHY, - DBG_GRC_PARAM_NO_MCP, - DBG_GRC_PARAM_NO_FW_VER, - DBG_GRC_PARAM_RESERVED3, - DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, - DBG_GRC_PARAM_DUMP_ILT_CDUC, - DBG_GRC_PARAM_DUMP_ILT_CDUT, - DBG_GRC_PARAM_DUMP_CAU_EXT, - MAX_DBG_GRC_PARAMS -}; - -/* Debug status codes */ -enum dbg_status { - DBG_STATUS_OK, - DBG_STATUS_APP_VERSION_NOT_SET, - DBG_STATUS_UNSUPPORTED_APP_VERSION, - DBG_STATUS_DBG_BLOCK_NOT_RESET, - DBG_STATUS_INVALID_ARGS, - 
DBG_STATUS_OUTPUT_ALREADY_SET, - DBG_STATUS_INVALID_PCI_BUF_SIZE, - DBG_STATUS_PCI_BUF_ALLOC_FAILED, - DBG_STATUS_PCI_BUF_NOT_ALLOCATED, - DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, - DBG_STATUS_NO_MATCHING_FRAMING_MODE, - DBG_STATUS_VFC_READ_ERROR, - DBG_STATUS_STORM_ALREADY_ENABLED, - DBG_STATUS_STORM_NOT_ENABLED, - DBG_STATUS_BLOCK_ALREADY_ENABLED, - DBG_STATUS_BLOCK_NOT_ENABLED, - DBG_STATUS_NO_INPUT_ENABLED, - DBG_STATUS_NO_FILTER_TRIGGER_256B, - DBG_STATUS_FILTER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_NOT_ENABLED, - DBG_STATUS_CANT_ADD_CONSTRAINT, - DBG_STATUS_TOO_MANY_TRIGGER_STATES, - DBG_STATUS_TOO_MANY_CONSTRAINTS, - DBG_STATUS_RECORDING_NOT_STARTED, - DBG_STATUS_DATA_DIDNT_TRIGGER, - DBG_STATUS_NO_DATA_RECORDED, - DBG_STATUS_DUMP_BUF_TOO_SMALL, - DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, - DBG_STATUS_UNKNOWN_CHIP, - DBG_STATUS_VIRT_MEM_ALLOC_FAILED, - DBG_STATUS_BLOCK_IN_RESET, - DBG_STATUS_INVALID_TRACE_SIGNATURE, - DBG_STATUS_INVALID_NVRAM_BUNDLE, - DBG_STATUS_NVRAM_GET_IMAGE_FAILED, - DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, - DBG_STATUS_NVRAM_READ_FAILED, - DBG_STATUS_IDLE_CHK_PARSE_FAILED, - DBG_STATUS_MCP_TRACE_BAD_DATA, - DBG_STATUS_MCP_TRACE_NO_META, - DBG_STATUS_MCP_COULD_NOT_HALT, - DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_RESERVED0, - DBG_STATUS_SEMI_FIFO_NOT_EMPTY, - DBG_STATUS_IGU_FIFO_BAD_DATA, - DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, - DBG_STATUS_FW_ASSERTS_PARSE_FAILED, - DBG_STATUS_REG_FIFO_BAD_DATA, - DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, - DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_RESERVED1, - DBG_STATUS_NON_MATCHING_LINES, - DBG_STATUS_INSUFFICIENT_HW_IDS, - DBG_STATUS_DBG_BUS_IN_USE, - DBG_STATUS_INVALID_STORM_DBG_MODE, - DBG_STATUS_OTHER_ENGINE_BB_ONLY, - DBG_STATUS_FILTER_SINGLE_HW_ID, - DBG_STATUS_TRIGGER_SINGLE_HW_ID, - DBG_STATUS_MISSING_TRIGGER_STATE_STORM, - MAX_DBG_STATUS -}; - -/* Debug Storms IDs */ -enum dbg_storms { - DBG_TSTORM_ID, - DBG_MSTORM_ID, - DBG_USTORM_ID, - DBG_XSTORM_ID, - DBG_YSTORM_ID, - DBG_PSTORM_ID, - MAX_DBG_STORMS -}; - -/* Idle Check data */ -struct idle_chk_data { - u32 buf_size; - u8 buf_size_set; - u8 reserved1; - u16 reserved2; -}; - -struct pretend_params { - u8 split_type; - u8 reserved; - u16 split_id; -}; - -/* Debug Tools data (per HW function) - */ -struct dbg_tools_data { - struct dbg_grc_data grc; - struct dbg_bus_data bus; - struct idle_chk_data idle_chk; - u8 mode_enable[40]; - u8 block_in_reset[132]; - u8 chip_id; - u8 hw_type; - u8 num_ports; - u8 num_pfs_per_port; - u8 num_vfs; - u8 initialized; - u8 use_dmae; - u8 reserved; - struct pretend_params pretend; - u32 num_regs_read; -}; - -/* ILT Clients */ -enum ilt_clients { - ILT_CLI_CDUC, - ILT_CLI_CDUT, - ILT_CLI_QM, - ILT_CLI_TM, - ILT_CLI_SRC, - ILT_CLI_TSDM, - ILT_CLI_RGFS, - ILT_CLI_TGFS, - MAX_ILT_CLIENTS -}; - /********************************/ /* HSI Init Functions constants */ /********************************/ @@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req { /* QM per global RL init parameters */ struct init_qm_global_rl_params { + u8 type; + u8 reserved0; + u16 reserved1; u32 rate_limit; }; @@ -2658,18 +1973,33 @@ struct init_qm_port_params { /* QM per-PQ init parameters */ struct init_qm_pq_params { - u8 vport_id; + u16 vport_id; + u16 rl_id; + u8 rl_valid; u8 tc_id; u8 wrr_group; - u8 rl_valid; - u16 rl_id; u8 port_id; - u8 reserved; +}; + +/* QM per RL init parameters */ +struct init_qm_rl_params { + u32 vport_rl; + u8 vport_rl_type; + u8 reserved[3]; +}; + +/* QM Rate Limiter types */ +enum init_qm_rl_type { + 
QM_RL_TYPE_NORMAL, + QM_RL_TYPE_QCN, + MAX_INIT_QM_RL_TYPE }; /* QM per-vport init parameters */ struct init_qm_vport_params { u16 wfq; + u16 reserved; + u16 tc_wfq[NUM_OF_TCS]; u16 first_tx_pq_id[NUM_OF_TCS]; }; @@ -2728,14 +2058,14 @@ struct fw_info_location { }; enum init_modes { - MODE_RESERVED, + MODE_BB_A0_DEPRECATED, MODE_BB, MODE_K2, MODE_ASIC, - MODE_RESERVED2, - MODE_RESERVED3, - MODE_RESERVED4, - MODE_RESERVED5, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, MODE_SF, MODE_MF_SD, MODE_MF_SI, @@ -2743,8 +2073,8 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_RESERVED6, - MODE_RESERVED7, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, MAX_INIT_MODES }; @@ -3009,1388 +2339,451 @@ struct iro { u16 size; }; -/***************************** Public Functions *******************************/ +/* Win 2 */ +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL + +/* Win 3 */ +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL + +/* Win 4 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL + +/* Win 5 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL + +/* Win 6 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL + +/* Win 7 */ +#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL + +/* Win 8 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL + +/* Win 9 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL + +/* Win 10 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL + +/* Win 11 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL + +/* Win 12 */ +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL + +/* Win 13 */ +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL + +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ + MAX_NUM_PORTS_BB + \ + (port) : (port) * \ + (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; /** - * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug - * arrays. + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. + * + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. + * Return: The required host memory size in 4KB units. + * + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. */ -enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); +u32 qed_qm_pf_mem_size(u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); + +struct qed_qm_common_rt_init_params { + u8 max_ports_per_engine; + u8 max_phys_tcs_per_port; + bool pf_rl_en; + bool pf_wfq_en; + bool global_rl_en; + bool vport_wfq_en; + struct init_qm_port_params *port_params; + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; +}; /** - * @brief qed_read_regs - Reads registers into a buffer (using GRC). + * qed_qm_common_rt_init(): Prepare QM runtime init values for the + * engine phase. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf - Destination buffer. - * @param addr - Source GRC address in dwords. - * @param len - Number of registers to read. + * @p_hwfn: HW device data. + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. 
*/ -void qed_read_regs(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); +int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params); + +struct qed_qm_pf_rt_init_params { + u8 port_id; + u8 pf_id; + u8 max_phys_tcs_per_port; + bool is_pf_loading; + u32 num_pf_cids; + u32 num_vf_cids; + u32 num_tids; + u16 start_pq; + u16 num_pf_pqs; + u16 num_vf_pqs; + u16 start_vport; + u16 num_vports; + u16 start_rl; + u16 num_rls; + u16 pf_wfq; + u32 pf_rl; + u32 link_speed; + struct init_qm_pq_params *pq_params; + struct init_qm_vport_params *vport_params; + struct init_qm_rl_params *rl_params; +}; /** - * @brief qed_read_fw_info - Reads FW info from the chip. + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. * - * The FW info contains FW-related information, such as the FW version, - * FW image (main/L2B/kuku), FW timestamp, etc. - * The FW info is read from the internal RAM of the first Storm that is not in - * reset. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @p_params: Parameters. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param fw_info - Out: a pointer to write the FW info into. - * - * @return true if the FW info was read successfully from one of the Storms, - * or false if all Storms are in reset. + * Return: 0 on success, -1 on error. */ -bool qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct fw_info *fw_info); +int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); + /** - * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. * - * @param p_hwfn - HW device data - * @param grc_param - GRC parameter - * @param val - Value to set. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @pf_id: PF ID + * @pf_wfq: WFQ weight. Must be non-zero. * - * @return error if one of the following holds: - * - the version wasn't set - * - grc_param is invalid - * - val is outside the allowed boundaries + * Return: 0 on success, -1 on error. */ -enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, - enum dbg_grc_params grc_param, u32 val); +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); /** - * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their - * default value. + * qed_init_pf_rl(): Initializes the rate limit of the specified PF + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units * - * @param p_hwfn - HW device data + * Return: 0 on success, -1 on error. */ -void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); +int qed_init_pf_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); + /** - * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for - * GRC Dump. + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. 
This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: 0 on success, -1 on error. */ -enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified + * VPORT and TC. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the collected GRC data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. + * (filled by qed_qm_pf_rt_init). + * @weight: VPORT+TC WFQ weight. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified dump buffer is too small - * Otherwise, returns ok. + * Return: 0 on success, -1 on error. */ -enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 weight); /** - * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size - * for idle check results. + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the idle check - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units + * @vport_rl_type: Vport RL type. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: 0 on success, -1 on error. */ -enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +int qed_init_global_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type); /** - * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results - * into the specified buffer. + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the idle check data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. 
+ * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. */ -enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs); /** - * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size - * for mcp trace results. + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. * - * @return error if one of the following holds: - * - the version wasn't set - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results - * into the specified buffer. + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the mcp trace data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * - the trace meta data cannot be read (from NVRAM or image file) - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool vxlan_enable); /** - * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size - * for grc trace fifo results. + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. 
*/ -enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable); /** - * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into - * the specified buffer. + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the reg fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size - * for the IGU fifo results. + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable); + +void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable); /** - * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into - * the specified buffer. + * qed_gft_disable(): Disable GFT. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the IGU fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** - * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required - * buffer size for protection override window results. + * qed_gft_config(): Enable and configure HW for GFT.
* - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for protection - * override data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Defines which packet fields must match. Use enum gft_profile_type. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status -qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type); + /** - * @brief qed_dbg_protection_override_dump - Reads protection override window - * entries and writes the results into the specified buffer. + * qed_enable_context_validation(): Enable and configure context + * validation. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the protection override data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + /** - * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer - * size for FW Asserts results. + * qed_calc_session_ctx_validation(): Calculate validation byte for + * session context. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid); + /** - * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results - * into the specified buffer. + * qed_calc_task_ctx_validation(): Calculate validation byte for task + * context. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the FW Asserts data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid.
* - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid); /** - * @brief qed_dbg_read_attn - Reads the attention registers of the specified - * block and type, and writes the results into the specified buffer. + * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param block - Block ID. - * @param attn_type - Attention type. - * @param clear_status - Indicates if the attention status should be cleared. - * @param results - OUT: Pointer to write the read results into + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum block_id block, - enum dbg_attn_type attn_type, - bool clear_status, - struct dbg_attn_block_result *results); +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); /** - * @brief qed_dbg_print_attn - Prints attention registers values in the - * specified results struct. + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. * - * @param p_hwfn - * @param results - Pointer to the attention read results + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Void. */ -enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - -/******************************* Data Types **********************************/ - -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_OFFSET 24 - - char *format_str; -}; - -/* MCP Trace Meta data structure */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; - bool is_allocated; -}; - -/* Debug Tools user data */ -struct dbg_tools_user_data { - struct mcp_trace_meta mcp_trace_meta; - const u32 *mcp_trace_user_meta_buf; -}; - -/******************************** Constants **********************************/ - -#define MAX_NAME_LEN 16 - -/***************************** Public Functions *******************************/ - -/** - * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with - * debug arrays. - * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays.
- */ -enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); - -/** - * @brief qed_dbg_alloc_user_data - Allocates user debug data. - * - * @param p_hwfn - HW device data - * @param user_data_ptr - OUT: a pointer to the allocated memory. - */ -enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, - void **user_data_ptr); - -/** - * @brief qed_dbg_get_status_str - Returns a string for the specified status. - * - * @param status - a debug status code. - * - * @return a string for the specified status - */ -const char *qed_dbg_get_status_str(enum dbg_status status); - -/** - * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size - * for idle check results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); -/** - * @brief qed_print_idle_chk_results - Prints idle check results - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the idle check results. - * @param num_errors - OUT: number of errors found in idle check. - * @param num_warnings - OUT: number of warnings found in idle check. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf, - u32 *num_errors, - u32 *num_warnings); - -/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. - * - * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to - * no NVRAM access). - * - * @param data - pointer to MCP Trace meta data - * @param size - size of MCP Trace meta data in dwords - */ -void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, - const u32 *meta_buf); - -/** - * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size - * for MCP Trace results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - MCP Trace dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_mcp_trace_results - Prints MCP Trace results - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and - * keeps the MCP trace meta data allocated, to support continuous MCP Trace - * parsing. 
After the continuous parsing ends, mcp_trace_free_meta_data should - * be called to free the meta data. - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - char *results_buf); - -/** - * @brief print_mcp_trace_line - Prints MCP Trace results for a single line - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_bytes - number of bytes that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, - u8 *dump_buf, - u32 num_dumped_bytes, - char *results_buf); - -/** - * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. - * Should be called after continuous MCP Trace parsing. - * - * @param p_hwfn - HW device data - */ -void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); - -/** - * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size - * for reg_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_reg_fifo_results - Prints reg fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size - * for igu_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_igu_fifo_results - Prints IGU fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the IGU fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_protection_override_results_buf_size - Returns the required - * buffer size for protection override results (in bytes). 
- * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_protection_override_results - Prints protection override - * results. - * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size - * for FW Asserts results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_fw_asserts_results - Prints FW Asserts results - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the FW Asserts results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_dbg_parse_attn - Parses and prints attention registers values in - * the specified results struct. - * - * @param p_hwfn - HW device data - * @param results - Pointer to the attention read results - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - -/* Win 2 */ -#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL - -/* Win 3 */ -#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL - -/* Win 4 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL - -/* Win 5 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL - -/* Win 6 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL - -/* Win 7 */ -#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL - -/* Win 8 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL - -/* Win 9 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL - -/* Win 10 */ -#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL - -/* Win 11 */ -#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL - -/* Win 12 */ -#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL - -/* Win 13 */ -#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL - -/** - * @brief qed_qm_pf_mem_size - prepare QM ILT sizes - * - * Returns the required host memory size in 4KB units. - * Must be called before all QM init HSI functions. 
- * - * @param num_pf_cids - number of connections used by this PF - * @param num_vf_cids - number of connections used by VFs of this PF - * @param num_tids - number of tasks used by this PF - * @param num_pf_pqs - number of PQs used by this PF - * @param num_vf_pqs - number of PQs used by VFs of this PF - * - * @return The required host memory size in 4KB units. - */ -u32 qed_qm_pf_mem_size(u32 num_pf_cids, - u32 num_vf_cids, - u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); - -struct qed_qm_common_rt_init_params { - u8 max_ports_per_engine; - u8 max_phys_tcs_per_port; - bool pf_rl_en; - bool pf_wfq_en; - bool global_rl_en; - bool vport_wfq_en; - struct init_qm_port_params *port_params; -}; - -int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, - struct qed_qm_common_rt_init_params *p_params); - -struct qed_qm_pf_rt_init_params { - u8 port_id; - u8 pf_id; - u8 max_phys_tcs_per_port; - bool is_pf_loading; - u32 num_pf_cids; - u32 num_vf_cids; - u32 num_tids; - u16 start_pq; - u16 num_pf_pqs; - u16 num_vf_pqs; - u16 start_vport; - u16 num_vports; - u16 pf_wfq; - u32 pf_rl; - struct init_qm_pq_params *pq_params; - struct init_qm_vport_params *vport_params; -}; - -int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params); - -/** - * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_wfq - WFQ weight. Must be non-zero. - * - * @return 0 on success, -1 on error. - */ -int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); - -/** - * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_rl - rate limit in Mb/sec units - * - * @return 0 on success, -1 on error. - */ -int qed_init_pf_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); - -/** - * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param first_tx_pq_id- An array containing the first Tx PQ ID associated - * with the VPORT for each TC. This array is filled by - * qed_qm_pf_rt_init - * @param vport_wfq - WFQ weight. Must be non-zero. - * - * @return 0 on success, -1 on error. - */ -int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); - -/** - * @brief qed_init_global_rl - Initializes the rate limit of the specified - * rate limiter - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param rl_id - RL ID - * @param rate_limit - rate limit in Mb/sec units - * - * @return 0 on success, -1 on error. - */ -int qed_init_global_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 rl_id, u32 rate_limit); - -/** - * @brief qed_send_qm_stop_cmd Sends a stop command to the QM - * - * @param p_hwfn - * @param p_ptt - * @param is_release_cmd - true for release, false for stop. - * @param is_tx_pq - true for Tx PQs, false for Other PQs. - * @param start_pq - first PQ ID to stop - * @param num_pqs - Number of PQs to stop, starting from start_pq. - * - * @return bool, true if successful, false if timeout occurred while waiting for - * QM command done. 
- */ -bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool is_release_cmd, - bool is_tx_pq, u16 start_pq, u16 num_pqs); - -/** - * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - vxlan destination udp port. - */ -void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 dest_port); - -/** - * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param vxlan_enable - vxlan enable flag. - */ -void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, bool vxlan_enable); - -/** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param eth_gre_enable - eth GRE enable enable flag. - * @param ip_gre_enable - IP GRE enable enable flag. - */ -void qed_set_gre_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool eth_gre_enable, bool ip_gre_enable); - -/** - * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - geneve destination udp port. - */ -void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 dest_port); - -/** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW - * - * @param p_ptt - ptt window used for writing the registers. - * @param eth_geneve_enable - eth GENEVE enable enable flag. - * @param ip_geneve_enable - IP GENEVE enable enable flag. - */ -void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool eth_geneve_enable, bool ip_geneve_enable); - -void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, bool enable); - -/** - * @brief qed_gft_disable - Disable GFT - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to disable GFT. - */ -void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); - -/** - * @brief qed_gft_config - Enable and configure HW for GFT - * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to enable GFT. - * @param tcp - set profile tcp packets. - * @param udp - set profile udp packet. - * @param ipv4 - set profile ipv4 packet. - * @param ipv6 - set profile ipv6 packet. - * @param profile_type - define packet same fields. Use enum gft_profile_type. - */ -void qed_gft_config(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 pf_id, - bool tcp, - bool udp, - bool ipv4, bool ipv6, enum gft_profile_type profile_type); - -/** - * @brief qed_enable_context_validation - Enable and configure context - * validation. - * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - */ -void qed_enable_context_validation(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt); - -/** - * @brief qed_calc_session_ctx_validation - Calcualte validation byte for - * session context. - * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param cid - context cid. 
- */ -void qed_calc_session_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 cid); - -/** - * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task - * context. - * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param tid - context tid. - */ -void qed_calc_task_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 tid); - -/** - * @brief qed_memset_session_ctx - Memset session context to 0 while - * preserving validation bytes. - * - * @param p_hwfn - - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. - */ -void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); - -/** - * @brief qed_memset_task_ctx - Memset task context to 0 while preserving - * validation bytes. - * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. - */ -void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** - * @brief qed_set_rdma_error_level - Sets the RDMA assert level. - * If the severity of the error will be - * above the level, the FW will assert. - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers - * @param assert_level - An array of assert levels for each storm. + * qed_set_rdma_error_level(): Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. * + * Return: Void. */ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); /** - * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. * - * @param p_hwfn - HW device data - * @param fw_overlay_in_buf - the input FW overlay buffer. - * @param buf_size - the size of the input FW overlay buffer in bytes. - * must be aligned to dwords. - * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * must be aligned to dwords. * - * @return a pointer to the allocated overlays memory, + * Return: A pointer to the allocated overlays memory, * or NULL in case of failures. */ struct phys_mem_desc * qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, - const u32 * const fw_overlay_in_buf, + const u32 *const fw_overlay_in_buf, u32 buf_size_in_bytes); /** - * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. * - * @param p_hwfn - HW device data. - * @param p_ptt - ptt window used for writing the registers. - * @param fw_overlay_mem - the allocated FW overlay memory. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: the allocated FW overlay memory. + * + * Return: Void. */ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem); /** - * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. 
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory. + * + * @p_hwfn: HW device data. + * @fw_overlay_mem: The allocated FW overlay memory to free. * - * @param p_hwfn - HW device data. - * @param fw_overlay_mem - the allocated FW overlay memory to free. + * Return: Void. */ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem); + struct phys_mem_desc **fw_overlay_mem); -/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) - -/* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) \ - (IRO[1].base + ((port_id) * IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) - -/* Tstorm ll2 port statistics */ -#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ - (IRO[2].base + ((port_id) * IRO[2].m1)) -#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) - -/* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ - (IRO[3].base + ((vf_id) * IRO[3].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) - -/* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ - (IRO[4].base + ((pf_id) * IRO[4].m1)) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) - -/* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) \ - (IRO[5].base + ((pf_id) * IRO[5].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[5].size) - -/* Ustorm eth queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ - (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) - -/* Ustorm Common Queue ring consumer */ -#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ - (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) -#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) - -/* Xstorm common PQ info */ -#define XSTORM_PQ_INFO_OFFSET(pq_id) \ - (IRO[8].base + ((pq_id) * IRO[8].m1)) -#define XSTORM_PQ_INFO_SIZE (IRO[8].size) - -/* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) - -/* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) - -/* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) - -/* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) - -/* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) - -/* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size) - -/* Xstorm overlay buffer host address */ -#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base) -#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size) - -/* Ystorm overlay buffer host address */ -#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base) -#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size) - -/* Pstorm overlay buffer host address */ -#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base) -#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size) - -/* Tstorm overlay buffer host address */ -#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base) -#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size) - -/* Mstorm overlay buffer host address */ -#define MSTORM_OVERLAY_BUF_ADDR_OFFSET 
(IRO[19].base) -#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size) - -/* Ustorm overlay buffer host address */ -#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base) -#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size) - -/* Tstorm producers */ -#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size) - -/* Tstorm LightL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size) - -/* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size) - -/* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size) - -/* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size) - -/* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size) - -/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size - * mode - */ -#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2)) -#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size) - -/* Mstorm ETH PF queues producers */ -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ - (IRO[28].base + ((queue_id) * IRO[28].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size) - -/* Mstorm pf statistics */ -#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size) - -/* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[30].base + ((stat_counter_id) * IRO[30].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[30].size) - -/* Ustorm pf statistics */ -#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[31].base + ((pf_id) * IRO[31].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size) - -/* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[32].base + ((stat_counter_id) * IRO[32].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size) - -/* Pstorm pf statistics */ -#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size) - -/* Control frame's EthType configuration for TX control frame security */ -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ - (IRO[34].base + ((eth_type_id) * IRO[34].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size) - -/* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size) - -/* Tstorm Eth limit Rx rate */ -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[36].base + ((pf_id) * IRO[36].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size) - -/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. 
- * Use eth_tstorm_rss_update_data for update - */ -#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size) - -/* Xstorm queue zone */ -#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[38].base + ((queue_id) * IRO[38].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size) - -/* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[39].base + ((rss_id) * IRO[39].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size) - -/* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[40].base + ((rss_id) * IRO[40].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size) - -/* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size) - -/* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size) - -/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, - * BDqueue-id +#define PCICFG_OFFSET 0x2000 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 + +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs programmed + * for each PF. + * Since registers from 0x000-0x7ff are spilt across functions, each PF will + * have the same location for the same 4 bits */ -#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \ - ((bdq_id) * IRO[43].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size) - -/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ -#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \ - ((bdq_id) * IRO[44].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size) - -/* Tstorm iSCSI RX stats */ -#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[45].base + ((storage_func_id) * IRO[45].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size) - -/* Mstorm iSCSI RX stats */ -#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[46].base + ((storage_func_id) * IRO[46].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size) - -/* Ustorm iSCSI RX stats */ -#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[47].base + ((storage_func_id) * IRO[47].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size) - -/* Xstorm iSCSI TX stats */ -#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[48].base + ((storage_func_id) * IRO[48].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size) - -/* Ystorm iSCSI TX stats */ -#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[49].base + ((storage_func_id) * IRO[49].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size) - -/* Pstorm iSCSI TX stats */ -#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[50].base + ((storage_func_id) * IRO[50].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size) - -/* Tstorm FCoE RX stats */ -#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size) - -/* Pstorm FCoE TX stats */ -#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size) - -/* 
Pstorm RDMA queue statistics */ -#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size) - -/* Tstorm RDMA queue statistics */ -#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size) - -/* Xstorm error level for assert */ -#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[55].base + ((pf_id) * IRO[55].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size) - -/* Ystorm error level for assert */ -#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[56].base + ((pf_id) * IRO[56].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size) - -/* Pstorm error level for assert */ -#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[57].base + ((pf_id) * IRO[57].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size) - -/* Tstorm error level for assert */ -#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[58].base + ((pf_id) * IRO[58].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size) - -/* Mstorm error level for assert */ -#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[59].base + ((pf_id) * IRO[59].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size) - -/* Ustorm error level for assert */ -#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[60].base + ((pf_id) * IRO[60].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size) - -/* Xstorm iWARP rxmit stats */ -#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[61].base + ((pf_id) * IRO[61].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size) - -/* Tstorm RoCE Event Statistics */ -#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[62].base + ((roce_pf_id) * IRO[62].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size) - -/* DCQCN Received Statistics */ -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\ - (IRO[63].base + ((roce_pf_id) * IRO[63].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size) - -/* RoCE Error Statistics */ -#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[64].base + ((roce_pf_id) * IRO[64].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size) - -/* DCQCN Sent Statistics */ -#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[65].base + ((roce_pf_id) * IRO[65].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size) - -/* RoCE CQEs Statistics */ -#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[66].base + ((roce_pf_id) * IRO[66].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff /* Runtime array offsets */ #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 @@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, #define QM_REG_TXPQMAP_RT_SIZE 512 #define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32580 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_OFFSET 33092 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_OFFSET 33604 #define QM_REG_PTRTBLTX_RT_SIZE 1024 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 #define 
QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 - -#define RUNTIME_ARRAY_SIZE 34472 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 
34983 + +#define RUNTIME_ARRAY_SIZE 34984 /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx { __le32 reserved[60]; }; -struct e4_xstorm_eth_conn_ag_ctx { +struct xstorm_eth_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define 
XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx { __le32 reserved[8]; }; -struct e4_ystorm_eth_conn_ag_ctx { +struct ystorm_eth_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define 
YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 tx_q0_int_coallecing_timeset; u8 byte3; __le16 word0; @@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx { __le32 reg3; }; -struct e4_tstorm_eth_conn_ag_ctx { +struct tstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define 
TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define 
E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_eth_conn_ag_ctx { +struct ustorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx { }; /* eth connection context */ -struct e4_eth_conn_context { +struct eth_conn_context { struct tstorm_eth_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; }; @@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_ADD_UDP_FILTER, ETH_RAMROD_RX_DELETE_UDP_FILTER, ETH_RAMROD_RX_CREATE_GFT_ACTION, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, ETH_RAMROD_TX_QUEUE_UPDATE, ETH_RAMROD_RGFS_FILTER_ADD, ETH_RAMROD_RGFS_FILTER_DEL, @@ -5596,10 +3991,12 @@ struct eth_vport_rss_config { u8 update_rss_ind_table; u8 update_rss_capabilities; u8 tbl_size; - __le32 reserved2[2]; + u8 ind_table_mask_valid; + u8 reserved2[3]; __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS]; __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; - __le32 reserved3[2]; + __le32 reserved3; }; /* eth vport RSS mode */ @@ -5674,8 +4071,20 @@ enum gft_filter_update_action { MAX_GFT_FILTER_UPDATE_ACTION }; +/* Ramrod data for rx create gft action */ +struct rx_create_gft_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for rx create openflow action */ +struct rx_create_openflow_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + /* Ramrod data for rx add openflow filter */ -struct rx_add_openflow_filter_data { +struct rx_openflow_filter_ramrod_data { __le16 action_icid; u8 priority; u8 reserved0; @@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data { __le16 l4_src_port; }; -/* Ramrod data for rx create gft action */ -struct rx_create_gft_action_data { - u8 vport_id; - u8 reserved[7]; -}; - -/* Ramrod data for rx create openflow action */ -struct rx_create_openflow_action_data { - u8 vport_id; - u8 reserved[7]; -}; - /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -5768,7 +4165,7 @@ struct 
rx_queue_update_ramrod_data { }; /* Ramrod data for rx Add UDP Filter */ -struct rx_udp_filter_data { +struct rx_udp_filter_ramrod_data { __le16 action_icid; __le16 vlan_id; u8 ip_type; @@ -5784,7 +4181,7 @@ struct rx_udp_filter_data { /* Add or delete GFT filter - filter is packet header of type of packet wished * to pass certain FW flow. */ -struct rx_update_gft_filter_data { +struct rx_update_gft_filter_ramrod_data { struct regpair pkt_hdr_addr; __le16 pkt_hdr_length; __le16 action_icid; @@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data { u8 pxp_tph_valid_bd; u8 pxp_tph_valid_pkt; __le16 pxp_st_index; - __le16 comp_agg_size; + u8 comp_agg_size; + u8 reserved3; __le16 queue_zone_id; __le16 reserved2; __le16 pbl_size; @@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn { u8 ctl_frame_ethtype_check_en; u8 update_in_to_in_pri_map_mode; u8 in_to_in_pri_map[8]; - u8 reserved[6]; + u8 update_tx_dst_port_mode_flg; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; + u8 reserved[1]; }; struct vport_update_ramrod_mcast { @@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { +struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart { u8 reserved0; u8 state; u8 flags0; @@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { __le32 reg4; }; -struct e4_mstorm_eth_conn_ag_ctx { +struct mstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define 
MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_xstorm_eth_hw_conn_ag_ctx { +struct xstorm_eth_hw_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped { #define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 }; - /* Used in gft_profile_key: Indication for ip version */ enum gft_profile_ip_version { GFT_PROFILE_IPV4 = 0, @@ -6640,204 +5042,1041 @@ struct ystorm_rdma_task_st_ctx { struct regpair temp[4]; }; -struct e4_ystorm_rdma_task_ag_ctx { - u8 reserved; +struct ystorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 msem_ctx_upd_seq; + u8 flags0; +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 + u8 flags1; +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt_or_qp_id; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct mstorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define 
MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 + u8 flags1; +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt_or_qp_id; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +/* The roce task context of Mstorm */ +struct mstorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +/* The roce task context of Ustorm */ +struct ustorm_rdma_task_st_ctx { + struct regpair temp[6]; +}; + +struct ustorm_rdma_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 +#define 
USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 dif_rxmit_cons; + __le32 dif_rxmit_prod; + __le32 sge_index; + __le32 sq_cons; + u8 byte2; + u8 byte3; + __le16 dif_write_cons; + __le16 dif_write_prod; + __le16 word3; + __le32 dif_error_buffer_address_lo; + __le32 dif_error_buffer_address_hi; +}; + +/* RDMA task context */ +struct rdma_task_context { + struct ystorm_rdma_task_st_ctx ystorm_st_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct tdif_task_context tdif_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_st_ctx mstorm_st_context; + struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +#define TOE_MAX_RAMROD_PER_PF 8 +#define TOE_TX_PAGE_SIZE_BYTES 4096 +#define TOE_GRQ_PAGE_SIZE_BYTES 4096 +#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096 + +#define TOE_RX_MAX_RSS_CHAINS 64 +#define TOE_TX_MAX_TSS_CHAINS 64 +#define TOE_RSS_INDIRECTION_TABLE_SIZE 128 + +/* The toe storm context of Mstorm */ +struct mstorm_toe_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The toe storm context of Pstorm */ +struct pstorm_toe_conn_st_ctx { + __le32 reserved[36]; +}; + +/* The toe storm context of Ystorm */ +struct ystorm_toe_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The toe storm context of Xstorm */ +struct xstorm_toe_conn_st_ctx { + __le32 reserved[44]; +}; + +struct ystorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define 
YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7 + u8 completion_opcode; + u8 byte3; + __le16 word0; + __le32 rel_seq; + __le32 rel_seq_threshold; + __le16 app_prod; + __le16 app_cons; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct xstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2 +#define 
XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define 
XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 word2; + __le16 word3; + __le16 bd_prod; + __le16 word5; + __le16 word6; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 local_adv_wnd_seq; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + 
u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_toe_conn_ag_ctx { + u8 reserved0; u8 byte1; - __le16 msem_ctx_upd_seq; u8 flags0; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6 u8 flags1; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt_or_qp_id; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; +#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; }; -struct e4_mstorm_rdma_task_ag_ctx { +struct ustorm_toe_conn_ag_ctx { u8 reserved; u8 byte1; - __le16 icid; u8 flags0; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define 
USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt_or_qp_id; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; +#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define 
USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; }; -/* The roce task context of Mstorm */ -struct mstorm_rdma_task_st_ctx { - struct regpair temp[4]; +/* The toe storm context of Tstorm */ +struct tstorm_toe_conn_st_ctx { + __le32 reserved[16]; }; -/* The roce task context of Ustorm */ -struct ustorm_rdma_task_st_ctx { - struct regpair temp[6]; +/* The toe storm context of Ustorm */ +struct ustorm_toe_conn_st_ctx { + __le32 reserved[52]; }; -struct e4_ustorm_rdma_task_ag_ctx { - u8 reserved; - u8 state; +/* toe connection context */ +struct toe_conn_context { + struct ystorm_toe_conn_st_ctx ystorm_st_context; + struct pstorm_toe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_toe_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct ystorm_toe_conn_ag_ctx ystorm_ag_context; + struct xstorm_toe_conn_ag_ctx xstorm_ag_context; + struct tstorm_toe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_toe_conn_ag_ctx ustorm_ag_context; + struct tstorm_toe_conn_st_ctx tstorm_st_context; + struct mstorm_toe_conn_st_ctx mstorm_st_context; + struct ustorm_toe_conn_st_ctx ustorm_st_context; +}; + +/* toe init ramrod header */ +struct toe_init_ramrod_header { + u8 first_rss; + u8 num_rss; + u8 reserved[6]; +}; + +/* toe pf init parameters */ +struct toe_pf_init_params { + __le32 push_timeout; + __le16 grq_buffer_size; + __le16 grq_sb_id; + u8 grq_sb_index; + u8 max_seg_retransmit; + u8 doubt_reachability; + u8 ll2_rx_queue_id; + __le16 grq_fetch_threshold; + u8 reserved1[2]; + struct regpair grq_page_addr; +}; + +/* toe tss parameters */ +struct toe_tss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe rss parameters */ +struct toe_rss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe init ramrod data */ +struct toe_init_ramrod_data { + struct toe_init_ramrod_header hdr; + struct tcp_init_params tcp_params; + struct toe_pf_init_params pf_params; + struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS]; + struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS]; +}; + +/* toe offload parameters */ +struct toe_offload_params { + struct regpair tx_bd_page_addr; + struct regpair tx_app_page_addr; + __le32 more_to_send_seq; + __le16 rcv_indication_size; + u8 rss_tss_id; + u8 ignore_grq_push; + struct regpair rx_db_data_ptr; +}; + +/* TOE offload ramrod data - DMAed by firmware */ +struct toe_offload_ramrod_data { + struct tcp_offload_params tcp_ofld_params; + struct toe_offload_params toe_ofld_params; +}; + +/* TOE ramrod command IDs */ +enum toe_ramrod_cmd_id { + TOE_RAMROD_UNUSED, + TOE_RAMROD_FUNC_INIT, + TOE_RAMROD_INITATE_OFFLOAD, + TOE_RAMROD_FUNC_CLOSE, + TOE_RAMROD_SEARCHER_DELETE, + TOE_RAMROD_TERMINATE, + TOE_RAMROD_QUERY, + TOE_RAMROD_UPDATE, + TOE_RAMROD_EMPTY, + TOE_RAMROD_RESET_SEND, + 
TOE_RAMROD_INVALIDATE, + MAX_TOE_RAMROD_CMD_ID +}; + +/* Toe RQ buffer descriptor */ +struct toe_rx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_RX_BD_START_MASK 0x1 +#define TOE_RX_BD_START_SHIFT 0 +#define TOE_RX_BD_END_MASK 0x1 +#define TOE_RX_BD_END_SHIFT 1 +#define TOE_RX_BD_NO_PUSH_MASK 0x1 +#define TOE_RX_BD_NO_PUSH_SHIFT 2 +#define TOE_RX_BD_SPLIT_MASK 0x1 +#define TOE_RX_BD_SPLIT_SHIFT 3 +#define TOE_RX_BD_RESERVED0_MASK 0xFFF +#define TOE_RX_BD_RESERVED0_SHIFT 4 + __le32 reserved1; +}; + +/* TOE RX completion queue opcodes (opcode 0 is illegal) */ +enum toe_rx_cmp_opcode { + TOE_RX_CMP_OPCODE_GA = 1, + TOE_RX_CMP_OPCODE_GR = 2, + TOE_RX_CMP_OPCODE_GNI = 3, + TOE_RX_CMP_OPCODE_GAIR = 4, + TOE_RX_CMP_OPCODE_GAIL = 5, + TOE_RX_CMP_OPCODE_GRI = 6, + TOE_RX_CMP_OPCODE_GJ = 7, + TOE_RX_CMP_OPCODE_DGI = 8, + TOE_RX_CMP_OPCODE_CMP = 9, + TOE_RX_CMP_OPCODE_REL = 10, + TOE_RX_CMP_OPCODE_SKP = 11, + TOE_RX_CMP_OPCODE_URG = 12, + TOE_RX_CMP_OPCODE_RT_TO = 13, + TOE_RX_CMP_OPCODE_KA_TO = 14, + TOE_RX_CMP_OPCODE_MAX_RT = 15, + TOE_RX_CMP_OPCODE_DBT_RE = 16, + TOE_RX_CMP_OPCODE_SYN = 17, + TOE_RX_CMP_OPCODE_OPT_ERR = 18, + TOE_RX_CMP_OPCODE_FW2_TO = 19, + TOE_RX_CMP_OPCODE_2WY_CLS = 20, + TOE_RX_CMP_OPCODE_RST_RCV = 21, + TOE_RX_CMP_OPCODE_FIN_RCV = 22, + TOE_RX_CMP_OPCODE_FIN_UPL = 23, + TOE_RX_CMP_OPCODE_INIT = 32, + TOE_RX_CMP_OPCODE_RSS_UPDATE = 33, + TOE_RX_CMP_OPCODE_CLOSE = 34, + TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80, + TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81, + TOE_RX_CMP_OPCODE_TERMINATE = 82, + TOE_RX_CMP_OPCODE_QUERY = 83, + TOE_RX_CMP_OPCODE_RESET_SEND = 84, + TOE_RX_CMP_OPCODE_INVALIDATE = 85, + TOE_RX_CMP_OPCODE_EMPTY = 86, + TOE_RX_CMP_OPCODE_UPDATE = 87, + MAX_TOE_RX_CMP_OPCODE +}; + +/* TOE rx ooo completion data */ +struct toe_rx_cqe_ooo_params { + __le32 nbytes; + __le16 grq_buff_id; + u8 isle_num; + u8 reserved0; +}; + +/* TOE rx in order completion data */ +struct toe_rx_cqe_in_order_params { + __le32 nbytes; + __le16 grq_buff_id; + __le16 reserved1; +}; + +/* Union for TOE rx completion data */ +union toe_rx_cqe_data_union { + struct toe_rx_cqe_ooo_params ooo_params; + struct toe_rx_cqe_in_order_params in_order_params; + struct regpair raw_data; +}; + +/* TOE rx completion element */ +struct toe_rx_cqe { + __le16 icid; + u8 completion_opcode; + u8 reserved0; + __le32 reserved1; + union toe_rx_cqe_data_union data; +}; + +/* toe RX doorbel data */ +struct toe_rx_db_data { + __le32 local_adv_wnd_seq; + __le32 reserved[3]; +}; + +/* Toe GRQ buffer descriptor */ +struct toe_rx_grq_bd { + struct regpair addr; + __le16 buff_id; + __le16 reserved0; + __le32 reserved1; +}; + +/* Toe transmission application buffer descriptor */ +struct toe_tx_app_buff_desc { + __le32 next_buffer_start_seq; + __le32 reserved; +}; + +/* Toe transmission application buffer descriptor page pointer */ +struct toe_tx_app_buff_page_pointer { + struct regpair next_page_addr; +}; + +/* Toe transmission buffer descriptor */ +struct toe_tx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_TX_BD_PUSH_MASK 0x1 +#define TOE_TX_BD_PUSH_SHIFT 0 +#define TOE_TX_BD_NOTIFY_MASK 0x1 +#define TOE_TX_BD_NOTIFY_SHIFT 1 +#define TOE_TX_BD_LARGE_IO_MASK 0x1 +#define TOE_TX_BD_LARGE_IO_SHIFT 2 +#define TOE_TX_BD_BD_CONS_MASK 0x1FFF +#define TOE_TX_BD_BD_CONS_SHIFT 3 + __le32 next_bd_start_seq; +}; + +/* TOE completion opcodes */ +enum toe_tx_cmp_opcode { + TOE_TX_CMP_OPCODE_DATA, + TOE_TX_CMP_OPCODE_TERMINATE, + TOE_TX_CMP_OPCODE_EMPTY, + TOE_TX_CMP_OPCODE_RESET_SEND, + 
TOE_TX_CMP_OPCODE_INVALIDATE, + TOE_TX_CMP_OPCODE_RST_RCV, + MAX_TOE_TX_CMP_OPCODE +}; + +/* Toe transmission completion element */ +struct toe_tx_cqe { __le16 icid; + u8 opcode; + u8 reserved; + __le32 size; +}; + +/* Toe transmission page pointer bd */ +struct toe_tx_page_pointer_bd { + struct regpair next_page_addr; + struct regpair prev_page_addr; +}; + +/* Toe transmission completion element page pointer */ +struct toe_tx_page_pointer_cqe { + struct regpair next_page_addr; +}; + +/* toe update parameters */ +struct toe_update_params { + __le16 flags; +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1 +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0 +#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF +#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1 + __le16 rcv_indication_size; + __le16 reserved1[2]; +}; + +/* TOE update ramrod data - DMAed by firmware */ +struct toe_update_ramrod_data { + struct tcp_update_params tcp_upd_params; + struct toe_update_params toe_upd_params; +}; + +struct mstorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; u8 flags0; -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 +#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 - u8 flags3; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 dif_rxmit_cons; - __le32 dif_rxmit_prod; - __le32 sge_index; - __le32 sq_cons; - u8 byte2; - u8 byte3; - __le16 dif_write_cons; - __le16 dif_write_prod; - __le16 word3; - __le32 dif_error_buffer_address_lo; - __le32 dif_error_buffer_address_hi; +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; }; -/* RDMA task context */ -struct e4_rdma_task_context { - struct ystorm_rdma_task_st_ctx ystorm_st_context; - struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context; - struct tdif_task_context tdif_context; - struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; - struct mstorm_rdma_task_st_ctx mstorm_st_context; - struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; - struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; +/* TOE doorbell data */ +struct toe_db_data { + u8 params; +#define TOE_DB_DATA_DEST_MASK 0x3 +#define TOE_DB_DATA_DEST_SHIFT 0 +#define TOE_DB_DATA_AGG_CMD_MASK 0x3 +#define TOE_DB_DATA_AGG_CMD_SHIFT 2 +#define TOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define TOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define TOE_DB_DATA_RESERVED_MASK 0x1 +#define TOE_DB_DATA_RESERVED_SHIFT 5 +#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; }; /* rdma function init ramrod data */ @@ -6911,6 +6150,8 @@ enum rdma_event_opcode { RDMA_EVENT_CREATE_SRQ, RDMA_EVENT_MODIFY_SRQ, RDMA_EVENT_DESTROY_SRQ, + RDMA_EVENT_START_NAMESPACE_TRACKING, + RDMA_EVENT_STOP_NAMESPACE_TRACKING, MAX_RDMA_EVENT_OPCODE }; @@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr { u8 relaxed_ordering; __le16 first_reg_srq_id; __le32 reg_srq_base_addr; - u8 searcher_mode; - u8 pvrdma_mode; + u8 flags; +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2 +#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F +#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3 + u8 dpt_byte_threshold_log; + u8 dpt_common_queue_id; u8 max_num_ns_log; - u8 reserved; }; /* rdma function init ramrod data */ struct 
rdma_init_func_ramrod_data { struct rdma_init_func_hdr params_header; + struct rdma_cnq_params dptq_params; struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; }; +/* rdma namespace tracking ramrod data */ +struct rdma_namespace_tracking_ramrod_data { + u8 name_space; + u8 reserved[7]; +}; + /* RDMA ramrod command IDs */ enum rdma_ramrod_cmd_id { RDMA_RAMROD_UNUSED, @@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id { RDMA_RAMROD_CREATE_SRQ, RDMA_RAMROD_MODIFY_SRQ, RDMA_RAMROD_DESTROY_SRQ, + RDMA_RAMROD_START_NS_TRACKING, + RDMA_RAMROD_STOP_NS_TRACKING, MAX_RDMA_RAMROD_CMD_ID }; @@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context { struct regpair temp[9]; }; -struct e4_tstorm_rdma_task_ag_ctx { +struct tstorm_rdma_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define 
E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx { __le32 reg2; }; -struct e4_ustorm_rdma_conn_ag_ctx { +struct ustorm_rdma_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 
0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 nvmf_only; __le16 conn_dpi; @@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_conn_ag_ctx { +struct xstorm_roce_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 
0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define 
XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define 
XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define 
XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx { __le32 reg6; }; -struct e4_tstorm_roce_conn_ag_ctx { +struct tstorm_roce_conn_ag_ctx { u8 reserved0; u8 byte1; u8 flags0; -#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define 
E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define 
E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define 
TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx { }; /* roce connection context */ -struct e4_roce_conn_context { +struct roce_conn_context { struct ystorm_roce_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; - struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context; + struct xstorm_roce_conn_ag_ctx xstorm_ag_context; + struct tstorm_roce_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_roce_conn_st_ctx mstorm_st_context; @@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data { #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3 u8 name_space; u8 reserved3[3]; __le16 regular_latency_phy_queue; @@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data { #define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19 __le16 xrc_domain; u8 max_ird; u8 traffic_class; @@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data { u8 reserved3[3]; }; +/* RoCE Create Suspended qp requester runtime ramrod data */ +struct roce_create_suspended_qp_req_runtime_ramrod_data { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \ + 0x7FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; +}; + +/* RoCE Create Suspended QP requester ramrod data */ +struct roce_create_suspended_qp_req_ramrod_data { + struct roce_create_qp_req_ramrod_data qp_params; + struct roce_create_suspended_qp_req_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE Create Suspended QP responder runtime 
params */ +struct roce_create_suspended_qp_resp_runtime_params { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; + __le32 resreved; +}; + +/* RoCE RDB array entry */ +struct roce_resp_qp_rdb_entry { + struct regpair atomic_data; + struct regpair va; + __le32 psn; + __le32 rkey; + __le32 byte_count; + u8 op_type; + u8 reserved[3]; +}; + +/* RoCE Create Suspended QP responder runtime ramrod data */ +struct roce_create_suspended_qp_resp_runtime_ramrod_data { + struct roce_create_suspended_qp_resp_runtime_params params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Create Suspended QP responder ramrod data */ +struct roce_create_suspended_qp_resp_ramrod_data { + struct roce_create_qp_resp_ramrod_data + qp_params; + struct roce_create_suspended_qp_resp_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE create ud qp ramrod data */ +struct roce_create_ud_qp_ramrod_data { + __le16 local_mac_addr[3]; + __le16 vlan_id; + __le32 src_qp_id; + u8 name_space; + u8 reserved[3]; +}; + /* roce DCQCN received statistics */ struct roce_dcqcn_received_stats { struct regpair ecn_pkt_rcv; struct regpair cnp_pkt_rcv; + struct regpair cnp_pkt_reject; }; /* roce DCQCN sent statistics */ @@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data { __le32 reserved; }; +/* RoCE destroy ud qp ramrod data */ +struct roce_destroy_ud_qp_ramrod_data { + __le32 src_qp_id; + __le32 reserved; +}; + /* roce error statistics */ struct roce_error_stats { __le32 resp_remote_access_errors; @@ -7809,13 +7152,21 @@ struct roce_events_stats { /* roce slow path EQ cmd IDs */ enum roce_event_opcode { - ROCE_EVENT_CREATE_QP = 11, + ROCE_EVENT_CREATE_QP = 13, ROCE_EVENT_MODIFY_QP, ROCE_EVENT_QUERY_QP, ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, ROCE_EVENT_FUNC_UPDATE, + ROCE_EVENT_SUSPEND_QP, + ROCE_EVENT_QUERY_SUSPENDED_QP, + ROCE_EVENT_CREATE_SUSPENDED_QP, + ROCE_EVENT_RESUME_QP, + ROCE_EVENT_SUSPEND_UD_QP, + ROCE_EVENT_RESUME_UD_QP, + ROCE_EVENT_CREATE_SUSPENDED_UD_QP, + ROCE_EVENT_FLUSH_DPT_QP, MAX_ROCE_EVENT_OPCODE }; @@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data { struct roce_init_func_params roce; }; +/* roce_ll2_cqe_data */ +struct roce_ll2_cqe_data { + u8 name_space; + u8 flags; +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1 +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0 +#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F +#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1 + u8 reserved1[2]; + __le32 cid; +}; + /* roce modify qp requester ramrod data */ struct roce_modify_qp_req_ramrod_data { __le16 flags; @@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data { #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 +#define 
ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15 u8 fields; #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 @@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data { #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12 u8 fields; #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 @@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* RoCE Query Suspended QP requester output params */ +struct roce_query_suspended_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; + __le32 reserved; +}; + +/* RoCE Query Suspended QP requester ramrod data */ +struct roce_query_suspended_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE Query Suspended QP responder runtime params */ +struct roce_query_suspended_qp_resp_runtime_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; +}; + +/* RoCE Query Suspended QP responder output params */ +struct roce_query_suspended_qp_resp_output_params { + struct roce_query_suspended_qp_resp_runtime_params runtime_params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Query Suspended QP responder ramrod data */ +struct roce_query_suspended_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + /* ROCE ramrod command IDs */ enum roce_ramrod_cmd_id { - ROCE_RAMROD_CREATE_QP = 11, + ROCE_RAMROD_CREATE_QP = 13, ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_QUERY_QP, ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, ROCE_RAMROD_FUNC_UPDATE, + ROCE_RAMROD_SUSPEND_QP, + ROCE_RAMROD_QUERY_SUSPENDED_QP, + ROCE_RAMROD_CREATE_SUSPENDED_QP, + ROCE_RAMROD_RESUME_QP, + ROCE_RAMROD_SUSPEND_UD_QP, + ROCE_RAMROD_RESUME_UD_QP, + ROCE_RAMROD_CREATE_SUSPENDED_UD_QP, + ROCE_RAMROD_FLUSH_DPT_QP, MAX_ROCE_RAMROD_CMD_ID }; +/* ROCE RDB 
array entry type */ +enum roce_resp_qp_rdb_entry_type { + ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0, + ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1, + ROCE_QP_RDB_ENTRY_INVALID = 2, + MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE +}; + /* RoCE func init ramrod data */ struct roce_update_func_params { u8 cnp_vlan_priority; @@ -7995,7 +7428,7 @@ struct roce_update_func_params { __le32 cnp_send_timeout; }; -struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; u8 flags0; @@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { __le32 reg4; }; -struct e4_mstorm_roce_conn_ag_ctx { +struct mstorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_req_conn_ag_ctx { +struct mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_resp_conn_ag_ctx { +struct mstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define 
MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_tstorm_roce_req_conn_ag_ctx { +struct tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 dif_rxmit_cnt; __le32 snd_nxt_psn; __le32 snd_max_psn; @@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx { __le32 reg10; }; -struct e4_tstorm_roce_resp_conn_ag_ctx { +struct tstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 psn_and_rxmit_id_echo; __le32 reg1; __le32 reg2; @@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_roce_req_conn_ag_ctx { +struct ustorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define 
USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx { __le16 word3; }; -struct e4_ustorm_roce_resp_conn_ag_ctx { +struct ustorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define 
E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_req_conn_ag_ctx { +struct xstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx { __le32 orq_cons; }; -struct e4_xstorm_roce_resp_conn_ag_ctx { +struct xstorm_roce_resp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 
0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 
0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 irq_prod_shadow; @@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; -struct e4_ystorm_roce_conn_ag_ctx { +struct ystorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define 
YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx { __le32 reg3; }; -struct e4_ystorm_roce_req_conn_ag_ctx { +struct ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx { __le32 reg3; }; -struct 
e4_ystorm_roce_resp_conn_ag_ctx { +struct ystorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx { __le32 reserved[48]; }; -struct e4_xstorm_iwarp_conn_ag_ctx { +struct xstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 u8 flags2; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define 
XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define 
XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iwarp_conn_ag_ctx { +struct tstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define 
TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define 
TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 unaligned_nxt_seq; @@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx { }; /* iwarp connection context */ -struct e4_iwarp_conn_context { +struct iwarp_conn_context { struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; - struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_iwarp_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; @@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, - IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, MAX_IWARP_EQE_ASYNC_OPCODE }; @@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion { /* iWARP completion queue types */ enum iwarp_eqe_sync_opcode { - IWARP_EVENT_TYPE_TCP_OFFLOAD = - 11, + IWARP_EVENT_TYPE_TCP_OFFLOAD = 13, IWARP_EVENT_TYPE_MPA_OFFLOAD, IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, IWARP_EVENT_TYPE_CREATE_QP, @@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code { IWARP_EXCEPTION_DETECTED_LLP_RESET, IWARP_EXCEPTION_DETECTED_IRQ_FULL, IWARP_EXCEPTION_DETECTED_RQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_LIMIT, IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, @@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data { struct regpair async_eqe_output_buf; struct regpair handle_for_async; struct regpair shared_queue_addr; + __le32 additional_setup_time; __le16 rcv_wnd; u8 stats_counter_id; - u8 reserved3[13]; + u8 reserved3[9]; }; /* iWARP TCP connection offload params passed by driver to FW */ @@ -9888,11 +9319,13 @@ struct iwarp_offload_params { struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; struct regpair handle_for_async; + __le32 additional_setup_time; __le16 physical_q0; __le16 physical_q1; u8 stats_counter_id; u8 mpa_mode; - u8 reserved[10]; + u8 src_vport_id; + u8 reserved[5]; }; /* iWARP query QP output params */ @@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data { /* iWARP Ramrod Command IDs */ enum iwarp_ramrod_cmd_id { - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13, 
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_CREATE_QP, @@ -9971,100 +9404,100 @@ struct unaligned_opaque_data { __le32 cid; }; -struct e4_mstorm_iwarp_conn_ag_ctx { +struct mstorm_iwarp_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 rcq_cons; __le16 rcq_cons_th; __le32 reg0; __le32 reg1; }; -struct e4_ustorm_iwarp_conn_ag_ctx { +struct ustorm_iwarp_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define 
E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx { __le16 word3; }; -struct e4_ystorm_iwarp_conn_ag_ctx { +struct ystorm_iwarp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define 
YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx { struct fcoe_wqe cached_wqes[16]; }; -struct e4_xstorm_fcoe_conn_ag_ctx { +struct xstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define 
XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 u8 flags2; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx { u8 reserved[2]; }; -struct e4_tstorm_fcoe_conn_ag_ctx { +struct tstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 -#define 
E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define 
TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; }; -struct e4_ustorm_fcoe_conn_ag_ctx { +struct ustorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define 
USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx { u8 reserved0[4]; }; -struct e4_mstorm_fcoe_conn_ag_ctx { +struct mstorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define 
E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx { }; /* fcoe connection context */ -struct e4_fcoe_conn_context { +struct fcoe_conn_context { struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_fcoe_conn_st_ctx xstorm_st_context; - struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; struct regpair xstorm_ag_padding[6]; struct ustorm_fcoe_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; struct tstorm_fcoe_conn_st_ctx tstorm_st_context; - struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context; }; @@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_data stat_ramrod_data; }; -struct e4_ystorm_fcoe_conn_ag_ctx { +struct ystorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define 
E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx { __le32 reserved_iscsi[44]; }; -struct e4_xstorm_iscsi_conn_ag_ctx { +struct xstorm_iscsi_conn_ag_ctx { u8 cdu_validation; u8 state; u8 flags0; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 
0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define 
XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 u8 flags6; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iscsi_conn_ag_ctx { +struct tstorm_iscsi_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define 
TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define 
TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 rx_tcp_checksum_err_cnt; @@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx { __le16 word0; }; -struct e4_ustorm_iscsi_conn_ag_ctx { +struct ustorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define 
E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx { __le32 reserved[44]; }; -struct e4_mstorm_iscsi_conn_ag_ctx { +struct mstorm_iscsi_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx { }; /* iscsi connection context */ -struct e4_iscsi_conn_context { +struct iscsi_conn_context { struct ystorm_iscsi_conn_st_ctx ystorm_st_context; struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct pb_context 
xpb2_context; struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; struct pb_context upb_context; struct tstorm_iscsi_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context; }; @@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params { struct tcp_init_params tcp_init; }; -struct e4_ystorm_iscsi_conn_ag_ctx { +struct ystorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define 
YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx { __le32 reg3; }; -#define MFW_TRACE_SIGNATURE 0x25071946 - -/* The trace in the buffer */ -#define MFW_TRACE_EVENTID_MASK 0x00ffff -#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 -#define MFW_TRACE_PRM_SIZE_OFFSET 16 -#define MFW_TRACE_ENTRY_SIZE 3 - -struct mcp_trace { - u32 signature; /* Help to identify that the trace is valid */ - u32 size; /* the size of the trace buffer in bytes */ - u32 curr_level; /* 2 - all will be written to the buffer - * 1 - debug trace will not be written - * 0 - just errors will be written to the buffer - */ - u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means - * mask it. - */ - - /* Warning: the following pointers are assumed to be 32bits as they are - * used only in the MFW. - */ - u32 trace_prod; /* The next trace will be written to this offset */ - u32 trace_oldest; /* The oldest valid trace starts at this offset - * (usually very close after the current producer). - */ -}; - -#define VF_MAX_STATIC 192 - -#define MCP_GLOB_PATH_MAX 2 -#define MCP_PORT_MAX 2 -#define MCP_GLOB_PORT_MAX 4 -#define MCP_GLOB_FUNC_MAX 16 - -typedef u32 offsize_t; /* In DWORDS !!! */ -/* Offset from the beginning of the MCP scratchpad */ -#define OFFSIZE_OFFSET_SHIFT 0 -#define OFFSIZE_OFFSET_MASK 0x0000ffff -/* Size of specific element (not the whole array if any) */ -#define OFFSIZE_SIZE_SHIFT 16 -#define OFFSIZE_SIZE_MASK 0xffff0000 - -#define SECTION_OFFSET(_offsize) ((((_offsize & \ - OFFSIZE_OFFSET_MASK) >> \ - OFFSIZE_OFFSET_SHIFT) << 2)) - -#define QED_SECTION_SIZE(_offsize) (((_offsize & \ - OFFSIZE_SIZE_MASK) >> \ - OFFSIZE_SIZE_SHIFT) << 2) - -#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ - SECTION_OFFSET(_offsize) + \ - (QED_SECTION_SIZE(_offsize) * idx)) - -#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ - (_pub_base + offsetof(struct mcp_public_data, sections[_section])) - -/* PHY configuration */ -struct eth_phy_cfg { - u32 speed; -#define ETH_SPEED_AUTONEG 0x0 -#define ETH_SPEED_SMARTLINQ 0x8 - - u32 pause; -#define ETH_PAUSE_NONE 0x0 -#define ETH_PAUSE_AUTONEG 0x1 -#define ETH_PAUSE_RX 0x2 -#define ETH_PAUSE_TX 0x4 - - u32 adv_speed; - - u32 loopback_mode; -#define ETH_LOOPBACK_NONE 0x0 -#define ETH_LOOPBACK_INT_PHY 0x1 -#define ETH_LOOPBACK_EXT_PHY 0x2 -#define ETH_LOOPBACK_EXT 0x3 -#define ETH_LOOPBACK_MAC 0x4 -#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 -#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 -#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 -#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 -#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 - - u32 eee_cfg; -#define EEE_CFG_EEE_ENABLED BIT(0) -#define EEE_CFG_TX_LPI BIT(1) -#define EEE_CFG_ADV_SPEED_1G BIT(2) -#define EEE_CFG_ADV_SPEED_10G BIT(3) -#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 -#define EEE_TX_TIMER_USEC_OFFSET 4 -#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 -#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 -#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 - - u32 deprecated; - - u32 fec_mode; -#define FEC_FORCE_MODE_MASK 0x000000ff -#define FEC_FORCE_MODE_OFFSET 0 -#define FEC_FORCE_MODE_NONE 0x00 -#define FEC_FORCE_MODE_FIRECODE 0x01 -#define FEC_FORCE_MODE_RS 0x02 -#define FEC_FORCE_MODE_AUTO 0x07 -#define FEC_EXTENDED_MODE_MASK 0xffffff00 -#define FEC_EXTENDED_MODE_OFFSET 8 -#define ETH_EXT_FEC_NONE 0x00000100 -#define ETH_EXT_FEC_10G_NONE 0x00000200 
-#define ETH_EXT_FEC_10G_BASE_R 0x00000400 -#define ETH_EXT_FEC_20G_NONE 0x00000800 -#define ETH_EXT_FEC_20G_BASE_R 0x00001000 -#define ETH_EXT_FEC_25G_NONE 0x00002000 -#define ETH_EXT_FEC_25G_BASE_R 0x00004000 -#define ETH_EXT_FEC_25G_RS528 0x00008000 -#define ETH_EXT_FEC_40G_NONE 0x00010000 -#define ETH_EXT_FEC_40G_BASE_R 0x00020000 -#define ETH_EXT_FEC_50G_NONE 0x00040000 -#define ETH_EXT_FEC_50G_BASE_R 0x00080000 -#define ETH_EXT_FEC_50G_RS528 0x00100000 -#define ETH_EXT_FEC_50G_RS544 0x00200000 -#define ETH_EXT_FEC_100G_NONE 0x00400000 -#define ETH_EXT_FEC_100G_BASE_R 0x00800000 -#define ETH_EXT_FEC_100G_RS528 0x01000000 -#define ETH_EXT_FEC_100G_RS544 0x02000000 - - u32 extended_speed; -#define ETH_EXT_SPEED_MASK 0x0000ffff -#define ETH_EXT_SPEED_OFFSET 0 -#define ETH_EXT_SPEED_AN 0x00000001 -#define ETH_EXT_SPEED_1G 0x00000002 -#define ETH_EXT_SPEED_10G 0x00000004 -#define ETH_EXT_SPEED_20G 0x00000008 -#define ETH_EXT_SPEED_25G 0x00000010 -#define ETH_EXT_SPEED_40G 0x00000020 -#define ETH_EXT_SPEED_50G_BASE_R 0x00000040 -#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080 -#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100 -#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200 -#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400 -#define ETH_EXT_ADV_SPEED_MASK 0xffff0000 -#define ETH_EXT_ADV_SPEED_OFFSET 16 -#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000 -#define ETH_EXT_ADV_SPEED_1G 0x00020000 -#define ETH_EXT_ADV_SPEED_10G 0x00040000 -#define ETH_EXT_ADV_SPEED_20G 0x00080000 -#define ETH_EXT_ADV_SPEED_25G 0x00100000 -#define ETH_EXT_ADV_SPEED_40G 0x00200000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000 -}; - -struct port_mf_cfg { - u32 dynamic_cfg; -#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff -#define PORT_MF_CFG_OV_TAG_SHIFT 0 -#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK - - u32 reserved[1]; -}; - -struct eth_stats { - u64 r64; - u64 r127; - u64 r255; - u64 r511; - u64 r1023; - u64 r1518; - - union { - struct { - u64 r1522; - u64 r2047; - u64 r4095; - u64 r9216; - u64 r16383; - } bb0; - struct { - u64 unused1; - u64 r1519_to_max; - u64 unused2; - u64 unused3; - u64 unused4; - } ah0; - } u0; - - u64 rfcs; - u64 rxcf; - u64 rxpf; - u64 rxpp; - u64 raln; - u64 rfcr; - u64 rovr; - u64 rjbr; - u64 rund; - u64 rfrg; - u64 t64; - u64 t127; - u64 t255; - u64 t511; - u64 t1023; - u64 t1518; - - union { - struct { - u64 t2047; - u64 t4095; - u64 t9216; - u64 t16383; - } bb1; - struct { - u64 t1519_to_max; - u64 unused6; - u64 unused7; - u64 unused8; - } ah1; - } u1; - - u64 txpf; - u64 txpp; - - union { - struct { - u64 tlpiec; - u64 tncl; - } bb2; - struct { - u64 unused9; - u64 unused10; - } ah2; - } u2; - - u64 rbyte; - u64 rxuca; - u64 rxmca; - u64 rxbca; - u64 rxpok; - u64 tbyte; - u64 txuca; - u64 txmca; - u64 txbca; - u64 txcf; -}; - -struct brb_stats { - u64 brb_truncate[8]; - u64 brb_discard[8]; -}; - -struct port_stats { - struct brb_stats brb; - struct eth_stats eth; -}; - -struct couple_mode_teaming { - u8 port_cmt[MCP_GLOB_PORT_MAX]; -#define PORT_CMT_IN_TEAM (1 << 0) - -#define PORT_CMT_PORT_ROLE (1 << 1) -#define PORT_CMT_PORT_INACTIVE (0 << 1) -#define PORT_CMT_PORT_ACTIVE (1 << 1) - -#define PORT_CMT_TEAM_MASK (1 << 2) -#define PORT_CMT_TEAM0 (0 << 2) -#define PORT_CMT_TEAM1 (1 << 2) -}; - -#define LLDP_CHASSIS_ID_STAT_LEN 4 -#define LLDP_PORT_ID_STAT_LEN 4 -#define 
DCBX_MAX_APP_PROTOCOL 32 -#define MAX_SYSTEM_LLDP_TLV_DATA 32 - -enum _lldp_agent { - LLDP_NEAREST_BRIDGE = 0, - LLDP_NEAREST_NON_TPMR_BRIDGE, - LLDP_NEAREST_CUSTOMER_BRIDGE, - LLDP_MAX_LLDP_AGENTS -}; - -struct lldp_config_params_s { - u32 config; -#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff -#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 -#define LLDP_CONFIG_HOLD_MASK 0x00000f00 -#define LLDP_CONFIG_HOLD_SHIFT 8 -#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 -#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 -#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 -#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 -#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 -#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 - u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; -}; - -struct lldp_status_params_s { - u32 prefix_seq_num; - u32 status; - u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; - u32 suffix_seq_num; -}; - -struct dcbx_ets_feature { - u32 flags; -#define DCBX_ETS_ENABLED_MASK 0x00000001 -#define DCBX_ETS_ENABLED_SHIFT 0 -#define DCBX_ETS_WILLING_MASK 0x00000002 -#define DCBX_ETS_WILLING_SHIFT 1 -#define DCBX_ETS_ERROR_MASK 0x00000004 -#define DCBX_ETS_ERROR_SHIFT 2 -#define DCBX_ETS_CBS_MASK 0x00000008 -#define DCBX_ETS_CBS_SHIFT 3 -#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 -#define DCBX_ETS_MAX_TCS_SHIFT 4 -#define DCBX_OOO_TC_MASK 0x00000f00 -#define DCBX_OOO_TC_SHIFT 8 - u32 pri_tc_tbl[1]; -#define DCBX_TCP_OOO_TC (4) - -#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) -#define DCBX_CEE_STRICT_PRIORITY 0xf - u32 tc_bw_tbl[2]; - u32 tc_tsa_tbl[2]; -#define DCBX_ETS_TSA_STRICT 0 -#define DCBX_ETS_TSA_CBS 1 -#define DCBX_ETS_TSA_ETS 2 -}; - -#define DCBX_TCP_OOO_TC (4) -#define DCBX_TCP_OOO_K2_4PORT_TC (3) - -struct dcbx_app_priority_entry { - u32 entry; -#define DCBX_APP_PRI_MAP_MASK 0x000000ff -#define DCBX_APP_PRI_MAP_SHIFT 0 -#define DCBX_APP_PRI_0 0x01 -#define DCBX_APP_PRI_1 0x02 -#define DCBX_APP_PRI_2 0x04 -#define DCBX_APP_PRI_3 0x08 -#define DCBX_APP_PRI_4 0x10 -#define DCBX_APP_PRI_5 0x20 -#define DCBX_APP_PRI_6 0x40 -#define DCBX_APP_PRI_7 0x80 -#define DCBX_APP_SF_MASK 0x00000300 -#define DCBX_APP_SF_SHIFT 8 -#define DCBX_APP_SF_ETHTYPE 0 -#define DCBX_APP_SF_PORT 1 -#define DCBX_APP_SF_IEEE_MASK 0x0000f000 -#define DCBX_APP_SF_IEEE_SHIFT 12 -#define DCBX_APP_SF_IEEE_RESERVED 0 -#define DCBX_APP_SF_IEEE_ETHTYPE 1 -#define DCBX_APP_SF_IEEE_TCP_PORT 2 -#define DCBX_APP_SF_IEEE_UDP_PORT 3 -#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 - -#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 -#define DCBX_APP_PROTOCOL_ID_SHIFT 16 -}; - -struct dcbx_app_priority_feature { - u32 flags; -#define DCBX_APP_ENABLED_MASK 0x00000001 -#define DCBX_APP_ENABLED_SHIFT 0 -#define DCBX_APP_WILLING_MASK 0x00000002 -#define DCBX_APP_WILLING_SHIFT 1 -#define DCBX_APP_ERROR_MASK 0x00000004 -#define DCBX_APP_ERROR_SHIFT 2 -#define DCBX_APP_MAX_TCS_MASK 0x0000f000 -#define DCBX_APP_MAX_TCS_SHIFT 12 -#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 -#define DCBX_APP_NUM_ENTRIES_SHIFT 16 - struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; -}; - -struct dcbx_features { - struct dcbx_ets_feature ets; - u32 pfc; -#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff -#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 - -#define DCBX_PFC_FLAGS_MASK 0x0000ff00 -#define DCBX_PFC_FLAGS_SHIFT 8 -#define DCBX_PFC_CAPS_MASK 0x00000f00 -#define DCBX_PFC_CAPS_SHIFT 8 -#define DCBX_PFC_MBC_MASK 0x00004000 -#define DCBX_PFC_MBC_SHIFT 14 -#define DCBX_PFC_WILLING_MASK 0x00008000 -#define DCBX_PFC_WILLING_SHIFT 15 -#define DCBX_PFC_ENABLED_MASK 0x00010000 -#define DCBX_PFC_ENABLED_SHIFT 16 -#define DCBX_PFC_ERROR_MASK 0x00020000 -#define DCBX_PFC_ERROR_SHIFT 17 - - struct dcbx_app_priority_feature app; -}; - -struct dcbx_local_params { - u32 config; -#define DCBX_CONFIG_VERSION_MASK 0x00000007 -#define DCBX_CONFIG_VERSION_SHIFT 0 -#define DCBX_CONFIG_VERSION_DISABLED 0 -#define DCBX_CONFIG_VERSION_IEEE 1 -#define DCBX_CONFIG_VERSION_CEE 2 -#define DCBX_CONFIG_VERSION_STATIC 4 - - u32 flags; - struct dcbx_features features; -}; - -struct dcbx_mib { - u32 prefix_seq_num; - u32 flags; - struct dcbx_features features; - u32 suffix_seq_num; -}; - -struct lldp_system_tlvs_buffer_s { - u16 valid; - u16 length; - u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; -}; - -struct dcb_dscp_map { - u32 flags; -#define DCB_DSCP_ENABLE_MASK 0x1 -#define DCB_DSCP_ENABLE_SHIFT 0 -#define DCB_DSCP_ENABLE 1 - u32 dscp_pri_map[8]; -}; - -struct public_global { - u32 max_path; - u32 max_ports; -#define MODE_1P 1 -#define MODE_2P 2 -#define MODE_3P 3 -#define MODE_4P 4 - u32 debug_mb_offset; - u32 phymod_dbg_mb_offset; - struct couple_mode_teaming cmt; - s32 internal_temperature; - u32 mfw_ver; - u32 running_bundle_id; - s32 external_temperature; - u32 mdump_reason; - u64 reserved; - u32 data_ptr; - u32 data_size; -}; - -struct fw_flr_mb { - u32 aggint; - u32 opgen_addr; - u32 accum_ack; -}; - -struct public_path { - struct fw_flr_mb flr_mb; - u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; - - u32 process_kill; -#define PROCESS_KILL_COUNTER_MASK 0x0000ffff -#define PROCESS_KILL_COUNTER_SHIFT 0 -#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 -#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 -#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) -}; - -struct public_port { - u32 validity_map; - - u32 link_status; -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 -#define LINK_STATUS_PFC_ENABLED 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 -#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) 
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) -#define LINK_STATUS_SFP_TX_FAULT 0x00100000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 -#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 -#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 -#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 -#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 - -#define LINK_STATUS_FEC_MODE_MASK 0x38000000 -#define LINK_STATUS_FEC_MODE_NONE (0 << 27) -#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) -#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) - - u32 link_status1; - u32 ext_phy_fw_version; - u32 drv_phy_cfg_addr; - - u32 port_stx; - - u32 stat_nig_timer; - - struct port_mf_cfg port_mf_config; - struct port_stats stats; - - u32 media_type; -#define MEDIA_UNSPECIFIED 0x0 -#define MEDIA_SFPP_10G_FIBER 0x1 -#define MEDIA_XFP_FIBER 0x2 -#define MEDIA_DA_TWINAX 0x3 -#define MEDIA_BASE_T 0x4 -#define MEDIA_SFP_1G_FIBER 0x5 -#define MEDIA_MODULE_FIBER 0x6 -#define MEDIA_KR 0xf0 -#define MEDIA_NOT_PRESENT 0xff - - u32 lfa_status; - u32 link_change_count; - - struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; - - /* DCBX related MIB */ - struct dcbx_local_params local_admin_dcbx_mib; - struct dcbx_mib remote_dcbx_mib; - struct dcbx_mib operational_dcbx_mib; - - u32 reserved[2]; - - u32 transceiver_data; -#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff -#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 -#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 -#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 -#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 -#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 -#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 -#define ETH_TRANSCEIVER_TYPE_NONE 0x00 -#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff -#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 -#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 -#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 -#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 -#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 -#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 -#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 -#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 -#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 -#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a -#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b -#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c -#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d -#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e -#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f -#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 -#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 -#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 -#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 -#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 -#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 -#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 -#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 -#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a -#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b -#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c -#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d -#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e -#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f -#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 -#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 -#define 
ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a - - u32 wol_info; - u32 wol_pkt_len; - u32 wol_pkt_details; - struct dcb_dscp_map dcb_dscp_map; - - u32 eee_status; -#define EEE_ACTIVE_BIT BIT(0) -#define EEE_LD_ADV_STATUS_MASK 0x000000f0 -#define EEE_LD_ADV_STATUS_OFFSET 4 -#define EEE_1G_ADV BIT(1) -#define EEE_10G_ADV BIT(2) -#define EEE_LP_ADV_STATUS_MASK 0x00000f00 -#define EEE_LP_ADV_STATUS_OFFSET 8 -#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 -#define EEE_SUPPORTED_SPEED_OFFSET 12 -#define EEE_1G_SUPPORTED BIT(1) -#define EEE_10G_SUPPORTED BIT(2) - - u32 eee_remote; -#define EEE_REMOTE_TW_TX_MASK 0x0000ffff -#define EEE_REMOTE_TW_TX_OFFSET 0 -#define EEE_REMOTE_TW_RX_MASK 0xffff0000 -#define EEE_REMOTE_TW_RX_OFFSET 16 - - u32 reserved1; - u32 oem_cfg_port; -#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 -#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 -#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 -#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 -#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C -#define OEM_CFG_SCHED_TYPE_OFFSET 2 -#define OEM_CFG_SCHED_TYPE_ETS 0x1 -#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 -}; - -struct public_func { - u32 reserved0[2]; - - u32 mtu_size; - - u32 reserved[7]; - - u32 config; -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 - -#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 -#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 -#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 -#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 -#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 -#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040 -#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040 - -#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 -#define FUNC_MF_CFG_MIN_BW_SHIFT 8 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 16 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 - - u32 status; -#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 - - u32 mac_upper; -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; - - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; - - u32 ovlan_stag; -#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff -#define FUNC_MF_CFG_OV_STAG_SHIFT 0 -#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK - - u32 pf_allocation; - - u32 preserve_data; - - u32 driver_last_activity_ts; - - u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; - - u32 drv_id; -#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff -#define DRV_ID_PDA_COMP_VER_SHIFT 0 - -#define LOAD_REQ_HSI_VERSION 2 -#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 -#define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define 
DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ - DRV_ID_MCP_HSI_VER_SHIFT) - -#define DRV_ID_DRV_TYPE_MASK 0x7f000000 -#define DRV_ID_DRV_TYPE_SHIFT 24 -#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) - -#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 -#define DRV_ID_DRV_INIT_HW_SHIFT 31 -#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) - - u32 oem_cfg_func; -#define OEM_CFG_FUNC_TC_MASK 0x0000000F -#define OEM_CFG_FUNC_TC_OFFSET 0 -#define OEM_CFG_FUNC_TC_0 0x0 -#define OEM_CFG_FUNC_TC_1 0x1 -#define OEM_CFG_FUNC_TC_2 0x2 -#define OEM_CFG_FUNC_TC_3 0x3 -#define OEM_CFG_FUNC_TC_4 0x4 -#define OEM_CFG_FUNC_TC_5 0x5 -#define OEM_CFG_FUNC_TC_6 0x6 -#define OEM_CFG_FUNC_TC_7 0x7 - -#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 -}; - -struct mcp_mac { - u32 mac_upper; - u32 mac_lower; -}; - -struct mcp_val64 { - u32 lo; - u32 hi; -}; - -struct mcp_file_att { - u32 nvm_start_addr; - u32 len; -}; - -struct bist_nvm_image_att { - u32 return_code; - u32 image_type; - u32 nvm_start_addr; - u32 len; -}; - -#define MCP_DRV_VER_STR_SIZE 16 -#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) -#define MCP_DRV_NVM_BUF_LEN 32 -struct drv_version_stc { - u32 version; - u8 name[MCP_DRV_VER_STR_SIZE - 4]; -}; - -struct lan_stats_stc { - u64 ucast_rx_pkts; - u64 ucast_tx_pkts; - u32 fcs_err; - u32 rserved; -}; - -struct fcoe_stats_stc { - u64 rx_pkts; - u64 tx_pkts; - u32 fcs_err; - u32 login_failure; -}; - -struct ocbb_data_stc { - u32 ocbb_host_addr; - u32 ocsd_host_addr; - u32 ocsd_req_update_interval; -}; - -#define MAX_NUM_OF_SENSORS 7 -struct temperature_status_stc { - u32 num_of_sensors; - u32 sensor[MAX_NUM_OF_SENSORS]; -}; - -/* crash dump configuration header */ -struct mdump_config_stc { - u32 version; - u32 config; - u32 epoc; - u32 num_of_logs; - u32 valid_logs; -}; - -enum resource_id_enum { - RESOURCE_NUM_SB_E = 0, - RESOURCE_NUM_L2_QUEUE_E = 1, - RESOURCE_NUM_VPORT_E = 2, - RESOURCE_NUM_VMQ_E = 3, - RESOURCE_FACTOR_NUM_RSS_PF_E = 4, - RESOURCE_FACTOR_RSS_PER_VF_E = 5, - RESOURCE_NUM_RL_E = 6, - RESOURCE_NUM_PQ_E = 7, - RESOURCE_NUM_VF_E = 8, - RESOURCE_VFC_FILTER_E = 9, - RESOURCE_ILT_E = 10, - RESOURCE_CQS_E = 11, - RESOURCE_GFT_PROFILES_E = 12, - RESOURCE_NUM_TC_E = 13, - RESOURCE_NUM_RSS_ENGINES_E = 14, - RESOURCE_LL2_QUEUE_E = 15, - RESOURCE_RDMA_STATS_QUEUE_E = 16, - RESOURCE_BDQ_E = 17, - RESOURCE_QCN_E = 18, - RESOURCE_LLH_FILTER_E = 19, - RESOURCE_VF_MAC_ADDR = 20, - RESOURCE_LL2_CQS_E = 21, - RESOURCE_VF_CNQS = 22, - RESOURCE_MAX_NUM, - RESOURCE_NUM_INVALID = 0xFFFFFFFF -}; - -/* Resource ID is to be filled by the driver in the MB request - * Size, offset & flags to be filled by the MFW in the MB response - */ -struct resource_info { - enum resource_id_enum res_id; - u32 size; /* number of allocated resources */ - u32 offset; /* Offset of the 1st resource */ - u32 vf_size; - u32 vf_offset; - u32 flags; -#define RESOURCE_ELEMENT_STRICT (1 << 0) -}; - -#define DRV_ROLE_NONE 0 -#define DRV_ROLE_PREBOOT 1 -#define DRV_ROLE_OS 2 -#define DRV_ROLE_KDUMP 3 - -struct load_req_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_REQ_ROLE_MASK 0x000000FF -#define LOAD_REQ_ROLE_SHIFT 0 -#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 -#define LOAD_REQ_LOCK_TO_SHIFT 8 -#define LOAD_REQ_LOCK_TO_DEFAULT 0 -#define 
LOAD_REQ_LOCK_TO_NONE 255 -#define LOAD_REQ_FORCE_MASK 0x000F0000 -#define LOAD_REQ_FORCE_SHIFT 16 -#define LOAD_REQ_FORCE_NONE 0 -#define LOAD_REQ_FORCE_PF 1 -#define LOAD_REQ_FORCE_ALL 2 -#define LOAD_REQ_FLAGS0_MASK 0x00F00000 -#define LOAD_REQ_FLAGS0_SHIFT 20 -#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) -}; - -struct load_rsp_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_RSP_ROLE_MASK 0x000000FF -#define LOAD_RSP_ROLE_SHIFT 0 -#define LOAD_RSP_HSI_MASK 0x0000FF00 -#define LOAD_RSP_HSI_SHIFT 8 -#define LOAD_RSP_FLAGS0_MASK 0x000F0000 -#define LOAD_RSP_FLAGS0_SHIFT 16 -#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) -}; - -struct mdump_retain_data_stc { - u32 valid; - u32 epoch; - u32 pf; - u32 status; -}; - -union drv_union_data { - u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; - struct mcp_mac wol_mac; - - struct eth_phy_cfg drv_phy_cfg; - - struct mcp_val64 val64; - - u8 raw_data[MCP_DRV_NVM_BUF_LEN]; - - struct mcp_file_att file_att; - - u32 ack_vf_disabled[VF_MAX_STATIC / 32]; - - struct drv_version_stc drv_version; - - struct lan_stats_stc lan_stats; - struct fcoe_stats_stc fcoe_stats; - struct ocbb_data_stc ocbb_info; - struct temperature_status_stc temp_info; - struct resource_info resource; - struct bist_nvm_image_att nvm_image_att; - struct mdump_config_stc mdump_config; -}; - -struct public_drv_mb { - u32 drv_mb_header; -#define DRV_MSG_CODE_MASK 0xffff0000 -#define DRV_MSG_CODE_LOAD_REQ 0x10000000 -#define DRV_MSG_CODE_LOAD_DONE 0x11000000 -#define DRV_MSG_CODE_INIT_HW 0x12000000 -#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000 -#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 -#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 -#define DRV_MSG_CODE_INIT_PHY 0x22000000 -#define DRV_MSG_CODE_LINK_RESET 0x23000000 -#define DRV_MSG_CODE_SET_DCBX 0x25000000 -#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 -#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 -#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 -#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 -#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 -#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 -#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 -#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 -#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 - -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 -#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 -#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 -#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 -#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 -#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 -#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 -#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 -#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 -#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 -#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 -#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 -#define DRV_MSG_CODE_MCP_RESET 0x00090000 -#define DRV_MSG_CODE_SET_VERSION 0x000f0000 -#define DRV_MSG_CODE_MCP_HALT 0x00100000 -#define DRV_MSG_CODE_SET_VMAC 0x00110000 -#define DRV_MSG_CODE_GET_VMAC 0x00120000 -#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 -#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 -#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 -#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 -#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 - -#define 
DRV_MSG_CODE_GET_STATS 0x00130000 -#define DRV_MSG_CODE_STATS_TYPE_LAN 1 -#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 -#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 -#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 - -#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 - -#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 - -#define DRV_MSG_CODE_BIST_TEST 0x001e0000 -#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 -#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 -/* Send crash dump commands with param[3:0] - opcode */ -#define DRV_MSG_CODE_MDUMP_CMD 0x00250000 -#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 -#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 -#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000 - -#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000 - -#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F -#define RESOURCE_CMD_REQ_RESC_SHIFT 0 -#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 -#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 -#define RESOURCE_OPCODE_REQ 1 -#define RESOURCE_OPCODE_REQ_WO_AGING 2 -#define RESOURCE_OPCODE_REQ_W_AGING 3 -#define RESOURCE_OPCODE_RELEASE 4 -#define RESOURCE_OPCODE_FORCE_RELEASE 5 -#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 -#define RESOURCE_CMD_REQ_AGE_SHIFT 8 - -#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF -#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 -#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 -#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 -#define RESOURCE_OPCODE_GNT 1 -#define RESOURCE_OPCODE_BUSY 2 -#define RESOURCE_OPCODE_RELEASED 3 -#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 -#define RESOURCE_OPCODE_WRONG_OWNER 5 -#define RESOURCE_OPCODE_UNKNOWN_CMD 255 - -#define RESOURCE_DUMP 0 - -/* DRV_MSG_CODE_MDUMP_CMD parameters */ -#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f -#define DRV_MSG_CODE_MDUMP_ACK 0x01 -#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 -#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 -#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 -#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 -#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 -#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 -#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 - -#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a -#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b -#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c - -#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 -#define DRV_MSG_CODE_OS_WOL 0x002e0000 - -#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 -#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 -#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff - - u32 drv_mb_param; -#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 -#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 -#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 -#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF -#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 - -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 -#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 -#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF -#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 -#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 - -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 -#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 -#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 - -#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 -#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F -#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 -#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 -#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 -#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 - -#define 
DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF -#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 -#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF - -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 - -#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 -#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF - -#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ - DRV_MB_PARAM_WOL_DISABLED | \ - DRV_MB_PARAM_WOL_ENABLED) -#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP -#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED -#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED - -#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ - DRV_MB_PARAM_ESWITCH_MODE_VEB | \ - DRV_MB_PARAM_ESWITCH_MODE_VEPA) -#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 -#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 -#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 - -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 - -#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 -#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 -#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 - -#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 -#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 - - /* Resource Allocation params - Driver version support */ -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 -#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 -#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 - -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 - -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 -#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 - -/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff - -/* Driver attributes params */ -#define 
DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 -#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff -#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 -#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 - -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 - - u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_UNSUPPORTED 0x00000000 -#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 -#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000 -#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 - -#define FW_MSG_CODE_NVM_OK 0x00010000 -#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 -#define FW_MSG_CODE_PHY_OK 0x00110000 -#define FW_MSG_CODE_OK 0x00160000 -#define FW_MSG_CODE_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 - -#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 -#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 -#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000 -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff - -#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000 -#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000 - -#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - - /* Get PF RDMA protocol command response */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 - - /* Get MFW feature support response */ -#define 
FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) -#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) -#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) - -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) - -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 - -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - - union drv_union_data union_data; -}; - -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 - -enum MFW_DRV_MSG_TYPE { - MFW_DRV_MSG_LINK_CHANGE, - MFW_DRV_MSG_FLR_FW_ACK_FAILED, - MFW_DRV_MSG_VF_DISABLED, - MFW_DRV_MSG_LLDP_DATA_UPDATED, - MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, - MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_ERROR_RECOVERY, - MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_S_TAG_UPDATE, - MFW_DRV_MSG_GET_LAN_STATS, - MFW_DRV_MSG_GET_FCOE_STATS, - MFW_DRV_MSG_GET_ISCSI_STATS, - MFW_DRV_MSG_GET_RDMA_STATS, - MFW_DRV_MSG_FAILURE_DETECTED, - MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, - MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, - MFW_DRV_MSG_RESERVED, - MFW_DRV_MSG_GET_TLV_REQ, - MFW_DRV_MSG_OEM_CFG_UPDATE, - MFW_DRV_MSG_MAX -}; - -#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) -#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) -#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) -#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) - -struct public_mfw_mb { - u32 sup_msgs; - u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; - u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; -}; - -enum public_sections { - PUBLIC_DRV_MB, - PUBLIC_MFW_MB, - PUBLIC_GLOBAL, - PUBLIC_PATH, - PUBLIC_PORT, - PUBLIC_FUNC, - PUBLIC_MAX_SECTIONS -}; - -struct mcp_public_data { - u32 num_sections; - u32 sections[PUBLIC_MAX_SECTIONS]; - struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; - struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; - struct public_global global; - struct public_path path[MCP_GLOB_PATH_MAX]; - struct public_port port[MCP_GLOB_PORT_MAX]; - struct public_func func[MCP_GLOB_FUNC_MAX]; -}; - -#define MAX_I2C_TRANSACTION_SIZE 16 - -/* OCBB definitions */ -enum tlvs { - /* Category 1: Device Properties */ - DRV_TLV_CLP_STR, - DRV_TLV_CLP_STR_CTD, - /* Category 6: Device Configuration */ - DRV_TLV_SCSI_TO, - DRV_TLV_R_T_TOV, - DRV_TLV_R_A_TOV, - DRV_TLV_E_D_TOV, - DRV_TLV_CR_TOV, - DRV_TLV_BOOT_TYPE, - /* Category 8: Port Configuration */ - DRV_TLV_NPIV_ENABLED, - /* Category 10: Function Configuration */ - DRV_TLV_FEATURE_FLAGS, - DRV_TLV_LOCAL_ADMIN_ADDR, - DRV_TLV_ADDITIONAL_MAC_ADDR_1, - 
DRV_TLV_ADDITIONAL_MAC_ADDR_2, - DRV_TLV_LSO_MAX_OFFLOAD_SIZE, - DRV_TLV_LSO_MIN_SEGMENT_COUNT, - DRV_TLV_PROMISCUOUS_MODE, - DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, - DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, - DRV_TLV_OS_DRIVER_STATES, - DRV_TLV_PXE_BOOT_PROGRESS, - /* Category 12: FC/FCoE Configuration */ - DRV_TLV_NPIV_STATE, - DRV_TLV_NUM_OF_NPIV_IDS, - DRV_TLV_SWITCH_NAME, - DRV_TLV_SWITCH_PORT_NUM, - DRV_TLV_SWITCH_PORT_ID, - DRV_TLV_VENDOR_NAME, - DRV_TLV_SWITCH_MODEL, - DRV_TLV_SWITCH_FW_VER, - DRV_TLV_QOS_PRIORITY_PER_802_1P, - DRV_TLV_PORT_ALIAS, - DRV_TLV_PORT_STATE, - DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_LINK_FAILURE_COUNT, - DRV_TLV_FCOE_BOOT_PROGRESS, - /* Category 13: iSCSI Configuration */ - DRV_TLV_TARGET_LLMNR_ENABLED, - DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, - DRV_TLV_DATA_DIGEST_FLAG_ENABLED, - DRV_TLV_AUTHENTICATION_METHOD, - DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, - DRV_TLV_MAX_FRAME_SIZE, - DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_ISCSI_BOOT_PROGRESS, - /* Category 20: Device Data */ - DRV_TLV_PCIE_BUS_RX_UTILIZATION, - DRV_TLV_PCIE_BUS_TX_UTILIZATION, - DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, - DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, - DRV_TLV_NCSI_RX_BYTES_RECEIVED, - DRV_TLV_NCSI_TX_BYTES_SENT, - /* Category 22: Base Port Data */ - DRV_TLV_RX_DISCARDS, - DRV_TLV_RX_ERRORS, - DRV_TLV_TX_ERRORS, - DRV_TLV_TX_DISCARDS, - DRV_TLV_RX_FRAMES_RECEIVED, - DRV_TLV_TX_FRAMES_SENT, - /* Category 23: FC/FCoE Port Data */ - DRV_TLV_RX_BROADCAST_PACKETS, - DRV_TLV_TX_BROADCAST_PACKETS, - /* Category 28: Base Function Data */ - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, - DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_PF_RX_FRAMES_RECEIVED, - DRV_TLV_RX_BYTES_RECEIVED, - DRV_TLV_PF_TX_FRAMES_SENT, - DRV_TLV_TX_BYTES_SENT, - DRV_TLV_IOV_OFFLOAD, - DRV_TLV_PCI_ERRORS_CAP_ID, - DRV_TLV_UNCORRECTABLE_ERROR_STATUS, - DRV_TLV_UNCORRECTABLE_ERROR_MASK, - DRV_TLV_CORRECTABLE_ERROR_STATUS, - DRV_TLV_CORRECTABLE_ERROR_MASK, - DRV_TLV_PCI_ERRORS_AECC_REGISTER, - DRV_TLV_TX_QUEUES_EMPTY, - DRV_TLV_RX_QUEUES_EMPTY, - DRV_TLV_TX_QUEUES_FULL, - DRV_TLV_RX_QUEUES_FULL, - /* Category 29: FC/FCoE Function Data */ - DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_FRAMES_RECEIVED, - DRV_TLV_FCOE_RX_BYTES_RECEIVED, - DRV_TLV_FCOE_TX_FRAMES_SENT, - DRV_TLV_FCOE_TX_BYTES_SENT, - DRV_TLV_CRC_ERROR_COUNT, - DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_1_TIMESTAMP, - DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_2_TIMESTAMP, - DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_3_TIMESTAMP, - DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_4_TIMESTAMP, - DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_5_TIMESTAMP, - DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, - DRV_TLV_LOSS_OF_SIGNAL_ERRORS, - DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, - DRV_TLV_DISPARITY_ERROR_COUNT, - DRV_TLV_CODE_VIOLATION_ERROR_COUNT, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_TIMESTAMP, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, - 
DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, - DRV_TLV_LAST_FLOGI_RJT, - DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, - DRV_TLV_FDISCS_SENT_COUNT, - DRV_TLV_FDISC_ACCS_RECEIVED, - DRV_TLV_FDISC_RJTS_RECEIVED, - DRV_TLV_PLOGI_SENT_COUNT, - DRV_TLV_PLOGI_ACCS_RECEIVED, - DRV_TLV_PLOGI_RJTS_RECEIVED, - DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_1_TIMESTAMP, - DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_2_TIMESTAMP, - DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_3_TIMESTAMP, - DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_4_TIMESTAMP, - DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_5_TIMESTAMP, - DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_1_ACC_TIMESTAMP, - DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_2_ACC_TIMESTAMP, - DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_3_ACC_TIMESTAMP, - DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_4_ACC_TIMESTAMP, - DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_5_ACC_TIMESTAMP, - DRV_TLV_LOGOS_ISSUED, - DRV_TLV_LOGO_ACCS_RECEIVED, - DRV_TLV_LOGO_RJTS_RECEIVED, - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_1_TIMESTAMP, - DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_2_TIMESTAMP, - DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_3_TIMESTAMP, - DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_4_TIMESTAMP, - DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_5_TIMESTAMP, - DRV_TLV_LOGOS_RECEIVED, - DRV_TLV_ACCS_ISSUED, - DRV_TLV_PRLIS_ISSUED, - DRV_TLV_ACCS_RECEIVED, - DRV_TLV_ABTS_SENT_COUNT, - DRV_TLV_ABTS_ACCS_RECEIVED, - DRV_TLV_ABTS_RJTS_RECEIVED, - DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_1_TIMESTAMP, - DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_2_TIMESTAMP, - DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_3_TIMESTAMP, - DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_4_TIMESTAMP, - DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_5_TIMESTAMP, - DRV_TLV_RSCNS_RECEIVED, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, - DRV_TLV_LUN_RESETS_ISSUED, - DRV_TLV_ABORT_TASK_SETS_ISSUED, - DRV_TLV_TPRLOS_SENT, - DRV_TLV_NOS_SENT_COUNT, - DRV_TLV_NOS_RECEIVED_COUNT, - DRV_TLV_OLS_COUNT, - DRV_TLV_LR_COUNT, - DRV_TLV_LRR_COUNT, - DRV_TLV_LIP_SENT_COUNT, - DRV_TLV_LIP_RECEIVED_COUNT, - DRV_TLV_EOFA_COUNT, - DRV_TLV_EOFNI_COUNT, - DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, - DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_BUSY_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, - DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, - DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, - DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, - DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_1_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_2_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_3_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_4_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_5_TIMESTAMP, - /* Category 30: iSCSI Function Data */ - DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - 
DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, - DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, - DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, - DRV_TLV_ISCSI_PDU_TX_BYTES_SENT -}; - -struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - - u32 mac_addr_lo; -}; - -struct nvm_cfg1_glob { - u32 generic_cont0; -#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 - - u32 engineering_change[3]; - u32 manufacturing_id; - u32 serial_number[4]; - u32 pcie_cfg; - u32 mgmt_traffic; - - u32 core_cfg; -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 - - u32 e_lane_cfg1; - u32 e_lane_cfg2; - u32 f_lane_cfg1; - u32 f_lane_cfg2; - u32 mps10_preemphasis; - u32 mps10_driver_current; - u32 mps25_preemphasis; - u32 mps25_driver_current; - u32 pci_id; - u32 pci_subsys_id; - u32 bar; - u32 mps10_txfir_main; - u32 mps10_txfir_post; - u32 mps25_txfir_main; - u32 mps25_txfir_post; - u32 manufacture_ver; - u32 manufacture_time; - u32 led_global_settings; - u32 generic_cont1; - - u32 mbi_version; -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - - u32 mbi_date; - u32 misc_sig; - - u32 device_capabilities; -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 - - u32 power_dissipated; - u32 power_consumed; - u32 efi_version; - u32 multi_net_modes_cap; - u32 reserved[41]; -}; - -struct nvm_cfg1_path { - u32 reserved[30]; -}; - -struct nvm_cfg1_port { - u32 rel_to_opt123; - u32 rel_to_opt124; - - u32 generic_cont0; -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 -#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 -#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 -#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 -#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 - - u32 pcie_cfg; - u32 features; - - u32 speed_cap_mask; -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 - - u32 link_settings; -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f -#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 - - u32 phy_cfg; - u32 mgmt_traffic; - - u32 ext_phy; - /* EEE power saving mode */ -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 - - u32 mba_cfg1; - u32 mba_cfg2; - u32 vf_cfg; - struct nvm_cfg_mac_address lldp_mac_address; - u32 led_port_settings; - u32 transceiver_00; - u32 device_ids; - - u32 board_cfg; -#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff -#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 -#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 -#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 - - u32 mnm_10g_cap; - u32 mnm_10g_ctrl; - u32 mnm_10g_misc; - u32 mnm_25g_cap; - u32 mnm_25g_ctrl; - u32 mnm_25g_misc; - u32 mnm_40g_cap; - u32 mnm_40g_ctrl; - u32 mnm_40g_misc; - u32 mnm_50g_cap; - u32 mnm_50g_ctrl; - u32 mnm_50g_misc; - u32 mnm_100g_cap; - u32 mnm_100g_ctrl; - u32 mnm_100g_misc; - - u32 temperature; - u32 ext_phy_cfg1; - - u32 extended_speed; -#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff -#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 -#define 
NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 - - u32 extended_fec_mode; - - u32 reserved[112]; -}; - -struct nvm_cfg1_func { - struct nvm_cfg_mac_address mac_address; - u32 rsrv1; - u32 rsrv2; - u32 device_id; - u32 cmn_cfg; - u32 pci_cfg; - struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; - struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; - u32 preboot_generic_cfg; - u32 reserved[8]; -}; - -struct nvm_cfg1 { - struct nvm_cfg1_glob glob; - struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; - struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; - struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; -}; - -enum spad_sections { - SPAD_SECTION_TRACE, - SPAD_SECTION_NVM_CFG, - SPAD_SECTION_PUBLIC, - SPAD_SECTION_PRIVATE, - SPAD_SECTION_MAX -}; - -#define MCP_TRACE_SIZE 2048 /* 2kb */ - -/* This section is located at a fixed location in the beginning of the - * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. - * All the rest of data has a floating location which differs from version to - * version, and is pointed by the mcp_meta_data below. - * Moreover, the spad_layout section is part of the MFW firmware, and is loaded - * with it from nvram in order to clear this portion. - */ -struct static_init { - u32 num_sections; - offsize_t sections[SPAD_SECTION_MAX]; -#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) - - struct mcp_trace trace; -#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace))) - u8 trace_buffer[MCP_TRACE_SIZE]; -#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer))) - /* running_mfw has the same definition as in nvm_map.h. - * This bit indicate both the running dir, and the running bundle. - * It is set once when the LIM is loaded. 
- */ - u32 running_mfw; -#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw)))) - u32 build_time; -#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time)))) - u32 reset_type; -#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type)))) - u32 mfw_secure_mode; -#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode)))) - u16 pme_status_pf_bitmap; -#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap)))) - u16 pme_enable_pf_bitmap; -#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap)))) - u32 mim_nvm_addr; - u32 mim_start_addr; - u32 ah_pcie_link_params; -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff) -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24) -#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params)))) - - u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */ -}; - -#define NVM_MAGIC_VALUE 0x669955aa - -enum nvm_image_type { - NVM_TYPE_TIM1 = 0x01, - NVM_TYPE_TIM2 = 0x02, - NVM_TYPE_MIM1 = 0x03, - NVM_TYPE_MIM2 = 0x04, - NVM_TYPE_MBA = 0x05, - NVM_TYPE_MODULES_PN = 0x06, - NVM_TYPE_VPD = 0x07, - NVM_TYPE_MFW_TRACE1 = 0x08, - NVM_TYPE_MFW_TRACE2 = 0x09, - NVM_TYPE_NVM_CFG1 = 0x0a, - NVM_TYPE_L2B = 0x0b, - NVM_TYPE_DIR1 = 0x0c, - NVM_TYPE_EAGLE_FW1 = 0x0d, - NVM_TYPE_FALCON_FW1 = 0x0e, - NVM_TYPE_PCIE_FW1 = 0x0f, - NVM_TYPE_HW_SET = 0x10, - NVM_TYPE_LIM = 0x11, - NVM_TYPE_AVS_FW1 = 0x12, - NVM_TYPE_DIR2 = 0x13, - NVM_TYPE_CCM = 0x14, - NVM_TYPE_EAGLE_FW2 = 0x15, - NVM_TYPE_FALCON_FW2 = 0x16, - NVM_TYPE_PCIE_FW2 = 0x17, - NVM_TYPE_AVS_FW2 = 0x18, - NVM_TYPE_INIT_HW = 0x19, - NVM_TYPE_DEFAULT_CFG = 0x1a, - NVM_TYPE_MDUMP = 0x1b, - NVM_TYPE_META = 0x1c, - NVM_TYPE_ISCSI_CFG = 0x1d, - NVM_TYPE_FCOE_CFG = 0x1f, - NVM_TYPE_ETH_PHY_FW1 = 0x20, - NVM_TYPE_ETH_PHY_FW2 = 0x21, - NVM_TYPE_BDN = 0x22, - NVM_TYPE_8485X_PHY_FW = 0x23, - NVM_TYPE_PUB_KEY = 0x24, - NVM_TYPE_RECOVERY = 0x25, - NVM_TYPE_PLDM = 0x26, - NVM_TYPE_UPK1 = 0x27, - NVM_TYPE_UPK2 = 0x28, - NVM_TYPE_MASTER_KC = 0x29, - NVM_TYPE_BACKUP_KC = 0x2a, - NVM_TYPE_HW_DUMP = 0x2b, - NVM_TYPE_HW_DUMP_OUT = 0x2c, - NVM_TYPE_BIN_NVM_META = 0x30, - NVM_TYPE_ROM_TEST = 0xf0, - NVM_TYPE_88X33X0_PHY_FW = 0x31, - NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, - NVM_TYPE_MAX, -}; - -#define DIR_ID_1 (0) - #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 2734f49956f76d..e535983ce21bba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask { #define DMAE_MAX_CLIENTS 32 /** - * @brief qed_gtt_init - Initialize GTT windows + * qed_gtt_init(): Initialize GTT windows. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. 
* - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return struct _qed_status - success (0), negative - error. + * Return: struct _qed_status - success (0), negative - error. */ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_free - + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. + * - * @param p_hwfn + * Return: Void. */ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * qed_ptt_get_bar_addr(): Get PPT's external BAR address. * - * @param p_hwfn - * @param p_ptt + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); /** - * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * qed_ptt_set_win(): Set PTT Window's GRC BAR address * - * @param p_hwfn - * @param new_hw_addr - * @param p_ptt + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_Ptt + * + * Return: Void. */ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr); /** - * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * qed_get_reserved_ptt(): Get a specific reserved PTT. * - * @param p_hwfn - * @param ptt_idx + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. * - * @return struct qed_ptt * + * Return: struct qed_ptt *. */ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx); /** - * @brief qed_wr - Write value to BAR using the given ptt + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Val. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn, u32 val); /** - * @brief qed_rd - Read value from BAR using the given ptt + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address + * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: u32. */ u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr); /** - * @brief qed_memcpy_from - copy n bytes from BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param dest - * @param hw_addr - * @param n + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: N + * + * Return: Void. */ void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, size_t n); /** - * @brief qed_memcpy_to - copy n bytes to BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param hw_addr - * @param src - * @param n + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. + * @src: Source. + * @n: N + * + * Return: Void. 
*/ void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, void *src, size_t n); /** - * @brief qed_fid_pretend - pretend to another function when - * accessing the ptt window. There is no way to unpretend - * a function. The only way to cancel a pretend is to - * pretend back to the original function. - * - * @param p_hwfn - * @param p_ptt - * @param fid - fid field of pxp_pretend structure. Can contain - * either pf / vf, port/path fields are don't care. + * qed_fid_pretend(): pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void. */ void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid); /** - * @brief qed_port_pretend - pretend to another port when - * accessing the ptt window + * qed_port_pretend(): Pretend to another port when accessing the ptt window * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * + * Return: Void. */ void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id); /** - * @brief qed_port_unpretend - cancel any previously set port - * pretend + * qed_port_unpretend(): Cancel any previously set port pretend + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_port_fid_pretend - pretend to another port and another function - * when accessing the ptt window + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to - * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + * Return: Void. */ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid); /** - * @brief qed_vfid_to_concrete - build a concrete FID for a - * given VF ID + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID * - * @param p_hwfn - * @param p_ptt - * @param vfid + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: Void. */ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); /** - * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd - * this is declared here since other files will require it. - * @param idx + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd + * this is declared here since other files will require it. + * + * @idx: Index + * + * Return: Void. */ u32 qed_dmae_idx_to_go_cmd(u8 idx); /** - * @brief qed_dmae_info_alloc - Init the dmae_info structure - * which is part of p_hwfn. - * @param p_hwfn + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. 
*/ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_dmae_info_free - Free the dmae_info structure - * which is part of p_hwfn + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); @@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, #define QED_HW_ERR_MAX_STR_SIZE 256 /** - * @brief qed_hw_err_notify - Notify upper layer driver and management FW - * about a HW error. - * - * @param p_hwfn - * @param p_ptt - * @param err_type - * @param fmt - debug data buffer to send to the MFW - * @param ... - buffer format args + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW + * @...: buffer format args + * + * Return void. */ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index ea888a2c6ddbdd..321c4340815380 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. */ #include @@ -13,17 +13,18 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" -#define CDU_VALIDATION_DEFAULT_CFG 61 +#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG -static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ }; -static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ }; @@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 +/* Initial VOQ byte credit */ +#define QM_INITIAL_VOQ_BYTE_CRD 98304 /* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 +/* VOQ constants */ +#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) +#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) + /* WFQ constants */ -/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */ -#define QM_WFQ_UPPER_BOUND 62500000 +/* PF WFQ increment value, 0x9000 = 4*9*1024 */ +#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_WFQ_UPPER_BOUND 62500000 + +/* PF WFQ max increment value, 0.7 * upper bound */ +#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) + +/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ +#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 + +/* VP WFQ increment value */ +#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) -/* Bit of VOQ in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 +/* VP WFQ min increment value */ +#define QM_VP_WFQ_MIN_INC_VAL 
10800 -/* Bit of PF in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5 +/* VP WFQ max increment value, 2^30 */ +#define QM_VP_WFQ_MAX_INC_VAL 0x40000000 -/* 0x9000 = 4*9*1024 */ -#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) +/* VP WFQ bypass threshold */ +#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) -/* Max WFQ increment value is 0.7 * upper bound */ -#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10) +/* VP RL credit task cost */ +#define QM_VP_RL_CRD_TASK_COST 9700 + +/* Bit of VOQ in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_VOQ_SHIFT 0 + +/* Bit of PF in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_PF_SHIFT 5 /* RL constants */ @@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps */ -#define QM_RL_INC_VAL(rate) ({ \ - typeof(rate) __rate = (rate); \ - max_t(u32, \ - (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \ - (8 * 100)), \ - 1); }) +#define QM_RL_INC_VAL(rate) ({ \ + typeof(rate) __rate = (rate); \ + max_t(u32, \ + (u32)(((__rate ? __rate : \ + 100000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1); }) /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ #define QM_PF_RL_UPPER_BOUND 62500000 @@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { /* Max PF RL increment value is 0.7 * upper bound */ #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) -/* Vport RL Upper bound, link speed is in Mpbs */ -#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \ - QM_RL_INC_VAL(speed), \ - 9700 + 1000)) - -/* Max Vport RL increment value is the Vport RL upper bound */ -#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed) - -/* Vport RL credit threshold in case of QM bypass */ -#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1) +/* QCN RL Upper bound, speed is in Mpbs */ +#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \ + u32, \ + (u32)(((speed) * \ + QM_RL_PERIOD * 101) / (8 * 100)), \ + QM_VP_RL_CRD_TASK_COST \ + + 1000)) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 @@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { cmd ## _ ## field, \ value) -#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ u32 __reg = 0; \ \ BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ - \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + memset(&(map), 0, sizeof(map)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \ !!(rl_valid)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ @@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((rl) >> 8) << 9)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ - 
XSTORM_PQ_INFO_OFFSET(pq_id) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ + XSTORM_PQ_INFO_OFFSET(pq_id)) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; /* Enable RLs for all VOQs */ @@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_PF_WFQ_UPPER_BOUND); } /* Prepare global RL enable/disable runtime init values */ @@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, - QM_VP_RL_BYPASS_THRESH_SPEED); + QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); } } @@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_VP_WFQ_BYPASS_THRESH); } /* Prepare runtime init values to allocate PBF command queue lines for @@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, } /* Prepare runtime init values to allocate PBF command queue lines. */ -static void qed_cmdq_lines_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u8 tc, ext_voq, port_id, num_tcs_in_port; - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; /* Clear PBF lines of all VOQs */ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) @@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init( * - No optimization for lossy TC (all are considered lossless). Shared space * is not enabled and allocated for each TC. */ -static void qed_btb_blocks_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; u8 tc, ext_voq, port_id, num_tcs_in_port; @@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init( */ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) { - u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | (u32)QM_RL_CRD_REG_SIGN_BIT; u32 inc_val; u16 rl_id; @@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) return 0; } +/* Returns the upper bound for the specified Vport RL parameters. + * link_speed is in Mbps. + * Returns 0 in case of error. + */ +static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type, + u32 link_speed) +{ + switch (vport_rl_type) { + case QM_RL_TYPE_NORMAL: + return QM_INITIAL_VOQ_BYTE_CRD; + case QM_RL_TYPE_QCN: + return QM_GLOBAL_RL_UPPER_BOUND(link_speed); + default: + return 0; + } +} + +/* Prepare VPORT RL runtime init values. 
+ * Return -1 on error. + */ +static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, + u16 start_rl, + u16 num_rls, + u32 link_speed, + struct init_qm_rl_params *rl_params) +{ + u16 i, rl_id; + + if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); + return -1; + } + + /* Go over all PF VPORTs */ + for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { + u32 upper_bound, inc_val; + + upper_bound = + qed_get_vport_rl_upper_bound((enum init_qm_rl_type) + rl_params[i].vport_rl_type, + link_speed); + + inc_val = + QM_RL_INC_VAL(rl_params[i].vport_rl ? + rl_params[i].vport_rl : link_speed); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, + "Invalid RL rate - limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, + inc_val); + } + + return 0; +} + /* Prepare Tx PQ mapping runtime init values for the specified PF */ -static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params, - u32 base_mem_addr_4kb) +static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) { u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; struct init_qm_vport_params *vport_params = p_params->vport_params; @@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u16 *p_first_tx_pq_id, vport_id_in_pf; - struct qm_rf_pq_map_e4 tx_pq_map; + struct qm_rf_pq_map tx_pq_map; u8 tc_id = pq_params[i].tc_id; bool is_vf_pq; u8 ext_voq; @@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { u32 map_val = - (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | - (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT); + (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); /* Create new VP PQ */ *p_first_tx_pq_id = pq_id; @@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, - E4, pq_id, *p_first_tx_pq_id, pq_params[i].rl_valid, @@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); + + return 0; } /* Prepare Other PQ mapping runtime init values for the specified PF */ @@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. 
*/ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, - struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; @@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, u8 ext_voq; u16 i; - inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); @@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u16 num_vports, struct init_qm_vport_params *vport_params) { - u16 vport_pq_id, i; + u16 vport_pq_id, wfq, i; u32 inc_val; u8 tc; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].wfq) - continue; - - inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq); - if (inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, - "Invalid VPORT WFQ weight configuration\n"); - return -1; - } - /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { + /* Check if VPORT/TC is valid */ vport_pq_id = vport_params[i].first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) { - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPCRD_RT_OFFSET + - vport_pq_id, - (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); + if (vport_pq_id == QM_INVALID_PQ_ID) + continue; + + /* Find WFQ weight (per VPORT or per VPORT+TC) */ + wfq = vport_params[i].wfq; + wfq = wfq ? wfq : vport_params[i].tc_wfq[tc]; + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT WFQ weight configuration\n"); + return -1; } + + /* Config registers */ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + + vport_pq_id, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } @@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, QM_OPPOR_LINE_VOQ_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, + p_params->pf_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, + p_params->vport_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, + p_params->pf_rl_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, - p_params->global_rl_en); + p_params->global_rl_en ? 
1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); @@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, u16 i; u8 tc; - /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) @@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, p_params->num_tids, 0); /* Map Tx PQs */ - qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) + return -1; /* Init PF WFQ */ if (p_params->pf_wfq) @@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; + /* Set VPORT RL */ + if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, + p_params->num_rls, p_params->link_speed, + p_params->rl_params)) + return -1; + return 0; } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { - u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) { + int result = 0; u16 vport_pq_id; - u32 inc_val; u8 tc; - inc_val = QM_WFQ_INC_VAL(wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { + vport_pq_id = first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) + result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, + vport_pq_id, wfq); + } + + return result; +} + +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 wfq) +{ + u32 inc_val; + + if (first_tx_pq_id == QM_INVALID_PQ_ID) + return -1; + + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); return -1; } - /* A VPORT can have several VPORT PQ IDs for various TCs */ - for (tc = 0; tc < NUM_OF_TCS; tc++) { - vport_pq_id = first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) - qed_wr(p_hwfn, - p_ptt, - QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); - } + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, + inc_val); return 0; } int qed_init_global_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type) { - u32 inc_val; + u32 inc_val, upper_bound; + upper_bound = + (vport_rl_type == + QM_RL_TYPE_QCN) ? 
QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : + QM_INITIAL_VOQ_BYTE_CRD; inc_val = QM_RL_INC_VAL(rate_limit); - if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) { - DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n"); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLUPPERBOUND + rl_id * 4, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); return 0; @@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, __le32 *p_data, u32 addr, u32 len_in_dwords) { - struct qed_dmae_params params = {}; + struct qed_dmae_params params = { 0 }; u32 *data_cpu; int rc; @@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, + eth_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, + ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. 
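The check below verifies that the register still holds the default PRS_ETH_OUTPUT_FORMAT value, i.e. that it has not already been switched to the tunnelled output format.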
*/ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, bool eth_geneve_enable, bool ip_geneve_enable) { u32 reg_val; - u8 shift; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, + eth_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, + ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, /* Update DORQ registers */ qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, ip_geneve_enable ? 1 : 0); } #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 -#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) @@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, /* update PRS FIC register */ qed_wr(p_hwfn, p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); } else { /* clear VXLAN_NO_L2_ENABLE flag */ @@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - struct regpair ram_line = { }; + struct regpair ram_line = { 0 }; /* Disable gft search for PF */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); @@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, storm_buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE); storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + if (storm_id >= NUM_STORMS) + break; storm_mem_desc = allocated_mem + storm_id; storm_mem_desc->size = storm_buf_size * sizeof(u32); @@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, /* If memory allocation has failed, free all allocated memory */ if (buf_offset < buf_size) { - qed_fw_overlay_mem_free(p_hwfn, allocated_mem); + qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); return NULL; } @@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, } void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem) + struct phys_mem_desc **fw_overlay_mem) { u8 storm_id; - if (!fw_overlay_mem) + if (!fw_overlay_mem || !(*fw_overlay_mem)) return; for (storm_id = 0; storm_id < NUM_STORMS; 
storm_id++) { struct phys_mem_desc *storm_mem_desc = - (struct phys_mem_desc *)fw_overlay_mem + storm_id; + (struct phys_mem_desc *)*fw_overlay_mem + storm_id; /* Free Storm's physical memory */ if (storm_mem_desc->virt_addr) @@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, } /* Free allocated virtual memory */ - kfree(fw_overlay_mem); + kfree(*fw_overlay_mem); + *fw_overlay_mem = NULL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 7e6c6389523bda..b3bf9899c1a1a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -15,6 +15,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -46,30 +47,32 @@ static u32 pxp_global_win[] = { /* IRO Array */ static const u32 iro_arr[] = { 0x00000000, 0x00000000, 0x00080000, + 0x00004478, 0x00000008, 0x00080000, 0x00003288, 0x00000088, 0x00880000, - 0x000058e8, 0x00000020, 0x00200000, + 0x000058a8, 0x00000020, 0x00200000, + 0x00003188, 0x00000008, 0x00080000, 0x00000b00, 0x00000008, 0x00040000, 0x00000a80, 0x00000008, 0x00040000, 0x00000000, 0x00000008, 0x00020000, 0x00000080, 0x00000008, 0x00040000, 0x00000084, 0x00000008, 0x00020000, - 0x00005718, 0x00000004, 0x00040000, - 0x00004dd0, 0x00000000, 0x00780000, + 0x00005798, 0x00000004, 0x00040000, + 0x00004e50, 0x00000000, 0x00780000, 0x00003e40, 0x00000000, 0x00780000, - 0x00004480, 0x00000000, 0x00780000, + 0x00004500, 0x00000000, 0x00780000, 0x00003210, 0x00000000, 0x00780000, 0x00003b50, 0x00000000, 0x00780000, 0x00007f58, 0x00000000, 0x00780000, - 0x00005f58, 0x00000000, 0x00080000, + 0x00005fd8, 0x00000000, 0x00080000, 0x00007100, 0x00000000, 0x00080000, - 0x0000aea0, 0x00000000, 0x00080000, + 0x0000af20, 0x00000000, 0x00080000, 0x00004398, 0x00000000, 0x00080000, 0x0000a5a0, 0x00000000, 0x00080000, 0x0000bde8, 0x00000000, 0x00080000, 0x00000020, 0x00000004, 0x00040000, - 0x000056c8, 0x00000010, 0x00100000, + 0x00005688, 0x00000010, 0x00100000, 0x0000c210, 0x00000030, 0x00300000, - 0x0000b088, 0x00000038, 0x00380000, + 0x0000b108, 0x00000038, 0x00380000, 0x00003d20, 0x00000080, 0x00400000, 0x0000bf60, 0x00000000, 0x00040000, 0x00004560, 0x00040080, 0x00040000, @@ -77,11 +80,11 @@ static const u32 iro_arr[] = { 0x00003d60, 0x00000080, 0x00200000, 0x00008960, 0x00000040, 0x00300000, 0x0000e840, 0x00000060, 0x00600000, - 0x00004618, 0x00000080, 0x00380000, - 0x00010738, 0x000000c0, 0x00c00000, + 0x00004698, 0x00000080, 0x00380000, + 0x000107b8, 0x000000c0, 0x00c00000, 0x000001f8, 0x00000002, 0x00020000, - 0x0000a2a0, 0x00000000, 0x01080000, - 0x0000a3a8, 0x00000008, 0x00080000, + 0x0000a260, 0x00000000, 0x01080000, + 0x0000a368, 0x00000008, 0x00080000, 0x000001c0, 0x00000008, 0x00080000, 0x000001f8, 0x00000008, 0x00080000, 0x00000ac0, 0x00000008, 0x00080000, @@ -90,39 +93,46 @@ static const u32 iro_arr[] = { 0x00000280, 0x00000008, 0x00080000, 0x00000680, 0x00080018, 0x00080000, 0x00000b78, 0x00080018, 0x00020000, - 0x0000c640, 0x00000050, 0x003c0000, - 0x00012038, 0x00000018, 0x00100000, - 0x00011b00, 0x00000040, 0x00180000, - 0x000095d0, 0x00000050, 0x00200000, + 0x0000c600, 0x00000058, 0x003c0000, + 0x00012038, 0x00000020, 0x00100000, + 0x00011b00, 0x00000048, 0x00180000, + 0x00009650, 0x00000050, 0x00200000, 0x00008b10, 0x00000040, 0x00280000, - 0x00011640, 0x00000018, 0x00100000, - 0x0000c828, 0x00000048, 0x00380000, - 0x00011710, 0x00000020, 
0x00200000, - 0x00004650, 0x00000080, 0x00100000, + 0x000116c0, 0x00000018, 0x00100000, + 0x0000c808, 0x00000048, 0x00380000, + 0x00011790, 0x00000020, 0x00200000, + 0x000046d0, 0x00000080, 0x00100000, 0x00003618, 0x00000010, 0x00100000, - 0x0000a968, 0x00000008, 0x00010000, + 0x0000a9e8, 0x00000008, 0x00010000, 0x000097a0, 0x00000008, 0x00010000, - 0x00011990, 0x00000008, 0x00010000, - 0x0000f018, 0x00000008, 0x00010000, - 0x00012628, 0x00000008, 0x00010000, - 0x00011da8, 0x00000008, 0x00010000, - 0x0000aa78, 0x00000030, 0x00100000, - 0x0000d768, 0x00000028, 0x00280000, - 0x00009a58, 0x00000018, 0x00180000, - 0x00009bd8, 0x00000008, 0x00080000, - 0x00013a18, 0x00000008, 0x00080000, - 0x000126e8, 0x00000018, 0x00180000, - 0x0000e608, 0x00500288, 0x00100000, - 0x00012970, 0x00000138, 0x00280000, + 0x00011a10, 0x00000008, 0x00010000, + 0x0000e9f8, 0x00000008, 0x00010000, + 0x00012648, 0x00000008, 0x00010000, + 0x000121c8, 0x00000008, 0x00010000, + 0x0000af08, 0x00000030, 0x00100000, + 0x0000d748, 0x00000028, 0x00280000, + 0x00009e68, 0x00000018, 0x00180000, + 0x00009fe8, 0x00000008, 0x00080000, + 0x00013ea8, 0x00000008, 0x00080000, + 0x00012f18, 0x00000018, 0x00180000, + 0x0000dfe8, 0x00500288, 0x00100000, + 0x000131a0, 0x00000138, 0x00280000, }; void qed_init_iro_array(struct qed_dev *cdev) { - cdev->iro_arr = iro_arr; + cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { + if (rt_offset >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing %u in rt_data at index %u!\n", + val, rt_offset); + return; + } + p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } @@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, { size_t i; + if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing values in rt_data at indices %u-%u!\n", + rt_offset, + (u32)(rt_offset + size - 1)); + return; + } + for (i = 0; i < size / sizeof(u32); i++) { p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; p_hwfn->rt_data.b_valid[rt_offset + i] = true; @@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, return rc; /* invalidate after writing */ - for (j = i; j < i + segment; j++) + for (j = i; j < (u32)(i + segment); j++) p_valid[j] = false; /* Jump over the entire segment, including invalid entry */ @@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, u32 fill, u32 fill_count) + u32 addr, u32 fill_count) { static u32 zero_buffer[DMAE_MAX_RW_SIZE]; struct qed_dmae_params params = {}; @@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_ZEROS: data = le32_to_cpu(p_cmd->args.zeros_count); if (b_must_dmae || (b_can_dmae && (data >= 64))) - rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data); else qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; @@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - val = qed_rd(p_hwfn, p_ptt, addr); if (poll == INIT_POLL_NONE) @@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, INIT_IF_MODE_OP_CMD_OFFSET); } -static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, - struct init_if_phase_op *p_cmd, +static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd, u32 phase, u32 
phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); @@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, modes); break; case INIT_OP_IF_PHASE: - cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase, + cmd_num += qed_init_cmd_phase(&cmd->if_phase, phase, phase_id); break; case INIT_OP_DELAY: diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index a573c892198204..12e5c4e370d41a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -12,23 +12,24 @@ #include "qed.h" /** - * @brief qed_init_iro_array - init iro_arr. + * qed_init_iro_array(): init iro_arr. * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_iro_array(struct qed_dev *cdev); /** - * @brief qed_init_run - Run the init-sequence. + * qed_init_run(): Run the init-sequence. * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. * - * @param p_hwfn - * @param p_ptt - * @param phase - * @param phase_id - * @param modes - * @return _qed_status_t + * Return: _qed_status_t */ int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn, int modes); /** - * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. * + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_hwfn_deallocate + * qed_init_free(): Init HW function deallocate. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_init_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * qed_init_store_rt_reg(): Store a configuration value in the RT array. * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Val. * - * @param p_hwfn - * @param rt_offset - * @param val + * Return: Void. */ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, @@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, #define OVERWRITE_RT_REG(hwfn, offset, val) \ qed_init_store_rt_reg(hwfn, offset, val) -/** - * @brief - * - * - * @param p_hwfn - * @param rt_offset - * @param val - * @param size - */ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *val, size_t size); #define STORE_RT_REG_AGG(hwfn, offset, val) \ - qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) + qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val)) /** - * @brief - * Initialize GTT global windows and set admin window - * related params of GTT/PTT to default values. + * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. 
*/ void qed_gtt_init(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index f78e6055f6541e..a97f691839e04f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -36,7 +36,7 @@ struct qed_sb_sp_info { struct qed_sb_info sb_info; /* per protocol index data */ - struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; + struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; enum qed_attention_type { @@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, else SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); - sb_offset = igu_sb_id * PIS_PER_SB_E4; + sb_offset = igu_sb_id * PIS_PER_SB; pi_offset = sb_offset + pi_index; if (p_hwfn->hw_init_done) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1f0..84c17e97f5692c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. */ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief qed_int_igu_disable_int - disable device interrupts + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc - * register from igu. + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u64 + * Return: u64. */ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); #define QED_SP_SB_ID 0xffff /** - * @brief qed_int_sb_init - Initializes the sb_info structure. + * qed_int_sb_init(): Initializes the sb_info structure. * - * once the structure is initialized it can be passed to sb related functions. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: points to an uninitialized (but allocated) sb_info structure + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physial address. + * @sb_id: the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block * - * @param p_hwfn - * @param p_ptt - * @param sb_info points to an uninitialized (but - * allocated) sb_info structure - * @param sb_virt_addr - * @param sb_phy_addr - * @param sb_id the sb_id to be used (zero based in driver) - * should use QED_SP_SB_ID for SP Status block + * Return: int. * - * @return int + * Once the structure is initialized it can be passed to sb related functions. */ int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr, u16 sb_id); /** - * @brief qed_int_sb_setup - Setup the sb. + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. * - * @param p_hwfn - * @param p_ptt - * @param sb_info initialized sb_info structure + * Return: Void. 
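+ *
+ * The structure passed in must already have been initialized with
+ * qed_int_sb_init().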
*/ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info); /** - * @brief qed_int_sb_release - releases the sb_info structure. + * qed_int_sb_release(): Releases the sb_info structure. * - * once the structure is released, it's memory can be freed + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). * - * @param p_hwfn - * @param sb_info points to an allocated sb_info structure - * @param sb_id the sb_id to be used (zero based in driver) - * should never be equal to QED_SP_SB_ID - * (SP Status block) + * Return: int. * - * @return int + * Once the structure is released, it's memory can be freed. */ int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id); /** - * @brief qed_int_sp_dpc - To be called when an interrupt is received on the - * default status block. + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. * - * @param p_hwfn - pointer to hwfn + * @t: Tasklet. + * + * Return: Void. * */ void qed_int_sp_dpc(struct tasklet_struct *t); /** - * @brief qed_int_get_num_sbs - get the number of status - * blocks configured for this funciton in the igu. + * qed_int_get_num_sbs(): Get the number of status blocks configured + * for this funciton in the igu. * - * @param p_hwfn - * @param p_sb_cnt_info + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. * - * @return int - number of status blocks configured + * Return: Void. */ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info); /** - * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR * release. The API need to be called after releasing all slowpath IRQs * of the device. * - * @param cdev + * @cdev: Qed dev pointer. * + * Return: Void. */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** - * @brief qed_int_attn_clr_enable - sets whether the general behavior is + * qed_int_attn_clr_enable: Sets whether the general behavior is * preventing attentions from being reasserted, or following the * attributes of the specific attention. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable + * + * Return: Void. * */ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** - * @brief - Doorbell Recovery handler. + * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_SB_EVENT_MASK 0x0003 #define SB_ALIGNED_SIZE(p_hwfn) \ - ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) #define QED_SB_INVALID_IDX 0xffff @@ -223,30 +235,34 @@ struct qed_igu_info { }; /** - * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. 
*/ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id * - * @param p_hwfn - * @param sb_id - user provided sb_id + * @p_hwfn: HW device data. + * @sb_id: user provided sb_id. * - * @return an index inside IGU CAM where the SB resides + * Return: An index inside IGU CAM where the SB resides. */ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief return a pointer to an unused valid SB + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB * - * @param p_hwfn - * @param b_is_pf - true iff we want a SB belonging to a PF + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. * - * @return point to an igu_block, NULL if none is available + * Return: Point to an igu_block, NULL if none is available. */ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf); @@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * qed_int_igu_read_cam(): Reads the IGU CAM. * This function needs to be called during hardware * prepare. It reads the info from igu cam to know which * status block is the default / base status block etc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_int_register_cb - Register callback func for - * slowhwfn statusblock. - * - * Every protocol that uses the slowhwfn status block - * should register a callback function that will be called - * once there is an update of the sp status block. - * - * @param p_hwfn - * @param comp_cb - function to be called when there is an - * interrupt on the sp sb - * - * @param cookie - passed to the callback function - * @param sb_idx - OUT parameter which gives the chosen index - * for this protocol. - * @param p_fw_cons - pointer to the actual address of the - * consumer for this protocol. - * - * @return int + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb + * @cookie: Passed to the callback function + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. */ int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, @@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons); /** - * @brief qed_int_unregister_cb - Unregisters callback - * function from sp sb. - * Partner of qed_int_register_cb -> should be called - * when no longer required. + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. * - * @param p_hwfn - * @param pi + * Return: Int. 
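+ *
+ * The @pi to pass is the index returned through @sb_idx by
+ * qed_int_register_cb().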
* - * @return int + * Partner of qed_int_register_cb -> should be called + * when no longer required. */ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi); /** - * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u16 + * Return: u16. */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); /** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - igu status block id - * @param opaque - opaque fid of the sb owner. - * @param b_set - set(1) / clear(0) + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, bool b_set); /** - * @brief qed_int_cau_conf - configure cau for a given status - * block - * - * @param p_hwfn - * @param ptt - * @param sb_phys - * @param igu_sb_id - * @param vf_number - * @param vf_valid + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB Physical. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. */ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid); /** - * @brief qed_int_alloc + * qed_int_alloc(): QED interrupt alloc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_free + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_int_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_setup + * qed_int_setup(): QED interrupt setup. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Enable Interrupt & Attention for hw function + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. * - * @param p_hwfn - * @param p_ptt - * @param int_mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrut mode * - * @return int + * Return: Int. */ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief - Initialize CAU status block entry + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer SB entry. + * @pf_id: PF number + * @vf_number: VF number + * @vf_valid: VF valid or not. * - * @param p_hwfn - * @param p_sb_entry - * @param pf_id - * @param vf_number - * @param vf_valid + * Return: Void. 
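+ *
+ * The entry prepared here is typically what qed_int_cau_conf_sb() later
+ * programs into the CAU for the given status block.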
*/ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h new file mode 100644 index 00000000000000..3ccdd3b1d8cb41 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_IRO_HSI_H +#define _QED_IRO_HSI_H + +#include + +enum { + IRO_YSTORM_FLOW_CONTROL_MODE_GTT, + IRO_PSTORM_PKT_DUPLICATION_CFG, + IRO_TSTORM_PORT_STAT, + IRO_TSTORM_LL2_PORT_STAT, + IRO_TSTORM_PKT_DUPLICATION_CFG, + IRO_USTORM_VF_PF_CHANNEL_READY_GTT, + IRO_USTORM_FLR_FINAL_ACK_GTT, + IRO_USTORM_EQE_CONS_GTT, + IRO_USTORM_ETH_QUEUE_ZONE_GTT, + IRO_USTORM_COMMON_QUEUE_CONS_GTT, + IRO_XSTORM_PQ_INFO, + IRO_XSTORM_INTEG_TEST_DATA, + IRO_YSTORM_INTEG_TEST_DATA, + IRO_PSTORM_INTEG_TEST_DATA, + IRO_TSTORM_INTEG_TEST_DATA, + IRO_MSTORM_INTEG_TEST_DATA, + IRO_USTORM_INTEG_TEST_DATA, + IRO_XSTORM_OVERLAY_BUF_ADDR, + IRO_YSTORM_OVERLAY_BUF_ADDR, + IRO_PSTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_OVERLAY_BUF_ADDR, + IRO_MSTORM_OVERLAY_BUF_ADDR, + IRO_USTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_LL2_RX_PRODS_GTT, + IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_USTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT, + IRO_MSTORM_QUEUE_STAT, + IRO_MSTORM_TPA_TIMEOUT_US, + IRO_MSTORM_ETH_VF_PRODS, + IRO_MSTORM_ETH_PF_PRODS_GTT, + IRO_MSTORM_ETH_PF_STAT, + IRO_USTORM_QUEUE_STAT, + IRO_USTORM_ETH_PF_STAT, + IRO_PSTORM_QUEUE_STAT, + IRO_PSTORM_ETH_PF_STAT, + IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT, + IRO_TSTORM_ETH_PRS_INPUT, + IRO_ETH_RX_RATE_LIMIT, + IRO_TSTORM_ETH_RSS_UPDATE_GTT, + IRO_XSTORM_ETH_QUEUE_ZONE_GTT, + IRO_YSTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_GRQ_PROD, + IRO_TSTORM_SCSI_CMDQ_CONS_GTT, + IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_TSTORM_ISCSI_RX_STATS, + IRO_MSTORM_ISCSI_RX_STATS, + IRO_USTORM_ISCSI_RX_STATS, + IRO_XSTORM_ISCSI_TX_STATS, + IRO_YSTORM_ISCSI_TX_STATS, + IRO_PSTORM_ISCSI_TX_STATS, + IRO_TSTORM_FCOE_RX_STATS, + IRO_PSTORM_FCOE_TX_STATS, + IRO_PSTORM_RDMA_QUEUE_STAT, + IRO_TSTORM_RDMA_QUEUE_STAT, + IRO_XSTORM_RDMA_ASSERT_LEVEL, + IRO_YSTORM_RDMA_ASSERT_LEVEL, + IRO_PSTORM_RDMA_ASSERT_LEVEL, + IRO_TSTORM_RDMA_ASSERT_LEVEL, + IRO_MSTORM_RDMA_ASSERT_LEVEL, + IRO_USTORM_RDMA_ASSERT_LEVEL, + IRO_XSTORM_IWARP_RXMIT_STATS, + IRO_TSTORM_ROCE_EVENTS_STAT, + IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS, + IRO_YSTORM_ROCE_ERROR_STATS, + IRO_PSTORM_ROCE_DCQCN_SENT_STATS, + IRO_USTORM_ROCE_CQE_STATS, +}; + +/* Pstorm LiteL2 queue statistics */ + +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \ + + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size) + +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size) + +/* Ustorm LiteL2 queue statistics */ +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * 
IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size) + +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[IRO_ETH_RX_RATE_LIMIT].base \ + + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \ + (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \ + + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1)) +#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone + * size mode. + */ +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[IRO_MSTORM_ETH_VF_PRODS].base \ + + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \ + + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size) + +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size) + +/* Mstorm overlay buffer host address */ +#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base) +#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_MSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1)) +#define MSTORM_QUEUE_STAT_SIZ (IRO[IRO_MSTORM_QUEUE_STAT].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \ + + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1)) +#define 
PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size) + +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_FCOE_TX_STATS].base \ + + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size) + +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size) + +/* Pstorm overlay buffer host address */ +#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base) +#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size) + +/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg + * data type. + */ +#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1)) +#define PSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_PSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \ + + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size) + +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
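+ * The macro below resolves the per-PF slot as base + pf_id * stride, e.g.
+ * TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(2) expands to
+ * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base + 2 * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1.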
+ */ +#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \ + + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1)) +#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE\ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size) + +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_FCOE_RX_STATS].base \ + + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size) + +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size) + +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_LL2_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size) + +/* Tstorm producers */ +#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \ + + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1)) +#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size) + +/* Tstorm overlay buffer host address */ +#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base) + +#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size) + +/* Tstorm LL2 packet duplication configuration. + * Use tstorm_pkt_dup_cfg data type. + */ +#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1)) +#define TSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size) + +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \ + + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. 
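+ * Two strides are applied: storage_func_id selects the function's block (m1)
+ * and bdq_id selects the queue within it (m2), giving
+ * base + storage_func_id * m1 + bdq_id * m2.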
+ */ +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \ + + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1)) +#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1)) +#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_EQE_CONS_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1)) +#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1)) +#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size) + +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_USTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size) + +/* Ustorm overlay buffer host address */ +#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base) +#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_USTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size) + +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_USTORM_ROCE_CQE_STATS].base \ + + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_USTORM_TOE_CQ_PROD].base \ + + ((rss_id) * 
IRO[IRO_USTORM_TOE_CQ_PROD].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[IRO_USTORM_TOE_GRQ_PROD].base \ + + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size) + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \ + + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1)) +#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \ + (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size) + +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \ + + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size) + +/* Xstorm overlay buffer host address */ +#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base) +#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size) + +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) \ + (IRO[IRO_XSTORM_PQ_INFO].base \ + + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size) + +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size) + +/* Ystorm flow control mode. 
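A single global location, so the offset macro below takes no index and adds no stride.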
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base) +#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size) + +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size) + +/* Ystorm overlay buffer host address */ +#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base) +#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_YSTORM_TOE_CQ_PROD].base \ + + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size) + +/* Per-chip offsets in iro_arr in dwords */ +#define E4_IRO_ARR_OFFSET 0 +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index db926d8b303347..511ab214eb9c8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -29,6 +29,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" @@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dab7a5d09f874d..dec2b00259d42d 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** - * @brief - Fills provided statistics struct with statistics. + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. * - * @param cdev - * @param stats - points to struct that will be filled with statistics. + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 186d0048a9d14e..1d1d4caad680b4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT); + p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT); p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index dfaf10edfabfd4..2edd6bf64a3cc2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -28,6 +28,7 @@ #include "qed_dev_api.h" #include #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_l2.h" @@ -37,7 +38,6 @@ #include "qed_sp.h" #include "qed_sriov.h" - #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 @@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, { u32 init_prod_val = 0; - *pp_prod = p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), @@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, { int rc; - rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, pbl_size, qed_get_cm_pq_idx_mcos(p_hwfn, tc)); @@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, struct qed_ntuple_filter_params *p_params) { - struct rx_update_gft_filter_data *p_ramrod = NULL; + struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; @@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } rc = qed_sp_init_request(p_hwfn, &p_ent, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + - 
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } -static int qed_configure_filter(struct qed_dev *cdev, - struct qed_filter_params *params) -{ - enum qed_filter_rx_mode_type accept_flags; - - switch (params->type) { - case QED_FILTER_TYPE_UCAST: - return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); - case QED_FILTER_TYPE_MCAST: - return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); - case QED_FILTER_TYPE_RX_MODE: - accept_flags = params->filter.accept_flags; - return qed_configure_filter_rx_mode(cdev, accept_flags); - default: - DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); - return -EINVAL; - } -} - static int qed_configure_arfs_searcher(struct qed_dev *cdev, enum qed_filter_config_mode mode) { @@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } -static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac) { int i, ret; @@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .q_rx_stop = &qed_stop_rxq, .q_tx_start = &qed_start_txq, .q_tx_stop = &qed_stop_txq, - .filter_config = &qed_configure_filter, + .filter_config_rx_mode = &qed_configure_filter_rx_mode, + .filter_config_ucast = &qed_configure_filter_ucast, + .filter_config_mcast = &qed_configure_filter_mcast, .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8eceeebb1a7be1..a538cf478c14e7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -92,18 +92,18 @@ struct qed_filter_mcast { }; /** - * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. * - * @param p_hwfn - * @param p_rxq Handler of queue to close - * @param eq_completion_only If True completion will be on - * EQe, if False completion will be - * on EQe if p_hwfn opaque - * different from the RXQ opaque - * otherwise on CQe. - * @param cqe_completion If True completion will be - * receive on CQe. - * @return int + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close + * @eq_completion_only: If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @cqe_completion: If True completion will be receive on CQe. + * + * Return: Int. */ int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, bool eq_completion_only, bool cqe_completion); /** - * @brief qed_eth_tx_queue_stop - closes a Tx queue + * qed_eth_tx_queue_stop(): Closes a Tx queue. * - * @param p_hwfn - * @param p_txq - handle to Tx queue needed to be closed + * @p_hwfn: HW device data. + * @p_txq: handle to Tx queue needed to be closed. * - * @return int + * Return: Int. 
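Note on the removal of qed_configure_filter() above: the single type-switched dispatcher is replaced by three dedicated callbacks on qed_eth_ops. A hypothetical caller-side fragment is sketched below; the wrapper function and variable names are invented, and it assumes the qed_eth_ops function-pointer members match the callbacks registered in this hunk.

    /* Hypothetical consumer-side sketch (not part of this patch): with the
     * dispatcher gone, a caller picks the specific callback instead of
     * filling struct qed_filter_params with a type tag.
     */
    static int demo_set_rx_mode(struct qed_dev *cdev,
                                const struct qed_eth_ops *ops,
                                enum qed_filter_rx_mode_type accept)
    {
            return ops->filter_config_rx_mode(cdev, accept);
    }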
*/ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); @@ -146,7 +146,6 @@ struct qed_sp_vport_start_params { int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); - struct qed_filter_accept_flags { u8 update_rx_mode_config; u8 update_tx_mode_config; @@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_vport_stop - - * - * This ramrod closes a VPort after all its RX and TX queues are terminated. - * An Assert is generated if any queues are left open. + * qed_sp_vport_stop: This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. * - * @param p_hwfn - * @param opaque_fid - * @param vport_id VPort ID + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID + * @vport_id: VPort ID. * - * @return int + * Return: Int. */ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); @@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_rx_eth_queues_update - + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer Comp data. * - * This ramrod updates an RX queue. It is used for setting the active state - * of the queue and updating the TPA and SGE parameters. + * Return: Int. * - * @note At the moment - only used by non-linux VFs. - * - * @param p_hwfn - * @param pp_rxq_handlers An array of queue handlers to be updated. - * @param num_rxqs number of queues to update. - * @param complete_cqe_flg Post completion to the CQE Ring if set - * @param complete_event_flg Post completion to the Event Ring if set - * @param comp_mode - * @param p_comp_data - * - * @return int + * Note At the moment - only used by non-linux VFs. */ int @@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); /** - * *@brief qed_arfs_mode_configure - - * - **Enable or disable rfs mode. It must accept atleast one of tcp or udp true - **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * qed_arfs_mode_configure(): Enable or disable rfs mode. + * It must accept at least one of tcp or udp true + * and at least one of ipv4 or ipv6 true to enable + * rfs mode. * - **@param p_hwfn - **@param p_ptt - **@param p_cfg_params - arfs mode configuration parameters. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_cfg_params: arfs mode configuration parameters. * + * Return. Void. */ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params); /** - * @brief - qed_configure_rfs_ntuple_filter + * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add + * or remove arfs hw filter * - * This ramrod should be used to add or remove arfs hw filter + * @p_hwfn: HW device data. 
+ * @p_cb: Used for QED_SPQ_MODE_CB,where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @p_params: Pointer to params. * - * @params p_hwfn - * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize - * it with cookie and callback function address, if not - * using this mode then client must pass NULL. - * @params p_params + * Return: Void. */ int qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, @@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); /** - * @brief - Starts an Rx queue, when queue_cid is already prepared + * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param bd_max_bytes - * @param bd_chain_phys_addr - * @param cqe_pbl_addr - * @param cqe_pbl_size + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @bd_max_bytes: Max bytes. + * @bd_chain_phys_addr: Chain physcial address. + * @cqe_pbl_addr: PBL address. + * @cqe_pbl_size: PBL size. * - * @return int + * Return: Int. */ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, @@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); /** - * @brief - Starts a Tx queue, where queue_cid is already prepared + * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param pbl_addr - * @param pbl_size - * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL size. + * @pq_id: Parameters for choosing the PQ for this Tx queue. * - * @return int + * Return: Int. */ int qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c46a7f756ed5f3..ed274f033626d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -28,6 +28,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_ll2.h" @@ -43,6 +44,8 @@ #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) +#define QED_LL2_INVALID_STATS_ID 0xff + struct qed_cb_ll2_info { int rx_cnt; u32 rx_size; @@ -62,6 +65,29 @@ struct qed_ll2_buffer { dma_addr_t phys_addr; }; +static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, + u8 ll2_queue_type, u8 qid) +{ + u8 stats_id; + + /* For legacy (RAM based) queues, the stats_id will be set as the + * queue_id. Otherwise (context based queue), it will be set to + * the "abs_pf_id" offset from the end of the RAM based queue IDs. + * If the final value exceeds the total counters amount, return + * INVALID value to indicate that the stats for this connection should + * be disabled. 
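A worked instance of the stats-id mapping described in the comment below (qed_ll2_handle_to_stats_id); the constant values are invented for the illustration, the real MAX_NUM_LL2_* limits come from the firmware HSI headers.

    /* Assume MAX_NUM_LL2_RX_RAM_QUEUES == 32 and
     * MAX_NUM_LL2_TX_STATS_COUNTERS == 48 (values invented):
     *
     *   legacy queue,  qid = 5         -> stats_id = 5            (valid)
     *   ctx queue, abs_pf_id = 3       -> stats_id = 32 + 3 = 35  (valid)
     *   ctx queue, abs_pf_id = 20      -> stats_id = 32 + 20 = 52 >= 48
     *                                     -> QED_LL2_INVALID_STATS_ID
     */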
+ */ + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + stats_id = qid; + else + stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; + + if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) + return stats_id; + else + return QED_LL2_INVALID_STATS_ID; +} + static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, @@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev, } static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, - struct qed_ll2_buffer *buffer) + struct qed_ll2_buffer *buffer) { spin_lock_bh(&cdev->ll2->lock); @@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) num_bds_in_packet = p_pkt->bd_used; list_del(&p_pkt->list_entry); - if (num_bds < num_bds_in_packet) { + if (unlikely(num_bds < num_bds_in_packet)) { DP_NOTICE(p_hwfn, "Rest of BDs does not cover whole packet\n"); goto out; @@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, if (!list_empty(&p_rx->active_descq)) p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); - if (!p_pkt) { + if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "[%d] LL2 Rx completion but active_descq is empty\n", p_ll2_conn->input.conn_type); @@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); else qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); - if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) + if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); @@ -597,18 +623,18 @@ static bool qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, struct core_rx_slow_path_cqe *p_cqe) { - struct ooo_opaque *iscsi_ooo; + struct ooo_opaque *ooo_opq; u32 cid; if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) return false; - iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; - if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) + ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data; + if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES) return false; /* Need to make a flush */ - cid = le32_to_cpu(iscsi_ooo->cid); + cid = le32_to_cpu(ooo_opq->cid); qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); return true; @@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; struct qed_ooo_buffer *p_buffer; - struct ooo_opaque *iscsi_ooo; + struct ooo_opaque *ooo_opq; u8 placement_offset = 0; u8 cqe_type; @@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, &cqe->rx_cqe_sp)) continue; - if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { + if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", cqe_type); @@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); packet_length = le16_to_cpu(p_cqe_fp->packet_length); vlan = le16_to_cpu(p_cqe_fp->vlan); - iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; - qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, - iscsi_ooo); - cid = le32_to_cpu(iscsi_ooo->cid); + ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq); + cid = le32_to_cpu(ooo_opq->cid); /* Process delete isle first */ - if (iscsi_ooo->drop_size) + if (ooo_opq->drop_size) qed_ooo_delete_isles(p_hwfn, 
p_hwfn->p_ooo_info, cid, - iscsi_ooo->drop_isle, - iscsi_ooo->drop_size); + ooo_opq->drop_isle, + ooo_opq->drop_size); - if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) + if (ooo_opq->ooo_opcode == TCP_EVENT_NOP) continue; /* Now process create/add/join isles */ - if (list_empty(&p_rx->active_descq)) { + if (unlikely(list_empty(&p_rx->active_descq))) { DP_NOTICE(p_hwfn, "LL2 OOO RX chain has no submitted buffers\n" ); @@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); - if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { - if (!p_pkt) { + if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN || + ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) { + if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "LL2 OOO RX packet is not valid\n"); return -EIO; @@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_chain_consume(&p_rx->rxq_chain); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); - switch (iscsi_ooo->ooo_opcode) { + switch (ooo_opq->ooo_opcode) { case TCP_EVENT_ADD_NEW_ISLE: qed_ooo_add_new_isle(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer); break; case TCP_EVENT_ADD_ISLE_RIGHT: qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer, QED_OOO_RIGHT_BUF); break; @@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer, QED_OOO_LEFT_BUF); break; @@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle + - 1, + ooo_opq->ooo_isle + 1, p_buffer, QED_OOO_LEFT_BUF); qed_ooo_join_isles(p_hwfn, p_hwfn->p_ooo_info, - cid, iscsi_ooo->ooo_isle); + cid, ooo_opq->ooo_isle); break; case TCP_EVENT_ADD_PEN: num_ooo_add_to_peninsula++; @@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, } else { DP_NOTICE(p_hwfn, "Unexpected event (%d) TX OOO completion\n", - iscsi_ooo->ooo_opcode); + ooo_opq->ooo_opcode); } } @@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return 0; - if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn))) return 0; new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); - if (!num_bds) + if (unlikely(!num_bds)) return 0; while (num_bds) { @@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); - if (!p_pkt) + if (unlikely(!p_pkt)) return -EINVAL; - if (p_pkt->bd_used != 1) { + if (unlikely(p_pkt->bd_used != 1)) { DP_NOTICE(p_hwfn, "Unexpectedly many BDs(%d) in TX OOO completion\n", p_pkt->bd_used); @@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 
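Note on the likely()/unlikely() conversions in these handlers: they are branch-prediction hints only and do not change behaviour. A minimal, self-contained illustration of the idiom (not driver code; in the kernel the macros expand to __builtin_expect() as shown):

    /* Minimal illustration of the annotation idiom used above. */
    #define demo_likely(x)   __builtin_expect(!!(x), 1)
    #define demo_unlikely(x) __builtin_expect(!!(x), 0)

    static int demo_consume(const void *pkt)
    {
            if (demo_unlikely(!pkt))        /* error path, predicted not taken */
                    return -1;

            return 0;                       /* hot path, predicted taken */
    }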
return 0; - if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) + if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ @@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_info *p_ll2_conn; struct qed_hwfn *p_hwfn = cxt; @@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) int rc = -EINVAL; u32 i, capacity; size_t desc_size; - u8 qid; + u8 qid, stats_id; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) @@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, p_ll2_conn->input.rx_conn_type); + stats_id = qed_ll2_handle_to_stats_id(p_hwfn, + p_ll2_conn->input.rx_conn_type, + qid); p_ll2_conn->queue_id = qid; - p_ll2_conn->tx_stats_id = qid; + p_ll2_conn->tx_stats_id = stats_id; - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n", - p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid); + /* If there is no valid stats id for this connection, disable stats */ + if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { + p_ll2_conn->tx_stats_en = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Disabling stats for queue %d - not enough counters\n", + qid); + } + + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Establishing ll2 queue. 
PF %d ctx_based=%d abs qid=%d stats_id=%d\n", + p_hwfn->rel_pf_id, + p_ll2_conn->input.rx_conn_type, qid, stats_id); if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { - p_rx->set_prod_addr = p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid); + p_rx->set_prod_addr = + (u8 __iomem *)p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_LL2_RX_PRODS, qid); } else { /* QED_LL2_RX_TYPE_CTX - using doorbell */ p_rx->ctx_based = 1; @@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt, } } - /* If we're lacking entires, let's try to flush buffers to FW */ + /* If we're lacking entries, let's try to flush buffers to FW */ if (!p_curp || !p_curb) { rc = -EBUSY; p_curp = NULL; @@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { + if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && + p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); } else { @@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt, int rc = 0; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) + if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); - if (p_tx->cur_send_packet) { + if (unlikely(p_tx->cur_send_packet)) { rc = -EEXIST; goto out; } /* Get entry, but only if we have tx elements for it */ - if (!list_empty(&p_tx->free_descq)) + if (unlikely(!list_empty(&p_tx->free_descq))) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); - if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) + if (unlikely(p_curp && + qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) p_curp = NULL; - if (!p_curp) { + if (unlikely(!p_curp)) { rc = -EBUSY; goto out; } @@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt, unsigned long flags; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; - if (!p_ll2_conn->tx_queue.cur_send_packet) + if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) return -EINVAL; p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; - if (cur_send_frag_num >= p_cur_send_packet->bd_used) + if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) return -EINVAL; /* Fill the BD information, and possibly notify FW */ @@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) DP_NOTICE(cdev, "Failed to add an LLH filter\n"); goto err3; } - } ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); @@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, */ nr_frags = skb_shinfo(skb)->nr_frags; - if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 1 + nr_frags); return -EINVAL; @@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, */ rc = qed_ll2_prepare_tx_packet(p_hwfn, 
cdev->ll2->handle, &pkt, 1); - if (rc) + if (unlikely(rc)) goto err; for (i = 0; i < nr_frags; i++) { @@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, /* if failed not much to do here, partial packet has been posted * we can't free memory, will need to wait for completion */ - if (rc) + if (unlikely(rc)) goto err2; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index df88d00053a290..0bfc375161ed65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -32,7 +32,6 @@ #define QED_LL2_LEGACY_CONN_BASE_PF 0 #define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -119,41 +118,41 @@ struct qed_ll2_info { extern const struct qed_ll2_ops qed_ll2_ops_pass; /** - * @brief qed_ll2_acquire_connection - allocate resources, - * starts rx & tx (if relevant) queues pair. Provides - * connecion handler as output parameter. + * qed_ll2_acquire_connection(): Allocate resources, + * starts rx & tx (if relevant) queues pair. + * Provides connecion handler as output + * parameter. * + * @cxt: Pointer to the hw-function [opaque to some]. + * @data: Describes connection parameters. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param data - describes connection parameters - * @return int + * Return: Int. */ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** - * @brief qed_ll2_establish_connection - start previously - * allocated LL2 queues pair + * qed_ll2_establish_connection(): start previously allocated LL2 queues pair * - * @param cxt - pointer to the hw-function [opaque to some] - * @param p_ptt - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. + * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param addr rx (physical address) buffers to submit - * @param cookie - * @param notify_fw produce corresponding Rx BD immediately + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: RX (physical address) buffers to submit. + * @buf_len: Buffer Len. + * @cookie: Cookie. + * @notify_fw: Produce corresponding Rx BD immediately. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, @@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt, u16 buf_len, void *cookie, u8 notify_fw); /** - * @brief qed_ll2_prepare_tx_packet - request for start Tx BD - * to prepare Tx packet submission to FW. + * qed_ll2_prepare_tx_packet(): Request for start Tx BD + * to prepare Tx packet submission to FW. 
* - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle - * @param pkt - info regarding the tx packet - * @param notify_fw - issue doorbell to fw for this packet + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: Connection handle. + * @pkt: Info regarding the tx packet. + * @notify_fw: Issue doorbell to fw for this packet. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, @@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt, bool notify_fw); /** - * @brief qed_ll2_release_connection - releases resources - * allocated for LL2 connection + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * Return: Void. */ void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill - * Tx BD of BDs requested by - * qed_ll2_prepare_tx_packet + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection - * @param addr - * @param nbytes + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); /** - * @brief qed_ll2_terminate_connection - stops Tx/Rx queues - * + * qed_ll2_terminate_connection(): Stops Tx/Rx queues * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_get_stats - get LL2 queue's statistics + * qed_ll2_get_stats(): Get LL2 queue's statistics * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer Status. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param p_stats - * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** - * @brief qed_ll2_alloc - Allocates LL2 connections set + * qed_ll2_alloc(): Allocates LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
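The kernel-doc in this header covers the whole LL2 queue-pair lifecycle. A condensed, hypothetical happy-path sequence is sketched below; parameter filling and return-code checks are omitted, and the acquire-data field name is an assumption rather than something shown in this patch.

    u8 handle = 0;
    struct qed_ll2_acquire_data data = { 0 };

    /* Assumption: the handle is returned through the acquire data;
     * conn type, queue sizes and completion callbacks would be set here.
     */
    data.p_connection_handle = &handle;

    qed_ll2_acquire_connection(cxt, &data);      /* allocate the queue pair */
    qed_ll2_establish_connection(cxt, handle);   /* start the Rx/Tx queues  */
    qed_ll2_post_rx_buffer(cxt, handle, addr, buf_len, cookie, 1);
    qed_ll2_prepare_tx_packet(cxt, handle, &pkt, true);
    /* ... datapath runs; completions arrive via the registered callbacks */
    qed_ll2_terminate_connection(cxt, handle);   /* stop the queues         */
    qed_ll2_release_connection(cxt, handle);     /* free the resources      */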
*/ int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_setup - Inits LL2 connections set + * qed_ll2_setup(): Inits LL2 connections set. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_free - Releases LL2 connections set + * qed_ll2_free(): Releases LL2 connections set + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d10e1cd6d2ba96..7673b3e07736ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = { ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; -static const u32 qed_mfw_ext_20g[] __initconst = { - ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, -}; - static const u32 qed_mfw_ext_25g[] __initconst = { ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, @@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), - QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, @@ -262,7 +257,7 @@ module_exit(qed_exit); /* Check if the DMA controller on the machine can properly handle the DMA * addressing required by the device. -*/ + */ static int qed_set_coherency_mask(struct qed_dev *cdev) { struct device *dev = &cdev->pdev->dev; @@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, goto err2; } - DP_INFO(cdev, "qed_probe completed successfully\n"); + DP_INFO(cdev, "%s completed successfully\n", __func__); return cdev; @@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, rc = qed_set_int_mode(cdev, false); if (rc) { - DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); + DP_ERR(cdev, "%s ERR\n", __func__); return rc; } @@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(wq_flag, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); @@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - strlcpy(drv_version.name, params->name, + strscpy(drv_version.name, params->name, MCP_DRV_VER_STR_SIZE - 4); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); @@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active) return status; } -static int qed_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; @@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn) DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, "Scheduling slowpath task [Flag: %d]\n", QED_SLOWPATH_MFW_TLV_REQ); + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + /* 
Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); @@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, return 0; } + +unsigned long qed_get_epoch_time(void) +{ + return ktime_get_real_seconds(); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 24cd4156777569..64678a256f3b3a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -17,6 +17,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" +#include "qed_mfw_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -30,11 +31,11 @@ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ - qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ - qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ @@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ - if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) { + if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); @@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); memset(&union_data, 0, sizeof(union_data)); - if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size) + if (p_mb_params->p_data_src && p_mb_params->data_src_size) memcpy(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, @@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) { struct qed_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; @@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + if (b_can_sleep) + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) @@ -916,7 +920,6 @@ enum qed_load_req_force { }; static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, - enum qed_load_req_force force_cmd, u8 *p_mfw_force_cmd) { @@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { ext_speed = 0; if (params->ext_speed.autoneg) - ext_speed |= ETH_EXT_SPEED_AN; + ext_speed |= ETH_EXT_SPEED_NONE; val = params->ext_speed.forced_speed; if (val & QED_EXT_SPEED_1G) ext_speed |= ETH_EXT_SPEED_1G; if (val & QED_EXT_SPEED_10G) ext_speed |= ETH_EXT_SPEED_10G; - if (val & QED_EXT_SPEED_20G) - ext_speed |= ETH_EXT_SPEED_20G; if (val & QED_EXT_SPEED_25G) ext_speed |= ETH_EXT_SPEED_25G; if (val & 
QED_EXT_SPEED_40G) @@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) ext_speed |= ETH_EXT_ADV_SPEED_1G; if (val & QED_EXT_SPEED_MASK_10G) ext_speed |= ETH_EXT_ADV_SPEED_10G; - if (val & QED_EXT_SPEED_MASK_20G) - ext_speed |= ETH_EXT_ADV_SPEED_20G; if (val & QED_EXT_SPEED_MASK_25G) ext_speed |= ETH_EXT_ADV_SPEED_25G; if (val & QED_EXT_SPEED_MASK_40G) @@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { - u32 global_offsize; + u32 global_offsize, public_base; if (IS_VF(p_hwfn->cdev)) { if (p_hwfn->vf_iov_info) { @@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, } } + public_base = p_hwfn->mcp_info->public_base; global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn-> - mcp_info->public_base, + SECTION_OFFSIZE_ADDR(public_base, PUBLIC_GLOBAL)); *p_mfw_ver = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, mfw_ver)); - if (p_running_bundle_id != NULL) { + if (p_running_bundle_id) { *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, @@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, return 0; } + static bool qed_is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { @@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", - (u32) *p_proto); + (u32)*p_proto); } static int @@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", - (u32) *p_proto, resp, param); + (u32)*p_proto, resp, param); return 0; } @@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; - case FUNC_MF_CFG_PROTOCOL_NVMETCP: - *p_proto = QED_PCI_NVMETCP; - break; case FUNC_MF_CFG_PROTOCOL_FCOE: *p_proto = QED_PCI_FCOE; break; @@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, } int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac) + struct qed_ptt *p_ptt, const u8 *mac) { struct qed_mcp_mb_params mb_params; u32 mfw_mac[2]; @@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), false); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); @@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) } /* This can be a lengthy process, and it's possible scheduler - * isn't preemptable. Sleep a bit to prevent CPU hogging. + * isn't preemptible. Sleep a bit to prevent CPU hogging. */ if (bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000) @@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, * to be delivered to MFW. 
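Note on the DRV_INNER_WR/DRV_INNER_RD change above: wrapping _offset in parentheses protects the macro against operator-precedence surprises when the argument is an expression. A tiny demonstration with invented macro names:

    /* Why (_offset) matters when the argument is an expression: */
    #define BAD_OFF(base, off)  ((base) + off)
    #define GOOD_OFF(base, off) ((base) + (off))

    /* With off passed as "sel ? a : b":
     *   BAD_OFF(base, sel ? a : b)  parses as ((base) + sel) ? a : b
     *   GOOD_OFF(base, sel ? a : b) parses as (base) + (sel ? a : b)
     */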
*/ if (param && cmd == QED_PUT_FILE_DATA) { - buf_idx = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); - buf_size = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + buf_idx = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); } else { buf_idx += buf_size; buf_size = min_t(u32, (len - buf_idx), @@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, ¶m, &buf_size, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), true); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a transceiver read command to the MFW. rc = %d.\n", @@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, DRV_MSG_CODE_BIST_TEST, param, &resp, &resp_param, &buf_size, - (u32 *)p_image_att); + (u32 *)p_image_att, false); if (rc) return rc; @@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, type = NVM_TYPE_DEFAULT_CFG; break; case QED_NVM_IMAGE_NVM_META: - type = NVM_TYPE_META; + type = NVM_TYPE_NVM_META; break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", @@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; - if (QED_IS_E5(p_hwfn->cdev)) - features |= - DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL; - return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } @@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_NVM_CFG_OPTION, - mb_param, &resp, ¶m, p_len, (u32 *)p_buf); + mb_param, &resp, ¶m, p_len, + (u32 *)p_buf, false); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8edb450d0abfc8..564723800d1594 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -266,97 +266,97 @@ union qed_mfw_tlv_data { #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) /** - * @brief - returns the link params of the hw function + * qed_mcp_get_link_params(): Returns the link params of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link params + * Returns: Pointer to link params. */ -struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); /** - * @brief - return the link state of the hw function + * qed_mcp_get_link_state(): Return the link state of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link state + * Returns: Pointer to link state. */ -struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); /** - * @brief - return the link capabilities of the hw function + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link capabilities + * Returns: Pointer to link capabilities. */ struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); /** - * @brief Request the MFW to set the the link according to 'link_input'. 
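Note on the new b_can_sleep argument to qed_mcp_nvm_rd_cmd(): when set, QED_MB_FLAG_CAN_SLEEP is added to the mailbox params so the wait may sleep; callers in atomic context pass false and keep the busy-wait. The two call forms below are taken directly from the hunks above (arguments as in the patch):

    /* Sleepable context (transceiver read): */
    rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ,
                            nvm_offset, &resp, &param, &buf_size,
                            (u32 *)(p_buf + offset), true);

    /* Context that must not sleep (NVM cfg option read): */
    rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_NVM_CFG_OPTION,
                            mb_param, &resp, &param, p_len,
                            (u32 *)p_buf, false);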
+ * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. * - * @param p_hwfn - * @param p_ptt - * @param b_up - raise link if `true'. Reset link if `false'. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. * - * @return int + * Return: Int. */ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up); /** - * @brief Get the management firmware version value + * qed_mcp_get_mfw_ver(): Get the management firmware version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mfw_ver - mfw version value - * @param p_running_bundle_id - image id in nvram; Optional. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. + * @p_running_bundle_id: Image id in nvram; Optional. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** - * @brief Get the MBI version value + * qed_mcp_get_mbi_ver(): Get the MBI version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver); /** - * @brief Get media type value of the port. + * qed_mcp_get_media_type(): Get media type value of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param mfw_ver - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value * - * @return int - - * 0 - Operation was successul. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *media_type); /** - * @brief Get transceiver data of the port. + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_transceiver_state - transceiver state. - * @param p_transceiver_type - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, u32 *p_tranceiver_type); /** - * @brief Get transceiver supported speed mask. + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_speed_mask - Bit mask of all supported speeds. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask); /** - * @brief Get board configuration. + * qed_mcp_get_board_config(): Get board configuration. 
* - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_board_config - Board config. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config); /** - * @brief General function for sending commands to the MCP - * mailbox. It acquire mutex lock for the entire - * operation, from sending the request until the MCP - * response. Waiting for MCP response will be checked up - * to 5 seconds every 5ms. + * qed_mcp_cmd(): General function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 5ms. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param cmd - command to be sent to the MCP. - * @param param - Optional param - * @param o_mcp_resp - The MCP response code (exclude sequence). - * @param o_mcp_param- Optional parameter provided by the MCP + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP * response - * @return int - 0 - operation - * was successul. + * + * Return: Int - 0 - Operation was successul. */ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param); /** - * @brief - drains the nig, allowing completion to pass in case of pauses. - * (Should be called only from sleepable context) + * qed_mcp_drain(): drains the nig, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. */ int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the flash size value + * qed_mcp_get_flash_size(): Get the flash size value. * - * @param p_hwfn - * @param p_ptt - * @param p_flash_size - flash size in bytes to be filled. + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size); /** - * @brief Send driver version to MFW + * qed_mcp_send_drv_version(): Send driver version to MFW. * - * @param p_hwfn - * @param p_ptt - * @param version - Version value - * @param name - Protocol driver name + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, @@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** - * @brief Read the MFW process kill counter + * qed_get_process_kill_counter(): Read the MFW process kill counter. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. 
+ * @p_ptt: PTT required for register access. * - * @return u32 + * Return: u32. */ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Trigger a recovery process + * qed_start_recovery_process(): Trigger a recovery process. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief A recovery handler must call this function as its first step. - * It is assumed that the handler is not run from an interrupt context. + * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. * - * @param cdev - * @param p_ptt + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_recovery_prolog(struct qed_dev *cdev); /** - * @brief Notify MFW about the change in base device properties + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties * - * @param p_hwfn - * @param p_ptt - * @param client - qed client type + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client); /** - * @brief Notify MFW about the driver state + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. * - * @param p_hwfn - * @param p_ptt - * @param drv_state - Driver state + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @drv_state: Driver state. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state); /** - * @brief Send MTU size to MFW + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mtu - MTU size + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu); /** - * @brief Send MAC address to MFW + * qed_mcp_ov_update_mac(): Send MAC address to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mac - MAC address + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac); + struct qed_ptt *p_ptt, const u8 *mac); /** - * @brief Send WOL mode to MFW + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param wol - WOL mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol); /** - * @brief Set LED status + * qed_mcp_set_led(): Set LED status. * - * @param p_hwfn - * @param p_ptt - * @param mode - LED mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. * - * @return int - 0 - operation was successful. 
+ * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); /** - * @brief Read from nvm + * qed_mcp_nvm_read(): Read from NVM. * - * @param cdev - * @param addr - nvm offset - * @param p_buf - nvm read buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); /** - * @brief Write to nvm + * qed_mcp_nvm_write(): Write to NVM. * - * @param cdev - * @param addr - nvm offset - * @param cmd - nvm command - * @param p_buf - nvm write buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Check latest response + * qed_mcp_nvm_resp(): Check latest response. * - * @param cdev - * @param p_buf - nvm write buffer + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); @@ -604,13 +606,13 @@ struct qed_nvm_image_att { }; /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image to get attributes for - * @param p_image_att - image attributes structure into which to fill data + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, @@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att); /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image requested for reading - * @param p_buffer - allocated buffer into which to fill data - * @param buffer_len - length of the allocated buffer. + * @p_hwfn: HW device data. + * @image_id: image requested for reading. + * @p_buffer: allocated buffer into which to fill data. + * @buffer_len: length of the allocated buffer. * - * @return 0 iff p_buffer now contains the nvram image. + * Return: 0 if p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); /** - * @brief Bist register test + * qed_mcp_bist_register_test(): Bist register test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist clock test + * qed_mcp_bist_clock_test(): Bist clock test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. 
+ * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist nvm test - get number of images + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param num_images - number of images if operation was + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: number of images if operation was * successful. 0 if not. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images); /** - * @brief Bist nvm test - get image attributes by index + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_image_att - Attributes of image - * @param image_index - Index of image to get information for + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, u32 image_index); /** - * @brief - Processes the TLV request from MFW i.e., get the required TLV info - * from the qed client and send it to the MFW. + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Send raw debug data to the MFW + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: raw debug data buffer. + * @size: Buffer size. * - * @param p_hwfn - * @param p_ptt - * @param p_buf - raw debug data buffer - * @param size - buffer size + * Return : Int. */ int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, @@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) } /** - * @brief Initialize the interface with the MCP + * qed_mcp_cmd_init(): Initialize the interface with the MCP. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Initialize the port interface with the MCP + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. * - * @param p_hwfn - * @param p_ptt * Can only be called after `num_ports_in_engines' is set */ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Releases resources allocated during the init process. + * qed_mcp_free(): Releases resources allocated during the init process. 
* - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW function. * - * @return int + * Return: Int. */ int qed_mcp_free(struct qed_hwfn *p_hwfn); /** - * @brief This function is called from the DPC context. After - * pointing PTT to the mfw mb, check for events sent by the MCP - * to the driver and ack them. In case a critical event - * detected, it will be handled here, otherwise the work will be - * queued to a sleepable work-queue. + * qed_mcp_handle_events(): This function is called from the DPC context. + * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. * - * @param p_hwfn - HW function - * @param p_ptt - PTT required for register access - * @return int - 0 - operation - * was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -858,169 +866,177 @@ struct qed_load_req_params { }; /** - * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, - * returns whether this PF is the first on the engine/port or function. + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); /** - * @brief Sends a LOAD_DONE message to the MFW + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_REQ message to the MFW + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_DONE message to the MFW + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read the MFW mailbox into Current buffer. + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Ack to mfw that driver finished FLR process for VFs + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs * - * @param p_hwfn - * @param p_ptt - * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * @p_hwfn: HW device data. 
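/* Editor's note: a minimal sketch of the load handshake described above,
 * not part of this patch. qed_load_req_params is filled in, LOAD_REQ is
 * sent, and LOAD_DONE follows once the PF has finished its init. The
 * field names drv_role/timeout_val/load_code and the constants
 * QED_DRV_ROLE_OS / QED_LOAD_REQ_LOCK_TO_DEFAULT are assumed from the
 * surrounding qed code.
 */
static int qed_example_load(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_load_req_params params = {
		.drv_role = QED_DRV_ROLE_OS,
		.timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT,
	};
	int rc;

	rc = qed_mcp_load_req(p_hwfn, p_ptt, &params);
	if (rc)
		return rc;

	/* params.load_code now says whether this PF was first on the
	 * engine, first on the port, or only first on the function.
	 */

	return qed_mcp_load_done(p_hwfn, p_ptt);
}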
+ * @p_ptt: P_ptt. + * @vfs_to_ack: bit mask of all engine VFs for which the PF acks. * - * @param return int - 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack); /** - * @brief - calls during init to read shmem of all function-related info. + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. * - * @param p_hwfn + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Reset the MCP using mailbox command. + * qed_mcp_reset(): Reset the MCP using mailbox command. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Sends an NVM read command request to the MFW to get - * a buffer. - * - * @param p_hwfn - * @param p_ptt - * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or - * DRV_MSG_CODE_NVM_READ_NVRAM commands - * @param param - [0:23] - Offset [24:31] - Size - * @param o_mcp_resp - MCP response - * @param o_mcp_param - MCP response param - * @param o_txn_size - Buffer size output - * @param o_buf - Pointer to the buffer returned by the MFW. + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. * - * @param return 0 upon success. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. + * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. + * @b_can_sleep: Can sleep. + * + * Return: 0 upon success. */ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep); /** - * @brief Read from sfp + * qed_mcp_phy_sfp_read(): Read from sfp. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param port - transceiver port - * @param addr - I2C address - * @param offset - offset in sfp - * @param len - buffer length - * @param p_buf - buffer to read into + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); /** - * @brief indicates whether the MFW objects [under mcp_info] are accessible + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return true iff MFW is running and mcp_info is initialized + * Return: true if MFW is running and mcp_info is initialized. */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); /** - * @brief request MFW to configure MSI-X for a VF + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. 
* - * @param p_hwfn - * @param p_ptt - * @param vf_id - absolute inside engine - * @param num_sbs - number of entries to request + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vf_id: absolute inside engine. + * @num: number of entries to request. * - * @return int + * Return: Int. */ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num); /** - * @brief - Halt the MCP. + * qed_mcp_halt(): Halt the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Wake up the MCP. + * qed_mcp_resume(): Wake up the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); -/* @brief - Gets the mdump retained data from the MFW. +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. * - * @param p_hwfn - * @param p_ptt - * @param p_mdump_retain + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. * - * @param return 0 upon success. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, @@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct mdump_retain_data_stc *p_mdump_retain); /** - * @brief - Sets the MFW's max value for the given resource + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param resc_max_val - * @param p_mcp_resp + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resc max val. + * @p_mcp_resp: MCP resp. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, @@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, u32 resc_max_val, u32 *p_mcp_resp); /** - * @brief - Gets the MFW allocation info for the given resource + * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given + * resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param p_mcp_resp - * @param p_resc_num - * @param p_resc_start + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: Res ID. + * @p_mcp_resp: MCP resp. + * @p_resc_num: Resc num. + * @p_resc_start: Resc start. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful. */ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, @@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); /** - * @brief Send eswitch mode to MFW + * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param eswitch - eswitch mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @eswitch: eswitch mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successful.
*/ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1113,12 +1130,12 @@ enum qed_resc_lock { }; /** - * @brief - Initiates PF FLR + * qed_mcp_initiate_pf_flr(): Initiates PF FLR. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); struct qed_resc_lock_params { @@ -1151,13 +1168,13 @@ struct qed_resc_lock_params { }; /** - * @brief Acquires MFW generic resource lock + * qed_mcp_resc_lock(): Acquires MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, @@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params { }; /** - * @brief Releases MFW generic resource lock + * qed_mcp_resc_unlock(): Releases MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, @@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_resc_unlock_params *p_params); /** - * @brief - default initialization for lock/unlock resource structs + * qed_mcp_resc_lock_default_init(): Default initialization for + * lock/unlock resource structs. + * + * @p_lock: lock params struct to be initialized; Can be NULL. + * @p_unlock: unlock params struct to be initialized; Can be NULL. + * @resource: the requested resource. + * @b_is_permanent: disable retries & aging when set. * - * @param p_lock - lock params struct to be initialized; Can be NULL - * @param p_unlock - unlock params struct to be initialized; Can be NULL - * @param resource - the requested resource - * @paral b_is_permanent - disable retries & aging when set + * Return: Void. */ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, @@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, resource, bool b_is_permanent); /** - * @brief - Return whether management firmware support smart AN + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return bool - true if feature is supported. + * Return: bool true if feature is supported. */ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); /** - * @brief Learn of supported MFW features; To be done during early init + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Inform MFW of set of features supported by driver. Should be done - * inside the content of the LOAD_REQ. + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. 
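/* Editor's note: a hedged sketch of the resource-lock flow documented
 * above, not part of this patch. The default-init helper fills both the
 * lock and unlock structs; the lock is only held when b_granted is set.
 * QED_RESC_LOCK_PTP_PORT0 and the b_granted field are assumed from the
 * surrounding qed code and used purely as an example resource.
 */
static int qed_example_with_resc_lock(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params lock_params;
	struct qed_resc_unlock_params unlock_params;
	int rc;

	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
				       QED_RESC_LOCK_PTP_PORT0, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc)
		return rc;
	if (!lock_params.b_granted)
		return -EBUSY;

	/* ... access the MFW-arbitrated resource here ... */

	return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}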
*/ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read ufp config from the shared memory. + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Populate the nvm info shadow in the given hardware function + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** - * @brief Delete nvm info shadow in the given hardware function + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); /** - * @brief Get the engine affinity configuration. + * qed_mcp_get_engine_config(): Get the engine affinity configuration. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the PPFID bitmap. + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get NVM config attribute value. + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param p_len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. + * + * Return: Int. */ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len); /** - * @brief Set NVM config attribute value. + * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. */ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h new file mode 100644 index 00000000000000..8a0e3c5d4bda8e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -0,0 +1,2474 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. 
+ */ + +#ifndef _QED_MFW_HSI_H +#define _QED_MFW_HSI_H + +#define MFW_TRACE_SIGNATURE 0x25071946 + +/* The trace in the buffer */ +#define MFW_TRACE_EVENTID_MASK 0x00ffff +#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 +#define MFW_TRACE_PRM_SIZE_OFFSET 16 +#define MFW_TRACE_ENTRY_SIZE 3 + +struct mcp_trace { + u32 signature; /* Help to identify that the trace is valid */ + u32 size; /* the size of the trace buffer in bytes */ + u32 curr_level; /* 2 - all will be written to the buffer + * 1 - debug trace will not be written + * 0 - just errors will be written to the buffer + */ + u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means + * mask it. + */ + + /* Warning: the following pointers are assumed to be 32bits as they are + * used only in the MFW. + */ + u32 trace_prod; /* The next trace will be written to this offset */ + u32 trace_oldest; /* The oldest valid trace starts at this offset + * (usually very close after the current producer). + */ +}; + +#define VF_MAX_STATIC 192 +#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32) +#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) + +#define EXT_VF_MAX_STATIC 240 +#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1) +#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) +#define ADDED_VF_BITMAP_SIZE 2 + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 +#define MCP_GLOB_PORT_MAX 4 +#define MCP_GLOB_FUNC_MAX 16 + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +#define SECTION_OFFSET(_offsize) (((((_offsize) & \ + OFFSIZE_OFFSET_MASK) >> \ + OFFSIZE_OFFSET_SHIFT) << 2)) + +#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \ + OFFSIZE_SIZE_MASK) >> \ + OFFSIZE_SIZE_SHIFT) << 2) + +#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ + SECTION_OFFSET((_offsize)) + \ + (QED_SECTION_SIZE((_offsize)) * (idx))) + +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ + ((_pub_base) + offsetof(struct mcp_public_data, sections[_section])) + +/* PHY configuration */ +struct eth_phy_cfg { + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 + + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 + + u32 link_modes; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 
0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000000 +#define ETH_EXT_FEC_10G_NONE 0x00000100 +#define ETH_EXT_FEC_10G_BASE_R 0x00000200 +#define ETH_EXT_FEC_25G_NONE 0x00000400 +#define ETH_EXT_FEC_25G_BASE_R 0x00000800 +#define ETH_EXT_FEC_25G_RS528 0x00001000 +#define ETH_EXT_FEC_40G_NONE 0x00002000 +#define ETH_EXT_FEC_40G_BASE_R 0x00004000 +#define ETH_EXT_FEC_50G_NONE 0x00008000 +#define ETH_EXT_FEC_50G_BASE_R 0x00010000 +#define ETH_EXT_FEC_50G_RS528 0x00020000 +#define ETH_EXT_FEC_50G_RS544 0x00040000 +#define ETH_EXT_FEC_100G_NONE 0x00080000 +#define ETH_EXT_FEC_100G_BASE_R 0x00100000 +#define ETH_EXT_FEC_100G_RS528 0x00200000 +#define ETH_EXT_FEC_100G_RS544 0x00400000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_NONE 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 +#define ETH_EXT_SPEED_25G 0x00000008 +#define ETH_EXT_SPEED_40G 0x00000010 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200 +#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_1G 0x00010000 +#define ETH_EXT_ADV_SPEED_10G 0x00020000 +#define ETH_EXT_ADV_SPEED_25G 0x00040000 +#define ETH_EXT_ADV_SPEED_40G 0x00080000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000 +}; + +struct port_mf_cfg { + u32 dynamic_cfg; +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +struct eth_stats { + u64 r64; + u64 r127; + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + + union { + struct { + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + } bb0; + struct { + u64 unused1; + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + + u64 rfcs; + u64 rxcf; + u64 rxpf; + u64 rxpp; + u64 raln; + u64 rfcr; + u64 rovr; + u64 rjbr; + u64 rund; + u64 rfrg; + u64 t64; + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + + union { + struct { + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + } bb1; + struct { + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + + u64 txpf; + u64 txpp; + + union { + struct { + u64 tlpiec; + u64 tncl; + } bb2; + struct { + u64 unused9; + u64 unused10; + } ah2; + } u2; + + u64 rbyte; + u64 rxuca; + u64 rxmca; + u64 rxbca; + u64 rxpok; + u64 tbyte; + u64 txuca; + u64 txmca; + u64 txbca; + u64 txcf; +}; + +struct pkt_type_cnt { + u64 tc_tx_pkt_cnt[8]; + u64 tc_tx_oct_cnt[8]; + u64 priority_rx_pkt_cnt[8]; + u64 priority_rx_oct_cnt[8]; +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct eth_stats eth; +}; + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM BIT(0) + +#define PORT_CMT_PORT_ROLE BIT(1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE BIT(1) + +#define PORT_CMT_TEAM_MASK BIT(2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 BIT(2) +}; + +#define 
LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 +#define MAX_TLV_BUFFER 128 + +enum _lldp_agent { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +}; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_SHIFT 8 + u32 pri_tc_tbl[1]; +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +struct dcbx_features { + struct dcbx_ets_feature ets; + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define 
DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 +#define DCBX_CONFIG_VERSION_STATIC 4 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u32 flags; +#define LLDP_SYSTEM_TLV_VALID_MASK 0x1 +#define LLDP_SYSTEM_TLV_VALID_OFFSET 0 +#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2 +#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1 +#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000 +#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16 + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +struct lldp_received_tlvs_s { + u32 prefix_seq_num; + u32 length; + u32 tlvs_buffer[MAX_TLV_BUFFER]; + u32 suffix_seq_num; +}; + +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_SHIFT 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct generic_idc_msg_s { + u32 source_pf; + struct mcp_val64 msg; +}; + +struct pcie_stats_stc { + u32 sr_cnt_wr_byte_msb; + u32 sr_cnt_wr_byte_lsb; + u32 sr_cnt_wr_cnt; + u32 sr_cnt_rd_byte_msb; + u32 sr_cnt_rd_byte_lsb; + u32 sr_cnt_rd_cnt; +}; + +enum _attribute_commands_e { + ATTRIBUTE_CMD_READ = 0, + ATTRIBUTE_CMD_WRITE, + ATTRIBUTE_CMD_READ_CLEAR, + ATTRIBUTE_CMD_CLEAR, + ATTRIBUTE_NUM_OF_COMMANDS +}; + +struct public_global { + u32 max_path; + u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; + u32 ext_phy_upgrade_fw; + u8 runtime_port_swap_map[MODE_4P]; + u32 data_ptr; + u32 data_size; + u32 bmb_error_status_cnt; + u32 bmb_jumbo_frame_cnt; + u32 sent_to_bmc_cnt; + u32 handled_by_mfw; + u32 sent_to_nw_cnt; + u32 to_bmc_kb_per_second; + u32 bcast_dropped_to_bmc_cnt; + u32 mcast_dropped_to_bmc_cnt; + u32 ucast_dropped_to_bmc_cnt; + u32 ncsi_response_failure_cnt; + u32 device_attr; + u32 vpd_warning; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; +}; + +struct public_path { + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit)) +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct 
dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct pause_flood_monitor { + u8 period_cnt; + u8 any_brb_prs_packet_hist; + u8 any_brb_block_is_full_hist; + u8 flags; + u32 num_of_state_changes; +}; + +struct public_port { + u32 validity_map; + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) +#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30) + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 
fc_npiv_nvram_tbl_size; + + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a + + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 module_info; + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define 
OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 + + struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS]; + u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA]; + u32 phy_module_temperature; + u32 nig_reg_stat_rx_bmb_packet; + u32 nig_reg_rx_llh_ncsi_mcp_mask; + u32 nig_reg_rx_llh_ncsi_mcp_mask_2; + struct pause_flood_monitor pause_flood_monitor; + u32 nig_drain_cnt; + struct pkt_type_cnt pkt_tc_priority_cnt; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct public_func { + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 mtu_size; + + u32 c2s_pcp_map_lower; + u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + + struct generic_idc_msg_s generic_idc_msg; + + u32 num_of_msix; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 + + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; + + u32 preserve_data; + + u32 driver_last_activity_ts; + + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define LOAD_REQ_HSI_VERSION 2 +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + 
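/* Editor's note (illustration only, not part of the HSI): every packed
 * field in these shared-memory words is accessed with its MASK/SHIFT
 * (or MASK/OFFSET) pair. For the 'config' word above, for instance, a
 * reader checks the function personality with
 *
 *	if ((config & FUNC_MF_CFG_PROTOCOL_MASK) ==
 *	    FUNC_MF_CFG_PROTOCOL_ISCSI)
 *		// function is configured for iSCSI
 *
 * and recovers the minimum bandwidth as
 *	(config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT.
 */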
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 + + struct drv_version_stc drv_ver; +}; + +struct mcp_mac { + u32 mac_upper; + u32 mac_lower; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct iscsi_stats_stc { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct rdma_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +struct fcoe_cap_stc { + u32 max_ios; + u32 max_log; + u32 max_exch; + u32 max_npiv; + u32 max_tgt; + u32 max_outstnd; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; +}; + +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, + RESOURCE_QCN_E = 18, + RESOURCE_LLH_FILTER_E = 19, + RESOURCE_VF_MAC_ADDR = 20, + RESOURCE_LL2_CQS_E = 21, + RESOURCE_VF_CNQS = 22, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT BIT(0) +}; + +struct mcp_wwn { + u32 wwn_upper; + u32 wwn_lower; +}; + +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_SHIFT 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_SHIFT 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_SHIFT 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_SHIFT 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_SHIFT 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_SHIFT 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_SHIFT 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 
0) +}; + +struct mdump_retain_data_stc { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +struct attribute_cmd_write_stc { + u32 val; + u32 mask; + u32 offset; +}; + +struct lldp_stats_stc { + u32 tx_frames_total; + u32 rx_frames_total; + u32 rx_frames_discarded; + u32 rx_age_outs; +}; + +struct get_att_ctrl_stc { + u32 disabled_attns; + u32 controllable_attns; +}; + +struct trace_filter_stc { + u32 level; + u32 modules; +}; + +union drv_union_data { + struct mcp_mac wol_mac; + + struct eth_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; + struct iscsi_stats_stc iscsi_stats; + struct rdma_stats_stc rdma_stats; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct resource_info resource; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; + struct mcp_mac lldp_mac; + struct mcp_wwn fcoe_fabric_name; + u32 dword; + + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + struct mdump_retain_data_stc mdump_retain; + struct attribute_cmd_write_stc attribute_cmd_write; + struct lldp_stats_stc lldp_stats; + struct pcie_stats_stc pcie_stats; + + struct get_att_ctrl_stc get_att_ctrl; + struct fcoe_cap_stc fcoe_cap; + struct trace_filter_stc trace_filter; +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_SEQ_NUMBER_OFFSET 0 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_OFFSET 16 + + u32 drv_mb_param; + + u32 fw_mb_header; +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define FW_MSG_SEQ_NUMBER_OFFSET 0 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_OFFSET 16 + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET) +enum drv_msg_code_enum { + DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001), + DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002), + DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003), + DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005), + DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006), + DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009), + DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f), + DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010), + DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011), + DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012), + DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013), + DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016), + DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a), + DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e), + DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020), + DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023), + DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025), + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b), + DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e), + DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f), + DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030), + DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031), + 
DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037), + DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e), + DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f), + DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201), + DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000), + DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100), + DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200), + DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300), + DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000), + DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100), + DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200), + DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300), + DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500), + DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600), + DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700), + DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800), + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900), + DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000), + DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100), + DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200), + DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300), + DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400), + DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500), + DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800), + DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900), + DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00), + DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100), + DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300), + DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000), + DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), + DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), + DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), +}; + +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 + +/* DRV_MSG_CODE_RETAIN_VMAC parameters */ +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0 +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf + +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4 + +#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce + +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + +#define BW_MAX_MASK 0x000000ff +#define BW_MAX_OFFSET 0 +#define BW_MIN_MASK 0x0000ff00 +#define BW_MIN_OFFSET 8 + +#define DRV_MSG_FAN_FAILURE_TYPE BIT(0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1) + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_SHIFT 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 +#define RESOURCE_OPCODE_REQ 1 +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +#define RESOURCE_OPCODE_REQ_W_AGING 3 +#define RESOURCE_OPCODE_RELEASE 4 +#define RESOURCE_OPCODE_FORCE_RELEASE 5 +#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 +#define RESOURCE_CMD_REQ_AGE_SHIFT 8 + +#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF +#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 +#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 +#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 +#define RESOURCE_OPCODE_GNT 1 +#define RESOURCE_OPCODE_BUSY 2 +#define 
RESOURCE_OPCODE_RELEASED 3 +#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 +#define RESOURCE_OPCODE_WRONG_OWNER 5 +#define RESOURCE_OPCODE_UNKNOWN_CMD 255 + +#define RESOURCE_DUMP 0 + +/* DRV_MSG_CODE_MDUMP_CMD parameters */ +#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff +#define DRV_MSG_CODE_MDUMP_ACK 0x01 +#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 +#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 +#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 +#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 +#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 +#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 +#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 + +#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a + +#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b +#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c +#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d + +/* DRV_MSG_CODE_MDUMP_CMD options */ +#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100 + +/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */ +#define DRV_MB_PARAM_ADDR_SHIFT 0 +#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF +#define DRV_MB_PARAM_DEVAD_SHIFT 16 +#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000 +#define DRV_MB_PARAM_PORT_SHIFT 21 +#define DRV_MB_PARAM_PORT_MASK 0x00600000 + +/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */ +#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0 +#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF +#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8 +#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300 +#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16 +#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000 + +/* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + +/* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + +/* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + +/* LLDP / DCBX params*/ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define 
DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 + +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 + + /* Resource Allocation params - Driver version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 + +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 
0x00000008 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 + +/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff + +/* Driver attributes params */ +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 + +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + +/*DRV_MSG_CODE_GET_PERM_MAC parametres*/ +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00 + +#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET) +enum fw_msg_code_enum { + FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000), + FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001), + FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040), + FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011), + FW_MSG_CODE_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017), + FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002), + FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003), + FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080), + FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087), + FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010), + FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011), + FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012), + FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021), + FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023), + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030), + FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031), + FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110), + FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011), + FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012), + FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013), + FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110), + FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400), + FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500), + 
FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00), + FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001), + FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a), + FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b), +}; + +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +/* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + +/* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2) +#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3) +#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4) +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) +#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7) +#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8) +#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9) +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) +#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17) +#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18) +#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19) + +#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001 + +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) + +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 + +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 + +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, + MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE, + MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED, + MFW_DRV_MSG_GENERIC_IDC, + MFW_DRV_MSG_XCVR_TX_FAULT, + MFW_DRV_MSG_XCVR_RX_LOS, + MFW_DRV_MSG_GET_FCOE_CAP, + MFW_DRV_MSG_GEN_LINK_DUMP, + MFW_DRV_MSG_GEN_IDLE_CHK, + MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << 
MFW_DRV_MSG_OFFSET(msg_id)) + +struct public_mfw_mb { + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +enum public_sections { + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size. + */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + 
DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + 
DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT, + DRV_TLV_RDMA_DRV_VERSION +}; + +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60 +#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2 +#define SFP_EEPROM_A2_VCC_ADDR 0x62 +#define SFP_EEPROM_A2_VCC_SIZE 2 +#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64 +#define SFP_EEPROM_A2_TX_BIAS_SIZE 2 +#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66 +#define SFP_EEPROM_A2_TX_POWER_SIZE 2 +#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68 +#define SFP_EEPROM_A2_RX_POWER_SIZE 2 + +#define I2C_DEV_ADDR_A0 0xa0 +#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16 +#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2 +#define QSFP_EEPROM_A0_VCC_ADDR 0x1a +#define QSFP_EEPROM_A0_VCC_SIZE 2 +#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a +#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2 +#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32 +#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2 +#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22 +#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2 + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; +}; + +struct nvm_cfg1_glob { + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff 
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 + + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + + u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; + u32 misc_sig; + + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10 + + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 nvm_cfg_version; + u32 nvm_cfg_new_option_seq; + u32 nvm_cfg_removed_option_seq; + u32 nvm_cfg_updated_value_seq; + u32 extended_serial_number[8]; + u32 option_kit_pn[8]; + u32 spare_pn[8]; + u32 mps25_active_txfir_pre; + u32 mps25_active_txfir_main; + u32 mps25_active_txfir_post; + u32 features; + u32 tx_rx_eq_25g_hlpc; + u32 tx_rx_eq_25g_llpc; + u32 tx_rx_eq_25g_ac; + u32 tx_rx_eq_10g_pc; + u32 tx_rx_eq_10g_ac; + u32 tx_rx_eq_1g; + u32 tx_rx_eq_25g_bt; + u32 tx_rx_eq_10g_bt; + u32 generic_cont4; + u32 preboot_debug_mode_std; + u32 preboot_debug_mode_ext; + u32 ext_phy_cfg1; + u32 clocks; + u32 pre2_generic_cont_1; + u32 pre2_generic_cont_2; + u32 pre2_generic_cont_3; + u32 tx_rx_eq_50g_hlpc; + u32 tx_rx_eq_50g_mlpc; + u32 tx_rx_eq_50g_llpc; + u32 tx_rx_eq_50g_ac; + u32 trace_modules; + u32 pcie_class_code_fcoe; + u32 pcie_class_code_iscsi; + u32 no_provisioned_mac; + u32 lowest_mbi_version; + u32 generic_cont5; + u32 pre2_generic_cont_4; + u32 reserved[40]; +}; + +struct nvm_cfg1_path { + u32 reserved[1]; +}; + +struct nvm_cfg1_port { + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 
0x00f00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 + + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 
0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + u32 port_generic_cont_01; + u32 port_generic_cont_02; + u32 phy_temp_monitor; + u32 reserved[109]; +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 features; + u32 mf_mode_feature; + u32 reserved[6]; +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; +}; + +struct board_info { + u16 vendor_id; + u16 eth_did_suffix; + u16 sub_vendor_id; + u16 sub_device_id; + char *board_name; + char *friendly_name; +}; + +struct trace_module_info { + char *module_name; +}; + +#define NUM_TRACE_MODULES 25 + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_2 2 +#define PORT_3 3 + +extern struct spad_layout g_spad; +struct spad_layout { + struct nvm_cfg nvm_cfg; + struct mcp_public_data public_data; +}; + +#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ + +#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE)) + +#define TO_OFFSIZE(_offset, _size) \ + ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \ + (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))) + +enum spad_sections { + SPAD_SECTION_TRACE, + SPAD_SECTION_NVM_CFG, + SPAD_SECTION_PUBLIC, + SPAD_SECTION_PRIVATE, + SPAD_SECTION_MAX +}; + +#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \ + __builtin_offsetof(struct static_init, f)) + +/* This section is located at a fixed location in the beginning of the + * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. + * All the rest of data has a floating location which differs from version to + * version, and is pointed by the mcp_meta_data below. + * Moreover, the spad_layout section is part of the MFW firmware, and is loaded + * with it from nvram in order to clear this portion. 
+ */ +struct static_init { + u32 num_sections; + offsize_t sections[SPAD_SECTION_MAX]; +#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) + + u32 tim_hash[8]; +#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash))) + u32 tpu_hash[8]; +#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash))) + u32 secure_pcie_fw_ver; +#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver)))) + u32 secure_running_mfw; +#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw)))) + struct mcp_trace trace; +}; + +#define CRC_MAGIC_VALUE 0xDEBB20E3 +#define CRC32_POLYNOMIAL 0xEDB88320 +#define _KB(x) ((x) * 1024) +#define _MB(x) (_KB(x) * 1024) +#define NVM_CRC_SIZE (sizeof(u32)) +enum nvm_sw_arbitrator { + NVM_SW_ARB_HOST, + NVM_SW_ARB_MCP, + NVM_SW_ARB_UART, + NVM_SW_ARB_RESERVED +}; + +struct legacy_bootstrap_region { + u32 magic_value; +#define NVM_MAGIC_VALUE 0x669955aa + u32 sram_start_addr; + u32 code_len; + u32 code_start_addr; + u32 crc; +}; + +struct nvm_code_entry { + u32 image_type; + u32 nvm_start_addr; + u32 len; + u32 sram_start_addr; + u32 sram_run_addr; +}; + +enum nvm_image_type { + NVM_TYPE_TIM1 = 0x01, + NVM_TYPE_TIM2 = 0x02, + NVM_TYPE_MIM1 = 0x03, + NVM_TYPE_MIM2 = 0x04, + NVM_TYPE_MBA = 0x05, + NVM_TYPE_MODULES_PN = 0x06, + NVM_TYPE_VPD = 0x07, + NVM_TYPE_MFW_TRACE1 = 0x08, + NVM_TYPE_MFW_TRACE2 = 0x09, + NVM_TYPE_NVM_CFG1 = 0x0a, + NVM_TYPE_L2B = 0x0b, + NVM_TYPE_DIR1 = 0x0c, + NVM_TYPE_EAGLE_FW1 = 0x0d, + NVM_TYPE_FALCON_FW1 = 0x0e, + NVM_TYPE_PCIE_FW1 = 0x0f, + NVM_TYPE_HW_SET = 0x10, + NVM_TYPE_LIM = 0x11, + NVM_TYPE_AVS_FW1 = 0x12, + NVM_TYPE_DIR2 = 0x13, + NVM_TYPE_CCM = 0x14, + NVM_TYPE_EAGLE_FW2 = 0x15, + NVM_TYPE_FALCON_FW2 = 0x16, + NVM_TYPE_PCIE_FW2 = 0x17, + NVM_TYPE_AVS_FW2 = 0x18, + NVM_TYPE_INIT_HW = 0x19, + NVM_TYPE_DEFAULT_CFG = 0x1a, + NVM_TYPE_MDUMP = 0x1b, + NVM_TYPE_NVM_META = 0x1c, + NVM_TYPE_ISCSI_CFG = 0x1d, + NVM_TYPE_FCOE_CFG = 0x1f, + NVM_TYPE_ETH_PHY_FW1 = 0x20, + NVM_TYPE_ETH_PHY_FW2 = 0x21, + NVM_TYPE_BDN = 0x22, + NVM_TYPE_8485X_PHY_FW = 0x23, + NVM_TYPE_PUB_KEY = 0x24, + NVM_TYPE_RECOVERY = 0x25, + NVM_TYPE_PLDM = 0x26, + NVM_TYPE_UPK1 = 0x27, + NVM_TYPE_UPK2 = 0x28, + NVM_TYPE_MASTER_KC = 0x29, + NVM_TYPE_BACKUP_KC = 0x2a, + NVM_TYPE_HW_DUMP = 0x2b, + NVM_TYPE_HW_DUMP_OUT = 0x2c, + NVM_TYPE_BIN_NVM_META = 0x30, + NVM_TYPE_ROM_TEST = 0xf0, + NVM_TYPE_88X33X0_PHY_FW = 0x31, + NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, + NVM_TYPE_IDLE_CHK = 0x33, + NVM_TYPE_MAX, +}; + +#define MAX_NVM_DIR_ENTRIES 100 + +struct nvm_dir_meta { + u32 dir_id; + u32 nvm_dir_addr; + u32 num_images; + u32 next_mfw_to_run; +}; + +struct nvm_dir { + s32 seq; +#define NVM_DIR_NEXT_MFW_MASK 0x00000001 +#define NVM_DIR_SEQ_MASK 0xfffffffe +#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) +#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\ + ({ \ + _seq = (((_seq + 2) & \ + NVM_DIR_SEQ_MASK) | \ + (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\ + }) + +#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \ + NVM_DIR_SEQ_MASK) + + u32 num_images; + u32 rsrv; + struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ +}; + +#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ + ((_num_images) - 1) *\ + sizeof(struct nvm_code_entry) +\ + NVM_CRC_SIZE) + +struct nvm_vpd_image { + u32 format_revision; +#define VPD_IMAGE_VERSION 1 + + u8 vpd_data[1]; +}; + +#define DIR_ID_1 (0) +#define DIR_ID_2 (1) +#define MAX_DIR_IDS (2) + +#define MFW_BUNDLE_1 (0) +#define MFW_BUNDLE_2 (1) +#define MAX_MFW_BUNDLES (2) + 
+#define FLASH_PAGE_SIZE 0x1000 +#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) +#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200)) + +#define FPGA_MIM_MAX_SIZE (0x40000) + +#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \ + sizeof(struct legacy_bootstrap_region) \ + - NVM_RSV_SIZE) +#define LIM_OFFSET (NVM_OFFSET(lim_image)) +#define NVM_RSV_SIZE (44) +#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE) +#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\ + + (((idx) == NVM_TYPE_MIM2) ? \ + GET_MIM_MAX_SIZE(is_asic, is_e4)\ + : 0)) +#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \ + GET_MIM_MAX_SIZE(is_asic,\ + is_e4) * 2) + +union nvm_dir_union { + struct nvm_dir dir; + u8 page[FLASH_PAGE_SIZE]; +}; + +struct nvm_image { + struct legacy_bootstrap_region bootstrap; + u8 rsrv[NVM_RSV_SIZE]; + u8 lim_image[LIM_MAX_SIZE]; + union nvm_dir_union dir[MAX_MFW_BUNDLES]; +}; + +#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f))))) + +struct hw_set_info { + u32 reg_type; +#define GRC_REG_TYPE 1 +#define PHY_REG_TYPE 2 +#define PCI_REG_TYPE 4 + + u32 bank_num; + u32 pf_num; + u32 operation; +#define READ_OP 1 +#define WRITE_OP 2 +#define RMW_SET_OP 3 +#define RMW_CLR_OP 4 + + u32 reg_addr; + u32 reg_data; + + u32 reset_type; +#define POR_RESET_TYPE BIT(0) +#define HARD_RESET_TYPE BIT(1) +#define CORE_RESET_TYPE BIT(2) +#define MCP_RESET_TYPE BIT(3) +#define PERSET_ASSERT BIT(4) +#define PERSET_DEASSERT BIT(5) +}; + +struct hw_set_image { + u32 format_version; +#define HW_SET_IMAGE_VERSION 1 + u32 no_hw_sets; + struct hw_set_info hw_sets[1]; +}; + +#define MAX_SUPPORTED_NVM_OPTIONS 1000 + +#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff +#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0 +#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000 +#define NVM_META_BIN_OPTION_LEN_OFFSET 16 +#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000 +#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24 +#define NVM_META_BIN_OPTION_ENTITY_GLOB 0 +#define NVM_META_BIN_OPTION_ENTITY_PORT 1 +#define NVM_META_BIN_OPTION_ENTITY_FUNC 2 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2 + +struct nvm_meta_bin_t { + u32 magic; +#define NVM_META_BIN_MAGIC 0x669955bb + u32 version; +#define NVM_META_BIN_VERSION 1 + u32 num_options; + u32 options[0]; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index b8c5641b29a8e3..5d725f59db247d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -26,12 +26,12 @@ static struct qed_ooo_archipelago u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; struct qed_ooo_archipelago *p_archipelago; - if (idx >= p_ooo_info->max_num_archipelagos) + if (unlikely(idx >= p_ooo_info->max_num_archipelagos)) return NULL; p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; - if (list_empty(&p_archipelago->isles_list)) + if (unlikely(list_empty(&p_archipelago->isles_list))) return NULL; return p_archipelago; @@ -46,7 +46,7 @@ static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, u8 the_num_of_isle = 1; p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); - if (!p_archipelago) { + if (unlikely(!p_archipelago)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return NULL; @@ -362,7 
+362,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, if (ooo_isle > 1) { p_prev_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle - 1); - if (!p_prev_isle) { + if (unlikely(!p_prev_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle - 1, cid); @@ -370,7 +370,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, } } p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); - if (!p_archipelago && (ooo_isle != 1)) { + if (unlikely(!p_archipelago && ooo_isle != 1)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return; @@ -381,7 +381,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, struct qed_ooo_isle, list_entry); list_del(&p_isle->list_entry); - if (!list_empty(&p_isle->buffers_list)) { + if (unlikely(!list_empty(&p_isle->buffers_list))) { DP_NOTICE(p_hwfn, "Free isle is not empty\n"); INIT_LIST_HEAD(&p_isle->buffers_list); } @@ -418,13 +418,13 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_isle *p_isle = NULL; p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); - if (!p_isle) { + if (unlikely(!p_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle, cid); return; } - if (buffer_side == QED_OOO_LEFT_BUF) + if (unlikely(buffer_side == QED_OOO_LEFT_BUF)) list_add(&p_buffer->list_entry, &p_isle->buffers_list); else list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list); @@ -438,7 +438,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle + 1); - if (!p_right_isle) { + if (unlikely(!p_right_isle)) { DP_NOTICE(p_hwfn, "Right isle %d is not found(cid %d)\n", left_isle + 1, cid); @@ -450,7 +450,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, if (left_isle) { p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle); - if (!p_left_isle) { + if (unlikely(!p_left_isle)) { DP_NOTICE(p_hwfn, "Left isle %d is not found(cid %d)\n", left_isle, cid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 2c62d732e5c258..295ce435a1a411 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -63,12 +63,12 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); return -EBUSY; - } else if (!rc && !params.b_granted) { + } else if (!params.b_granted) { DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); return -EBUSY; } - return rc; + return 0; } static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 4f4b79250a2b2f..7f3e84b8622dd9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -22,6 +22,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -33,7 +34,6 @@ #include "qed_roce.h" #include "qed_sp.h" - int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 max_count, char *name) { @@ -865,8 +865,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) } qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_COMMON_QUEUE_CONS, qz_num); 
REG_WR16(p_hwfn, addr, prod); @@ -1903,7 +1903,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); } - void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->db_bar_no_edpm = true; @@ -1966,7 +1965,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, u8 *old_mac_address, - u8 *new_mac_address) + const u8 *new_mac_address) { int rc = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6a1de3a252570e..2753723011dd76 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp) return false; } + #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else -static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) + {return -EINVAL; } static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index da1b7fdcbda7c3..6f1a52e6beb2f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -126,6 +126,8 @@ 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL +#define QM_REG_RLGLBLUPPERBOUND \ + 0x2f3c00UL #define TCFC_REG_WEAK_ENABLE_VF \ 0x2d0704UL #define TCFC_REG_STRONG_ENABLE_PF \ @@ -576,7 +578,7 @@ #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL -#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) @@ -595,8 +597,8 @@ #define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL #define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL #define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL #define NIG_REG_NGE_IP_ENABLE 0x508b28UL #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL @@ -606,7 +608,10 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL - +#define QM_REG_WFQVPUPPERBOUND \ + 0x2fb000UL +#define QM_REG_WFQVPCRD \ + 0x2fc000UL #define PGLCS_REG_DBG_SELECT_K2_E5 \ 0x001d14UL #define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ @@ -1437,29 +1442,29 @@ 0x1401140UL #define XSEM_REG_SYNC_DBG_EMPTY \ 0x1401160UL -#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_ACTIVE \ 0x1401400UL -#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_MODE \ 0x1401404UL -#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define XSEM_REG_DBG_FRAME_MODE 
\ 0x1401408UL #define XSEM_REG_DBG_GPRE_VECT \ 0x1401410UL -#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define XSEM_REG_DBG_MODE1_CFG \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ 0x1440000UL #define YSEM_REG_SYNC_DBG_EMPTY \ 0x1501160UL -#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_ACTIVE \ 0x1501400UL -#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_MODE \ 0x1501404UL -#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define YSEM_REG_DBG_FRAME_MODE \ 0x1501408UL #define YSEM_REG_DBG_GPRE_VECT \ 0x1501410UL -#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define YSEM_REG_DBG_MODE1_CFG \ 0x1501420UL #define YSEM_REG_FAST_MEMORY \ 0x1540000UL @@ -1467,15 +1472,15 @@ 0x1601140UL #define PSEM_REG_SYNC_DBG_EMPTY \ 0x1601160UL -#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_ACTIVE \ 0x1601400UL -#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_MODE \ 0x1601404UL -#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define PSEM_REG_DBG_FRAME_MODE \ 0x1601408UL #define PSEM_REG_DBG_GPRE_VECT \ 0x1601410UL -#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define PSEM_REG_DBG_MODE1_CFG \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ 0x1640000UL @@ -1483,15 +1488,15 @@ 0x1701140UL #define TSEM_REG_SYNC_DBG_EMPTY \ 0x1701160UL -#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_ACTIVE \ 0x1701400UL -#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_MODE \ 0x1701404UL -#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define TSEM_REG_DBG_FRAME_MODE \ 0x1701408UL #define TSEM_REG_DBG_GPRE_VECT \ 0x1701410UL -#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define TSEM_REG_DBG_MODE1_CFG \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ 0x1740000UL @@ -1499,15 +1504,15 @@ 0x1801140UL #define MSEM_REG_SYNC_DBG_EMPTY \ 0x1801160UL -#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_ACTIVE \ 0x1801400UL -#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_MODE \ 0x1801404UL -#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define MSEM_REG_DBG_FRAME_MODE \ 0x1801408UL #define MSEM_REG_DBG_GPRE_VECT \ 0x1801410UL -#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define MSEM_REG_DBG_MODE1_CFG \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL @@ -1517,21 +1522,21 @@ 20480 #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL -#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define USEM_REG_SLOW_DBG_ACTIVE \ 0x1901400UL -#define USEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define USEM_REG_SLOW_DBG_MODE \ 0x1901404UL -#define USEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define USEM_REG_DBG_FRAME_MODE \ 0x1901408UL #define USEM_REG_DBG_GPRE_VECT \ 0x1901410UL -#define USEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define USEM_REG_DBG_MODE1_CFG \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL #define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \ 0x000748UL -#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \ +#define SEM_FAST_REG_DBG_MODSRC_DISABLE \ 0x00074cUL #define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \ 0x000750UL @@ -1561,7 +1566,7 @@ 0x341500UL #define BRB_REG_BIG_RAM_DATA_SIZE \ 64 -#define SEM_FAST_REG_STALL_0_BB_K2 \ +#define SEM_FAST_REG_STALL_0 \ 0x000488UL #define SEM_FAST_REG_STALLED \ 0x000494UL @@ -1619,35 +1624,35 @@ 0x008c14UL #define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \ +#define 
PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ 0x0006c4UL -#define MS_REG_MS_CMU_K2_E5 \ +#define MS_REG_MS_CMU_K2 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_REG_PHY0_K2_E5 \ +#define PHY_PCIE_REG_PHY0_K2 \ 0x620000UL -#define PHY_PCIE_REG_PHY1_K2_E5 \ +#define PHY_PCIE_REG_PHY1_K2 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index cf5baa5e59bcc9..071b4aeaddf286 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -792,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->orq_num_pages * RDMA_RING_PAGE_SIZE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index e27dd9a4547e89..7a3bd749e1e4cc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -6,47 +6,47 @@ #include /** - * @brief qed_selftest_memory - Perform memory test + * qed_selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_memory(struct qed_dev *cdev); /** - * @brief qed_selftest_interrupt - Perform interrupt test + * qed_selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_interrupt(struct qed_dev *cdev); /** - * @brief qed_selftest_register - Perform register test + * qed_selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_register(struct qed_dev *cdev); /** - * @brief qed_selftest_clock - Perform clock test + * qed_selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_clock(struct qed_dev *cdev); /** - * @brief qed_selftest_nvram - Perform nvram test + * qed_selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. 
*/ int qed_selftest_nvram(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 60ff3222bf551b..4fb02a5579ee8c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -23,31 +23,26 @@ enum spq_mode { }; struct qed_spq_comp_cb { - void (*function)(struct qed_hwfn *, - void *, - union event_ring_data *, + void (*function)(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, u8 fw_return_code); void *cookie; }; /** - * @brief qed_eth_cqe_completion - handles the completion of a - * ramrod on the cqe ring + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. * - * @param p_hwfn - * @param cqe + * @p_hwfn: HW device data. + * @cqe: CQE. * - * @return int + * Return: Int. */ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe); -/** - * @file - * - * QED Slow-hwfn queue interface - */ - + /* QED Slow-hwfn queue interface */ union ramrod_data { struct pf_start_ramrod_data pf_start; struct pf_update_ramrod_data pf_update; @@ -58,7 +53,7 @@ union ramrod_data { struct tx_queue_stop_ramrod_data tx_queue_stop; struct vport_start_ramrod_data vport_start; struct vport_stop_ramrod_data vport_stop; - struct rx_update_gft_filter_data rx_update_gft; + struct rx_update_gft_filter_ramrod_data rx_update_gft; struct vport_update_ramrod_data vport_update; struct core_rx_start_ramrod_data core_rx_queue_start; struct core_rx_stop_ramrod_data core_rx_queue_stop; @@ -207,117 +202,128 @@ struct qed_spq { }; /** - * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that - * Pends it to the future list. + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. * - * @param p_hwfn - * @param p_req + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. * - * @return int + * Return: Int. */ int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code); /** - * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_spq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_setup - Reset the SPQ to its start state. + * qed_spq_setup(): Reset the SPQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_spq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_spq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_get_entry - Obtain an entrry from the spq - * free pool list. - * - * + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. * - * @param p_hwfn - * @param pp_ent + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. * - * @return int + * Return: Int. */ int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent); /** - * @brief qed_spq_return_entry - Return an entry to spq free - * pool list + * qed_spq_return_entry(): Return an entry to spq free pool list. * - * @param p_hwfn - * @param p_ent + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. 
*/ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); /** - * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * qed_eq_alloc(): Allocates & initializes an EQ struct. * - * @param p_hwfn - * @param num_elem number of elements in the eq + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. * - * @return int + * Return: Int. */ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the EQ to its start state. + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_free - deallocates the given EQ struct. + * qed_eq_free(): deallocates the given EQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_eq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_prod_update - update the FW with default EQ producer + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. * - * @param p_hwfn - * @param prod + * Return: Void. */ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod); /** - * @brief qed_eq_completion - Completes currently pending EQ elements + * qed_eq_completion(): Completes currently pending EQ elements. * - * @param p_hwfn - * @param cookie + * @p_hwfn: HW device data. + * @cookie: Cookie. * - * @return int + * Return: Int. */ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_spq_completion - Completes a single event + * qed_spq_completion(): Completes a single event. * - * @param p_hwfn - * @param echo - echo value from cookie (used for determining completion) - * @param p_data - data from cookie (used in callback function if applicable) + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). * - * @return int + * Return: Int. */ int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, @@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, union event_ring_data *p_data); /** - * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u32 - SPQ CID + * Return: u32 - SPQ CID. */ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_alloc - Allocates & initializes an ConsQ - * struct + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start state. + * qed_consq_setup(): Reset the ConsQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return Void. */ void qed_consq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_free - deallocates the given ConsQ struct. + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_consq_free(struct qed_hwfn *p_hwfn); int qed_spq_pend_post(struct qed_hwfn *p_hwfn); -/** - * @file - * - * @brief Slow-hwfn low-level commands (Ramrods) function definitions. - */ +/* Slow-hwfn low-level commands (Ramrods) function definitions. 
*/ #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 @@ -377,12 +382,15 @@ struct qed_sp_init_data { }; /** - * @brief Returns a SPQ entry to the pool / frees the entry if allocated. - * Should be called on in error flows after initializing the SPQ entry - * and before posting it. + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. * - * @param p_hwfn - * @param p_ent + * Return: Void. */ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); @@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_sp_init_data *p_data); /** - * @brief qed_sp_pf_start - PF Function Start Ramrod + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. * * This ramrod is sent to initialize a physical function (PF). It will * configure the function related parameters and write its completion to the @@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * allocated by the driver on host memory and its parameters are written * to the internal RAM of the UStorm by the Function Start Ramrod. * - * @param p_hwfn - * @param p_ptt - * @param p_tunn - * @param allow_npar_tx_switch - * - * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, @@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, bool allow_npar_tx_switch); /** - * @brief qed_sp_pf_update - PF Function Update Ramrod + * qed_sp_pf_update(): PF Function Update Ramrod. * - * This ramrod updates function-related parameters. Every parameter can be - * updated independently, according to configuration flags. + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. * - * @return int + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. */ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * qed_sp_pf_update_stag(): Update firmware of new outer tag. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_stop - PF Function Stop Ramrod - * - * This ramrod is sent to close a Physical Function (PF). It is the last ramrod - * sent and the last completion written to the PFs Event Ring. This ramrod also - * deletes the context for the Slowhwfn connection on this PF. - * - * @note Not required for first packet. - * - * @param p_hwfn - * - * @return int - */ - -/** - * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); @@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index b4ed54ffef9b72..648176dfb871d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; - DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + + /* Place consolidation queue address in ramrod */ + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); + p_ramrod->consolid_q_num_pages = page_cnt; qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); @@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; - p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; - p_ramrod->num_vfs = (u8) p_iov->total_vfs; + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_iov->total_vfs; } p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0bc1a0aeb56eb5..e0473729b16173 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -20,6 +20,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iscsi.h" @@ -31,8 +32,8 @@ #include "qed_rdma.h" /*************************************************************************** -* Structures & Definitions -***************************************************************************/ + * Structures & Definitions + ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) @@ -42,8 +43,8 @@ #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** -* Blocking Imp. (BLOCK/EBLOCK mode) -***************************************************************************/ + * Blocking Imp. 
(BLOCK/EBLOCK mode) + ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, union event_ring_data *data, u8 fw_return_code) @@ -149,8 +150,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* SPQ entries inner API -***************************************************************************/ + * SPQ entries inner API + ***************************************************************************/ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { @@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* HSI access -***************************************************************************/ + * HSI access + ***************************************************************************/ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; @@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, - E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); - p_cxt->xstorm_st_context.spq_base_lo = + p_cxt->xstorm_st_context.spq_base_addr.lo = DMA_LO_LE(p_spq->chain.p_phys_addr); - p_cxt->xstorm_st_context.spq_base_hi = + p_cxt->xstorm_st_context.spq_base_addr.hi = DMA_HI_LE(p_spq->chain.p_phys_addr); - - DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, - p_hwfn->p_consq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, @@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Asynchronous events -***************************************************************************/ + * Asynchronous events + ***************************************************************************/ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) @@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* EQ API -***************************************************************************/ + * EQ API + ***************************************************************************/ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { - u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_EQE_CONS, p_hwfn->rel_pf_id); REG_WR16(p_hwfn, addr, prod); } @@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* CQE API - manipulate EQ functionality 
-***************************************************************************/ + * CQE API - manipulate EQ functionality + ***************************************************************************/ static int qed_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) @@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Slow hwfn Queue (spq) -***************************************************************************/ + * Slow hwfn Queue (spq) + ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; @@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) int ret; /* SPQ struct */ - p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); + p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; @@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; @@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Accessor -***************************************************************************/ + * Accessor + ***************************************************************************/ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_spq) @@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* Posting new Ramrods -***************************************************************************/ + * Posting new Ramrods + ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, struct list_head *head, u32 keep_reserve) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ed2b6fe5a78d37..8ac38828ba4597 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -11,6 +11,7 @@ #include #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -19,12 +20,13 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, - __le16 echo, - union event_ring_data *data, u8 fw_return_code); static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); +static u16 qed_vf_from_entity_id(__le16 entity_id) +{ + return le16_to_cpu(entity_id) - MAX_NUM_PFS; +} + static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { u8 legacy = 0; @@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else - DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", - relative_vf_id); + DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", + __func__, relative_vf_id); return vf; } @@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, struct qed_dmae_params params; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return -EINVAL; @@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) bulletin_p = p_iov_info->bulletins_phys; if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { DP_ERR(p_hwfn, - "qed_iov_setup_vfdb called without allocating mem first\n"); + "%s called without allocating mem first\n", __func__); return; } @@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); + "%s for %d VFs\n", __func__, num_vfs); /* Allocate PF Mailbox buffer (per-VF) */ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; @@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", p_iov_info->mbx_msg_virt_addr, - (u64) p_iov_info->mbx_msg_phys_addr, + (u64)p_iov_info->mbx_msg_phys_addr, p_iov_info->mbx_reply_virt_addr, - (u64) p_iov_info->mbx_reply_phys_addr, - p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); + (u64)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); return 0; } @@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) if (rc) return rc; - /* We want PF IOV to be synonemous with the existance of p_iov_info; + /* We want PF IOV to be synonemous with the existence of p_iov_info; * In case the capability is published but there are no VFs, simply * de-allocate the struct. */ @@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, int i; /* Set VF masks and configuration - pretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); /* iterate over all queues, clear sb consumer */ for (i = 0; i < vf->num_sbs; i++) @@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, { u32 igu_vf_conf; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); @@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); } static int @@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, if (rc) return rc; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); @@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.hw_mode); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); vf->state = VF_FREE; @@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, p_block->igu_sb_id * sizeof(u64), 2, NULL); } - 
vf->num_sbs = (u8) num_rx_queues; + vf->num_sbs = (u8)num_rx_queues; return vf->num_sbs; } @@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, * channel would be re-set to ready prior to that. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, mbx->req_virt->first_tlv.reply_address, @@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, memset(resp, 0, sizeof(*resp)); /* Write the PF version so that VF would know which version - * is supported - might be later overriden. This guarantees that + * is supported - might be later overridden. This guarantees that * VF could recognize legacy PF based on lack of versions in reply. */ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; @@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; - pfdev_info->indices_per_sb = PIS_PER_SB_E4; + pfdev_info->indices_per_sb = PIS_PER_SB; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; @@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, int sb_id; int rc; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Failed to get VF info, invalid vfid [%d]\n", @@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { DP_ERR(p_hwfn, - "qed_iov_vf_mbx_start_vport returned error %d\n", rc); + "%s returned error %d\n", __func__, rc); status = PFVF_STATUS_FAILURE; } else { vf->vport_instance++; @@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc) { - DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", - rc); + DP_ERR(p_hwfn, "%s returned error %d\n", + __func__, rc); status = PFVF_STATUS_FAILURE; } @@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, * calculate on their own and clean the producer prior to this. 
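 * In practice this means the PF zeroes the queue's MSTORM_ETH_VF_PRODS
 * entry for such VFs, as done in the statement that follows.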
*/ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) - REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), - 0); + qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY + + SEM_FAST_REG_INT_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, + req->rx_qid), 0); rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, req->bd_max_bytes, @@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, goto out; } p_rss_params = vzalloc(sizeof(*p_rss_params)); - if (p_rss_params == NULL) { + if (!p_rss_params) { status = PFVF_STATUS_FAILURE; goto out; } @@ -3550,6 +3552,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, sizeof(struct pfvf_def_resp_tlv), status); } + static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, int cnt; u32 val; - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); for (cnt = 0; cnt < 50; cnt++) { val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); @@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, break; msleep(20); } - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); if (cnt == 50) { DP_ERR(p_hwfn, @@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, return 0; } +#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) + static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; - int i, cnt; + u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; + u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; + u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; + u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; + u8 port_id, tc, tc_id = 0, voq = 0; + int cnt; - /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS_E4; i++) { - u32 prod; + memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); + memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); - cons[i] = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - prod = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + - i * 0x40); - distance[i] = prod - cons[i]; + /* Read initial consumers & producers */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; + voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); + cons[voq] = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + prod = qed_rd(p_hwfn, p_ptt, + prod_voq0_addr + voq * 0x40); + distance[voq] = prod - cons[voq]; + } } /* Wait for consumers to pass the producers */ - i = 0; + port_id = 0; + tc = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS_E4; i++) { - u32 tmp; + for (; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? 
+ tc : PURE_LB_TC; + voq = VOQ(port_id, + tc_id, max_phys_tcs_per_port); + tmp = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + if (distance[voq] > tmp - cons[voq]) + break; + } - tmp = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - if (distance[i] > tmp - cons[i]) + if (tc == max_phys_tcs_per_port + 1) + tc = 0; + else break; } - if (i == MAX_NUM_VOQS_E4) + if (port_id == max_ports_per_engine) break; msleep(20); } if (cnt == 50) { - DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", - p_vf->abs_vf_id, i); + DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n", + p_vf->abs_vf_id, (int)voq); + + DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n", + (int)voq, (int)port_id, (int)tc_id); + return -EBUSY; } @@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, * doesn't do that as a part of FLR. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, vfid), 1); /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. @@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return; @@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, u16 abs_vfid) { - u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, @@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, return NULL; } - return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; } static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, @@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, return 0; } -static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, - struct malicious_vf_eqe_data *p_data) +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) { struct qed_vf_info *p_vf; - p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); - + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id + (p_data->entity_id)); if (!p_vf) return; @@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, - union event_ring_data *data, u8 fw_return_code) +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code) { switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); - case COMMON_EVENT_MALICIOUS_VF: - qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); - return 0; default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); @@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, struct qed_dmae_params params; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) 
return -EINVAL; @@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf_info; u64 feature; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set forced MAC, invalid vfid [%d]\n", vfid); @@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return false; @@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return true; @@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return false; @@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) goto out; } - vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf) goto out; @@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, return rc; rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ - return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, + QM_RL_TYPE_NORMAL); } static int @@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) struct qed_wfq_data *vf_vp_wfq; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return 0; @@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) */ void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) { + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(flag, &hwfn->iov_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); @@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev) int i; for_each_hwfn(cdev, i) - queue_delayed_work(cdev->hwfns[i].iov_wq, - &cdev->hwfns[i].iov_task, 0); + queue_delayed_work(cdev->hwfns[i].iov_wq, + &cdev->hwfns[i].iov_task, 0); } int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) @@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) int i, j; for_each_hwfn(cdev, i) - if (cdev->hwfns[i].iov_wq) - flush_workqueue(cdev->hwfns[i].iov_wq); + if (cdev->hwfns[i].iov_wq) + flush_workqueue(cdev->hwfns[i].iov_wq); /* Mark VFs for disablement */ qed_iov_set_vfs_to_disable(cdev, true); @@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) } qed_for_each_vf(hwfn, i) - qed_iov_post_vf_bulletin(hwfn, i, ptt); + qed_iov_post_vf_bulletin(hwfn, i, ptt); qed_ptt_release(hwfn, ptt); } @@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); } - 
flush_workqueue(cdev->hwfns[i].iov_wq); destroy_workqueue(cdev->hwfns[i].iov_wq); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index eacd6457f195ce..f448e3dd6c8ba4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -142,7 +142,7 @@ struct qed_vf_queue { enum vf_state { VF_FREE = 0, /* VF ready to be acquired holds no resc */ - VF_ACQUIRED, /* VF, acquired, but not initalized */ + VF_ACQUIRED, /* VF, acquired, but not initialized */ VF_ENABLED, /* VF, Enabled */ VF_RESET, /* VF, FLR'd, pending cleanup */ VF_STOPPED /* VF, Stopped */ @@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; #ifdef CONFIG_QED_SRIOV /** - * @brief Check if given VF ID @vfid is valid - * w.r.t. @b_enabled_only value - * if b_enabled_only = true - only enabled VF id is valid - * else any VF id less than max_vfs is valid + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. * - * @param p_hwfn - * @param rel_vf_id - Relative VF ID - * @param b_enabled_only - consider only enabled VF - * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. * - * @return bool - true for valid VF ID + * Return: bool - true for valid VF ID */ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious); /** - * @brief - Given a VF index, return index of next [including that] active VF. + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. * - * @param p_hwfn - * @param rel_vf_id + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. * - * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. */ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); @@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port); /** - * @brief Read sriov related information and allocated resources - * reads from configuration space, shmem, etc. + * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); /** - * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset * - * @param p_hwfn - * @param p_iov - * @param type - * @param length + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. * - * @return pointer to the newly placed tlv + * Return: pointer to the newly placed tlv */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); /** - * @brief list the types and lengths of the tlvs on the buffer + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer * - * @param p_hwfn - * @param tlvs_list + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. 
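+ *
+ * Sketch of how a VF request chain is typically built before it is
+ * dumped with this helper (CHANNEL_TLV_ACQUIRE and the list-end
+ * terminator are used as illustrative TLV types here):
+ *
+ *	struct vfpf_acquire_tlv *req;
+ *	u8 *offset = (u8 *)p_iov->vf2pf_request;
+ *
+ *	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ *	... fill *req ...
+ *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
+ *		    sizeof(struct channel_list_end_tlv));
+ *	qed_dp_tlv_list(p_hwfn, p_iov->vf2pf_request);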
*/ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); /** - * @brief qed_iov_alloc - allocate sriov related resources + * qed_sriov_vfpf_malicious(): Handle malicious VF/PF. + * + * @p_hwfn: HW device data. + * @p_data: Pointer to data. + * + * Return: Void. + */ +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data); + +/** + * qed_sriov_eqe_event(): Callback for SRIOV events. + * + * @p_hwfn: HW device data. + * @opcode: Opcode. + * @echo: Echo. + * @data: data + * @fw_return_code: FW return code. + * + * Return: Int. + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code); + +/** + * qed_iov_alloc(): allocate sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_setup - setup sriov related resources + * qed_iov_setup(): setup sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_iov_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_free - free sriov related resources + * qed_iov_free(): free sriov related resources + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_iov_free(struct qed_hwfn *p_hwfn); /** - * @brief free sriov related memory that was allocated during hw_prepare + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief Mark structs of vfs that have been FLR-ed. + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. * - * @param p_hwfn - * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed * - * @return true iff one of the PF's vfs got FLRed. false otherwise. + * Return: true iff one of the PF's vfs got FLRed. false otherwise. */ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** - * @brief Search extended TLVs in request/reply buffer. + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. * - * @param p_hwfn - * @param p_tlvs_list - Pointer to tlvs list - * @param req_type - Type of TLV + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV * - * @return pointer to tlv type if found, otherwise returns NULL. + * Return: pointer to tlv type if found, otherwise returns NULL. 
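+ *
+ * For example, on the PF side (the TLV type used here is illustrative):
+ *
+ *	struct channel_tlv *p_tlv;
+ *
+ *	p_tlv = qed_iov_search_list_tlvs(p_hwfn, mbx->req_virt,
+ *					 CHANNEL_TLV_VPORT_UPDATE_RSS);
+ *	if (!p_tlv)
+ *		... the VF did not include this extended TLV ...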
*/ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type); @@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { } + +static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) +{ +} + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + return 0; +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72a38d53d33f68..597cd9cd57b541 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) "preparing to send 0x%04x tlv over vf pf channel\n", type); - /* Reset Requst offset */ + /* Reset Request offset */ p_iov->offset = (u8 *)p_iov->vf2pf_request; /* Clear mailbox - both request and reply */ @@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) u32 reg; int rc; - /* Set number of hwfns - might be overriden once leading hwfn learns + /* Set number of hwfns - might be overridden once leading hwfn learns * actual configuration from PF. */ if (IS_LEAD_HWFN(p_hwfn)) @@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", p_iov->vf2pf_request, - (u64) p_iov->vf2pf_request_phys, + (u64)p_iov->vf2pf_request_phys, p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); /* Allocate Bulletin board */ @@ -561,6 +561,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return -ENOMEM; } + #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) @@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); - req->opcode = (u8) p_ucast->opcode; - req->type = (u8) p_ucast->type; + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; memcpy(req->mac, p_ucast->mac, ETH_ALEN); req->vlan = p_ucast->vlan; @@ -1372,7 +1373,7 @@ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_bulletin_update_mac_tlv *p_req; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 60d2bb64e65fb6..306b5f4bc63210 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -48,7 +48,7 @@ struct channel_tlv { u16 length; }; -/* header of first vf->pf tlv carries the offset used to calculate reponse +/* header of first vf->pf tlv carries the offset used to calculate response * buffer address */ struct vfpf_first_tlv { @@ -85,8 +85,8 @@ struct vfpf_acquire_tlv { struct vfpf_first_tlv first_tlv; struct vf_pf_vfdev_info { -#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ -#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */ +#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */ /* A requirement for supporting multi-Tx queues on a 
single queue-zone, * VF would pass qids as additional information whenever passing queue * references. @@ -688,13 +688,16 @@ struct qed_vf_iov { }; /** - * @brief VF - Set Rx/Tx coalesce per VF's relative queue. - * Coalesce value '0' will omit the configuration. + * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. * - * @param p_hwfn - * @param rx_coal - coalesce value in micro second for rx queue - * @param tx_coal - coalesce value in micro second for tx queue - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. * **/ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, @@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 tx_coal, struct qed_queue_cid *p_cid); /** - * @brief VF - Get coalesce per VF's relative queue. + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. * - * @param p_hwfn - * @param p_coal - coalesce value in micro second for VF queues. - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. * + * Return: Int. **/ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid); #ifdef CONFIG_QED_SRIOV /** - * @brief Read the VF bulletin and act on it if needed + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. * - * @param p_hwfn - * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. * - * @return enum _qed_status + * Return: enum _qed_status. */ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); /** - * @brief Get link paramters for VF from qed + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. * - * @param p_hwfn - * @param params - the link params structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params); /** - * @brief Get link state for VF from qed + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF * - * @param p_hwfn - * @param link - the link state structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link); /** - * @brief Get link capabilities for VF from qed + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. * - * @param p_hwfn - * @param p_link_caps - the link capabilities structure to be filled for the VF + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. */ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues * - * @param p_hwfn - * @param num_rxqs - allocated RX queues + * Return: Void. 
*/ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_txqs(): Get number of Rx queues allocated for VF by qed * - * @param p_hwfn - * @param num_txqs - allocated RX queues + * @p_hwfn: HW device data. + * @num_txqs: allocated RX queues + * + * Return: Void. */ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); /** - * @brief Get number of available connections [both Rx and Tx] for VF + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections * - * @param p_hwfn - * @param num_cids - allocated number of connections + * Return: Void. */ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); /** - * @brief Get port mac address for VF + * qed_vf_get_port_mac(): Get port mac address for VF. * - * @param p_hwfn - * @param port_mac - destination location for port mac + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. */ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); /** - * @brief Get number of VLAN filters allocated for VF by qed + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters * - * @param p_hwfn - * @param num_rxqs - allocated VLAN filters + * Return: Void. */ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** - * @brief Get number of MAC filters allocated for VF by qed + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed * - * @param p_hwfn - * @param num_rxqs - allocated MAC filters + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. */ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); /** - * @brief Check if VF can set a MAC address + * qed_vf_check_mac(): Check if VF can set a MAC address * - * @param p_hwfn - * @param mac + * @p_hwfn: HW device data. + * @mac: Mac. * - * @return bool + * Return: bool. */ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); /** - * @brief Set firmware version information in dev_info from VFs acquire response tlv + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from VFs acquire response tlv + * + * @p_hwfn: HW device data. + * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. * - * @param p_hwfn - * @param fw_major - * @param fw_minor - * @param fw_rev - * @param fw_eng + * Return: Void. */ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); /** - * @brief hw preparation for VF - * sends ACQUIRE message + * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** - * @brief VF - start the RX Queue by sending a message to the PF - * @param p_hwfn - * @param p_cid - Only relative fields are relevant - * @param bd_max_bytes - maximum number of bytes per bd - * @param bd_chain_phys_addr - physical address of bd chain - * @param cqe_pbl_addr - physical address of pbl - * @param cqe_pbl_size - pbl size - * @param pp_prod - pointer to the producer to be - * used in fastpath + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. + * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod); /** - * @brief VF - start the TX queue by sending a message to the - * PF. + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. * - * @param p_hwfn - * @param tx_queue_id - zero based within the VF - * @param sb - status block for this queue - * @param sb_index - index within the status block - * @param bd_chain_phys_addr - physical address of tx chain - * @param pp_doorbell - pointer to address to which to - * write the doorbell too.. + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to address to which to write the doorbell too. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, @@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell); /** - * @brief VF - stop the RX queue by sending a message to the PF + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. * - * @param p_hwfn - * @param p_cid - * @param cqe_completion + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion); /** - * @brief VF - stop the TX queue by sending a message to the PF + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. * - * @param p_hwfn - * @param tx_qid + * @p_hwfn: HW device data. + * @p_cid: CID. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** - * @brief VF - send a vport update command + * qed_vf_pf_vport_update(): VF - send a vport update command. * - * @param p_hwfn - * @param params + * @p_hwfn: HW device data. + * @p_params: Params * - * @return int + * Return: Int. */ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params); /** + * qed_vf_pf_reset(): VF - send a close message to PF. * - * @brief VF - send a close message to PF + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); /** - * @brief VF - free vf`s memories + * qed_vf_pf_release(): VF - free vf`s memories. * - * @param p_hwfn + * @p_hwfn: HW device data. 
* - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** - * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous * - * @param p_hwfn - * @param sb_id + * @p_hwfn: HW device data. + * @sb_id: SB ID. * - * @return INLINE u16 + * Return: INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief Stores [or removes] a configured sb_info. + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. + * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. * - * @param p_hwfn - * @param sb_id - zero-based SB index [for fastpath] - * @param sb_info - may be NULL [during removal]. + * Return: Void. */ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb); /** - * @brief qed_vf_pf_vport_start - perform vport start for VF. + * qed_vf_pf_vport_start(): perform vport start for VF. * - * @param p_hwfn - * @param vport_id - * @param mtu - * @param inner_vlan_removal - * @param tpa_mode - * @param max_buffers_per_cqe, - * @param only_untagged - default behavior regarding vlan acceptance + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Innter VLAN removal. + * @tpa_mode: TPA mode + * @max_buffers_per_cqe: Max buffer pre CQE. + * @only_untagged: default behavior regarding vlan acceptance * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, @@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 max_buffers_per_cqe, u8 only_untagged); /** - * @brief qed_vf_pf_vport_stop - stop the VF's vport + * qed_vf_pf_vport_stop(): stop the VF's vport * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); @@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd); /** - * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * qed_vf_pf_int_cleanup(): clean the SB of the VF * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); /** - * @brief - return the link params in a given bulletin board + * __qed_vf_get_link_params(): return the link params in a given bulletin board * - * @param p_hwfn - * @param p_params - pointer to a struct to fill with link params - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link state in a given bulletin board + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link state - * @param p_bulletin + * Return: Void. 
*/ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link capabilities in a given bulletin board + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link capabilities - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, @@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); /** - * @brief - Ask PF to update the MAC address in it's bulletin board + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * it's bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board * - * @param p_mac - mac address to be updated in bulletin board + * Return: Int. */ -int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac); #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, @@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a2e4dfb5cb44e7..3010833ddde33d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) return; } - ether_addr_copy(edev->ndev->dev_addr, mac); + eth_hw_addr_set(edev->ndev, mac); __qede_unlock(edev); } @@ -617,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev, static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) + const unsigned char mac[ETH_ALEN]) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.mac_valid = 1; + ether_addr_copy(ucast.mac, mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_set_ucast_rx_vlan(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, u16 vid) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.vlan_valid = 1; + ucast.vlan = vid; - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) @@ -1057,18 +1055,17 @@ 
static int qede_set_mcast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char *mac, int num_macs) { - struct qed_filter_params filter_cmd; + struct qed_filter_mcast_params mcast; int i; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; + memset(&mcast, 0, sizeof(mcast)); + mcast.type = opcode; + mcast.num = num_macs; for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + ether_addr_copy(mcast.mac[i], mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_mcast(edev->cdev, &mcast); } int qede_set_mac_addr(struct net_device *ndev, void *p) @@ -1104,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p) goto out; } - ether_addr_copy(ndev->dev_addr, addr->sa_data); + eth_hw_addr_set(ndev, addr->sa_data); DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data); if (edev->state != QEDE_STATE_OPEN) { @@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev) { enum qed_filter_rx_mode_type accept_flags; struct qede_dev *edev = netdev_priv(ndev); - struct qed_filter_params rx_mode; unsigned char *uc_macs, *temp; struct netdev_hw_addr *ha; int rc, uc_count; @@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev) netif_addr_unlock_bh(ndev); - /* Configure the struct for the Rx mode */ - memset(&rx_mode, 0, sizeof(struct qed_filter_params)); - rx_mode.type = QED_FILTER_TYPE_RX_MODE; - /* Remove all previous unicast secondary macs and multicast macs * (configure / leave the primary mac) */ @@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev) qede_config_accept_any_vlan(edev, false); } - rx_mode.filter.accept_flags = accept_flags; - edev->ops->filter_config(edev->cdev, &rx_mode); + edev->ops->filter_config_rx_mode(edev->cdev, accept_flags); out: kfree(uc_macs); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9837bdb89cd403..06c6a581360639 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev) ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; /* Set network device HW mac */ - ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); + eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac); ndev->mtu = edev->dev_info.common.mtu; } @@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->devlink = qed_ops->common->devlink_register(cdev); if (IS_ERR(edev->devlink)) { DP_NOTICE(edev, "Cannot register devlink\n"); + rc = PTR_ERR(edev->devlink); edev->devlink = NULL; - /* Go on, we can live without devlink */ + goto err3; } } else { struct net_device *ndev = pci_get_drvdata(pdev); + struct qed_devlink *qdl; edev = netdev_priv(ndev); - - if (edev->devlink) { - struct qed_devlink *qdl = devlink_priv(edev->devlink); - - qdl->cdev = cdev; - } + qdl = devlink_priv(edev->devlink); + qdl->cdev = cdev; edev->cdev = cdev; memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); @@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, static int qede_alloc_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct 
status_block *sb_virt; dma_addr_t sb_phys; int rc; @@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data) } /** - * qede_io_error_detected - called when PCI error is detected + * qede_io_error_detected(): Called when PCI error is detected + * * @pdev: Pointer to PCI device * @state: The current pci connection state * + *Return: pci_ers_result_t. + * * This function is called after a PCI bus error affecting * this device has been detected. */ diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c00ad57575eab1..1e6d72adfe4399 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev, static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) { - __le16 *p = (__le16 *)ndev->dev_addr; - p[0] = cpu_to_le16(addr[0]); - p[1] = cpu_to_le16(addr[1]); - p[2] = cpu_to_le16(addr[2]); + __le16 buf[ETH_ALEN / 2]; + + buf[0] = cpu_to_le16(addr[0]); + buf[1] = cpu_to_le16(addr[1]); + buf[2] = cpu_to_le16(addr[2]); + eth_hw_addr_set(ndev, (u8 *)buf); } static int ql_get_nvram_params(struct ql3_adapter *qdev) @@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&qdev->hw_lock, hw_flags); /* Program lower 32 bits of the MAC address */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 75960a29f80ea8..ed84f0f9762305 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) if (ret) return ret; - memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, mac_addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ @@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); qlcnic_set_multi(adapter->netdev); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 87b8c032195d0e..06104d2ff5b356 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -420,7 +420,7 @@ static void emac_mac_dma_config(struct emac_adapter *adpt) } /* set MAC address */ -static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr) +static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr) { u32 sta; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 9015a38eaced81..a55c52696d49f6 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -545,13 +545,10 @@ static int emac_probe_resources(struct platform_device *pdev, struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - char maddr[ETH_ALEN]; int ret = 0; /* get mac address */ - if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN)) - ether_addr_copy(netdev->dev_addr, maddr); - else + if 
(device_get_ethdev_address(&pdev->dev, netdev)) eth_hw_addr_random(netdev); /* Core 0 interrupt */ diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 8427fe1b8fd1c9..955cce644392a9 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -968,7 +968,7 @@ qca_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, qcaspi_devs); - ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr); + ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&spi->dev, "Using random MAC address: %pM\n", diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index ce3f7ce31adc1a..27c4f43176aaaf 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -347,7 +347,7 @@ static int qca_uart_probe(struct serdev_device *serdev) of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); - ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr); + ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&serdev->dev, "Using random MAC address: %pM\n", diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 13d8eb43a48595..1b2119b1d48aac 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -224,7 +224,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - eth_random_addr(rmnet_dev->dev_addr); + eth_hw_addr_random(rmnet_dev); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 01ef5efd7bc2ac..a6bf7d50517862 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -453,7 +453,7 @@ static void r6040_down(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; + const u16 *adrp; /* Stop MAC */ iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ @@ -462,7 +462,7 @@ static void r6040_down(struct net_device *dev) r6040_reset_mac(lp); /* Restore MAC Address to MIDx */ - adrp = (u16 *) dev->dev_addr; + adrp = (const u16 *) dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -731,13 +731,13 @@ static void r6040_mac_address(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; + const u16 *adrp; /* Reset MAC */ r6040_reset_mac(lp); /* Restore MAC Address */ - adrp = (u16 *) dev->dev_addr; + adrp = (const u16 *) dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -849,13 +849,13 @@ static void r6040_multicast_list(struct net_device *dev) unsigned long flags; struct netdev_hw_addr *ha; int i; - u16 *adrp; + const u16 *adrp; u16 hash_table[4] = { 0 }; spin_lock_irqsave(&lp->lock, flags); /* Keep our MAC Address */ - adrp = (u16 *)dev->dev_addr; + adrp = (const u16 *)dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -1031,8 +1031,8 @@ static int 
r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioaddr; int err, io_size = R6040_IO_SIZE; static int card_idx = -1; + u16 addr[ETH_ALEN / 2]; int bar = 0; - u16 *adrp; pr_info("%s\n", version); @@ -1102,14 +1102,14 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Set MAC address */ card_idx++; - adrp = (u16 *)dev->dev_addr; - adrp[0] = ioread16(ioaddr + MID_0L); - adrp[1] = ioread16(ioaddr + MID_0M); - adrp[2] = ioread16(ioaddr + MID_0H); + addr[0] = ioread16(ioaddr + MID_0L); + addr[1] = ioread16(ioaddr + MID_0M); + addr[2] = ioread16(ioaddr + MID_0H); + eth_hw_addr_set(dev, (u8 *)addr); /* Some bootloader/BIOSes do not initialize * MAC address, warn about that */ - if (!(adrp[0] || adrp[1] || adrp[2])) { + if (!(addr[0] || addr[1] || addr[2])) { netdev_warn(dev, "MAC address not initialized, " "generating random\n"); eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 2b84b4565e64f1..4f39f843bb3a51 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1624,7 +1624,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&cp->lock); @@ -1889,6 +1889,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *regs; resource_size_t pciaddr; unsigned int addr_len, i, pci_using_dac; + __le16 addr[ETH_ALEN / 2]; pr_info_once("%s", version); @@ -1979,8 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* read MAC address from EEPROM */ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((__le16 *) (dev->dev_addr))[i] = - cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); + addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); dev->netdev_ops = &cp_netdev_ops; netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 2e6923cc653e21..15b40fd93cd2e7 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -945,6 +945,7 @@ static int rtl8139_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct rtl8139_private *tp; + __le16 addr[ETH_ALEN / 2]; int i, addr_len, option; void __iomem *ioaddr; static int board_idx = -1; @@ -994,8 +995,8 @@ static int rtl8139_init_one(struct pci_dev *pdev, addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((__le16 *) (dev->dev_addr))[i] = - cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); /* The Rtl8139-specific entries in the device structure. 
*/ dev->netdev_ops = &rtl8139_netdev_ops; @@ -2238,7 +2239,7 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&tp->lock); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index b6c849b258a048..6cbcb31643679e 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -368,6 +368,7 @@ static int __init atp_probe1(long ioaddr) static void __init get_node_ID(struct net_device *dev) { long ioaddr = dev->base_addr; + __be16 addr[ETH_ALEN / 2]; int sa_offset = 0; int i; @@ -379,8 +380,9 @@ static void __init get_node_ID(struct net_device *dev) sa_offset = 15; for (i = 0; i < 3; i++) - ((__be16 *)dev->dev_addr)[i] = + addr[i] = cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i))); + eth_hw_addr_set(dev, (u8 *)addr); write_reg(ioaddr, CMR2, CMR2_NULL); } diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 2728df46ec4105..8da4b66b71b5de 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -37,7 +37,7 @@ enum mac_version { RTL_GIGA_MAC_VER_24, RTL_GIGA_MAC_VER_25, RTL_GIGA_MAC_VER_26, - RTL_GIGA_MAC_VER_27, + /* support for RTL_GIGA_MAC_VER_27 has been removed */ RTL_GIGA_MAC_VER_28, RTL_GIGA_MAC_VER_29, RTL_GIGA_MAC_VER_30, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 2918947dd57c9e..bbe21db204172d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -118,7 +118,6 @@ static const struct { [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, @@ -986,33 +985,6 @@ DECLARE_RTL_COND(rtl_ocpar_cond) return RTL_R32(tp, OCPAR) & OCPAR_FLAG; } -static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) -{ - RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); -} - -static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_1_mdio_access(tp, reg, - OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); -} - -static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) -{ - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); - - mdelay(1); - RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; -} - #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 static void r8168dp_2_mdio_start(struct rtl8169_private *tp) @@ -1054,9 +1026,6 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) static void rtl_writephy(struct rtl8169_private *tp, int location, int val) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - r8168dp_1_mdio_write(tp, location, val); - break; case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); @@ -1073,8 +1042,6 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) static int rtl_readphy(struct rtl8169_private *tp, int location) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - return r8168dp_1_mdio_read(tp, location); case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); @@ -1236,7 +1203,6 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp) static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; @@ -2041,8 +2007,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168DP family. */ /* It seems this early RTL8168dp version never made it to - * the wild. Let's see whether somebody complains, if not - * we'll remove support for this chip version completely. + * the wild. Support has been removed. * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, */ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, @@ -2372,7 +2337,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) r8168c_hw_jumbo_disable(tp); } break; - case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_28: if (jumbo) r8168dp_hw_jumbo_enable(tp); else @@ -3720,7 +3685,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, - [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, @@ -3983,7 +3947,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) goto no_reset; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); @@ -5255,7 +5218,7 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static void rtl_init_mac_address(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; - u8 *mac_addr = dev->dev_addr; + u8 mac_addr[ETH_ALEN]; int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@ -5273,6 +5236,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) eth_hw_addr_random(dev); dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: + eth_hw_addr_set(dev, mac_addr); rtl_rar_set(tp, mac_addr); } diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 50f0f621b1aa8a..f7ad5487879b83 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -548,64 +548,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); } -static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, - struct 
phy_device *phydev) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x10, 0x0008 }, - { 0x0d, 0x006c }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0001 }, - { 0x0b, 0xa4d8 }, - { 0x09, 0x281c }, - { 0x07, 0x2883 }, - { 0x0a, 0x6b35 }, - { 0x1d, 0x3da4 }, - { 0x1c, 0xeffd }, - { 0x14, 0x7f52 }, - { 0x18, 0x7fc6 }, - { 0x08, 0x0601 }, - { 0x06, 0x4063 }, - { 0x10, 0xf074 }, - { 0x1f, 0x0003 }, - { 0x13, 0x0789 }, - { 0x12, 0xf4bd }, - { 0x1a, 0x04fd }, - { 0x14, 0x84b0 }, - { 0x1f, 0x0000 }, - { 0x00, 0x9200 }, - - { 0x1f, 0x0005 }, - { 0x01, 0x0340 }, - { 0x1f, 0x0001 }, - { 0x04, 0x4000 }, - { 0x03, 0x1d21 }, - { 0x02, 0x0c32 }, - { 0x01, 0x0200 }, - { 0x00, 0x5554 }, - { 0x04, 0x4800 }, - { 0x04, 0x4000 }, - { 0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0xf000 }, - { 0x1f, 0x0000 }, - }; - - rtl_writephy_batch(phydev, phy_reg_init); - r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); -} - static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1332,7 +1274,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, - [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 47c5377e4f4247..08062d73df10c4 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -81,6 +81,7 @@ enum ravb_reg { RQC3 = 0x00A0, RQC4 = 0x00A4, RPC = 0x00B0, + RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */ UFCW = 0x00BC, UFCS = 0x00C0, UFCV0 = 0x00C4, @@ -187,19 +188,23 @@ enum ravb_reg { PIR = 0x0520, PSR = 0x0528, PIPR = 0x052c, + CXR31 = 0x0530, /* RZ/G2L only */ MPR = 0x0558, PFTCR = 0x055c, PFRCR = 0x0560, GECMR = 0x05b0, MAHR = 0x05c0, MALR = 0x05c8, - TROCR = 0x0700, /* R-Car Gen3 only */ + TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */ + CXR41 = 0x0708, /* RZ/G2L only */ + CXR42 = 0x0710, /* RZ/G2L only */ CEFCR = 0x0740, FRECR = 0x0748, TSFRCR = 0x0750, TLFRCR = 0x0758, RFCR = 0x0760, MAFCR = 0x0778, + CSR0 = 0x0800, /* RZ/G2L only */ }; @@ -810,10 +815,11 @@ enum ECMR_BIT { ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */ ECMR_RXF = 0x00020000, ECMR_PFR = 0x00040000, - ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */ + ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */ ECMR_RZPF = 0x00100000, ECMR_DPAD = 0x00200000, ECMR_RCSC = 0x00800000, + ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */ ECMR_TRCCM = 0x04000000, }; @@ -823,6 +829,7 @@ enum ECSR_BIT { ECSR_MPD = 0x00000002, ECSR_LCHNG = 0x00000004, ECSR_PHYI = 0x00000008, + ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */ }; /* ECSIPR */ @@ -857,9 +864,13 @@ enum MPR_BIT { /* GECMR */ enum GECMR_BIT { - GECMR_SPEED = 0x00000001, - GECMR_SPEED_100 = 0x00000000, - GECMR_SPEED_1000 = 0x00000001, + GECMR_SPEED = 0x00000001, + GECMR_SPEED_100 = 0x00000000, + GECMR_SPEED_1000 = 0x00000001, + GBETH_GECMR_SPEED = 0x00000030, + GBETH_GECMR_SPEED_10 = 0x00000000, + GBETH_GECMR_SPEED_100 = 0x00000010, + GBETH_GECMR_SPEED_1000 = 0x00000020, }; /* The 
Ethernet AVB descriptor definitions. */ @@ -949,6 +960,16 @@ enum RAVB_QUEUE { RAVB_NC, /* Network Control Queue */ }; +enum CXR31_BIT { + CXR31_SEL_LINK0 = 0x00000001, + CXR31_SEL_LINK1 = 0x00000008, +}; + +enum CSR0_BIT { + CSR0_TPE = 0x00000010, + CSR0_RPE = 0x00000020, +}; + #define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 @@ -956,6 +977,9 @@ enum RAVB_QUEUE { #define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) +#define GBETH_RX_BUFF_MAX 8192 +#define GBETH_RX_DESC_DATA_SIZE 4080 + struct ravb_tstamp_skb { struct list_head list; struct sk_buff *skb; @@ -985,8 +1009,8 @@ struct ravb_hw_info { void *(*alloc_rx_desc)(struct net_device *ndev, int q); bool (*receive)(struct net_device *ndev, int *quota, int q); void (*set_rate)(struct net_device *ndev); - int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features); - void (*dmac_init)(struct net_device *ndev); + int (*set_feature)(struct net_device *ndev, netdev_features_t features); + int (*dmac_init)(struct net_device *ndev); void (*emac_init)(struct net_device *ndev); const char (*gstrings_stats)[ETH_GSTRING_LEN]; size_t gstrings_size; @@ -994,14 +1018,20 @@ struct ravb_hw_info { netdev_features_t net_features; int stats_len; size_t max_rx_len; + u32 tccr_mask; + u32 rx_max_buf_size; unsigned aligned_tx: 1; /* hardware features */ unsigned internal_delay:1; /* AVB-DMAC has internal delays */ unsigned tx_counters:1; /* E-MAC has TX counters */ + unsigned carrier_counters:1; /* E-MAC has carrier counters */ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ - unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */ - unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */ + unsigned gptp:1; /* AVB-DMAC has gPTP support */ + unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ + unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ + unsigned magic_pkt:1; /* E-MAC supports magic packet detection */ + unsigned half_duplex:1; /* E-MAC supports half duplex mode */ }; struct ravb_private { @@ -1018,9 +1048,11 @@ struct ravb_private { struct ravb_desc *desc_bat; dma_addr_t rx_desc_dma[NUM_RX_QUEUE]; dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; + struct ravb_rx_desc *gbeth_rx_ring; struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; void *tx_align[NUM_TX_QUEUE]; + struct sk_buff *rx_1st_skb; struct sk_buff **rx_skb[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; u32 rx_over_errors; @@ -1056,6 +1088,8 @@ struct ravb_private { unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */ unsigned int num_tx_desc; /* TX descriptors per packet */ + int duplex; + const struct ravb_hw_info *info; struct reset_control *rstc; }; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0f85f2d97b18d8..b4c597f4040c8b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -83,7 +83,24 @@ static int ravb_config(struct net_device *ndev) return error; } -static void ravb_set_rate(struct net_device *ndev) +static void ravb_set_rate_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + switch (priv->speed) { + case 10: /* 10BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR); + break; + case 100: /* 100BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR); + break; + case 1000: /* 1000BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR); 
+ break; + } +} + +static void ravb_set_rate_rcar(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -115,17 +132,19 @@ static void ravb_read_mac_address(struct device_node *np, { int ret; - ret = of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { u32 mahr = ravb_read(ndev, MAHR); u32 malr = ravb_read(ndev, MALR); + u8 addr[ETH_ALEN]; - ndev->dev_addr[0] = (mahr >> 24) & 0xFF; - ndev->dev_addr[1] = (mahr >> 16) & 0xFF; - ndev->dev_addr[2] = (mahr >> 8) & 0xFF; - ndev->dev_addr[3] = (mahr >> 0) & 0xFF; - ndev->dev_addr[4] = (malr >> 8) & 0xFF; - ndev->dev_addr[5] = (malr >> 0) & 0xFF; + addr[0] = (mahr >> 24) & 0xFF; + addr[1] = (mahr >> 16) & 0xFF; + addr[2] = (mahr >> 8) & 0xFF; + addr[3] = (mahr >> 0) & 0xFF; + addr[4] = (malr >> 8) & 0xFF; + addr[5] = (malr >> 0) & 0xFF; + eth_hw_addr_set(ndev, addr); } } @@ -217,7 +236,32 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) return free_num; } -static void ravb_rx_ring_free(struct net_device *ndev, int q) +static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + unsigned int i; + + if (!priv->gbeth_rx_ring) + return; + + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i]; + + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + } + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring, + priv->rx_desc_dma[q]); + priv->gbeth_rx_ring = NULL; +} + +static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; @@ -283,7 +327,38 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -static void ravb_rx_ring_format(struct net_device *ndev, int q) +static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_rx_desc *rx_desc; + unsigned int rx_ring_size; + dma_addr_t dma_addr; + unsigned int i; + + rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; + memset(priv->gbeth_rx_ring, 0, rx_ring_size); + /* Build RX ring buffer */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + /* RX descriptor */ + rx_desc = &priv->gbeth_rx_ring[i]; + rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); + dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... 
+ */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); + rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->die_dt = DT_FEMPTY; + } + rx_desc = &priv->gbeth_rx_ring[i]; + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); struct ravb_ex_rx_desc *rx_desc; @@ -356,7 +431,20 @@ static void ravb_ring_format(struct net_device *ndev, int q) desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); } -static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) +static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->gbeth_rx_ring; +} + +static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; @@ -426,7 +514,37 @@ static int ravb_ring_init(struct net_device *ndev, int q) return -ENOMEM; } -static void ravb_rcar_emac_init(struct net_device *ndev) +static void ravb_emac_init_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Receive frame limit set register */ + ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); + + /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ + ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE | ECMR_RCPT | + ECMR_TXF | ECMR_RXF, ECMR); + + ravb_set_rate_gbeth(ndev); + + /* Set MAC address */ + ravb_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); + + /* E-MAC status register clear */ + ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); + ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); + + /* E-MAC interrupt enable register */ + ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); + + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0); +} + +static void ravb_emac_init_rcar(struct net_device *ndev) { /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); @@ -436,7 +554,7 @@ static void ravb_rcar_emac_init(struct net_device *ndev) (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); - ravb_set_rate(ndev); + ravb_set_rate_rcar(ndev); /* Set MAC address */ ravb_write(ndev, @@ -461,10 +579,58 @@ static void ravb_emac_init(struct net_device *ndev) info->emac_init(ndev); } -static void ravb_rcar_dmac_init(struct net_device *ndev) +static int ravb_dmac_init_gbeth(struct net_device *ndev) +{ + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + + /* Set DMAC RX */ + ravb_write(ndev, 0x60000000, RCR); + + /* Set Max Frame Length (RTC) */ + ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC); + + /* Set FIFO size */ + ravb_write(ndev, 0x00222200, TGC); + + ravb_write(ndev, 0, TCCR); + + /* Frame receive */ + ravb_write(ndev, RIC0_FRE0, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0x0, RIC1); + /* Receive FIFO full error, descriptor empty */ + ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2); + + ravb_write(ndev, TIC_FTE0, TIC); + + return 0; +} + +static int ravb_dmac_init_rcar(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); /* Set AVB RX */ ravb_write(ndev, @@ -491,6 +657,8 @@ static void ravb_rcar_dmac_init(struct net_device *ndev) ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); + + return 0; } /* Device init function for Ethernet AVB */ @@ -505,20 +673,9 @@ static int ravb_dmac_init(struct net_device *ndev) if (error) return error; - error = ravb_ring_init(ndev, RAVB_BE); + error = info->dmac_init(ndev); if (error) return error; - error = ravb_ring_init(ndev, RAVB_NC); - if (error) { - ravb_ring_free(ndev, RAVB_BE); - return error; - } - - /* Descriptor format */ - ravb_ring_format(ndev, RAVB_BE); - ravb_ring_format(ndev, RAVB_NC); - - info->dmac_init(ndev); /* Setting the control will start the AVB-DMAC process. 
*/ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); @@ -579,7 +736,151 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q) +static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, + struct ravb_rx_desc *desc) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + + skb = priv->rx_skb[RAVB_BE][entry]; + priv->rx_skb[RAVB_BE][entry] = NULL; + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE); + + return skb; +} + +/* Packet receive function for Gigabit Ethernet */ +static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct net_device_stats *stats; + struct ravb_rx_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + u8 desc_status; + int boguscnt; + u16 pkt_len; + u8 die_dt; + int entry; + int limit; + + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + stats = &priv->stats[q]; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + desc = &priv->gbeth_rx_ring[entry]; + while (desc->die_dt != DT_FEMPTY) { + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + desc_status = desc->msc; + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; + + if (--boguscnt < 0) + break; + + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + + if (desc_status & MSC_MC) + stats->multicast++; + + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) { + stats->rx_errors++; + if (desc_status & MSC_CRC) + stats->rx_crc_errors++; + if (desc_status & MSC_RFE) + stats->rx_frame_errors++; + if (desc_status & (MSC_RTLF | MSC_RTSF)) + stats->rx_length_errors++; + if (desc_status & MSC_CEEF) + stats->rx_missed_errors++; + } else { + die_dt = desc->die_dt & 0xF0; + switch (die_dt) { + case DT_FSINGLE: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi[q], skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + break; + case DT_FSTART: + priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(priv->rx_1st_skb, pkt_len); + break; + case DT_FMID: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + break; + case DT_FEND: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + priv->rx_1st_skb->protocol = + eth_type_trans(priv->rx_1st_skb, ndev); + napi_gro_receive(&priv->napi[q], + priv->rx_1st_skb); + stats->rx_packets++; + stats->rx_bytes += priv->rx_1st_skb->len; + break; + } + } + + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } + + /* Refill the RX ring buffers. 
*/ + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); + + if (!priv->rx_skb[q][entry]) { + skb = netdev_alloc_skb(ndev, info->max_rx_len); + if (!skb) + break; + ravb_set_buffer_align(skb); + dma_addr = dma_map_single(ndev->dev.parent, + skb->data, + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->ds_cc = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + priv->rx_skb[q][entry] = skb; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEMPTY; + } + + *quota -= limit - (++boguscnt); + + return boguscnt <= 0; +} + +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; @@ -717,11 +1018,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev) /* function for waiting dma process finished */ static int ravb_stop_dma(struct net_device *ndev) { + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int error; /* Wait for stopping the hardware TX process */ - error = ravb_wait(ndev, TCCR, - TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0); + error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); + if (error) return error; @@ -859,6 +1162,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; irqreturn_t result = IRQ_NONE; u32 iss; @@ -875,8 +1179,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; /* Network control and best effort queue RX/TX */ - for (q = RAVB_NC; q >= RAVB_BE; q--) { - if (ravb_queue_interrupt(ndev, q)) + if (info->nc_queues) { + for (q = RAVB_NC; q >= RAVB_BE; q--) { + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + } + } else { + if (ravb_queue_interrupt(ndev, RAVB_BE)) result = IRQ_HANDLED; } } @@ -966,16 +1275,25 @@ static int ravb_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + bool gptp = info->gptp || info->ccc_gac; + struct ravb_rx_desc *desc; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); int quota = budget; + unsigned int entry; + if (!gptp) { + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (ravb_rx(ndev, "a, q)) - goto out; + if (gptp || desc->die_dt != DT_FEMPTY) { + if (ravb_rx(ndev, "a, q)) + goto out; + } /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); @@ -1000,7 +1318,8 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; - priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; + if (info->nc_queues) + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; if (priv->rx_over_errors != ndev->stats.rx_over_errors) 
ndev->stats.rx_over_errors = priv->rx_over_errors; if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) @@ -1009,6 +1328,13 @@ static int ravb_poll(struct napi_struct *napi, int budget) return budget - quota; } +static void ravb_set_duplex_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); +} + /* PHY state control function */ static void ravb_adjust_link(struct net_device *ndev) { @@ -1025,6 +1351,12 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_rcv_snd_disable(ndev); if (phydev->link) { + if (info->half_duplex && phydev->duplex != priv->duplex) { + new_state = true; + priv->duplex = phydev->duplex; + ravb_set_duplex_gbeth(ndev); + } + if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; @@ -1039,6 +1371,8 @@ static void ravb_adjust_link(struct net_device *ndev) new_state = true; priv->link = 0; priv->speed = 0; + if (info->half_duplex) + priv->duplex = -1; } /* Enable TX and RX right over here, if E-MAC change is ignored */ @@ -1061,6 +1395,7 @@ static int ravb_phy_init(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct phy_device *phydev; struct device_node *pn; phy_interface_t iface; @@ -1068,6 +1403,7 @@ static int ravb_phy_init(struct net_device *ndev) priv->link = 0; priv->speed = 0; + priv->duplex = -1; /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); @@ -1106,15 +1442,17 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } - /* 10BASE, Pause and Asym Pause is not supported */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + if (!info->half_duplex) { + /* 10BASE, Pause and Asym Pause is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); - /* Half Duplex is not supported */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + /* Half Duplex is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + } phy_attached_info(phydev); @@ -1157,6 +1495,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value) priv->msg_enable = value; } +static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = { + "rx_queue_0_current", + "tx_queue_0_current", + "rx_queue_0_dirty", + "tx_queue_0_dirty", + "rx_queue_0_packets", + "tx_queue_0_packets", + "rx_queue_0_bytes", + "tx_queue_0_bytes", + "rx_queue_0_mcast_packets", + "rx_queue_0_errors", + "rx_queue_0_crc_errors", + "rx_queue_0_frame_errors", + "rx_queue_0_length_errors", + "rx_queue_0_csum_offload_errors", + "rx_queue_0_over_errors", +}; + static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_queue_0_current", "tx_queue_0_current", @@ -1208,11 +1564,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *estats, u64 
*data) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int num_rx_q; int i = 0; int q; + num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; /* Device-specific stats */ - for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) { + for (q = RAVB_BE; q < num_rx_q; q++) { struct net_device_stats *stats = &priv->stats[q]; data[i++] = priv->cur_rx[q]; @@ -1274,7 +1633,7 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); @@ -1287,7 +1646,8 @@ static int ravb_set_ringparam(struct net_device *ndev, /* Free all the skb's in the RX queue and the DMA buffers. */ ravb_ring_free(ndev, RAVB_BE); - ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); } /* Set new parameters */ @@ -1306,7 +1666,7 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); @@ -1319,6 +1679,7 @@ static int ravb_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *hw_info = priv->info; info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | @@ -1332,7 +1693,8 @@ static int ravb_get_ts_info(struct net_device *ndev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_ALL); - info->phc_index = ptp_clock_index(priv->ptp.clock); + if (hw_info->gptp || hw_info->ccc_gac) + info->phc_index = ptp_clock_index(priv->ptp.clock); return 0; } @@ -1348,8 +1710,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; - if (wol->wolopts & ~WAKE_MAGIC) + if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) return -EOPNOTSUPP; priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -1403,7 +1766,8 @@ static int ravb_open(struct net_device *ndev) int error; napi_enable(&priv->napi[RAVB_BE]); - napi_enable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); if (!info->multi_irqs) { error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, @@ -1446,7 +1810,7 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1460,7 +1824,7 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); out_free_irq_nc_tx: if (!info->multi_irqs) @@ -1477,7 +1841,8 @@ static int ravb_open(struct net_device *ndev) out_free_irq: free_irq(ndev->irq, ndev); out_napi_off: - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); return error; } @@ -1508,7 +1873,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ @@ -1526,7 +1891,8 @@ static void 
ravb_tx_timeout_work(struct work_struct *work) } ravb_ring_free(ndev, RAVB_BE); - ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); /* Device init */ error = ravb_dmac_init(ndev); @@ -1543,7 +1909,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) out: /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1553,6 +1919,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; unsigned int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; @@ -1629,28 +1996,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc->dptr = cpu_to_le32(dma_addr); /* TX timestamp required */ - if (q == RAVB_NC) { - ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); - if (!ts_skb) { - if (num_tx_desc > 1) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, - len, DMA_TO_DEVICE); + if (info->gptp || info->ccc_gac) { + if (q == RAVB_NC) { + ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); + if (!ts_skb) { + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } + goto unmap; } - goto unmap; + ts_skb->skb = skb_get(skb); + ts_skb->tag = priv->ts_skb_tag++; + priv->ts_skb_tag &= 0x3ff; + list_add_tail(&ts_skb->list, &priv->ts_skb_list); + + /* TAG and timestamp required flag */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); } - ts_skb->skb = skb_get(skb); - ts_skb->tag = priv->ts_skb_tag++; - priv->ts_skb_tag &= 0x3ff; - list_add_tail(&ts_skb->list, &priv->ts_skb_list); - /* TAG and timestamp required flag */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); + skb_tx_timestamp(skb); } - - skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); if (num_tx_desc > 1) { @@ -1698,28 +2067,45 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev) nstats = &ndev->stats; stats0 = &priv->stats[RAVB_BE]; - stats1 = &priv->stats[RAVB_NC]; if (info->tx_counters) { nstats->tx_dropped += ravb_read(ndev, TROCR); ravb_write(ndev, 0, TROCR); /* (write clear) */ } - nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; - nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; - nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes; - nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes; - nstats->multicast = stats0->multicast + stats1->multicast; - nstats->rx_errors = stats0->rx_errors + stats1->rx_errors; - nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors; - nstats->rx_frame_errors = - stats0->rx_frame_errors + stats1->rx_frame_errors; - nstats->rx_length_errors = - stats0->rx_length_errors + stats1->rx_length_errors; - nstats->rx_missed_errors = - stats0->rx_missed_errors + stats1->rx_missed_errors; - nstats->rx_over_errors = - stats0->rx_over_errors + stats1->rx_over_errors; + if (info->carrier_counters) { + nstats->collisions += ravb_read(ndev, CXR41); + ravb_write(ndev, 0, CXR41); /* (write clear) */ + nstats->tx_carrier_errors += ravb_read(ndev, CXR42); + ravb_write(ndev, 0, 
CXR42); /* (write clear) */ + } + + nstats->rx_packets = stats0->rx_packets; + nstats->tx_packets = stats0->tx_packets; + nstats->rx_bytes = stats0->rx_bytes; + nstats->tx_bytes = stats0->tx_bytes; + nstats->multicast = stats0->multicast; + nstats->rx_errors = stats0->rx_errors; + nstats->rx_crc_errors = stats0->rx_crc_errors; + nstats->rx_frame_errors = stats0->rx_frame_errors; + nstats->rx_length_errors = stats0->rx_length_errors; + nstats->rx_missed_errors = stats0->rx_missed_errors; + nstats->rx_over_errors = stats0->rx_over_errors; + if (info->nc_queues) { + stats1 = &priv->stats[RAVB_NC]; + + nstats->rx_packets += stats1->rx_packets; + nstats->tx_packets += stats1->tx_packets; + nstats->rx_bytes += stats1->rx_bytes; + nstats->tx_bytes += stats1->tx_bytes; + nstats->multicast += stats1->multicast; + nstats->rx_errors += stats1->rx_errors; + nstats->rx_crc_errors += stats1->rx_crc_errors; + nstats->rx_frame_errors += stats1->rx_frame_errors; + nstats->rx_length_errors += stats1->rx_length_errors; + nstats->rx_missed_errors += stats1->rx_missed_errors; + nstats->rx_over_errors += stats1->rx_over_errors; + } return nstats; } @@ -1752,7 +2138,7 @@ static int ravb_close(struct net_device *ndev) ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ @@ -1761,10 +2147,12 @@ static int ravb_close(struct net_device *ndev) "device will be stopped after h/w processes are done.\n"); /* Clear the timestamp list */ - list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { - list_del(&ts_skb->list); - kfree_skb(ts_skb->skb); - kfree(ts_skb); + if (info->gptp || info->ccc_gac) { + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { + list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); + kfree(ts_skb); + } } /* PHY disconnect */ @@ -1784,12 +2172,14 @@ static int ravb_close(struct net_device *ndev) } free_irq(ndev->irq, ndev); - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); /* Free all the skb's in the RX queue and the DMA buffers. 
*/ ravb_ring_free(ndev, RAVB_BE); - ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); return 0; } @@ -1918,8 +2308,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable) spin_unlock_irqrestore(&priv->lock, flags); } -static int ravb_set_features_rx_csum(struct net_device *ndev, - netdev_features_t features) +static int ravb_set_features_gbeth(struct net_device *ndev, + netdev_features_t features) +{ + /* Place holder */ + return 0; +} + +static int ravb_set_features_rcar(struct net_device *ndev, + netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; @@ -1937,7 +2334,7 @@ static int ravb_set_features(struct net_device *ndev, struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - return info->set_rx_csum_feature(ndev, features); + return info->set_feature(ndev, features); } static const struct net_device_ops ravb_netdev_ops = { @@ -2001,43 +2398,72 @@ static int ravb_mdio_release(struct ravb_private *priv) } static const struct ravb_hw_info ravb_gen3_hw_info = { - .rx_ring_free = ravb_rx_ring_free, - .rx_ring_format = ravb_rx_ring_format, - .alloc_rx_desc = ravb_alloc_rx_desc, - .receive = ravb_rcar_rx, - .set_rate = ravb_set_rate, - .set_rx_csum_feature = ravb_set_features_rx_csum, - .dmac_init = ravb_rcar_dmac_init, - .emac_init = ravb_rcar_emac_init, + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, .gstrings_stats = ravb_gstrings_stats, .gstrings_size = sizeof(ravb_gstrings_stats), .net_hw_features = NETIF_F_RXCSUM, .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, - .ptp_cfg_active = 1, + .ccc_gac = 1, + .nc_queues = 1, + .magic_pkt = 1, }; static const struct ravb_hw_info ravb_gen2_hw_info = { - .rx_ring_free = ravb_rx_ring_free, - .rx_ring_format = ravb_rx_ring_format, - .alloc_rx_desc = ravb_alloc_rx_desc, - .receive = ravb_rcar_rx, - .set_rate = ravb_set_rate, - .set_rx_csum_feature = ravb_set_features_rx_csum, - .dmac_init = ravb_rcar_dmac_init, - .emac_init = ravb_rcar_emac_init, + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, .gstrings_stats = ravb_gstrings_stats, .gstrings_size = sizeof(ravb_gstrings_stats), .net_hw_features = NETIF_F_RXCSUM, .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, .aligned_tx = 1, - .no_ptp_cfg_active = 1, + .gptp = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info gbeth_hw_info = { + .rx_ring_free = ravb_rx_ring_free_gbeth, + .rx_ring_format = ravb_rx_ring_format_gbeth, + .alloc_rx_desc = ravb_alloc_rx_desc_gbeth, + .receive = ravb_rx_gbeth, + .set_rate = ravb_set_rate_gbeth, + .set_feature = ravb_set_features_gbeth, 
+ .dmac_init = ravb_dmac_init_gbeth, + .emac_init = ravb_emac_init_gbeth, + .gstrings_stats = ravb_gstrings_stats_gbeth, + .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), + .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), + .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), + .tccr_mask = TCCR_TSRQ0, + .rx_max_buf_size = SZ_8K, + .aligned_tx = 1, + .tx_counters = 1, + .carrier_counters = 1, + .half_duplex = 1, }; static const struct of_device_id ravb_match_table[] = { @@ -2046,6 +2472,7 @@ static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); @@ -2080,13 +2507,15 @@ static void ravb_set_config_mode(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - if (info->no_ptp_cfg_active) { + if (info->gptp) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); - } else { + } else if (info->ccc_gac) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + } else { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); } } @@ -2192,8 +2621,11 @@ static int ravb_probe(struct platform_device *pdev) priv->pdev = pdev; priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; - priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; - priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + if (info->nc_queues) { + priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; + priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + } + priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->addr)) { error = PTR_ERR(priv->addr); @@ -2252,7 +2684,7 @@ static int ravb_probe(struct platform_device *pdev) } clk_prepare_enable(priv->refclk); - ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer @@ -2269,13 +2701,15 @@ static int ravb_probe(struct platform_device *pdev) /* Set AVB config mode */ ravb_set_config_mode(ndev); - /* Set GTI value */ - error = ravb_set_gti(ndev); - if (error) - goto out_disable_refclk; + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + error = ravb_set_gti(ndev); + if (error) + goto out_disable_refclk; - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } if (info->internal_delay) { ravb_parse_delay_mode(np, ndev); @@ -2301,7 +2735,7 @@ static int ravb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&priv->ts_skb_list); /* Initialise PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_init(ndev, pdev); /* Debug message level */ @@ -2323,7 +2757,8 @@ static int ravb_probe(struct platform_device *pdev) } netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); - netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); + if (info->nc_queues) + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); /* Network device register */ error = register_netdev(ndev); @@ -2341,7 +2776,9 @@ static int ravb_probe(struct platform_device *pdev) return 0; 
out_napi_del: - netif_napi_del(&priv->napi[RAVB_NC]); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); out_dma_free: @@ -2349,7 +2786,7 @@ static int ravb_probe(struct platform_device *pdev) priv->desc_bat_dma); /* Stop PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_stop(ndev); out_disable_refclk: clk_disable_unprepare(priv->refclk); @@ -2369,7 +2806,7 @@ static int ravb_remove(struct platform_device *pdev) const struct ravb_hw_info *info = priv->info; /* Stop PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_stop(ndev); clk_disable_unprepare(priv->refclk); @@ -2380,7 +2817,8 @@ static int ravb_remove(struct platform_device *pdev) ravb_write(ndev, CCC_OPC_RESET, CCC); pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); - netif_napi_del(&priv->napi[RAVB_NC]); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); @@ -2394,6 +2832,7 @@ static int ravb_remove(struct platform_device *pdev) static int ravb_wol_setup(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Disable interrupts by clearing the interrupt masks. */ ravb_write(ndev, 0, RIC0); @@ -2402,7 +2841,8 @@ static int ravb_wol_setup(struct net_device *ndev) /* Only allow ECI interrupts */ synchronize_irq(priv->emac_irq); - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); @@ -2415,9 +2855,11 @@ static int ravb_wol_setup(struct net_device *ndev) static int ravb_wol_restore(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int ret; - napi_enable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); napi_enable(&priv->napi[RAVB_BE]); /* Disable MagicPacket */ @@ -2468,13 +2910,15 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Set AVB config mode */ ravb_set_config_mode(ndev); - /* Set GTI value */ - ret = ravb_set_gti(ndev); - if (ret) - return ret; + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + ret = ravb_set_gti(ndev); + if (ret) + return ret; - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } if (info->internal_delay) ravb_set_delay_mode(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 1374faa229a276..a3fbb2221c9afc 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1153,17 +1153,19 @@ static void update_mac_address(struct net_device *ndev) static void read_mac_address(struct net_device *ndev, unsigned char *mac) { if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { - memcpy(ndev->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(ndev, mac); } else { u32 mahr = sh_eth_read(ndev, MAHR); u32 malr = sh_eth_read(ndev, MALR); - - ndev->dev_addr[0] = (mahr >> 24) & 0xFF; - ndev->dev_addr[1] = (mahr >> 16) & 0xFF; - ndev->dev_addr[2] = (mahr >> 8) & 0xFF; - ndev->dev_addr[3] = (mahr >> 0) & 0xFF; - ndev->dev_addr[4] = (malr >> 8) & 0xFF; - ndev->dev_addr[5] = (malr >> 0) & 0xFF; + u8 addr[ETH_ALEN]; + + addr[0] = (mahr >> 24) & 0xFF; + addr[1] = (mahr >> 16) & 0xFF; + 
addr[2] = (mahr >> 8) & 0xFF; + addr[3] = (mahr >> 0) & 0xFF; + addr[4] = (malr >> 8) & 0xFF; + addr[5] = (malr >> 0) & 0xFF; + eth_hw_addr_set(ndev, addr); } } diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 3364b6a56bd1eb..ba4062881eed17 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1954,7 +1954,7 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); if (err) return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -2545,11 +2545,13 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) { const struct rocker *rocker = rocker_port->rocker; const struct pci_dev *pdev = rocker->pdev; + u8 addr[ETH_ALEN]; int err; - err = rocker_cmd_get_port_settings_macaddr(rocker_port, - rocker_port->dev->dev_addr); - if (err) { + err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr); + if (!err) { + eth_hw_addr_set(rocker_port->dev, addr); + } else { dev_warn(&pdev->dev, "failed to get mac address, using random\n"); eth_hw_addr_random(rocker_port->dev); } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h index 049dc6cf461167..0f45107db8dda3 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -329,7 +329,7 @@ struct sxgbe_core_ops { /* Set power management mode (e.g. magic frame) */ void (*pmt)(void __iomem *ioaddr, unsigned long mode); /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + void (*set_umac_addr)(void __iomem *ioaddr, const unsigned char *addr, unsigned int reg_n); void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr, unsigned int reg_n); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c index e96e2bd295efbd..7d9f257de92a89 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c @@ -85,7 +85,8 @@ static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode) } /* Set/Get Unicast MAC addresses */ -static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, +static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, + const unsigned char *addr, unsigned int reg_n) { u32 high_word, low_word; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 6781aa636d588a..32161a56726c1a 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -931,10 +931,13 @@ static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv) static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv) { if (!is_valid_ether_addr(priv->dev->dev_addr)) { + u8 addr[ETH_ALEN]; + priv->hw->mac->get_umac_addr((void __iomem *) - priv->ioaddr, - priv->dev->dev_addr, 0); - if (!is_valid_ether_addr(priv->dev->dev_addr)) + priv->ioaddr, addr, 0); + if (is_valid_ether_addr(addr)) + eth_hw_addr_set(priv->dev, addr); + else eth_hw_addr_random(priv->dev); } dev_info(priv->device, "device MAC address %pM\n", diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 4639ed9438a3f4..92653246669110 100644 --- 
a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -118,7 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } /* Get MAC address if available (DT) */ - of_get_mac_address(node, priv->dev->dev_addr); + of_get_ethdev_address(node, priv->dev); /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 37ff25a84030eb..96065dfc747bb7 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -167,7 +167,7 @@ static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) struct sgiseeq_private *sp = netdev_priv(dev); struct sockaddr *sa = addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&sp->tx_lock); __sgiseeq_set_mac_address(dev); @@ -764,7 +764,7 @@ static int sgiseeq_probe(struct platform_device *pdev) setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); - memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac); #ifdef DEBUG gpriv = sp; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e7e2223aebbf51..cf366ed2557cc5 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1038,7 +1038,7 @@ int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) } int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) + unsigned int port_id, const u8 *mac) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); @@ -1050,7 +1050,7 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx, } int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) + unsigned int port_id, const u8 *mac) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 518268ce206445..6aa81229b68a9e 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1250,7 +1250,7 @@ int ef100_probe_pf(struct efx_nic *efx) if (rc) goto fail; /* Assign MAC address */ - memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN); + eth_hw_addr_set(net_dev, net_dev->perm_addr); memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN); return 0; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 752d6406f07ed0..7f5aa4a8c45122 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -480,7 +480,7 @@ static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id, return rc; } -int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) { struct efx_ef10_nic_data *nic_data = efx->nic_data; struct ef10_vf *vf; @@ -523,7 +523,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) goto fail; if (vf->efx) - ether_addr_copy(vf->efx->net_dev->dev_addr, mac); + eth_hw_addr_set(vf->efx->net_dev, mac); } ether_addr_copy(vf->mac, mac); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index cfe556d17313f6..3c703ca878b0a1 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -39,7 +39,7 @@ static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} 
void efx_ef10_sriov_fini(struct efx_nic *efx); static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {} -int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); @@ -60,9 +60,9 @@ int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac); + unsigned int port_id, const u8 *mac); int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac); + unsigned int port_id, const u8 *mac); int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, u32 *port_flags, u32 *vadaptor_flags, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 43ef4f52902817..6960a2fe2b53d8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -136,7 +136,7 @@ static int efx_probe_port(struct efx_nic *efx) return rc; /* Initialise MAC address to permanent address */ - ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); return 0; } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 896b5925319722..f187631b2c5cac 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -181,11 +181,11 @@ int efx_set_mac_address(struct net_device *net_dev, void *data) /* save old address */ ether_addr_copy(old_addr, net_dev->dev_addr); - ether_addr_copy(net_dev->dev_addr, new_addr); + eth_hw_addr_set(net_dev, new_addr); if (efx->type->set_mac_address) { rc = efx->type->set_mac_address(efx); if (rc) { - ether_addr_copy(net_dev->dev_addr, old_addr); + eth_hw_addr_set(net_dev, old_addr); return rc; } } diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index bf1443539a1a45..bd552c7dffcb1b 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -563,20 +563,14 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_link_state *link_state = &efx->link_state; - u32 supported; mutex_lock(&efx->mac_lock); efx_mcdi_phy_get_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); /* Both MACs support pause frames (bidirectional and respond-only) */ - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - - supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); if (LOOPBACK_INTERNAL(efx)) { cmd->base.speed = link_state->speed; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 423bdf81200fd7..c68837a951f47e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -1044,7 +1044,7 @@ static int ef4_probe_port(struct ef4_nic *efx) return rc; /* Initialise MAC address to permanent address */ - ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); + 
eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); return 0; } @@ -2162,11 +2162,11 @@ static int ef4_set_mac_address(struct net_device *net_dev, void *data) /* save old address */ ether_addr_copy(old_addr, net_dev->dev_addr); - ether_addr_copy(net_dev->dev_addr, new_addr); + eth_hw_addr_set(net_dev, new_addr); if (efx->type->set_mac_address) { rc = efx->type->set_mac_address(efx); if (rc) { - ether_addr_copy(net_dev->dev_addr, old_addr); + eth_hw_addr_set(net_dev, old_addr); return rc; } } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f6981810039d06..cc15ee8812d9eb 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1440,7 +1440,7 @@ struct efx_nic_type { bool (*sriov_wanted)(struct efx_nic *efx); void (*sriov_reset)(struct efx_nic *efx); void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); - int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 441e7f3e537515..f12851a527d9f7 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1591,7 +1591,7 @@ void efx_fini_sriov(void) destroy_workqueue(vfdi_workqueue); } -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) { struct siena_nic_data *nic_data = efx->nic_data; struct siena_vf *vf; diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h index e441c89c25ce5b..e548c4daf18971 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.h +++ b/drivers/net/ethernet/sfc/siena_sriov.h @@ -46,7 +46,7 @@ bool efx_siena_sriov_wanted(struct efx_nic *efx); void efx_siena_sriov_reset(struct efx_nic *efx); void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, u16 vlan, u8 qos); int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 062f7844c496bf..e2d009866a7b34 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -243,7 +243,7 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr) struct ioc3_private *ip = netdev_priv(dev); struct sockaddr *sa = addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&ip->ioc3_lock); __ioc3_set_mac_address(dev); @@ -920,7 +920,7 @@ static int ioc3eth_probe(struct platform_device *pdev) ioc3_mii_start(ip); ioc3_ssram_disc(ip); - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, mac_addr); /* The IOC3-specific entries in the device structure. 
*/ dev->watchdog_timeo = 5 * HZ; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index efce834d8ee673..6d850ea2b94c23 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -836,7 +836,7 @@ static int meth_probe(struct platform_device *pdev) dev->watchdog_timeo = timeout; dev->irq = MACE_ETHERNET_IRQ; dev->base_addr = (unsigned long)&mace->eth; - memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); + eth_hw_addr_set(dev, o2meth_eaddr); priv = netdev_priv(dev); priv->pdev = pdev; diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 1fd08a04bd4ec1..ff4197f5e46dd2 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1400,6 +1400,7 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem* port_base; struct net_device *dev; struct sc92031_priv *priv; + u8 addr[ETH_ALEN]; u32 mac0, mac1; err = pci_enable_device(pdev); @@ -1458,12 +1459,13 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id) mac0 = ioread32(port_base + MAC0); mac1 = ioread32(port_base + MAC0 + 4); - dev->dev_addr[0] = mac0 >> 24; - dev->dev_addr[1] = mac0 >> 16; - dev->dev_addr[2] = mac0 >> 8; - dev->dev_addr[3] = mac0; - dev->dev_addr[4] = mac1 >> 8; - dev->dev_addr[5] = mac1; + addr[0] = mac0 >> 24; + addr[1] = mac0 >> 16; + addr[2] = mac0 >> 8; + addr[3] = mac0; + addr[4] = mac1 >> 8; + addr[5] = mac1; + eth_hw_addr_set(dev, addr); err = register_netdev(dev); if (err < 0) diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 3d1a18a01ce505..216bb2d34d7c8f 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev) /* * Rx and Tx descriptors need 256 bytes alignment. - * pci_alloc_consistent() guarantees a stronger alignment. + * dma_alloc_coherent() guarantees a stronger alignment. 
*/ tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES, &tp->tx_dma, GFP_KERNEL); @@ -1586,6 +1586,7 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + __le16 addr[ETH_ALEN / 2]; u16 sig; int i; @@ -1606,8 +1607,9 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, for (i = 0; i < ETH_ALEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); + addr[i] = cpu_to_le16(w); } + eth_hw_addr_set(dev, (u8 *)addr); sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); @@ -1629,6 +1631,7 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev, static const u16 ids[] = { 0x0965, 0x0966, 0x0968 }; struct sis190_private *tp = netdev_priv(dev); struct pci_dev *isa_bridge; + u8 addr[ETH_ALEN]; u8 reg, tmp8; unsigned int i; @@ -1657,8 +1660,9 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev, for (i = 0; i < ETH_ALEN; i++) { outb(0x9 + i, 0x78); - dev->dev_addr[i] = inb(0x79); + addr[i] = inb(0x79); } + eth_hw_addr_set(dev, addr); outb(0x12, 0x78); reg = inb(0x79); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 60a0c0e9ded22f..cc2d907c4c4bc9 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -258,6 +258,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; u16 signature; int i; @@ -271,7 +272,8 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) - ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); + addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); + eth_hw_addr_set(net_dev, (u8 *)addr); return 1; } @@ -290,6 +292,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev, struct net_device *net_dev) { struct pci_dev *isa_bridge = NULL; + u8 addr[ETH_ALEN]; u8 reg; int i; @@ -306,8 +309,9 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev, for (i = 0; i < 6; i++) { outb(0x09 + i, 0x70); - ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); + addr[i] = inb(0x71); } + eth_hw_addr_set(net_dev, addr); pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); pci_dev_put(isa_bridge); @@ -331,6 +335,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; u32 rfcrSave; u32 i; @@ -345,8 +350,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { sw32(rfcr, (i << RFADDR_shift)); - *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr); + addr[i] = sr16(rfdr); } + eth_hw_addr_set(net_dev, (u8 *)addr); /* enable packet filtering */ sw32(rfcr, rfcrSave | RFEN); @@ -375,17 +381,18 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; int wait, rc = 0; sw32(mear, EEREQ); for (wait = 0; wait < 2000; wait++) { if (sr32(mear) & EEGNT) { - u16 *mac = (u16 *)net_dev->dev_addr; int i; /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) - mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); + addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); + eth_hw_addr_set(net_dev, (u8 *)addr); rc = 1; 
break; @@ -1098,7 +1105,7 @@ sis900_init_rxfilter (struct net_device * net_dev) /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { - u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i); + u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i); sw32(rfcr, i << RFADDR_shift); sw32(rfdr, w); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 44daf79a8f9725..a0654e88444cff 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -325,6 +325,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *dev; struct epic_private *ep; int i, ret, option = 0, duplex = 0; + __le16 addr[ETH_ALEN / 2]; void *ring_space; dma_addr_t ring_dma; @@ -416,7 +417,8 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Note: the '175 does not have a serial EEPROM. */ for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4)); + addr[i] = cpu_to_le16(er16(LAN0 + i*4)); + eth_hw_addr_set(dev, (u8 *)addr); if (debug > 2) { dev_dbg(&pdev->dev, "EEPROM contents:\n"); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index b008b4e8a2a5ab..89381f79698552 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1788,6 +1788,7 @@ static int smc911x_probe(struct net_device *dev) struct dma_slave_config config; dma_cap_mask_t mask; #endif + u8 addr[ETH_ALEN]; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); @@ -1892,7 +1893,8 @@ static int smc911x_probe(struct net_device *dev) spin_lock_init(&lp->lock); /* Get the MAC address */ - SMC_GET_MAC_ADDR(lp, dev->dev_addr); + SMC_GET_MAC_ADDR(lp, addr); + eth_hw_addr_set(dev, addr); /* now, reset the chip, and put it into a known state */ smc911x_reset(dev); diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 42fc37c7887a9b..37c822e27207a0 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -347,6 +347,7 @@ static void smc91c92_detach(struct pcmcia_device *link) static int cvt_ascii_address(struct net_device *dev, char *s) { + u8 mac[ETH_ALEN]; int i, j, da, c; if (strlen(s) != 12) @@ -359,8 +360,9 @@ static int cvt_ascii_address(struct net_device *dev, char *s) da += ((c >= '0') && (c <= '9')) ? 
(c - '0') : ((c & 0x0f) + 9); } - dev->dev_addr[i] = da; + mac[i] = da; } + eth_hw_addr_set(dev, mac); return 0; } @@ -539,6 +541,7 @@ static int mot_setup(struct pcmcia_device *link) struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; int i, wait, loop; + u8 mac[ETH_ALEN]; u_int addr; /* Read Ethernet address from Serial EEPROM */ @@ -559,9 +562,10 @@ static int mot_setup(struct pcmcia_device *link) return -1; addr = inw(ioaddr + GENERAL); - dev->dev_addr[2*i] = addr & 0xff; - dev->dev_addr[2*i+1] = (addr >> 8) & 0xff; + mac[2*i] = addr & 0xff; + mac[2*i+1] = (addr >> 8) & 0xff; } + eth_hw_addr_set(dev, mac); return 0; } @@ -666,14 +670,13 @@ static int pcmcia_osi_mac(struct pcmcia_device *p_dev, void *priv) { struct net_device *dev = priv; - int i; if (tuple->TupleDataLen < 8) return -EINVAL; if (tuple->TupleData[0] != 0x04) return -EINVAL; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = tuple->TupleData[i+2]; + + eth_hw_addr_set(dev, &tuple->TupleData[2]); return 0; }; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 813ea941b91a35..a31c159e96eaa4 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1851,6 +1851,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, int retval; unsigned int val, revision_register; const char *version_string; + u8 addr[ETH_ALEN]; DBG(2, dev, "%s: %s\n", CARDNAME, __func__); @@ -1922,7 +1923,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, /* Get the MAC address */ SMC_SELECT_BANK(lp, 1); - SMC_GET_MAC_ADDR(lp, dev->dev_addr); + SMC_GET_MAC_ADDR(lp, addr); + eth_hw_addr_set(dev, addr); /* now, reset the chip, and put it into a known state */ smc_reset(dev); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 199a9733928064..7a50ba00f8ae30 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1503,7 +1503,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) /* Sets the device MAC address to dev_addr, called with mac_lock held */ static void -smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) +smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, const u8 dev_addr[6]) { u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | @@ -1939,7 +1939,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&pdata->mac_lock); smsc911x_set_hw_mac_address(pdata, dev->dev_addr); @@ -2162,13 +2162,15 @@ static void smsc911x_read_mac_address(struct net_device *dev) struct smsc911x_data *pdata = netdev_priv(dev); u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + u8 addr[ETH_ALEN]; - dev->dev_addr[0] = (u8)(mac_low32); - dev->dev_addr[1] = (u8)(mac_low32 >> 8); - dev->dev_addr[2] = (u8)(mac_low32 >> 16); - dev->dev_addr[3] = (u8)(mac_low32 >> 24); - dev->dev_addr[4] = (u8)(mac_high16); - dev->dev_addr[5] = (u8)(mac_high16 >> 8); + addr[0] = (u8)(mac_low32); + addr[1] = (u8)(mac_low32 >> 8); + addr[2] = (u8)(mac_low32 >> 16); + addr[3] = (u8)(mac_low32 >> 24); + addr[4] = (u8)(mac_high16); + addr[5] = (u8)(mac_high16 >> 8); + eth_hw_addr_set(dev, addr); } /* Initializing private device structures, only called from probe */ @@ 
-2375,7 +2377,7 @@ static int smsc911x_probe_config(struct smsc911x_platform_config *config, phy_interface = PHY_INTERFACE_MODE_NA; config->phy_interface = phy_interface; - device_get_mac_address(dev, config->mac, ETH_ALEN); + device_get_mac_address(dev, config->mac); err = device_property_read_u32(dev, "reg-io-width", &width); if (err == -ENXIO) @@ -2525,7 +2527,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) SMSC_TRACE(pdata, probe, "MAC Address is specified by configuration"); } else if (is_valid_ether_addr(pdata->config.mac)) { - memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN); + eth_hw_addr_set(dev, pdata->config.mac); SMSC_TRACE(pdata, probe, "MAC Address specified by platform data"); } else { diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index fdbd2a43e26754..d937af18973e8f 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -404,7 +404,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = { static void smsc9420_set_mac_address(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); - u8 *dev_addr = dev->dev_addr; + const u8 *dev_addr = dev->dev_addr; u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; @@ -416,6 +416,7 @@ static void smsc9420_set_mac_address(struct net_device *dev) static void smsc9420_check_mac_address(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); + u8 addr[ETH_ALEN]; /* Check if mac address has been specified when bringing interface up */ if (is_valid_ether_addr(dev->dev_addr)) { @@ -427,15 +428,16 @@ static void smsc9420_check_mac_address(struct net_device *dev) * it will already have been set */ u32 mac_high16 = smsc9420_reg_read(pd, ADDRH); u32 mac_low32 = smsc9420_reg_read(pd, ADDRL); - dev->dev_addr[0] = (u8)(mac_low32); - dev->dev_addr[1] = (u8)(mac_low32 >> 8); - dev->dev_addr[2] = (u8)(mac_low32 >> 16); - dev->dev_addr[3] = (u8)(mac_low32 >> 24); - dev->dev_addr[4] = (u8)(mac_high16); - dev->dev_addr[5] = (u8)(mac_high16 >> 8); - - if (is_valid_ether_addr(dev->dev_addr)) { + addr[0] = (u8)(mac_low32); + addr[1] = (u8)(mac_low32 >> 8); + addr[2] = (u8)(mac_low32 >> 16); + addr[3] = (u8)(mac_low32 >> 24); + addr[4] = (u8)(mac_high16); + addr[5] = (u8)(mac_high16 >> 8); + + if (is_valid_ether_addr(addr)) { /* eeprom values are valid so use them */ + eth_hw_addr_set(dev, addr); netif_dbg(pd, probe, pd->dev, "Mac Address is read from EEPROM\n"); } else { @@ -788,7 +790,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) PKT_BUF_SZ, DMA_FROM_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { dev_kfree_skb_any(skb); - netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); + netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n"); return -ENOMEM; } @@ -940,7 +942,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, DMA_TO_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { netif_warn(pd, tx_err, pd->dev, - "pci_map_single failed, dropping packet\n"); + "dma_map_single failed, dropping packet\n"); return NETDEV_TX_BUSY; } @@ -1551,7 +1553,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!pd->rx_ring) goto out_free_io_4; - /* descriptors are aligned due to the nature of pci_alloc_consistent */ + /* descriptors are aligned due to the nature of dma_alloc_coherent */ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); pd->tx_dma_addr = 
pd->rx_dma_addr + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 1f46af136aa8cd..de7d8bf2c226fa 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1860,10 +1860,9 @@ static int netsec_of_probe(struct platform_device *pdev, *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ - if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "phy_ref_clk not found\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), + "phy_ref_clk not found\n"); priv->freq = clk_get_rate(priv->clk); return 0; @@ -1886,19 +1885,17 @@ static int netsec_acpi_probe(struct platform_device *pdev, priv->phy_interface = PHY_INTERFACE_MODE_NA; ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); - if (ret) { - dev_err(&pdev->dev, - "missing required property 'phy-channel'\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "missing required property 'phy-channel'\n"); ret = device_property_read_u32(&pdev->dev, "socionext,phy-clock-frequency", &priv->freq); if (ret) - dev_err(&pdev->dev, - "missing required property 'socionext,phy-clock-frequency'\n"); - return ret; + return dev_err_probe(&pdev->dev, ret, + "missing required property 'socionext,phy-clock-frequency'\n"); + return 0; } static void netsec_unregister_mdio(struct netsec_priv *priv) @@ -1981,7 +1978,6 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) static int netsec_probe(struct platform_device *pdev) { struct resource *mmio_res, *eeprom_res, *irq_res; - u8 *mac, macbuf[ETH_ALEN]; struct netsec_priv *priv; u32 hw_ver, phy_addr = 0; struct net_device *ndev; @@ -2037,21 +2033,19 @@ static int netsec_probe(struct platform_device *pdev) goto free_ndev; } - mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf)); - if (mac) - ether_addr_copy(ndev->dev_addr, mac); - - if (priv->eeprom_base && - (!mac || !is_valid_ether_addr(ndev->dev_addr))) { + ret = device_get_ethdev_address(&pdev->dev, ndev); + if (ret && priv->eeprom_base) { void __iomem *macp = priv->eeprom_base + NETSEC_EEPROM_MAC_ADDRESS; - - ndev->dev_addr[0] = readb(macp + 3); - ndev->dev_addr[1] = readb(macp + 2); - ndev->dev_addr[2] = readb(macp + 1); - ndev->dev_addr[3] = readb(macp + 0); - ndev->dev_addr[4] = readb(macp + 7); - ndev->dev_addr[5] = readb(macp + 6); + u8 addr[ETH_ALEN]; + + addr[0] = readb(macp + 3); + addr[1] = readb(macp + 2); + addr[2] = readb(macp + 1); + addr[3] = readb(macp + 0); + addr[4] = readb(macp + 7); + addr[5] = readb(macp + 6); + eth_hw_addr_set(ndev, addr); } if (!is_valid_ether_addr(ndev->dev_addr)) { diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index ae31ed93aaf02d..2c48f8b8ab714e 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev) ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); - ret = of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { /* if the mac address is invalid, use random mac address */ eth_hw_addr_random(ndev); @@ -1935,6 +1935,17 @@ static const struct ave_soc_data ave_pxs3_data = { .get_pinmode = ave_pxs3_get_pinmode, }; +static const struct ave_soc_data ave_nx1_data = { + 
.is_desc_64bit = true, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pxs3_get_pinmode, +}; + static const struct of_device_id of_ave_match[] = { { .compatible = "socionext,uniphier-pro4-ave4", @@ -1956,6 +1967,10 @@ static const struct of_device_id of_ave_match[] = { .compatible = "socionext,uniphier-pxs3-ave4", .data = &ave_pxs3_data, }, + { + .compatible = "socionext,uniphier-nx1-ave4", + .data = &ave_nx1_data, + }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_ave_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b6d945ea903d44..9160f9ed363a2c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -546,13 +546,13 @@ int dwmac4_setup(struct stmmac_priv *priv); int dwxgmac2_setup(struct stmmac_priv *priv); int dwxlgmac2_setup(struct stmmac_priv *priv); -void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low); void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); void stmmac_set_mac(void __iomem *ioaddr, bool enable); -void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low); void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4422baeed3d89b..617d0e4c649583 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -634,7 +634,7 @@ static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable) * If addr is NULL, clear the slot */ static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d046e33b8a2976..66fc8be34bb717 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -171,10 +171,9 @@ static int visconti_eth_clock_probe(struct platform_device *pdev, int err; dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(dwmac->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - return PTR_ERR(dwmac->phy_ref_clk); - } + if (IS_ERR(dwmac->phy_ref_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk), + "phy_ref_clk clock not found.\n"); err = clk_prepare_enable(dwmac->phy_ref_clk); if (err < 0) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fc8759f146c7c1..76edb9b726756e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -104,7 +104,7 @@ static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) } static void dwmac1000_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index ebcad8dd99dbbf..75071a7d551a82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -68,7 +68,7 @@ static int dwmac100_irq_status(struct mac_device_info *hw, } static void dwmac100_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index b2174536898393..fd41db65fe1df4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -322,7 +322,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) } static void dwmac4_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, unsigned int reg_n) + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9292a1fab7d320..d1c605777985fb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -187,7 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, return ret; } -void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low) { unsigned long data; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index d1c31200bb9111..caa4bfc4c1d62e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -239,7 +239,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr) do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); } -void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low) { unsigned long data; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index c4d78fa93663b5..c6c4d7948fe5f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -335,7 +335,8 @@ static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode) } static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, unsigned int reg_n) + const unsigned char *addr, + unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; u32 value; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index fe2660d5694d79..f7dc447f05a08f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -330,7 +330,8 @@ struct stmmac_ops { /* Set power management mode (e.g. 
magic frame) */ void (*pmt)(struct mac_device_info *hw, unsigned long mode); /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + void (*set_umac_addr)(struct mac_device_info *hw, + const unsigned char *addr, unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3d67d1fa36906a..d3f350c25b9b64 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2818,9 +2818,13 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) */ static void stmmac_check_ether_addr(struct stmmac_priv *priv) { + u8 addr[ETH_ALEN]; + if (!is_valid_ether_addr(priv->dev->dev_addr)) { - stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); - if (!is_valid_ether_addr(priv->dev->dev_addr)) + stmmac_get_umac_addr(priv, priv->hw, addr, 0); + if (is_valid_ether_addr(addr)) + eth_hw_addr_set(priv->dev, addr); + else eth_hw_addr_random(priv->dev); dev_info(priv->device, "device MAC address %pM\n", priv->dev->dev_addr); @@ -3510,6 +3514,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Rx MSI irq */ for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (i >= MTL_MAX_RX_QUEUES) + break; if (priv->rx_irq[i] == 0) continue; @@ -3533,6 +3539,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Tx MSI irq */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (i >= MTL_MAX_TX_QUEUES) + break; if (priv->tx_irq[i] == 0) continue; @@ -6815,7 +6823,7 @@ int stmmac_dvr_probe(struct device *device, priv->tx_irq[i] = res->tx_irq[i]; if (!is_zero_ether_addr(res->mac)) - memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); + eth_hw_addr_set(priv->dev, res->mac); dev_set_drvdata(device, priv->dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 0462dcc93e536e..be3cb63675a5e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -36,7 +36,7 @@ struct stmmac_packet_attrs { int vlan_id_in; int vlan_id_out; unsigned char *src; - unsigned char *dst; + const unsigned char *dst; u32 ip_src; u32 ip_dst; int tcp; @@ -249,8 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + const unsigned char *dst = tpriv->packet->dst; unsigned char *src = tpriv->packet->src; - unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -1104,13 +1104,13 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) goto cleanup_sel; } - actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL); + actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL); if (!actions) { ret = -ENOMEM; goto cleanup_exts; } - act = kzalloc(nk * sizeof(*act), GFP_KERNEL); + act = kcalloc(nk, sizeof(*act), GFP_KERNEL); if (!act) { ret = -ENOMEM; goto cleanup_actions; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 287ae4c538aaea..d2d4f47c7e2847 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -3027,7 +3027,7 @@ static void cas_mac_reset(struct cas *cp) /* Must be invoked under cp->lock. 
*/ static void cas_init_mac(struct cas *cp) { - unsigned char *e = &cp->dev->dev_addr[0]; + const unsigned char *e = &cp->dev->dev_addr[0]; int i; cas_mac_reset(cp); @@ -3379,6 +3379,7 @@ static void cas_check_pci_invariants(struct cas *cp) static int cas_check_invariants(struct cas *cp) { struct pci_dev *pdev = cp->pdev; + u8 addr[ETH_ALEN]; u32 cfg; int i; @@ -3407,8 +3408,8 @@ static int cas_check_invariants(struct cas *cp) /* finish phy determination. MDIO1 takes precedence over MDIO0 if * they're both connected. */ - cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, - PCI_SLOT(pdev->devfn)); + cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn)); + eth_hw_addr_set(cp->dev, addr); if (cp->phy_type & CAS_PHY_SERDES) { cp->cas_flags |= CAS_FLAG_1000MB_CAP; return 0; /* no more checking needed */ diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 50bd4e3b0af9d9..6b59b14e74b13e 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -230,7 +230,6 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], { struct net_device *dev; struct vnet_port *port; - int i; dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1); if (!dev) @@ -238,10 +237,8 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->needed_headroom = VNET_PACKET_SKIP + 8; dev->needed_tailroom = 8; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = hwaddr[i]; - dev->perm_addr[i] = dev->dev_addr[i]; - } + eth_hw_addr_set(dev, hwaddr); + ether_addr_copy(dev->perm_addr, dev->dev_addr); sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index a68a01d1b2b104..ba8ad76313a9c4 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -2603,7 +2603,7 @@ static int niu_init_link(struct niu *np) return 0; } -static void niu_set_primary_mac(struct niu *np, unsigned char *addr) +static void niu_set_primary_mac(struct niu *np, const unsigned char *addr) { u16 reg0 = addr[4] << 8 | addr[5]; u16 reg1 = addr[2] << 8 | addr[3]; @@ -6386,7 +6386,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) return 0; @@ -8312,6 +8312,7 @@ static void niu_pci_vpd_validate(struct niu *np) { struct net_device *dev = np->dev; struct niu_vpd *vpd = &np->vpd; + u8 addr[ETH_ALEN]; u8 val8; if (!is_valid_ether_addr(&vpd->local_mac[0])) { @@ -8344,17 +8345,20 @@ static void niu_pci_vpd_validate(struct niu *np) return; } - memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN); + ether_addr_copy(addr, vpd->local_mac); - val8 = dev->dev_addr[5]; - dev->dev_addr[5] += np->port; - if (dev->dev_addr[5] < val8) - dev->dev_addr[4]++; + val8 = addr[5]; + addr[5] += np->port; + if (addr[5] < val8) + addr[4]++; + + eth_hw_addr_set(dev, addr); } static int niu_pci_probe_sprom(struct niu *np) { struct net_device *dev = np->dev; + u8 addr[ETH_ALEN]; int len, i; u64 val, sum; u8 val8; @@ -8446,27 +8450,29 @@ static int niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_MAC_ADDR0); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); - dev->dev_addr[0] = (val >> 0) & 0xff; - dev->dev_addr[1] = (val >> 8) & 0xff; - dev->dev_addr[2] = (val >> 16) & 0xff; - dev->dev_addr[3] = (val >> 24) & 0xff; + addr[0] = (val >> 0) & 0xff; + addr[1] = 
(val >> 8) & 0xff; + addr[2] = (val >> 16) & 0xff; + addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); - dev->dev_addr[4] = (val >> 0) & 0xff; - dev->dev_addr[5] = (val >> 8) & 0xff; + addr[4] = (val >> 0) & 0xff; + addr[5] = (val >> 8) & 0xff; - if (!is_valid_ether_addr(&dev->dev_addr[0])) { + if (!is_valid_ether_addr(addr)) { dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", - dev->dev_addr); + addr); return -EINVAL; } - val8 = dev->dev_addr[5]; - dev->dev_addr[5] += np->port; - if (dev->dev_addr[5] < val8) - dev->dev_addr[4]++; + val8 = addr[5]; + addr[5] += np->port; + if (addr[5] < val8) + addr[4]++; + + eth_hw_addr_set(dev, addr); val = nr64(ESPC_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, @@ -9235,7 +9241,7 @@ static int niu_get_of_props(struct niu *np) netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", dp, prop_len); } - memcpy(dev->dev_addr, mac_addr, dev->addr_len); + eth_hw_addr_set(dev, mac_addr); if (!is_valid_ether_addr(&dev->dev_addr[0])) { netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index c646575e79d5c8..531a6f449afadd 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -623,7 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, bool non_blocking) void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; __u32 bblk_dvma = (__u32)bp->bblock_dvma; - unsigned char *e = &bp->dev->dev_addr[0]; + const unsigned char *e = &bp->dev->dev_addr[0]; /* Latch current counters into statistics. */ bigmac_get_counters(bp, bregs); @@ -1076,7 +1076,6 @@ static int bigmac_ether_init(struct platform_device *op, struct net_device *dev; u8 bsizes, bsizes_more; struct bigmac *bp; - int i; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); @@ -1086,8 +1085,7 @@ static int bigmac_ether_init(struct platform_device *op, if (version_printed++ == 0) printk(KERN_INFO "%s", version); - for (i = 0; i < 6; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. 
*/ bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d72018a60c0f7d..036856102c505b 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1810,7 +1810,7 @@ static u32 gem_setup_multicast(struct gem *gp) static void gem_init_mac(struct gem *gp) { - unsigned char *e = &gp->dev->dev_addr[0]; + const unsigned char *e = &gp->dev->dev_addr[0]; writel(0x1bf0, gp->regs + MAC_SNDPAUSE); @@ -2087,7 +2087,7 @@ static void gem_stop_phy(struct gem *gp, int wol) writel(mifcfg, gp->regs + MIF_CFG); if (wol && gp->has_wol) { - unsigned char *e = &gp->dev->dev_addr[0]; + const unsigned char *e = &gp->dev->dev_addr[0]; u32 csr; /* Setup wake-on-lan for MAGIC packet */ @@ -2431,13 +2431,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) static int gem_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *macaddr = (struct sockaddr *) addr; + const unsigned char *e = &dev->dev_addr[0]; struct gem *gp = netdev_priv(dev); - unsigned char *e = &dev->dev_addr[0]; if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, macaddr->sa_data); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) @@ -2797,9 +2797,12 @@ static int gem_get_device_address(struct gem *gp) return -1; #endif } - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); #else - get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); + u8 addr[ETH_ALEN]; + + get_gem_mac_nonobp(gp->pdev, addr); + eth_hw_addr_set(gp->dev, addr); #endif return 0; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 62f81b0d14ed8f..ad9029ae684851 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1395,13 +1395,13 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp, /* hp->happy_lock must be held */ static int happy_meal_init(struct happy_meal *hp) { + const unsigned char *e = &hp->dev->dev_addr[0]; void __iomem *gregs = hp->gregs; void __iomem *etxregs = hp->etxregs; void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; u32 regtmp, rxcfg; - unsigned char *e = &hp->dev->dev_addr[0]; /* If auto-negotiation timer is running, kill it. 
*/ del_timer(&hp->happy_timer); @@ -2661,6 +2661,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) struct happy_meal *hp; struct net_device *dev; int i, qfe_slot = -1; + u8 addr[ETH_ALEN]; int err = -ENODEV; sbus_dp = op->dev.parent->of_node; @@ -2698,7 +2699,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = macaddr[i]; + addr[i] = macaddr[i]; + eth_hw_addr_set(dev, addr); macaddr[5]++; } else { const unsigned char *addr; @@ -2707,9 +2709,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) addr = of_get_property(dp, "local-mac-address", &len); if (qfe_slot != -1 && addr && len == ETH_ALEN) - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); else - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); } hp = netdev_priv(dev); @@ -2969,6 +2971,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, unsigned long hpreg_res; int i, qfe_slot = -1; char prom_name[64]; + u8 addr[ETH_ALEN]; int err; /* Now make sure pci_dev cookie is there. */ @@ -3044,7 +3047,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = macaddr[i]; + addr[i] = macaddr[i]; + eth_hw_addr_set(dev, addr); macaddr[5]++; } else { #ifdef CONFIG_SPARC @@ -3055,12 +3059,15 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, (addr = of_get_property(dp, "local-mac-address", &len)) != NULL && len == 6) { - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); } else { - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); } #else - get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); + u8 addr[ETH_ALEN]; + + get_hme_mac_nonsparc(pdev, addr); + eth_hw_addr_set(dev, addr); #endif } diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 577cd9753d8e38..efe0d33f6024e2 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -144,7 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq) void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; - unsigned char *e = &qep->dev->dev_addr[0]; + const unsigned char *e = &qep->dev->dev_addr[0]; __u32 qblk_dvma = (__u32)qep->qblock_dvma; u32 tmp; int i; @@ -844,7 +844,7 @@ static int qec_ether_init(struct platform_device *op) if (!dev) return -ENOMEM; - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); qe = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 58ee89223951e1..da8119625cf334 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -285,6 +285,7 @@ static struct vnet *vnet_new(const u64 *local_mac, struct vio_dev *vdev) { struct net_device *dev; + u8 addr[ETH_ALEN]; struct vnet *vp; int err, i; @@ -295,7 +296,8 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->needed_tailroom = 8; for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; + addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; + eth_hw_addr_set(dev, addr); vp = netdev_priv(dev); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c index df26cea459048a..5c9b6c90942b64 100644 --- 
a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -78,7 +78,7 @@ static int xlgmac_init(struct xlgmac_pdata *pdata) netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->mac_regs; xlgmac_read_mac_addr(pdata); - memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, pdata->mac_addr); /* Set all the function pointers */ xlgmac_init_all_ops(pdata); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c index bf6c1c6779ff37..76eb7db80f133b 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c @@ -57,7 +57,7 @@ static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) return 0; } -static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) +static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 1db7104fef3aaf..d435519236e43e 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -798,7 +798,7 @@ static int xlgmac_set_mac_address(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, saddr->sa_data); hw_ops->set_mac_address(pdata, netdev->dev_addr); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h index 8598aaf3ec994f..98e3a271e017ae 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h @@ -410,7 +410,7 @@ struct xlgmac_hw_ops { void (*dev_xmit)(struct xlgmac_channel *channel); int (*dev_read)(struct xlgmac_channel *channel); - int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr); + int (*set_mac_address)(struct xlgmac_pdata *pdata, const u8 *addr); int (*config_rx_mode)(struct xlgmac_pdata *pdata); int (*enable_rx_csum)(struct xlgmac_pdata *pdata); int (*disable_rx_csum)(struct xlgmac_pdata *pdata); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 6b409f9c586395..0775a5542f2f92 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -832,7 +832,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p) if (netif_running(dev)) return -EBUSY */ - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); bdx_restore_mac(ndev, priv); RET(0); } @@ -840,6 +840,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p) static int bdx_read_mac(struct bdx_priv *priv) { u16 macAddress[3], i; + u8 addr[ETH_ALEN]; ENTER; macAddress[2] = READ_REG(priv, regUNC_MAC0_A); @@ -849,9 +850,10 @@ static int bdx_read_mac(struct bdx_priv *priv) macAddress[0] = READ_REG(priv, regUNC_MAC2_A); macAddress[0] = READ_REG(priv, regUNC_MAC2_A); for (i = 0; i < 3; i++) { - priv->ndev->dev_addr[i * 2 + 1] = macAddress[i]; - priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8; + addr[i * 2 + 1] = macAddress[i]; + addr[i * 2] = macAddress[i] >> 8; } + eth_hw_addr_set(priv->ndev, addr); RET(0); } diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 6e4d4f9e32e06d..b05de9b61ad619 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ 
b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item { #define AM65_CPSW_REGDUMP_REC(mod, start, end) { \ .hdr.module_id = (mod), \ - .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \ + .hdr.len = (end + 4 - start) * 2 + \ sizeof(struct am65_cpsw_regdump_hdr), \ .start_ofs = (start), \ .end_ofs = end, \ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 130346f74ee8a1..c092cb61416a18 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->port_id, port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { - random_ether_addr(port->slave.mac_addr); + eth_random_addr(port->slave.mac_addr); dev_err(dev, "Use random MAC address\n"); } } @@ -1970,7 +1970,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) ndev_priv->msg_enable = AM65_CPSW_DEBUG; SET_NETDEV_DEV(port->ndev, dev); - ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr); + eth_hw_addr_set(port->ndev, port->slave.mac_addr); port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; @@ -2429,12 +2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dl_priv = devlink_priv(common->devlink); dl_priv->common = common; - ret = devlink_register(common->devlink); - if (ret) { - dev_err(dev, "devlink reg fail ret:%d\n", ret); - goto dl_free; - } - /* Provide devlink hook to switch mode when multiple external ports * are present NUSS switchdev driver is enabled. */ @@ -2447,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dev_err(dev, "devlink params reg fail ret:%d\n", ret); goto dl_unreg; } - devlink_params_publish(common->devlink); } for (i = 1; i <= common->port_num; i++) { @@ -2468,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) } devlink_port_type_eth_set(dl_port, port->ndev); } - + devlink_register(common->devlink); return ret; dl_port_unreg: @@ -2479,10 +2472,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) devlink_port_unregister(dl_port); } dl_unreg: - devlink_unregister(common->devlink); -dl_free: devlink_free(common->devlink); - return ret; } @@ -2492,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) struct am65_cpsw_port *port; int i; + devlink_unregister(common->devlink); + for (i = 1; i <= common->port_num; i++) { port = am65_common_get_port(common, i); dl_port = &port->devlink_port; @@ -2500,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) } if (!AM65_CPSW_IS_CPSW2G(common) && - IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { - devlink_params_unpublish(common->devlink); - devlink_params_unregister(common->devlink, am65_cpsw_devlink_params, + IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) + devlink_params_unregister(common->devlink, + am65_cpsw_devlink_params, ARRAY_SIZE(am65_cpsw_devlink_params)); - } - devlink_unregister(common->devlink); devlink_free(common->devlink); } diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 02d4e51f73064e..7449436fc87ca5 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1112,7 +1112,7 @@ static int cpmac_probe(struct platform_device *pdev) priv->dev = dev; 
priv->ring_size = 64; priv->msg_enable = netif_msg_init(debug_level, 0xff); - memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); + eth_hw_addr_set(dev, pdata->dev_addr); snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 66f7ddd9b1f994..33142d505fc816 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -985,7 +985,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); - memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv->mac_addr); for_each_slave(priv, cpsw_set_slave_mac, priv); pm_runtime_put(cpsw->dev); @@ -1460,7 +1460,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } - memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv_sl2->mac_addr); priv_sl2->emac_port = 1; cpsw->slaves[1].ndev = ndev; @@ -1639,7 +1639,7 @@ static int cpsw_probe(struct platform_device *pdev) dev_info(dev, "Random MACID = %pM\n", priv->mac_addr); } - memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv->mac_addr); cpsw->slaves[0].ndev = ndev; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 7968f24d99c855..279e261e472074 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1000,7 +1000,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags, vid); ether_addr_copy(priv->mac_addr, addr->sa_data); - ether_addr_copy(ndev->dev_addr, priv->mac_addr); + eth_hw_addr_set(ndev, priv->mac_addr); cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv); pm_runtime_put(cpsw->dev); @@ -1401,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) dev_info(cpsw->dev, "Random MACID = %pM\n", priv->mac_addr); } - ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); + eth_hw_addr_set(ndev, slave_data->mac_addr); ether_addr_copy(priv->mac_addr, slave_data->mac_addr); cpsw->slaves[i].ndev = ndev; @@ -1810,12 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) dl_priv = devlink_priv(cpsw->devlink); dl_priv->cpsw = cpsw; - ret = devlink_register(cpsw->devlink); - if (ret) { - dev_err(dev, "DL reg fail ret:%d\n", ret); - goto dl_free; - } - ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); if (ret) { @@ -1823,22 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) goto dl_unreg; } - devlink_params_publish(cpsw->devlink); + devlink_register(cpsw->devlink); return ret; dl_unreg: - devlink_unregister(cpsw->devlink); -dl_free: devlink_free(cpsw->devlink); return ret; } static void cpsw_unregister_devlink(struct cpsw_common *cpsw) { - devlink_params_unpublish(cpsw->devlink); + devlink_unregister(cpsw->devlink); devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); - devlink_unregister(cpsw->devlink); devlink_free(cpsw->devlink); } diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 43222a34cba069..dc70a6bfaa6a16 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -669,10 +669,10 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) goto mux_fail; } - parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents), - 
GFP_KERNEL); + parent_names = devm_kcalloc(cpts->dev, num_parents, + sizeof(*parent_names), GFP_KERNEL); - mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents, + mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table), GFP_KERNEL); if (!mux_table || !parent_names) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index e8291d8488391e..2d2dcf70563f32 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1132,7 +1132,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); - memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, sa->sa_data); /* MAC address is configured only after the interface is enabled. */ if (netif_running(ndev)) { @@ -1402,7 +1402,6 @@ static int match_first_device(struct device *dev, const void *data) static int emac_dev_open(struct net_device *ndev) { struct device *emac_dev = &ndev->dev; - u32 cnt; struct resource *res; int q, m, ret; int res_num = 0, irq_num = 0; @@ -1420,8 +1419,7 @@ static int emac_dev_open(struct net_device *ndev) } netif_carrier_off(ndev); - for (cnt = 0; cnt < ETH_ALEN; cnt++) - ndev->dev_addr[cnt] = priv->mac_addr[cnt]; + eth_hw_addr_set(ndev, priv->mac_addr); /* Configuration items */ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; @@ -1899,7 +1897,7 @@ static int davinci_emac_probe(struct platform_device *pdev) rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); if (!rc) - ether_addr_copy(ndev->dev_addr, priv->mac_addr); + eth_hw_addr_set(ndev, priv->mac_addr); if (!is_valid_ether_addr(priv->mac_addr)) { /* Use random MAC if still none obtained. 
*/ diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index eda2961c0fe2a8..b818e4579f6f1a 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2028,16 +2028,16 @@ static int netcp_create_interface(struct netcp_device *netcp_device, emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr)) - ether_addr_copy(ndev->dev_addr, efuse_mac_addr); + eth_hw_addr_set(ndev, efuse_mac_addr); else - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); } else { - ret = of_get_mac_address(node_interface, ndev->dev_addr); + ret = of_get_ethdev_address(node_interface, ndev); if (ret) - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); } ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 77c448ad67ce1f..741c42c6a41776 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -184,7 +184,7 @@ static void tlan_print_list(struct tlan_list *, char *, int); static void tlan_read_and_clear_stats(struct net_device *, int); static void tlan_reset_adapter(struct net_device *); static void tlan_finish_reset(struct net_device *); -static void tlan_set_mac(struct net_device *, int areg, char *mac); +static void tlan_set_mac(struct net_device *, int areg, const char *mac); static void __tlan_phy_print(struct net_device *); static void tlan_phy_print(struct net_device *); @@ -817,6 +817,7 @@ static int tlan_init(struct net_device *dev) int err; int i; struct tlan_priv *priv; + u8 addr[ETH_ALEN]; priv = netdev_priv(dev); @@ -842,7 +843,7 @@ static int tlan_init(struct net_device *dev) for (i = 0; i < ETH_ALEN; i++) err |= tlan_ee_read_byte(dev, (u8) priv->adapter->addr_ofs + i, - (u8 *) &dev->dev_addr[i]); + addr + i); if (err) { pr_err("%s: Error reading MAC from eeprom: %d\n", dev->name, err); @@ -850,11 +851,12 @@ static int tlan_init(struct net_device *dev) /* Olicom OC-2325/OC-2326 have the address byte-swapped */ if (priv->adapter->addr_ofs == 0xf8) { for (i = 0; i < ETH_ALEN; i += 2) { - char tmp = dev->dev_addr[i]; - dev->dev_addr[i] = dev->dev_addr[i + 1]; - dev->dev_addr[i + 1] = tmp; + char tmp = addr[i]; + addr[i] = addr[i + 1]; + addr[i + 1] = tmp; } } + eth_hw_addr_set(dev, addr); netif_carrier_off(dev); @@ -2346,7 +2348,7 @@ tlan_finish_reset(struct net_device *dev) * **************************************************************/ -static void tlan_set_mac(struct net_device *dev, int areg, char *mac) +static void tlan_set_mac(struct net_device *dev, int areg, const char *mac) { int i; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 55e652624bd76b..3dbfb1b2064974 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1477,7 +1477,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) __func__, status); return -EINVAL; } - memcpy(netdev->dev_addr, &v1, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)&v1); if (card->vlan_required) { netdev->hard_header_len += VLAN_HLEN; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 66d4e024d11e90..f50f9a43d3ea2a 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1296,7 +1296,7 @@ 
spider_net_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); /* switch off GMACTPE and GMACRPE */ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 52245ac60fc74f..ce38f751522545 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -708,7 +708,7 @@ static int tc35815_read_plat_dev_addr(struct net_device *dev) lp->pci_dev, tc35815_mac_match); if (pd) { if (pd->platform_data) - memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); + eth_hw_addr_set(dev, pd->platform_data); put_device(pd); return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; } @@ -725,6 +725,7 @@ static int tc35815_init_dev_addr(struct net_device *dev) { struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; + u8 addr[ETH_ALEN]; int i; while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) @@ -735,9 +736,10 @@ static int tc35815_init_dev_addr(struct net_device *dev) while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) ; data = tc_readl(&tr->PROM_Data); - dev->dev_addr[i] = data & 0xff; - dev->dev_addr[i+1] = data >> 8; + addr[i] = data & 0xff; + addr[i+1] = data >> 8; } + eth_hw_addr_set(dev, addr); if (!is_valid_ether_addr(dev->dev_addr)) return tc35815_read_plat_dev_addr(dev); return 0; @@ -1859,7 +1861,8 @@ static struct net_device_stats *tc35815_get_stats(struct net_device *dev) return &dev->stats; } -static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) +static void tc35815_set_cam_entry(struct net_device *dev, int index, + const unsigned char *addr) { struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 3b73a9c55a5a76..509c5e9b29dfa4 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -899,6 +899,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks, struct net_device *dev; struct rhine_private *rp; int i, rc, phy_id; + u8 addr[ETH_ALEN]; const char *name; /* this should always be supported */ @@ -933,7 +934,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks, rhine_hw_init(dev, pioaddr); for (i = 0; i < 6; i++) - dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); + addr[i] = ioread8(ioaddr + StationAddr + i); + eth_hw_addr_set(dev, addr); if (!is_valid_ether_addr(dev->dev_addr)) { /* Report it and use a random ethernet address instead */ diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4b9c30f735b560..be2b992f24d99c 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2767,6 +2767,7 @@ static int velocity_probe(struct device *dev, int irq, struct velocity_info *vptr; struct mac_regs __iomem *regs; int ret = -ENOMEM; + u8 addr[ETH_ALEN]; /* FIXME: this driver, like almost all other ethernet drivers, * can support more than MAX_UNITS. 
@@ -2820,7 +2821,8 @@ static int velocity_probe(struct device *dev, int irq, mac_wol_reset(regs); for (i = 0; i < 6; i++) - netdev->dev_addr[i] = readb(&regs->PAR[i]); + addr[i] = readb(&regs->PAR[i]); + eth_hw_addr_set(netdev, addr); velocity_get_options(&vptr->options, velocity_nics); diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 2b84848dc26a59..7779a36da3c85d 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -463,7 +463,9 @@ static int w5100_spi_probe(struct spi_device *spi) static int w5100_spi_remove(struct spi_device *spi) { - return w5100_remove(&spi->dev); + w5100_remove(&spi->dev); + + return 0; } static const struct spi_device_id w5100_spi_ids[] = { diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index f974e70a82e8b8..ae24d6b868031d 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -985,7 +985,7 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, sock_addr->sa_data); w5100_write_macaddr(priv); return 0; } @@ -1064,7 +1064,9 @@ static int w5100_mmio_probe(struct platform_device *pdev) static int w5100_mmio_remove(struct platform_device *pdev) { - return w5100_remove(&pdev->dev); + w5100_remove(&pdev->dev); + + return 0; } void *w5100_ops_priv(const struct net_device *ndev) @@ -1155,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, INIT_WORK(&priv->restart_work, w5100_restart_work); if (mac_addr) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, mac_addr); else eth_hw_addr_random(ndev); @@ -1210,7 +1212,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, } EXPORT_SYMBOL_GPL(w5100_probe); -int w5100_remove(struct device *dev) +void w5100_remove(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); @@ -1226,7 +1228,6 @@ int w5100_remove(struct device *dev) unregister_netdev(ndev); free_netdev(ndev); - return 0; } EXPORT_SYMBOL_GPL(w5100_remove); diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h index 5d3d4b541fec45..481af3b6d9e8c6 100644 --- a/drivers/net/ethernet/wiznet/w5100.h +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -31,6 +31,6 @@ void *w5100_ops_priv(const struct net_device *ndev); int w5100_probe(struct device *dev, const struct w5100_ops *ops, int sizeof_ops_priv, const void *mac_addr, int irq, int link_gpio); -int w5100_remove(struct device *dev); +void w5100_remove(struct device *dev); extern const struct dev_pm_ops w5100_pm_ops; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 46aae30c463672..402d5036f26683 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -472,7 +472,7 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, sock_addr->sa_data); w5300_write_macaddr(priv); return 0; } @@ -534,7 +534,7 @@ static int w5300_hw_probe(struct platform_device *pdev) int ret; if (data && is_valid_ether_addr(data->mac_addr)) { - memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, data->mac_addr); } else { 
eth_hw_addr_random(ndev); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 463094ced104ac..e7065c9a8e3898 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) static int temac_init_mac_address(struct net_device *ndev, const void *address) { - memcpy(ndev->dev_addr, address, ETH_ALEN); + eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); @@ -451,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data); temac_do_set_mac_address(ndev); return 0; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 871b5ec3183d68..9b068b81ae0939 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -360,7 +360,7 @@ static void axienet_set_mac_address(struct net_device *ndev, struct axienet_local *lp = netdev_priv(ndev); if (address) - memcpy(ndev->dev_addr, address, ETH_ALEN); + eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); @@ -1525,7 +1525,7 @@ static void axienet_validate(struct phylink_config *config, netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n", phy_modes(state->interface), phy_modes(lp->phy_mode)); - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(supported); return; } } @@ -1558,10 +1558,8 @@ static void axienet_validate(struct phylink_config *config, break; } - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); } static void axienet_mac_pcs_get_state(struct phylink_config *config, diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b780aad3550aa3..0815de581c7f5c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -206,12 +206,13 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) * This function writes data from a 16-bit aligned buffer to a 32-bit aligned * address in the EmacLite device. */ -static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, +static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, unsigned length) { + const u16 *from_u16_ptr; u32 align_buffer; u32 *to_u32_ptr; - u16 *from_u16_ptr, *to_u16_ptr; + u16 *to_u16_ptr; to_u32_ptr = dest_ptr; from_u16_ptr = src_ptr; @@ -470,7 +471,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) * buffers (if configured). 
*/ static void xemaclite_update_address(struct net_local *drvdata, - u8 *address_ptr) + const u8 *address_ptr) { void __iomem *addr; u32 reg_data; @@ -511,7 +512,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address) if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); xemaclite_update_address(lp, dev->dev_addr); return 0; } @@ -1157,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); - rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + rc = of_get_ethdev_address(ofdev->dev.of_node, ndev); if (rc) { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index ae611e46da6af3..f9587e55b84283 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -671,7 +671,6 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, void *priv) { struct net_device *dev = priv; - int i; if (tuple->TupleDataLen != 13) return -EINVAL; @@ -679,8 +678,7 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, (tuple->TupleData[2] != 6)) return -EINVAL; /* another try (James Lehmer's CE2 version 4.1)*/ - for (i = 2; i < 6; i++) - dev->dev_addr[i] = tuple->TupleData[i+2]; + dev_addr_mod(dev, 2, &tuple->TupleData[2], 4); return 0; }; @@ -742,11 +740,9 @@ xirc2ps_config(struct pcmcia_device * link) len = pcmcia_get_tuple(link, 0x89, &buf); /* data layout looks like tuple 0x22 */ if (buf && len == 8) { - if (*buf == CISTPL_FUNCE_LAN_NODE_ID) { - int i; - for (i = 2; i < 6; i++) - dev->dev_addr[i] = buf[i+2]; - } else + if (*buf == CISTPL_FUNCE_LAN_NODE_ID) + dev_addr_mod(dev, 2, &buf[2], 4); + else err = -1; } kfree(buf); @@ -1271,7 +1267,7 @@ struct set_address_info { unsigned int ioaddr; }; -static void set_address(struct set_address_info *sa_info, char *addr) +static void set_address(struct set_address_info *sa_info, const char *addr) { unsigned int ioaddr = sa_info->ioaddr; int i; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 931494cc1c39e1..65fdad1107fc59 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1103,10 +1103,9 @@ static int init_queues(struct port *port) return -ENOMEM; } - if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, - &port->desc_tab_phys))) + port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys); + if (!port->desc_tab) return -ENOMEM; - memset(port->desc_tab, 0, POOL_ALLOC_SIZE); memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); @@ -1524,7 +1523,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) port->plat = plat; npe_port_tab[NPE_ID(port->id)] = port; - memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN); + eth_hw_addr_set(ndev, plat->hwaddr); platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 6d1e3f49a3d3da..b584ffe38ad681 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp) * or read adapter MAC address * * Assumptions: - * Memory allocated from pci_alloc_consistent() call is physically + * Memory 
allocated from dma_alloc_coherent() call is physically * contiguous, locked memory. * * Side Effects: @@ -1117,7 +1117,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, * dfx_ctl_set_mac_address. */ - memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); + dev_addr_set(dev, bp->factory_mac_addr); if (dfx_bus_tc) board_name = "DEFTA"; if (dfx_bus_eisa) @@ -1474,7 +1474,7 @@ static int dfx_open(struct net_device *dev) * address. */ - memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); + dev_addr_set(dev, bp->factory_mac_addr); /* Clear local unicast/multicast address tables and counts */ @@ -2379,7 +2379,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) /* Copy unicast address to driver-maintained structs and update count */ - memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */ + dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */ memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */ bp->uc_count = 1; @@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process( * is contained in a single physically contiguous buffer * in which the virtual address of the start of packet * (skb->data) can be converted to a physical address - * by using pci_map_single(). + * by using dma_map_single(). * * Since the adapter architecture requires a three byte * packet request header to prepend the start of packet, @@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, * skb->data. * 6. The physical address of the start of packet * can be determined from the virtual address - * by using pci_map_single() and is only 32-bits + * by using dma_map_single() and is only 32-bits * wide. */ diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 0de2c4552f5eb3..f5c25acaa57713 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1380,7 +1380,7 @@ static int fza_probe(struct device *bdev) goto err_out_irq; fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr)); - memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN); + dev_addr_set(dev, (u8 *)&hw_addr); fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev)); fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev)); diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h index 3814a2ff64aee7..b0e6ce0d893eb7 100644 --- a/drivers/net/fddi/skfp/h/smc.h +++ b/drivers/net/fddi/skfp/h/smc.h @@ -470,7 +470,7 @@ void card_stop(struct s_smc *smc); void init_board(struct s_smc *smc, u_char *mac_addr); int init_fplus(struct s_smc *smc); void init_plc(struct s_smc *smc); -int init_smt(struct s_smc *smc, u_char *mac_addr); +int init_smt(struct s_smc *smc, const u_char *mac_addr); void mac1_irq(struct s_smc *smc, u_short stu, u_short stl); void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l); void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l); diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index c5cb421f9890d6..2b6a607ac0b788 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -78,6 +78,7 @@ static const char * const boot_msg = #include #include #include +#include #include #include #include @@ -433,7 +434,7 @@ static int skfp_driver_init(struct net_device *dev) } read_address(smc, NULL); pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a); - memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN); + eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a); smt_reset_defaults(smc, 0); @@ -500,7 +501,7 
@@ static int skfp_open(struct net_device *dev) * address. */ read_address(smc, NULL); - memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN); + eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a); init_smt(smc, NULL); smt_online(smc, 1); @@ -924,7 +925,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr) unsigned long Flags; - memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); + dev_addr_set(dev, p_sockaddr->sa_data); spin_lock_irqsave(&bp->DriverLock, Flags); ResetAdapter(smc); spin_unlock_irqrestore(&bp->DriverLock, Flags); @@ -1012,7 +1013,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __ * is contained in a single physically contiguous buffer * in which the virtual address of the start of packet * (skb->data) can be converted to a physical address - * by using pci_map_single(). + * by using dma_map_single(). * * We have an internal queue for packets we can not send * immediately. Packets in this queue can be given to the diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c index c9898c83fe3045..8b172c1956852a 100644 --- a/drivers/net/fddi/skfp/smtinit.c +++ b/drivers/net/fddi/skfp/smtinit.c @@ -19,7 +19,7 @@ #include "h/fddi.h" #include "h/smc.h" -void init_fddi_driver(struct s_smc *smc, u_char *mac_addr); +void init_fddi_driver(struct s_smc *smc, const u_char *mac_addr); /* define global debug variable */ #if defined(DEBUG) && !defined(DEBUG_BRD) @@ -57,7 +57,7 @@ static void set_oem_spec_val(struct s_smc *smc) /* * Init SMT */ -int init_smt(struct s_smc *smc, u_char *mac_addr) +int init_smt(struct s_smc *smc, const u_char *mac_addr) /* u_char *mac_addr; canonical address or NULL */ { int p ; diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index 065bb0a40b1d1e..704e949484d0c1 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -137,7 +137,8 @@ static void fjes_hw_free_epbuf(struct epbuf_handler *epbh) epbh->ring = NULL; } -void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu) +void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, const u8 *mac_addr, + u32 mtu) { union ep_buffer_info *info = epbh->info; u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX]; diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h index b4608ea2a2d57c..997c7b37a40237 100644 --- a/drivers/net/fjes/fjes_hw.h +++ b/drivers/net/fjes/fjes_hw.h @@ -330,7 +330,7 @@ int fjes_hw_register_buff_addr(struct fjes_hw *, int, int fjes_hw_unregister_buff_addr(struct fjes_hw *, int); void fjes_hw_init_command_registers(struct fjes_hw *, struct fjes_device_command_param *); -void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32); +void fjes_hw_setup_epbuf(struct epbuf_handler *, const u8 *, u32); int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK); void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool); u32 fjes_hw_capture_interrupt_status(struct fjes_hw *); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 185c8a3986816a..b06c17ac8d4eed 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1203,6 +1203,7 @@ static int fjes_probe(struct platform_device *plat_dev) struct net_device *netdev; struct resource *res; struct fjes_hw *hw; + u8 addr[ETH_ALEN]; int err; err = -ENOMEM; @@ -1266,12 +1267,13 @@ static int fjes_probe(struct platform_device *plat_dev) goto err_free_control_wq; /* setup MAC address (02:00:00:00:00:[epid])*/ - netdev->dev_addr[0] = 2; - netdev->dev_addr[1] 
= 0; - netdev->dev_addr[2] = 0; - netdev->dev_addr[3] = 0; - netdev->dev_addr[4] = 0; - netdev->dev_addr[5] = hw->my_epid; /* EPID */ + addr[0] = 2; + addr[1] = 0; + addr[2] = 0; + addr[3] = 0; + addr[4] = 0; + addr[5] = hw->my_epid; /* EPID */ + eth_hw_addr_set(netdev, addr); err = register_netdev(netdev); if (err) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 30e0a10595a16d..24e5c54d06c158 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -539,7 +539,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(&rt->dst); } - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); + skb_dst_update_pmtu_no_confirm(skb, mtu); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 6192244b304ab9..f4e8793e995d7a 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -288,7 +288,7 @@ static int sp_set_mac_address(struct net_device *dev, void *addr) netif_tx_lock_bh(dev); netif_addr_lock(dev); - memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); + __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); @@ -317,7 +317,7 @@ static void sp_setup(struct net_device *dev) /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); dev->flags = 0; } @@ -726,7 +726,7 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file, } netif_tx_lock_bh(dev); - memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN); + __dev_addr_set(dev, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 6b6f28d5b8d5d3..a03d0b47464178 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -791,7 +791,7 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr) struct sockaddr *sa = (struct sockaddr *)addr; /* addr is an AX.25 shifted ASCII mac address */ - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -1159,7 +1159,7 @@ static void baycom_probe(struct net_device *dev) dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */ dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&null_ax25_address); dev->tx_queue_len = 16; /* New style flags */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index d967b0748773d6..30af0081e2bef5 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -302,7 +302,7 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = (struct sockaddr *)addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -457,9 +457,6 @@ static void bpq_setup(struct net_device *dev) dev->netdev_ops = &bpq_netdev_ops; dev->needs_free_netdev = true; - memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); - dev->flags = 0; dev->features = NETIF_F_LLTX; /* Allow recursion */ @@ -472,6 +469,8 @@ static void bpq_setup(struct net_device *dev) dev->mtu = AX25_DEF_PACLEN; dev->addr_len = AX25_ADDR_LEN; + memcpy(dev->broadcast, 
&ax25_bcast, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); } /* diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index f4c3efc3e0745c..7e527499d3ad6a 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -426,7 +426,7 @@ static void __init dev_setup(struct net_device *dev) dev->addr_len = AX25_ADDR_LEN; dev->tx_queue_len = 64; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); } static const struct net_device_ops scc_netdev_ops = { @@ -956,8 +956,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) static int scc_set_mac_address(struct net_device *dev, void *sa) { - memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data, - dev->addr_len); + dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data); return 0; } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 5805cfc83854bd..b0edb91bb10aa7 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -415,7 +415,7 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr) struct sockaddr *sa = (struct sockaddr *)addr; /* addr is an AX.25 shifted ASCII mac address */ - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -675,7 +675,7 @@ static void hdlcdrv_setup(struct net_device *dev) dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */ dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); dev->tx_queue_len = 16; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 8666110bec555c..867252a0247b10 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -344,7 +344,7 @@ static int ax_set_mac_address(struct net_device *dev, void *addr) netif_tx_lock_bh(dev); netif_addr_lock(dev); - memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); + __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); @@ -647,7 +647,7 @@ static void ax_setup(struct net_device *dev) memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); dev->flags = IFF_BROADCAST | IFF_MULTICAST; } @@ -850,7 +850,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file, } netif_tx_lock_bh(dev); - memcpy(dev->dev_addr, addr, AX25_ADDR_LEN); + __dev_addr_set(dev, addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index e0bb131a33d76b..3d59dac063ac43 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -1563,9 +1563,6 @@ static void scc_net_setup(struct net_device *dev) dev->netdev_ops = &scc_netdev_ops; dev->header_ops = &ax25_header_ops; - memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); - dev->flags = 0; dev->type = ARPHRD_AX25; @@ -1573,6 +1570,8 @@ static void scc_net_setup(struct net_device *dev) dev->mtu = AX25_DEF_PACLEN; dev->addr_len = AX25_ADDR_LEN; + memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); } /* ----> open network device <---- */ @@ -1951,7 +1950,7 @@ static int scc_net_siocdevprivate(struct net_device *dev, static int 
scc_net_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = (struct sockaddr *) addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 6ddacbdb224ba1..6376b8485976cf 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1063,7 +1063,7 @@ static int yam_set_mac_address(struct net_device *dev, void *addr) struct sockaddr *sa = (struct sockaddr *) addr; /* addr is an AX.25 shifted ASCII mac address */ - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -1107,7 +1107,7 @@ static void yam_setup(struct net_device *dev) dev->mtu = AX25_MTU; dev->addr_len = AX25_ADDR_LEN; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); + dev_addr_set(dev, (u8 *)&ax25_defaddr); } static int __init yam_init_driver(void) diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 7661dbb31162bb..16105292b140bb 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -502,6 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv, static int rr_init(struct net_device *dev) { + u8 addr[HIPPI_ALEN] __aligned(4); struct rr_private *rrpriv; struct rr_regs __iomem *regs; u32 sram_size, rev; @@ -537,10 +538,11 @@ static int rr_init(struct net_device *dev) * other method I've seen. -VAL */ - *(__be16 *)(dev->dev_addr) = + *(__be16 *)(addr) = htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA))); - *(__be32 *)(dev->dev_addr+2) = + *(__be32 *)(addr+2) = htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4]))); + dev_addr_set(dev, addr); printk(" MAC: %pM\n", dev->dev_addr); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 382bebc2420dff..7e66ae1d2a59bf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -803,6 +803,7 @@ void netvsc_linkstatus_callback(struct net_device *net, schedule_delayed_work(&ndev_ctx->dwork, 0); } +/* This function should only be called after skb_record_rx_queue() */ static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) { int rc; @@ -2536,7 +2537,7 @@ static int netvsc_probe(struct hv_device *dev, goto rndis_failed; } - memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); + eth_hw_addr_set(net, device_info->mac_adr); /* We must get rtnl lock before scheduling nvdev->subchan_work, * otherwise netvsc_subchan_work() can get rtnl lock first and wait @@ -2742,8 +2743,7 @@ static int netvsc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; /* Avoid Bonding master dev with same MAC registering as VF */ - if ((event_dev->priv_flags & IFF_BONDING) && - (event_dev->flags & IFF_MASTER)) + if (netif_is_bond_master(event_dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 3a2824f24caa87..ece6ff6049f66b 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2938,9 +2938,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv) */ static void ca8210_dev_com_clear(struct ca8210_priv *priv) { - flush_workqueue(priv->mlme_workqueue); destroy_workqueue(priv->mlme_workqueue); - flush_workqueue(priv->irq_workqueue); destroy_workqueue(priv->irq_workqueue); } diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index e9258a9f3702ca..31f522b8e54e6e 
100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -75,8 +76,12 @@ static void ifb_ri_tasklet(struct tasklet_struct *t) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { + /* Skip tc and netfilter to prevent redirection loop. */ skb->redirected = 0; +#ifdef CONFIG_NET_CLS_ACT skb->tc_skip_classify = 1; +#endif + nf_skip_egress(skb, true); u64_stats_update_begin(&txp->tsync); txp->tx_packets++; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index c0b21a5580d522..1d2f4e7d732458 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -579,7 +579,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, * world but keep using the physical-dev address for the outgoing * packets. */ - memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN); + eth_hw_addr_set(dev, phy_dev->dev_addr); dev->priv_flags |= IFF_NO_RX_HANDLER; @@ -787,7 +787,7 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_CHANGEADDR: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); + eth_hw_addr_set(ipvlan->dev, dev->dev_addr); call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev); } break; diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 1cedb634f4f7b6..ef02f2cf5ce13d 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -162,7 +162,7 @@ static int ipvtap_device_event(struct notifier_block *unused, devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor); classdev = device_create(&ipvtap_class, &dev->dev, devt, - dev, tap_name); + dev, "%s", tap_name); if (IS_ERR(classdev)) { tap_free_minor(ipvtap_major, &vlantap->tap); return notifier_from_errno(PTR_ERR(classdev)); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 93dc48b9b4f24d..16aa3a478e9e8d 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -250,7 +250,7 @@ static bool send_sci(const struct macsec_secy *secy) (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); } -static sci_t make_sci(u8 *addr, __be16 port) +static sci_t make_sci(const u8 *addr, __be16 port) { sci_t sci; @@ -3614,7 +3614,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p) dev_uc_del(real_dev, dev->dev_addr); out: - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); /* If h/w offloading is available, propagate to the device */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 35f46ad040b0d4..d2f830ec2969c9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -202,7 +202,7 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan, /* Now that we are unhashed it is safe to change the device * address without confusing packet delivery. 
*/ - memcpy(vlan->dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(vlan->dev, addr); macvlan_hash_add(vlan); } @@ -698,7 +698,8 @@ static int macvlan_stop(struct net_device *dev) return 0; } -static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) +static int macvlan_sync_address(struct net_device *dev, + const unsigned char *addr) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; @@ -707,7 +708,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ - ether_addr_copy(dev->dev_addr, addr); + eth_hw_addr_set(dev, addr); } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr)) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 694e2f5dbbe591..6b12902a803f0d 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -169,7 +169,7 @@ static int macvtap_device_event(struct notifier_block *unused, devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor); classdev = device_create(&macvtap_class, &dev->dev, devt, - dev, tap_name); + dev, "%s", tap_name); if (IS_ERR(classdev)) { tap_free_minor(macvtap_major, &vlantap->tap); return notifier_from_errno(PTR_ERR(classdev)); diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 2a4892402ed8c4..86ec5aae42891a 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -748,8 +748,7 @@ struct failover *net_failover_create(struct net_device *standby_dev) failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; failover_dev->features |= failover_dev->hw_features; - memcpy(failover_dev->dev_addr, standby_dev->dev_addr, - failover_dev->addr_len); + dev_addr_set(failover_dev, standby_dev->dev_addr); failover_dev->min_mtu = standby_dev->min_mtu; failover_dev->max_mtu = standby_dev->max_mtu; diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index 29f5627d11e67a..25cb2e600d530a 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include @@ -24,39 +23,6 @@ static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev) return container_of(dev, struct nsim_bus_dev, dev); } -static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev, - unsigned int num_vfs) -{ - struct nsim_dev *nsim_dev; - int err = 0; - - if (nsim_bus_dev->max_vfs < num_vfs) - return -ENOMEM; - - if (!nsim_bus_dev->vfconfigs) - return -ENOMEM; - nsim_bus_dev->num_vfs = num_vfs; - - nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); - if (nsim_esw_mode_is_switchdev(nsim_dev)) { - err = nsim_esw_switchdev_enable(nsim_dev, NULL); - if (err) - nsim_bus_dev->num_vfs = 0; - } - - return err; -} - -void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev) -{ - struct nsim_dev *nsim_dev; - - nsim_bus_dev->num_vfs = 0; - nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); - if (nsim_esw_mode_is_switchdev(nsim_dev)) - nsim_esw_legacy_enable(nsim_dev, NULL); -} - static ssize_t nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -69,27 +35,13 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; - mutex_lock(&nsim_bus_dev->vfs_lock); - if (nsim_bus_dev->num_vfs == num_vfs) - goto exit_good; - if (nsim_bus_dev->num_vfs && num_vfs) { - ret = -EBUSY; - goto exit_unlock; - } - - if (num_vfs) { - ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs); - if 
(ret) - goto exit_unlock; - } else { - nsim_bus_dev_vfs_disable(nsim_bus_dev); - } -exit_good: - ret = count; -exit_unlock: - mutex_unlock(&nsim_bus_dev->vfs_lock); + device_lock(dev); + ret = -ENOENT; + if (dev_get_drvdata(dev)) + ret = nsim_drv_configure_vfs(nsim_bus_dev, num_vfs); + device_unlock(dev); - return ret; + return ret ? ret : count; } static ssize_t @@ -105,79 +57,6 @@ static struct device_attribute nsim_bus_dev_numvfs_attr = __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show, nsim_bus_dev_numvfs_store); -ssize_t nsim_bus_dev_max_vfs_read(struct file *file, - char __user *data, - size_t count, loff_t *ppos) -{ - struct nsim_bus_dev *nsim_bus_dev = file->private_data; - char buf[11]; - ssize_t len; - - len = snprintf(buf, sizeof(buf), "%u\n", nsim_bus_dev->max_vfs); - if (len < 0) - return len; - - return simple_read_from_buffer(data, count, ppos, buf, len); -} - -ssize_t nsim_bus_dev_max_vfs_write(struct file *file, - const char __user *data, - size_t count, loff_t *ppos) -{ - struct nsim_bus_dev *nsim_bus_dev = file->private_data; - struct nsim_vf_config *vfconfigs; - ssize_t ret; - char buf[10]; - u32 val; - - if (*ppos != 0) - return 0; - - if (count >= sizeof(buf)) - return -ENOSPC; - - mutex_lock(&nsim_bus_dev->vfs_lock); - /* Reject if VFs are configured */ - if (nsim_bus_dev->num_vfs) { - ret = -EBUSY; - goto unlock; - } - - ret = copy_from_user(buf, data, count); - if (ret) { - ret = -EFAULT; - goto unlock; - } - - buf[count] = '\0'; - ret = kstrtouint(buf, 10, &val); - if (ret) { - ret = -EIO; - goto unlock; - } - - /* max_vfs limited by the maximum number of provided port indexes */ - if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE) { - ret = -ERANGE; - goto unlock; - } - - vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config), GFP_KERNEL | __GFP_NOWARN); - if (!vfconfigs) { - ret = -ENOMEM; - goto unlock; - } - - kfree(nsim_bus_dev->vfconfigs); - nsim_bus_dev->vfconfigs = vfconfigs; - nsim_bus_dev->max_vfs = val; - *ppos += count; - ret = count; -unlock: - mutex_unlock(&nsim_bus_dev->vfs_lock); - return ret; -} - static ssize_t new_port_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -201,7 +80,7 @@ new_port_store(struct device *dev, struct device_attribute *attr, return -EBUSY; } - ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); + ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); return ret ? ret : count; } @@ -231,7 +110,7 @@ del_port_store(struct device *dev, struct device_attribute *attr, return -EBUSY; } - ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); + ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); return ret ? 
ret : count; } @@ -371,14 +250,14 @@ static int nsim_bus_probe(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); - return nsim_dev_probe(nsim_bus_dev); + return nsim_drv_probe(nsim_bus_dev); } static void nsim_bus_remove(struct device *dev) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); - nsim_dev_remove(nsim_bus_dev); + nsim_drv_remove(nsim_bus_dev); } static int nsim_num_vf(struct device *dev) @@ -420,26 +299,15 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu nsim_bus_dev->initial_net = current->nsproxy->net_ns; nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS; mutex_init(&nsim_bus_dev->nsim_bus_reload_lock); - mutex_init(&nsim_bus_dev->vfs_lock); /* Disallow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, false); - nsim_bus_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs, - sizeof(struct nsim_vf_config), - GFP_KERNEL | __GFP_NOWARN); - if (!nsim_bus_dev->vfconfigs) { - err = -ENOMEM; - goto err_nsim_bus_dev_id_free; - } - err = device_register(&nsim_bus_dev->dev); if (err) - goto err_nsim_vfs_free; + goto err_nsim_bus_dev_id_free; return nsim_bus_dev; -err_nsim_vfs_free: - kfree(nsim_bus_dev->vfconfigs); err_nsim_bus_dev_id_free: ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); err_nsim_bus_dev_free: @@ -453,7 +321,6 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev) smp_store_release(&nsim_bus_dev->init, false); device_unregister(&nsim_bus_dev->dev); ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); - kfree(nsim_bus_dev->vfconfigs); kfree(nsim_bus_dev); } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 54313bd5779731..54345c096a16f0 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -56,6 +56,22 @@ static inline unsigned int nsim_dev_port_index_to_vf_index(unsigned int port_ind static struct dentry *nsim_dev_ddir; +unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev) +{ + WARN_ON(!lockdep_rtnl_is_held() && + !lockdep_is_held(&nsim_dev->vfs_lock)); + + return nsim_dev->nsim_bus_dev->num_vfs; +} + +static void +nsim_bus_dev_set_vfs(struct nsim_bus_dev *nsim_bus_dev, unsigned int num_vfs) +{ + rtnl_lock(); + nsim_bus_dev->num_vfs = num_vfs; + rtnl_unlock(); +} + #define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32) static int @@ -211,6 +227,70 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = { .owner = THIS_MODULE, }; +static ssize_t nsim_bus_dev_max_vfs_read(struct file *file, char __user *data, + size_t count, loff_t *ppos) +{ + struct nsim_dev *nsim_dev = file->private_data; + char buf[11]; + ssize_t len; + + len = scnprintf(buf, sizeof(buf), "%u\n", + READ_ONCE(nsim_dev->nsim_bus_dev->max_vfs)); + + return simple_read_from_buffer(data, count, ppos, buf, len); +} + +static ssize_t nsim_bus_dev_max_vfs_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct nsim_vf_config *vfconfigs; + struct nsim_dev *nsim_dev; + char buf[10]; + ssize_t ret; + u32 val; + + if (*ppos != 0) + return 0; + + if (count >= sizeof(buf)) + return -ENOSPC; + + ret = copy_from_user(buf, data, count); + if (ret) + return -EFAULT; + buf[count] = '\0'; + + ret = kstrtouint(buf, 10, &val); + if (ret) + return -EINVAL; + + /* max_vfs limited by the maximum number of provided port indexes */ + if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE) + return -ERANGE; + + vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config), + GFP_KERNEL | __GFP_NOWARN); + if (!vfconfigs) + return -ENOMEM; + 
+ nsim_dev = file->private_data; + mutex_lock(&nsim_dev->vfs_lock); + /* Reject if VFs are configured */ + if (nsim_dev_get_vfs(nsim_dev)) { + ret = -EBUSY; + } else { + swap(nsim_dev->vfconfigs, vfconfigs); + WRITE_ONCE(nsim_dev->nsim_bus_dev->max_vfs, val); + *ppos += count; + ret = count; + } + mutex_unlock(&nsim_dev->vfs_lock); + + kfree(vfconfigs); + return ret; +} + static const struct file_operations nsim_dev_max_vfs_fops = { .open = simple_open, .read = nsim_bus_dev_max_vfs_read, @@ -259,11 +339,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) debugfs_create_bool("fail_trap_policer_counter_get", 0600, nsim_dev->ddir, &nsim_dev->fail_trap_policer_counter_get); - nsim_dev->max_vfs = debugfs_create_file("max_vfs", - 0600, - nsim_dev->ddir, - nsim_dev->nsim_bus_dev, - &nsim_dev_max_vfs_fops); + debugfs_create_file("max_vfs", 0600, nsim_dev->ddir, + nsim_dev, &nsim_dev_max_vfs_fops); + nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir); if (IS_ERR(nsim_dev->nodes_ddir)) { err = PTR_ERR(nsim_dev->nodes_ddir); @@ -328,9 +406,9 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev, unsigned int vf_id = nsim_dev_port_index_to_vf_index(port_index); debugfs_create_u16("tx_share", 0400, nsim_dev_port->ddir, - &nsim_bus_dev->vfconfigs[vf_id].min_tx_rate); + &nsim_dev->vfconfigs[vf_id].min_tx_rate); debugfs_create_u16("tx_max", 0400, nsim_dev_port->ddir, - &nsim_bus_dev->vfconfigs[vf_id].max_tx_rate); + &nsim_dev->vfconfigs[vf_id].max_tx_rate); nsim_dev_port->rate_parent = debugfs_create_file("rate_parent", 0400, nsim_dev_port->ddir, @@ -490,7 +568,9 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev) } static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port); -int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack) + +static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, + struct netlink_ext_ack *extack) { struct devlink *devlink = priv_to_devlink(nsim_dev); struct nsim_dev_port *nsim_dev_port, *tmp; @@ -505,13 +585,14 @@ int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *ex return 0; } -int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack) +static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, + struct netlink_ext_ack *extack) { struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev; int i, err; - for (i = 0; i < nsim_bus_dev->num_vfs; i++) { - err = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i); + for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) { + err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports"); pr_err("Failed to initialize VF id=%d. 
%d.\n", i, err); @@ -523,7 +604,7 @@ int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack err_port_add_vfs: for (i--; i >= 0; i--) - nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i); + nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i); return err; } @@ -533,7 +614,7 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct nsim_dev *nsim_dev = devlink_priv(devlink); int err = 0; - mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock); + mutex_lock(&nsim_dev->vfs_lock); if (mode == nsim_dev->esw_mode) goto unlock; @@ -545,7 +626,7 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = -EINVAL; unlock: - mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock); + mutex_unlock(&nsim_dev->vfs_lock); return err; } @@ -1093,7 +1174,7 @@ static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv, u64 tx_share, struct netlink_ext_ack *extack) { struct nsim_dev_port *nsim_dev_port = priv; - struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev; int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index); int err; @@ -1101,7 +1182,7 @@ static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv, if (err) return err; - nsim_bus_dev->vfconfigs[vf_id].min_tx_rate = tx_share; + nsim_dev->vfconfigs[vf_id].min_tx_rate = tx_share; return 0; } @@ -1109,7 +1190,7 @@ static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv, u64 tx_max, struct netlink_ext_ack *extack) { struct nsim_dev_port *nsim_dev_port = priv; - struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev; int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index); int err; @@ -1117,7 +1198,7 @@ static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv, if (err) return err; - nsim_bus_dev->vfconfigs[vf_id].max_tx_rate = tx_max; + nsim_dev->vfconfigs[vf_id].max_tx_rate = tx_max; return 0; } @@ -1273,13 +1354,12 @@ static const struct devlink_ops nsim_dev_devlink_ops = { static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type, unsigned int port_index) { - struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev; struct devlink_port_attrs attrs = {}; struct nsim_dev_port *nsim_dev_port; struct devlink_port *devlink_port; int err; - if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_bus_dev->num_vfs) + if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_dev_get_vfs(nsim_dev)) return -EINVAL; nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL); @@ -1442,7 +1522,7 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev, return err; } -int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) +int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev) { struct nsim_dev *nsim_dev; struct devlink *devlink; @@ -1457,6 +1537,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id); get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len); INIT_LIST_HEAD(&nsim_dev->port_list); + mutex_init(&nsim_dev->vfs_lock); mutex_init(&nsim_dev->port_list_lock); nsim_dev->fw_update_status = true; nsim_dev->fw_update_overwrite_mask = 0; @@ -1466,13 +1547,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev); - err = nsim_dev_resources_register(devlink); - if (err) + nsim_dev->vfconfigs = 
kcalloc(nsim_bus_dev->max_vfs, + sizeof(struct nsim_vf_config), + GFP_KERNEL | __GFP_NOWARN); + if (!nsim_dev->vfconfigs) { + err = -ENOMEM; goto err_devlink_free; + } - err = devlink_register(devlink); + err = nsim_dev_resources_register(devlink); if (err) - goto err_resources_unregister; + goto err_vfc_free; err = devlink_params_register(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); @@ -1514,9 +1599,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_psample_exit; - devlink_params_publish(devlink); - devlink_reload_enable(devlink); nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; err_psample_exit: @@ -1537,11 +1622,12 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); err_dl_unregister: - devlink_unregister(devlink); -err_resources_unregister: devlink_resources_unregister(devlink, NULL); +err_vfc_free: + kfree(nsim_dev->vfconfigs); err_devlink_free: devlink_free(devlink); + dev_set_drvdata(&nsim_bus_dev->dev, NULL); return err; } @@ -1553,10 +1639,13 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev) return; debugfs_remove(nsim_dev->take_snapshot); - mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock); - if (nsim_dev->nsim_bus_dev->num_vfs) - nsim_bus_dev_vfs_disable(nsim_dev->nsim_bus_dev); - mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock); + mutex_lock(&nsim_dev->vfs_lock); + if (nsim_dev_get_vfs(nsim_dev)) { + nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0); + if (nsim_esw_mode_is_switchdev(nsim_dev)) + nsim_esw_legacy_enable(nsim_dev, NULL); + } + mutex_unlock(&nsim_dev->vfs_lock); nsim_dev_port_del_all(nsim_dev); nsim_dev_psample_exit(nsim_dev); @@ -1567,22 +1656,22 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev) mutex_destroy(&nsim_dev->port_list_lock); } -void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev) +void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev) { struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); struct devlink *devlink = priv_to_devlink(nsim_dev); - devlink_reload_disable(devlink); - + devlink_unregister(devlink); nsim_dev_reload_destroy(nsim_dev); nsim_bpf_dev_exit(nsim_dev); nsim_dev_debugfs_exit(nsim_dev); devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); - devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); + kfree(nsim_dev->vfconfigs); devlink_free(devlink); + dev_set_drvdata(&nsim_bus_dev->dev, NULL); } static struct nsim_dev_port * @@ -1598,7 +1687,7 @@ __nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type, return NULL; } -int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, +int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, unsigned int port_index) { struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); @@ -1613,7 +1702,7 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type return err; } -int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, +int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, unsigned int port_index) { struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); @@ -1630,6 +1719,43 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type return err; } +int 
nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev, + unsigned int num_vfs) +{ + struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); + int ret = 0; + + mutex_lock(&nsim_dev->vfs_lock); + if (nsim_bus_dev->num_vfs == num_vfs) + goto exit_unlock; + if (nsim_bus_dev->num_vfs && num_vfs) { + ret = -EBUSY; + goto exit_unlock; + } + if (nsim_bus_dev->max_vfs < num_vfs) { + ret = -ENOMEM; + goto exit_unlock; + } + + nsim_bus_dev_set_vfs(nsim_bus_dev, num_vfs); + if (nsim_esw_mode_is_switchdev(nsim_dev)) { + if (num_vfs) { + ret = nsim_esw_switchdev_enable(nsim_dev, NULL); + if (ret) { + nsim_bus_dev_set_vfs(nsim_bus_dev, 0); + goto exit_unlock; + } + } else { + nsim_esw_legacy_enable(nsim_dev, NULL); + } + } + +exit_unlock: + mutex_unlock(&nsim_dev->vfs_lock); + + return ret; +} + int nsim_dev_init(void) { nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL); diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index b03a0513eb7e7b..0ab6a40be61147 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -81,6 +81,30 @@ static int nsim_set_ringparam(struct net_device *dev, return 0; } +static void +nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct netdevsim *ns = netdev_priv(dev); + + ch->max_combined = ns->nsim_bus_dev->num_queues; + ch->combined_count = ns->ethtool.channels; +} + +static int +nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct netdevsim *ns = netdev_priv(dev); + int err; + + err = netif_set_real_num_queues(dev, ch->combined_count, + ch->combined_count); + if (err) + return err; + + ns->ethtool.channels = ch->combined_count; + return 0; +} + static int nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { @@ -118,6 +142,8 @@ static const struct ethtool_ops nsim_ethtool_ops = { .get_coalesce = nsim_get_coalesce, .get_ringparam = nsim_get_ringparam, .set_ringparam = nsim_set_ringparam, + .get_channels = nsim_get_channels, + .set_channels = nsim_set_channels, .get_fecparam = nsim_get_fecparam, .set_fecparam = nsim_set_fecparam, }; @@ -141,6 +167,8 @@ void nsim_ethtool_init(struct netdevsim *ns) ns->ethtool.fec.fec = ETHTOOL_FEC_NONE; ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE; + ns->ethtool.channels = ns->nsim_bus_dev->num_queues; + ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir); debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err); diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index 04aebdf8574744..aa77af4a68df6f 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -110,26 +110,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) if (err) return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_bool_put(fmsg, true); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_u8_put(fmsg, i); - if (err) - return err; - } err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) return err; @@ -146,18 +126,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) if (err) return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err 
= devlink_fmsg_u64_put(fmsg, i); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); if (err) return err; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 50572e0f1f5290..e470e3398abc27 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -82,12 +82,12 @@ nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; /* Only refuse multicast addresses, zero address can mean unset/any. */ - if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac)) + if (vf >= nsim_dev_get_vfs(nsim_dev) || is_multicast_ether_addr(mac)) return -EINVAL; - memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN); + memcpy(nsim_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN); return 0; } @@ -96,14 +96,14 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7) + if (vf >= nsim_dev_get_vfs(nsim_dev) || vlan > 4095 || qos > 7) return -EINVAL; - nsim_bus_dev->vfconfigs[vf].vlan = vlan; - nsim_bus_dev->vfconfigs[vf].qos = qos; - nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto; + nsim_dev->vfconfigs[vf].vlan = vlan; + nsim_dev->vfconfigs[vf].qos = qos; + nsim_dev->vfconfigs[vf].vlan_proto = vlan_proto; return 0; } @@ -111,18 +111,18 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf, static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) { pr_err("Not supported in switchdev mode. 
Please use devlink API.\n"); return -EOPNOTSUPP; } - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; - nsim_bus_dev->vfconfigs[vf].min_tx_rate = min; - nsim_bus_dev->vfconfigs[vf].max_tx_rate = max; + nsim_dev->vfconfigs[vf].min_tx_rate = min; + nsim_dev->vfconfigs[vf].max_tx_rate = max; return 0; } @@ -130,11 +130,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; - nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val; + nsim_dev->vfconfigs[vf].spoofchk_enabled = val; return 0; } @@ -142,11 +142,11 @@ static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; - nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val; + nsim_dev->vfconfigs[vf].rss_query_enabled = val; return 0; } @@ -154,11 +154,11 @@ static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; - nsim_bus_dev->vfconfigs[vf].trusted = val; + nsim_dev->vfconfigs[vf].trusted = val; return 0; } @@ -167,22 +167,22 @@ static int nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; ivi->vf = vf; - ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state; - ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate; - ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate; - ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan; - ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto; - ivi->qos = nsim_bus_dev->vfconfigs[vf].qos; - memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN); - ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled; - ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted; - ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled; + ivi->linkstate = nsim_dev->vfconfigs[vf].link_state; + ivi->min_tx_rate = nsim_dev->vfconfigs[vf].min_tx_rate; + ivi->max_tx_rate = nsim_dev->vfconfigs[vf].max_tx_rate; + ivi->vlan = nsim_dev->vfconfigs[vf].vlan; + ivi->vlan_proto = nsim_dev->vfconfigs[vf].vlan_proto; + ivi->qos = nsim_dev->vfconfigs[vf].qos; + memcpy(&ivi->mac, nsim_dev->vfconfigs[vf].vf_mac, ETH_ALEN); + ivi->spoofchk = nsim_dev->vfconfigs[vf].spoofchk_enabled; + ivi->trusted = nsim_dev->vfconfigs[vf].trusted; + ivi->rss_query_en = nsim_dev->vfconfigs[vf].rss_query_enabled; return 0; } @@ -190,9 +190,9 @@ nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) static int nsim_set_vf_link_state(struct 
net_device *dev, int vf, int state) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; + struct nsim_dev *nsim_dev = ns->nsim_dev; - if (vf >= nsim_bus_dev->num_vfs) + if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; switch (state) { @@ -204,7 +204,7 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) return -EINVAL; } - nsim_bus_dev->vfconfigs[vf].link_state = state; + nsim_dev->vfconfigs[vf].link_state = state; return 0; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 793c86dc5a9c17..c49771f27f17b3 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -62,6 +62,7 @@ struct nsim_ethtool_pauseparam { struct nsim_ethtool { u32 get_err; u32 set_err; + u32 channels; struct nsim_ethtool_pauseparam pauseparam; struct ethtool_coalesce coalesce; struct ethtool_ringparam ring; @@ -216,6 +217,19 @@ struct nsim_dev_port { struct netdevsim *ns; }; +struct nsim_vf_config { + int link_state; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan; + __be16 vlan_proto; + u16 qos; + u8 vf_mac[ETH_ALEN]; + bool spoofchk_enabled; + bool trusted; + bool rss_query_enabled; +}; + struct nsim_dev { struct nsim_bus_dev *nsim_bus_dev; struct nsim_fib_data *fib_data; @@ -223,8 +237,11 @@ struct nsim_dev { struct dentry *ddir; struct dentry *ports_ddir; struct dentry *take_snapshot; - struct dentry *max_vfs; struct dentry *nodes_ddir; + + struct mutex vfs_lock; /* Protects vfconfigs */ + struct nsim_vf_config *vfconfigs; + struct bpf_offload_dev *bpf_dev; bool bpf_bind_accept; bool bpf_bind_verifier_accept; @@ -264,9 +281,6 @@ struct nsim_dev { u16 esw_mode; }; -int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack); -int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack); - static inline bool nsim_esw_mode_is_legacy(struct nsim_dev *nsim_dev) { return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_LEGACY; @@ -284,14 +298,18 @@ static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev) int nsim_dev_init(void); void nsim_dev_exit(void); -int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev); -void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev); -int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, +int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev); +void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev); +int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, unsigned int port_index); -int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, +int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type, unsigned int port_index); +int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev, + unsigned int num_vfs); + +unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev); struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, struct netlink_ext_ack *extack); @@ -299,14 +317,6 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data); u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, enum nsim_resource_id res_id, bool max); -ssize_t nsim_bus_dev_max_vfs_read(struct file *file, - char __user *data, - size_t count, loff_t *ppos); -ssize_t nsim_bus_dev_max_vfs_write(struct file *file, - const char __user *data, - size_t count, loff_t *ppos); -void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev); - static inline bool nsim_dev_port_is_pf(struct nsim_dev_port *nsim_dev_port) { return 
nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_PF; @@ -335,19 +345,6 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) } #endif -struct nsim_vf_config { - int link_state; - u16 min_tx_rate; - u16 max_tx_rate; - u16 vlan; - __be16 vlan_proto; - u16 qos; - u8 vf_mac[ETH_ALEN]; - bool spoofchk_enabled; - bool trusted; - bool rss_query_enabled; -}; - struct nsim_bus_dev { struct device dev; struct list_head list; @@ -358,8 +355,6 @@ struct nsim_bus_dev { */ unsigned int max_vfs; unsigned int num_vfs; - struct mutex vfs_lock; /* Protects vfconfigs */ - struct nsim_vf_config *vfconfigs; /* Lock for devlink->reload_enabled in netdevsim module */ struct mutex nsim_bus_reload_lock; bool in_reload; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index a5bab614ff8459..98ca6b18415e7e 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -428,7 +428,7 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); eth_random_addr(ndev->perm_addr); - memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); + dev_addr_set(ndev, ndev->perm_addr); ndev->netdev_ops = &ntb_netdev_ops; ndev->ethtool_ops = &ntb_ethtool_ops; diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 7de631f5356fc8..cd6742e6ba8b9c 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -646,7 +646,7 @@ void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, if (state->interface == PHY_INTERFACE_MODE_NA) return; - bitmap_zero(xpcs_supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(xpcs_supported); compat = xpcs_find_compat(xpcs->id, state->interface); diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index bdac087058b266..dae95d9a07e88e 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -33,14 +33,17 @@ #define AT803X_SFC_DISABLE_JABBER BIT(0) #define AT803X_SPECIFIC_STATUS 0x11 -#define AT803X_SS_SPEED_MASK (3 << 14) -#define AT803X_SS_SPEED_1000 (2 << 14) -#define AT803X_SS_SPEED_100 (1 << 14) -#define AT803X_SS_SPEED_10 (0 << 14) +#define AT803X_SS_SPEED_MASK GENMASK(15, 14) +#define AT803X_SS_SPEED_1000 2 +#define AT803X_SS_SPEED_100 1 +#define AT803X_SS_SPEED_10 0 #define AT803X_SS_DUPLEX BIT(13) #define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11) #define AT803X_SS_MDIX BIT(6) +#define QCA808X_SS_SPEED_MASK GENMASK(9, 7) +#define QCA808X_SS_SPEED_2500 4 + #define AT803X_INTR_ENABLE 0x12 #define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15) #define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14) @@ -70,7 +73,8 @@ #define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0) #define AT803X_LED_CONTROL 0x18 -#define AT803X_DEVICE_ADDR 0x03 +#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 +#define AT803X_WOL_EN BIT(5) #define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A @@ -86,15 +90,22 @@ #define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 -#define AT803X_DEBUG_REG_0 0x00 +#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00 +#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2) +#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2) #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) -#define AT803X_DEBUG_REG_5 0x05 +#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) +#define AT803X_DEBUG_REG_HIB_CTRL 0x0b +#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10) +#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13) + #define 
AT803X_DEBUG_REG_3C 0x3C -#define AT803X_DEBUG_REG_3D 0x3D +#define AT803X_DEBUG_REG_GREEN 0x3D +#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6) #define AT803X_DEBUG_REG_1F 0x1F #define AT803X_DEBUG_PLL_ON BIT(2) @@ -150,8 +161,12 @@ #define ATH8035_PHY_ID 0x004dd072 #define AT8030_PHY_ID_MASK 0xffffffef -#define QCA8327_PHY_ID 0x004dd034 +#define QCA8081_PHY_ID 0x004dd101 + +#define QCA8327_A_PHY_ID 0x004dd033 +#define QCA8327_B_PHY_ID 0x004dd034 #define QCA8337_PHY_ID 0x004dd036 +#define QCA9561_PHY_ID 0x004dd042 #define QCA8K_PHY_ID_MASK 0xffffffff #define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0) @@ -163,7 +178,84 @@ #define AT803X_KEEP_PLL_ENABLED BIT(0) #define AT803X_DISABLE_SMARTEEE BIT(1) -MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver"); +/* ADC threshold */ +#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80 +#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0) +#define QCA808X_ADC_THRESHOLD_80MV 0 +#define QCA808X_ADC_THRESHOLD_100MV 0xf0 +#define QCA808X_ADC_THRESHOLD_200MV 0x0f +#define QCA808X_ADC_THRESHOLD_300MV 0xff + +/* CLD control */ +#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007 +#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4) +#define QCA808X_8023AZ_AFE_EN 0x90 + +/* AZ control */ +#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008 +#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32 + +#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014 +#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529 + +#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E +#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341 + +#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E +#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419 + +#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020 +#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341 + +#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c +#define QCA808X_TOP_OPTION1_DATA 0x0 + +#define QCA808X_PHY_MMD3_DEBUG_1 0xa100 +#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203 +#define QCA808X_PHY_MMD3_DEBUG_2 0xa101 +#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad +#define QCA808X_PHY_MMD3_DEBUG_3 0xa103 +#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698 +#define QCA808X_PHY_MMD3_DEBUG_4 0xa105 +#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001 +#define QCA808X_PHY_MMD3_DEBUG_5 0xa106 +#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111 +#define QCA808X_PHY_MMD3_DEBUG_6 0xa011 +#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85 + +/* master/slave seed config */ +#define QCA808X_PHY_DEBUG_LOCAL_SEED 9 +#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1) +#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2) +#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32 + +/* Hibernation yields lower power consumpiton in contrast with normal operation mode. + * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s. 
+ */ +#define QCA808X_DBG_AN_TEST 0xb +#define QCA808X_HIBERNATION_EN BIT(15) + +#define QCA808X_CDT_ENABLE_TEST BIT(15) +#define QCA808X_CDT_INTER_CHECK_DIS BIT(13) +#define QCA808X_CDT_LENGTH_UNIT BIT(10) + +#define QCA808X_MMD3_CDT_STATUS 0x8064 +#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065 +#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066 +#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067 +#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068 +#define QCA808X_CDT_DIAG_LENGTH GENMASK(7, 0) + +#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12) +#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8) +#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4) +#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0) +#define QCA808X_CDT_STATUS_STAT_FAIL 0 +#define QCA808X_CDT_STATUS_STAT_NORMAL 1 +#define QCA808X_CDT_STATUS_STAT_OPEN 2 +#define QCA808X_CDT_STATUS_STAT_SHORT 3 + +MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); MODULE_LICENSE("GPL"); @@ -276,25 +368,25 @@ static int at803x_read_page(struct phy_device *phydev) static int at803x_enable_rx_delay(struct phy_device *phydev) { - return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0, AT803X_DEBUG_RX_CLK_DLY_EN); } static int at803x_enable_tx_delay(struct phy_device *phydev) { - return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0, AT803X_DEBUG_TX_CLK_DLY_EN); } static int at803x_disable_rx_delay(struct phy_device *phydev) { - return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, AT803X_DEBUG_RX_CLK_DLY_EN, 0); } static int at803x_disable_tx_delay(struct phy_device *phydev) { - return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, AT803X_DEBUG_TX_CLK_DLY_EN, 0); } @@ -327,9 +419,9 @@ static int at803x_set_wol(struct phy_device *phydev, { struct net_device *ndev = phydev->attached_dev; const u8 *mac; - int ret; - u32 value; - unsigned int i, offsets[] = { + int ret, irq_enabled; + unsigned int i; + const unsigned int offsets[] = { AT803X_LOC_MAC_ADDR_32_47_OFFSET, AT803X_LOC_MAC_ADDR_16_31_OFFSET, AT803X_LOC_MAC_ADDR_0_15_OFFSET, @@ -345,37 +437,63 @@ static int at803x_set_wol(struct phy_device *phydev, return -EINVAL; for (i = 0; i < 3; i++) - phy_write_mmd(phydev, AT803X_DEVICE_ADDR, offsets[i], + phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i], mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); - value = phy_read(phydev, AT803X_INTR_ENABLE); - value |= AT803X_INTR_ENABLE_WOL; - ret = phy_write(phydev, AT803X_INTR_ENABLE, value); + /* Enable WOL function */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + if (ret) + return ret; + /* Enable WOL interrupt */ + ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL); if (ret) return ret; - value = phy_read(phydev, AT803X_INTR_STATUS); } else { - value = phy_read(phydev, AT803X_INTR_ENABLE); - value &= (~AT803X_INTR_ENABLE_WOL); - ret = phy_write(phydev, AT803X_INTR_ENABLE, value); + /* Disable WoL function */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + if (ret) + return ret; + /* Disable WOL interrupt */ + ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0); if (ret) return ret; - value = phy_read(phydev, AT803X_INTR_STATUS); } - return ret; + /* Clear WOL status */ + ret 
= phy_read(phydev, AT803X_INTR_STATUS); + if (ret < 0) + return ret; + + /* Check if there are other interrupts except for WOL triggered when PHY is + * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can + * be passed up to the interrupt PIN. + */ + irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE); + if (irq_enabled < 0) + return irq_enabled; + + irq_enabled &= ~AT803X_INTR_ENABLE_WOL; + if (ret & irq_enabled && !phy_polling_mode(phydev)) + phy_trigger_machine(phydev); + + return 0; } static void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - u32 value; + int value; wol->supported = WAKE_MAGIC; wol->wolopts = 0; - value = phy_read(phydev, AT803X_INTR_ENABLE); - if (value & AT803X_INTR_ENABLE_WOL) + value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL); + if (value < 0) + return; + + if (value & AT803X_WOL_EN) wol->wolopts |= WAKE_MAGIC; } @@ -703,6 +821,15 @@ static int at803x_get_features(struct phy_device *phydev) if (err) return err; + if (phydev->drv->phy_id == QCA8081_PHY_ID) { + err = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_NG_EXTABLE); + if (err < 0) + return err; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported, + err & MDIO_PMA_NG_EXTABLE_2_5GBT); + } + if (phydev->drv->phy_id != ATH8031_PHY_ID) return 0; @@ -921,27 +1048,9 @@ static void at803x_link_change_notify(struct phy_device *phydev) } } -static int at803x_read_status(struct phy_device *phydev) +static int at803x_read_specific_status(struct phy_device *phydev) { - int ss, err, old_link = phydev->link; - - /* Update the link, but return if there was an error */ - err = genphy_update_link(phydev); - if (err) - return err; - - /* why bother the PHY if nothing can have changed */ - if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) - return 0; - - phydev->speed = SPEED_UNKNOWN; - phydev->duplex = DUPLEX_UNKNOWN; - phydev->pause = 0; - phydev->asym_pause = 0; - - err = genphy_read_lpa(phydev); - if (err < 0) - return err; + int ss; /* Read the AT8035 PHY-Specific Status register, which indicates the * speed and duplex that the PHY is actually using, irrespective of @@ -952,13 +1061,19 @@ static int at803x_read_status(struct phy_device *phydev) return ss; if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) { - int sfc; + int sfc, speed; sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL); if (sfc < 0) return sfc; - switch (ss & AT803X_SS_SPEED_MASK) { + /* qca8081 takes the different bits for speed value from at803x */ + if (phydev->drv->phy_id == QCA8081_PHY_ID) + speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss); + else + speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss); + + switch (speed) { case AT803X_SS_SPEED_10: phydev->speed = SPEED_10; break; @@ -968,6 +1083,9 @@ static int at803x_read_status(struct phy_device *phydev) case AT803X_SS_SPEED_1000: phydev->speed = SPEED_1000; break; + case QCA808X_SS_SPEED_2500: + phydev->speed = SPEED_2500; + break; } if (ss & AT803X_SS_DUPLEX) phydev->duplex = DUPLEX_FULL; @@ -992,6 +1110,35 @@ static int at803x_read_status(struct phy_device *phydev) } } + return 0; +} + +static int at803x_read_status(struct phy_device *phydev) +{ + int err, old_link = phydev->link; + + /* Update the link, but return if there was an error */ + err = genphy_update_link(phydev); + if (err) + return err; + + /* why bother the PHY if nothing can have changed */ + if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) + return 0; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; 
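(The refactored at803x_read_status() continues below.) As an aside, a minimal sketch of how the new mask/value split for the PHY-specific status register is decoded with FIELD_GET(), mirroring the switch statement in at803x_read_specific_status() above. The helper is hypothetical and not part of the patch.

static int at803x_example_decode_speed(struct phy_device *phydev, int ss)
{
	int speed;

	/* The QCA8081 reports its resolved speed in bits 9:7 of the
	 * specific status word, the older AR803x parts in bits 15:14.
	 */
	if (phydev->drv->phy_id == QCA8081_PHY_ID)
		speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss);
	else
		speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss);

	switch (speed) {
	case AT803X_SS_SPEED_10:	/* 0 */
		return SPEED_10;
	case AT803X_SS_SPEED_100:	/* 1 */
		return SPEED_100;
	case AT803X_SS_SPEED_1000:	/* 2 */
		return SPEED_1000;
	case QCA808X_SS_SPEED_2500:	/* 4, QCA8081 only */
		return SPEED_2500;
	default:
		return SPEED_UNKNOWN;
	}
}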
+ phydev->pause = 0; + phydev->asym_pause = 0; + + err = genphy_read_lpa(phydev); + if (err < 0) + return err; + + err = at803x_read_specific_status(phydev); + if (err < 0) + return err; + if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) phy_resolve_aneg_pause(phydev); @@ -1039,7 +1186,30 @@ static int at803x_config_aneg(struct phy_device *phydev) return ret; } - return genphy_config_aneg(phydev); + /* Do not restart auto-negotiation by default: keep ret set to 0 + * when calling __genphy_config_aneg later. + */ + ret = 0; + + if (phydev->drv->phy_id == QCA8081_PHY_ID) { + int phy_ctrl = 0; + + /* The MII_BMCR register also needs to be configured for force mode; + * genphy_config_aneg is also needed. + */ + if (phydev->autoneg == AUTONEG_DISABLE) + genphy_c45_pma_setup_forced(phydev); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising)) + phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G; + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl); + if (ret < 0) + return ret; + } + + return __genphy_config_aneg(phydev, ret); } static int at803x_get_downshift(struct phy_device *phydev, u8 *d) @@ -1175,8 +1345,14 @@ static int at803x_cdt_start(struct phy_device *phydev, int pair) { u16 cdt; - cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) | - AT803X_CDT_ENABLE_TEST; + /* qca8081 uses a different bit (15) to enable the CDT test */ + if (phydev->drv->phy_id == QCA8081_PHY_ID) + cdt = QCA808X_CDT_ENABLE_TEST | + QCA808X_CDT_LENGTH_UNIT | + QCA808X_CDT_INTER_CHECK_DIS; + else + cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) | + AT803X_CDT_ENABLE_TEST; return phy_write(phydev, AT803X_CDT, cdt); } @@ -1184,10 +1360,16 @@ static int at803x_cdt_start(struct phy_device *phydev, int pair) static int at803x_cdt_wait_for_completion(struct phy_device *phydev) { int val, ret; + u16 cdt_en; + + if (phydev->drv->phy_id == QCA8081_PHY_ID) + cdt_en = QCA808X_CDT_ENABLE_TEST; + else + cdt_en = AT803X_CDT_ENABLE_TEST; /* One test run takes about 25ms */ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val, - !(val & AT803X_CDT_ENABLE_TEST), + !(val & cdt_en), 30000, 100000, true); return ret < 0 ? 
ret : 0; @@ -1236,7 +1418,8 @@ static int at803x_cable_test_get_status(struct phy_device *phydev, int pair, ret; if (phydev->phy_id == ATH9331_PHY_ID || - phydev->phy_id == ATH8032_PHY_ID) + phydev->phy_id == ATH8032_PHY_ID || + phydev->phy_id == QCA9561_PHY_ID) pair_mask = 0x3; else pair_mask = 0xf; @@ -1276,7 +1459,8 @@ static int at803x_cable_test_start(struct phy_device *phydev) phy_write(phydev, MII_BMCR, BMCR_ANENABLE); phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA); if (phydev->phy_id != ATH9331_PHY_ID && - phydev->phy_id != ATH8032_PHY_ID) + phydev->phy_id != ATH8032_PHY_ID && + phydev->phy_id != QCA9561_PHY_ID) phy_write(phydev, MII_CTRL1000, 0); /* we do all the (time consuming) work later */ @@ -1292,9 +1476,9 @@ static int qca83xx_config_init(struct phy_device *phydev) switch (switch_revision) { case 1: /* For 100M waveform */ - at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea); + at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea); /* Turn on Gigabit clock */ - at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0); + at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0); break; case 2: @@ -1302,12 +1486,387 @@ static int qca83xx_config_init(struct phy_device *phydev) fallthrough; case 4: phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f); - at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860); - at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46); + at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860); + at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46); at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000); break; } + /* QCA8327 requires the DAC amplitude adjustment for 100m to be set to +6%. + * Disable on init and enable only at 100m speed, following + * the original QCA source code. + */ + if (phydev->drv->phy_id == QCA8327_A_PHY_ID || + phydev->drv->phy_id == QCA8327_B_PHY_ID) + at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, 0); + + /* Following the original QCA source code, set the port to prefer master */ + phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER); + + return 0; +} + +static void qca83xx_link_change_notify(struct phy_device *phydev) +{ + /* QCA8337 doesn't require DAC Amplitude adjustment */ + if (phydev->drv->phy_id == QCA8337_PHY_ID) + return; + + /* Set DAC Amplitude adjustment to +6% for 100m on link running */ + if (phydev->state == PHY_RUNNING) { + if (phydev->speed == SPEED_100) + at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, + QCA8327_DEBUG_MANU_CTRL_EN); + } else { + /* Reset DAC Amplitude adjustment */ + at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, + QCA8327_DEBUG_MANU_CTRL_EN, 0); + } +} + +static int qca83xx_resume(struct phy_device *phydev) +{ + int ret, val; + + /* Skip reset if not suspended */ + if (!phydev->suspended) + return 0; + + /* Reinit the port, reset values set by suspend */ + qca83xx_config_init(phydev); + + /* Reset the port on port resume */ + phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); + + /* On resume from suspend the switch executes a reset and + * restarts auto-negotiation. Wait for reset to complete. + */ + ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET), + 50000, 600000, true); + if (ret) + return ret; + + msleep(1); + + return 0; +} + +static int qca83xx_suspend(struct phy_device *phydev) +{ + u16 mask = 0; + + /* Only QCA8337 supports actual suspend. 
+ * QCA8327 causes port unreliability when PHY suspend + * is set. + */ + if (phydev->drv->phy_id == QCA8337_PHY_ID) { + genphy_suspend(phydev); + } else { + mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX); + phy_modify(phydev, MII_BMCR, mask, 0); + } + + at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN, + AT803X_DEBUG_GATE_CLK_IN1000, 0); + + at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, + AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE | + AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0); + + return 0; +} + +static int qca808x_phy_fast_retrain_config(struct phy_device *phydev) +{ + int ret; + + /* Enable fast retrain */ + ret = genphy_c45_fast_retrain(phydev, true); + if (ret) + return ret; + + phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1, + QCA808X_TOP_OPTION1_DATA); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB, + QCA808X_MSE_THRESHOLD_20DB_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB, + QCA808X_MSE_THRESHOLD_17DB_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB, + QCA808X_MSE_THRESHOLD_27DB_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB, + QCA808X_MSE_THRESHOLD_28DB_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1, + QCA808X_MMD3_DEBUG_1_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4, + QCA808X_MMD3_DEBUG_4_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5, + QCA808X_MMD3_DEBUG_5_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3, + QCA808X_MMD3_DEBUG_3_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6, + QCA808X_MMD3_DEBUG_6_VALUE); + phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2, + QCA808X_MMD3_DEBUG_2_VALUE); + + return 0; +} + +static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev) +{ + u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE); + + return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, + QCA808X_MASTER_SLAVE_SEED_CFG, + FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value)); +} + +static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable) +{ + u16 seed_enable = 0; + + if (enable) + seed_enable = QCA808X_MASTER_SLAVE_SEED_ENABLE; + + return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, + QCA808X_MASTER_SLAVE_SEED_ENABLE, seed_enable); +} + +static int qca808x_config_init(struct phy_device *phydev) +{ + int ret; + + /* Activate the ADC and VGA on 802.3az for the 1000M and 100M links */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7, + QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN); + if (ret) + return ret; + + /* Adjust the threshold on 802.3az for the 1000M link */ + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + QCA808X_PHY_MMD3_AZ_TRAINING_CTRL, QCA808X_MMD3_AZ_TRAINING_VAL); + if (ret) + return ret; + + /* Configure fast retrain for the 2500M link */ + ret = qca808x_phy_fast_retrain_config(phydev); + if (ret) + return ret; + + /* Configure a lower random seed to make the PHY link in slave mode */ + ret = qca808x_phy_ms_random_seed_set(phydev); + if (ret) + return ret; + + /* Enable seed */ + ret = qca808x_phy_ms_seed_enable(phydev, true); + if (ret) + return ret; + + /* Configure the ADC threshold as 100mV for the 10M link */ + return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD, + QCA808X_ADC_THRESHOLD_MASK, QCA808X_ADC_THRESHOLD_100MV); +} + +static int qca808x_read_status(struct phy_device 
*phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); + if (ret < 0) + return ret; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising, + ret & MDIO_AN_10GBT_STAT_LP2_5G); + + ret = genphy_read_status(phydev); + if (ret) + return ret; + + ret = at803x_read_specific_status(phydev); + if (ret < 0) + return ret; + + if (phydev->link && phydev->speed == SPEED_2500) + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + else + phydev->interface = PHY_INTERFACE_MODE_SMII; + + /* generate seed as a lower random value to make PHY linked as SLAVE easily, + * except for master/slave configuration fault detected. + * the reason for not putting this code into the function link_change_notify is + * the corner case where the link partner is also the qca8081 PHY and the seed + * value is configured as the same value, the link can't be up and no link change + * occurs. + */ + if (!phydev->link) { + if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) { + qca808x_phy_ms_seed_enable(phydev, false); + } else { + qca808x_phy_ms_random_seed_set(phydev); + qca808x_phy_ms_seed_enable(phydev, true); + } + } + + return 0; +} + +static int qca808x_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + + return qca808x_phy_ms_seed_enable(phydev, true); +} + +static bool qca808x_cdt_fault_length_valid(int cdt_code) +{ + switch (cdt_code) { + case QCA808X_CDT_STATUS_STAT_SHORT: + case QCA808X_CDT_STATUS_STAT_OPEN: + return true; + default: + return false; + } +} + +static int qca808x_cable_test_result_trans(int cdt_code) +{ + switch (cdt_code) { + case QCA808X_CDT_STATUS_STAT_NORMAL: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case QCA808X_CDT_STATUS_STAT_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case QCA808X_CDT_STATUS_STAT_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case QCA808X_CDT_STATUS_STAT_FAIL: + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair) +{ + int val; + u32 cdt_length_reg = 0; + + switch (pair) { + case ETHTOOL_A_CABLE_PAIR_A: + cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A; + break; + case ETHTOOL_A_CABLE_PAIR_B: + cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B; + break; + case ETHTOOL_A_CABLE_PAIR_C: + cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C; + break; + case ETHTOOL_A_CABLE_PAIR_D: + cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D; + break; + default: + return -EINVAL; + } + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg); + if (val < 0) + return val; + + return (FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val) * 824) / 10; +} + +static int qca808x_cable_test_start(struct phy_device *phydev) +{ + int ret; + + /* perform CDT with the following configs: + * 1. disable hibernation. + * 2. force PHY working in MDI mode. + * 3. for PHY working in 1000BaseT. + * 4. configure the threshold. 
+ */ + + ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0); + if (ret < 0) + return ret; + + ret = at803x_config_mdix(phydev, ETH_TP_MDI); + if (ret < 0) + return ret; + + /* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */ + phydev->duplex = DUPLEX_FULL; + phydev->speed = SPEED_1000; + ret = genphy_c45_pma_setup_forced(phydev); + if (ret < 0) + return ret; + + ret = genphy_setup_forced(phydev); + if (ret < 0) + return ret; + + /* configure the thresholds for open, short, pair ok test */ + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060); + + return 0; +} + +static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished) +{ + int ret, val; + int pair_a, pair_b, pair_c, pair_d; + + *finished = false; + + ret = at803x_cdt_start(phydev, 0); + if (ret) + return ret; + + ret = at803x_cdt_wait_for_completion(phydev); + if (ret) + return ret; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS); + if (val < 0) + return val; + + pair_a = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, val); + pair_b = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, val); + pair_c = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, val); + pair_d = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, val); + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + qca808x_cable_test_result_trans(pair_a)); + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B, + qca808x_cable_test_result_trans(pair_b)); + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C, + qca808x_cable_test_result_trans(pair_c)); + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D, + qca808x_cable_test_result_trans(pair_d)); + + if (qca808x_cdt_fault_length_valid(pair_a)) + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, + qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A)); + if (qca808x_cdt_fault_length_valid(pair_b)) + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B, + qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B)); + if (qca808x_cdt_fault_length_valid(pair_c)) + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C, + qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C)); + if (qca808x_cdt_fault_length_valid(pair_d)) + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D, + qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D)); + + *finished = true; + return 0; } @@ -1407,19 +1966,89 @@ static struct phy_driver at803x_driver[] = { .read_status = at803x_read_status, .soft_reset = genphy_soft_reset, .config_aneg = at803x_config_aneg, +}, { + /* Qualcomm Atheros QCA9561 */ + PHY_ID_MATCH_EXACT(QCA9561_PHY_ID), + .name = "Qualcomm Atheros QCA9561 built-in PHY", + .suspend = at803x_suspend, + .resume = at803x_resume, + .flags = PHY_POLL_CABLE_TEST, + /* PHY_BASIC_FEATURES */ + .config_intr = &at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, + .cable_test_start = at803x_cable_test_start, + .cable_test_get_status = at803x_cable_test_get_status, + .read_status = at803x_read_status, + .soft_reset = genphy_soft_reset, + .config_aneg = at803x_config_aneg, }, { /* QCA8337 */ - .phy_id = QCA8337_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, - .name = "QCA PHY 8337", + .phy_id = QCA8337_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8337 
internal PHY", + /* PHY_GBIT_FEATURES */ + .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + .soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = qca83xx_suspend, + .resume = qca83xx_resume, +}, { + /* QCA8327-A from switch QCA8327-AL1A */ + .phy_id = QCA8327_A_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-A internal PHY", /* PHY_GBIT_FEATURES */ - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, - .config_init = qca83xx_config_init, - .soft_reset = genphy_soft_reset, - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, + .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + .soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = qca83xx_suspend, + .resume = qca83xx_resume, +}, { + /* QCA8327-B from switch QCA8327-BL1A */ + .phy_id = QCA8327_B_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-B internal PHY", + /* PHY_GBIT_FEATURES */ + .link_change_notify = qca83xx_link_change_notify, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + .soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = qca83xx_suspend, + .resume = qca83xx_resume, +}, { + /* Qualcomm QCA8081 */ + PHY_ID_MATCH_EXACT(QCA8081_PHY_ID), + .name = "Qualcomm QCA8081", + .flags = PHY_POLL_CABLE_TEST, + .config_intr = at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, + .get_tunable = at803x_get_tunable, + .set_tunable = at803x_set_tunable, + .set_wol = at803x_set_wol, + .get_wol = at803x_get_wol, + .get_features = at803x_get_features, + .config_aneg = at803x_config_aneg, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_status = qca808x_read_status, + .config_init = qca808x_config_init, + .soft_reset = qca808x_soft_reset, + .cable_test_start = qca808x_cable_test_start, + .cable_test_get_status = qca808x_cable_test_get_status, }, }; module_phy_driver(at803x_driver); @@ -1430,6 +2059,11 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = { { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) }, { } }; diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 27b6a3f507ae60..75593e7d1118f1 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -415,6 +415,190 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev) return bcm7xxx_28nm_ephy_apd_enable(phydev); } +static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev) +{ + int tmp, rcalcode, rcalnewcodelp, rcalnewcode11, rcalnewcode11d2; + + /* Reset PHY */ + tmp = genphy_soft_reset(phydev); + if (tmp) + return tmp; + + /* Reset AFE and PLL */ + bcm_phy_write_exp_sel(phydev, 0x0003, 0x0006); + /* Clear reset */ + bcm_phy_write_exp_sel(phydev, 0x0003, 0x0000); + + /* 
Write PLL/AFE control register to select 54MHz crystal */ + bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0000); + bcm_phy_write_misc(phydev, 0x0031, 0x0000, 0x044a); + + /* Change Ka,Kp,Ki to pdiv=1 */ + bcm_phy_write_misc(phydev, 0x0033, 0x0002, 0x71a1); + /* Configuration override */ + bcm_phy_write_misc(phydev, 0x0033, 0x0001, 0x8000); + + /* Change PLL_NDIV and PLL_NUDGE */ + bcm_phy_write_misc(phydev, 0x0031, 0x0001, 0x2f68); + bcm_phy_write_misc(phydev, 0x0031, 0x0002, 0x0000); + + /* Reference frequency is 54Mhz, config_mode[15:14] = 3 (low + * phase) + */ + bcm_phy_write_misc(phydev, 0x0030, 0x0003, 0xc036); + + /* Initialize bypass mode */ + bcm_phy_write_misc(phydev, 0x0032, 0x0003, 0x0000); + /* Bypass code, default: VCOCLK enabled */ + bcm_phy_write_misc(phydev, 0x0033, 0x0000, 0x0002); + /* LDOs at default setting */ + bcm_phy_write_misc(phydev, 0x0030, 0x0002, 0x01c0); + /* Release PLL reset */ + bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0001); + + /* Bandgap curvature correction to correct default */ + bcm_phy_write_misc(phydev, 0x0038, 0x0000, 0x0010); + + /* Run RCAL */ + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x0038); + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003b); + udelay(2); + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003f); + mdelay(5); + + /* AFE_CAL_CONFIG_0, Vref=1000, Target=10, averaging enabled */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x1c82); + /* AFE_CAL_CONFIG_0, no reset and analog powerup */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e82); + udelay(2); + /* AFE_CAL_CONFIG_0, start calibration */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f82); + udelay(100); + /* AFE_CAL_CONFIG_0, clear start calibration, set HiBW */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e86); + udelay(2); + /* AFE_CAL_CONFIG_0, start calibration with hi BW mode set */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f86); + udelay(100); + + /* Adjust 10BT amplitude additional +7% and 100BT +2% */ + bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7ea); + /* Adjust 1G mode amplitude and 1G testmode1 */ + bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0); + + /* Read CORE_EXPA9 */ + tmp = bcm_phy_read_exp(phydev, 0x00a9); + /* CORE_EXPA9[6:1] is rcalcode[5:0] */ + rcalcode = (tmp & 0x7e) / 2; + /* Correct RCAL code + 1 is -1% rprogr, LP: +16 */ + rcalnewcodelp = rcalcode + 16; + /* Correct RCAL code + 1 is -15 rprogr, 11: +10 */ + rcalnewcode11 = rcalcode + 10; + /* Saturate if necessary */ + if (rcalnewcodelp > 0x3f) + rcalnewcodelp = 0x3f; + if (rcalnewcode11 > 0x3f) + rcalnewcode11 = 0x3f; + /* REXT=1 BYP=1 RCAL_st1<5:0>=new rcal code */ + tmp = 0x00f8 + rcalnewcodelp * 256; + /* Program into AFE_CAL_CONFIG_2 */ + bcm_phy_write_misc(phydev, 0x0039, 0x0003, tmp); + /* AFE_BIAS_CONFIG_0 10BT bias code (Bias: E4) */ + bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7e4); + /* invert adc clock output and 'adc refp ldo current To correct + * default + */ + bcm_phy_write_misc(phydev, 0x003b, 0x0000, 0x8002); + /* 100BT stair case, high BW, 1G stair case, alternate encode */ + bcm_phy_write_misc(phydev, 0x003c, 0x0003, 0xf882); + /* 1000BT DAC transition method per Erol, bits[32], DAC Shuffle + * sequence 1 + 10BT imp adjust bits + */ + bcm_phy_write_misc(phydev, 0x003d, 0x0000, 0x3201); + /* Non-overlap fix */ + bcm_phy_write_misc(phydev, 0x003a, 0x0002, 0x0c00); + + /* pwdb override (rxconfig<5>) to turn on RX LDO indpendent of + * pwdb controls from DSP_TAP10 + */ + bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0020); + + /* Remove references to channel 2 
and 3 */ + bcm_phy_write_misc(phydev, 0x003b, 0x0002, 0x0000); + bcm_phy_write_misc(phydev, 0x003b, 0x0003, 0x0000); + + /* Set cal_bypassb bit rxconfig<43> */ + bcm_phy_write_misc(phydev, 0x003a, 0x0003, 0x0800); + udelay(2); + + /* Revert pwdb_override (rxconfig<5>) to 0 so that the RX pwr + * is controlled by DSP. + */ + bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0000); + + /* Drop LSB */ + rcalnewcode11d2 = (rcalnewcode11 & 0xfffe) / 2; + tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0001); + /* Clear bits [11:5] */ + tmp &= ~0xfe0; + /* set txcfg_ch0<5>=1 (enable + set local rcal) */ + tmp |= 0x0020 | (rcalnewcode11d2 * 64); + bcm_phy_write_misc(phydev, 0x003d, 0x0001, tmp); + bcm_phy_write_misc(phydev, 0x003d, 0x0002, tmp); + + tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0000); + /* set txcfg<45:44>=11 (enable Rextra + invert fullscaledetect) + */ + tmp &= ~0x3000; + tmp |= 0x3000; + bcm_phy_write_misc(phydev, 0x003d, 0x0000, tmp); + + return 0; +} + +static int bcm7xxx_16nm_ephy_config_init(struct phy_device *phydev) +{ + int ret, val; + + ret = bcm7xxx_16nm_ephy_afe_config(phydev); + if (ret) + return ret; + + ret = bcm_phy_set_eee(phydev, true); + if (ret) + return ret; + + ret = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3); + if (ret < 0) + return ret; + + val = ret; + + /* Auto power down of DLL enabled, + * TXC/RXC disabled during auto power down. + */ + val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS; + val |= BIT(8); + + ret = bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val); + if (ret < 0) + return ret; + + return bcm_phy_enable_apd(phydev, true); +} + +static int bcm7xxx_16nm_ephy_resume(struct phy_device *phydev) +{ + int ret; + + /* Re-apply workarounds coming out suspend/resume */ + ret = bcm7xxx_16nm_ephy_config_init(phydev); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + #define MII_BCM7XXX_REG_INVALID 0xff static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum) @@ -716,9 +900,25 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev) .resume = bcm7xxx_config_init, \ } +#define BCM7XXX_16NM_EPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + /* PHY_BASIC_FEATURES */ \ + .flags = PHY_IS_INTERNAL, \ + .probe = bcm7xxx_28nm_probe, \ + .remove = bcm7xxx_28nm_remove, \ + .config_init = bcm7xxx_16nm_ephy_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .resume = bcm7xxx_16nm_ephy_resume, \ +} + static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"), BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"), + BCM7XXX_16NM_EPHY(PHY_ID_BCM72165, "Broadcom BCM72165"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"), @@ -736,11 +936,13 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), + BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"), }; static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM72113, 0xfffffff0 }, { PHY_ID_BCM72116, 0xfffffff0, }, + { PHY_ID_BCM72165, 0xfffffff0, }, { PHY_ID_BCM7250, 0xfffffff0, }, { PHY_ID_BCM7255, 0xfffffff0, }, { PHY_ID_BCM7260, 0xfffffff0, }, @@ -757,6 +959,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7439, 0xfffffff0, }, { PHY_ID_BCM7435, 0xfffffff0, }, { 
PHY_ID_BCM7445, 0xfffffff0, }, + { PHY_ID_BCM7712, 0xfffffff0, }, { } }; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 83aea5c5cd03c2..bb5104ae46104c 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -392,10 +392,50 @@ static int bcm54xx_config_init(struct phy_device *phydev) return 0; } +static int bcm54xx_iddq_set(struct phy_device *phydev, bool enable) +{ + int ret = 0; + + if (!(phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND)) + return ret; + + ret = bcm_phy_read_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL); + if (ret < 0) + goto out; + + if (enable) + ret |= BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP; + else + ret &= ~(BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP); + + ret = bcm_phy_write_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL, ret); +out: + return ret; +} + +static int bcm54xx_suspend(struct phy_device *phydev) +{ + int ret; + + /* We cannot use a read/modify/write here otherwise the PHY gets into + * a bad state where its LEDs keep flashing, thus defeating the purpose + * of low power mode. + */ + ret = phy_write(phydev, MII_BMCR, BMCR_PDOWN); + if (ret < 0) + return ret; + + return bcm54xx_iddq_set(phydev, true); +} + static int bcm54xx_resume(struct phy_device *phydev) { int ret; + ret = bcm54xx_iddq_set(phydev, false); + if (ret < 0) + return ret; + /* Writes to register other than BMCR would be ignored * unless we clear the PDOWN bit first */ @@ -408,6 +448,15 @@ static int bcm54xx_resume(struct phy_device *phydev) */ fsleep(40); + /* Issue a soft reset after clearing the power down bit + * and before doing any other configuration. + */ + if (phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND) { + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + } + return bcm54xx_config_init(phydev); } @@ -702,6 +751,36 @@ static void bcm54xx_get_stats(struct phy_device *phydev, bcm_phy_get_stats(phydev, priv->stats, stats, data); } +static void bcm54xx_link_change_notify(struct phy_device *phydev) +{ + u16 mask = MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE | + MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE; + int ret; + + if (phydev->state != PHY_RUNNING) + return; + + /* Don't change the DAC wake settings if auto power down + * is not requested. + */ + if (!(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) + return; + + ret = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP08); + if (ret < 0) + return; + + /* Enable/disable 10BaseT auto and forced early DAC wake depending + * on the negotiated speed, those settings should only be done + * for 10Mbits/sec. 
+ */ + if (phydev->speed == SPEED_10) + ret |= mask; + else + ret &= ~mask; + bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret); +} + static struct phy_driver broadcom_drivers[] = { { .phy_id = PHY_ID_BCM5411, @@ -715,6 +794,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, @@ -727,6 +807,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54210E, .phy_id_mask = 0xfffffff0, @@ -739,6 +820,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, @@ -751,6 +835,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54612E, .phy_id_mask = 0xfffffff0, @@ -763,6 +848,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, @@ -774,6 +860,7 @@ static struct phy_driver broadcom_drivers[] = { .handle_interrupt = bcm_phy_handle_interrupt, .read_status = bcm54616s_read_status, .probe = bcm54616s_probe, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, @@ -788,6 +875,7 @@ static struct phy_driver broadcom_drivers[] = { .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, @@ -801,6 +889,7 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54810, .phy_id_mask = 0xfffffff0, @@ -814,8 +903,9 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, - .suspend = genphy_suspend, + .suspend = bcm54xx_suspend, .resume = bcm54xx_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54811, .phy_id_mask = 0xfffffff0, @@ -829,8 +919,9 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, - .suspend = genphy_suspend, + .suspend = bcm54xx_suspend, .resume = bcm54xx_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, @@ -843,6 +934,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = 
bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, @@ -855,6 +947,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, @@ -867,6 +962,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, @@ -879,6 +977,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, @@ -905,6 +1004,7 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM53125, .phy_id_mask = 0xfffffff0, @@ -918,6 +1018,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM89610, .phy_id_mask = 0xfffffff0, @@ -930,6 +1031,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, } }; module_phy_driver(broadcom_drivers); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 6bbc81ad295fb2..8561f2d4443bfc 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -182,7 +182,7 @@ static int dp83867_set_wol(struct phy_device *phydev, { struct net_device *ndev = phydev->attached_dev; u16 val_rxcfg, val_micr; - u8 *mac; + const u8 *mac; val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG); val_micr = phy_read(phydev, MII_DP83867_MICR); @@ -193,7 +193,7 @@ static int dp83867_set_wol(struct phy_device *phydev, val_micr |= MII_DP83867_MICR_WOL_INT_EN; if (wol->wolopts & WAKE_MAGIC) { - mac = (u8 *)ndev->dev_addr; + mac = (const u8 *)ndev->dev_addr; if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -619,6 +619,25 @@ static int dp83867_of_init(struct phy_device *phydev) #else static int dp83867_of_init(struct phy_device *phydev) { + struct dp83867_private *dp83867 = phydev->priv; + u16 delay; + + /* For non-OF device, the RX and TX ID values are either strapped + * or take from default value. So, we init RX & TX ID values here + * so that the RGMIIDCTL is configured correctly later in + * dp83867_config_init(); + */ + delay = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL); + dp83867->rx_id_delay = delay & DP83867_RGMII_RX_CLK_DELAY_MAX; + dp83867->tx_id_delay = (delay >> DP83867_RGMII_TX_CLK_DELAY_SHIFT) & + DP83867_RGMII_TX_CLK_DELAY_MAX; + + /* Per datasheet, IO impedance is default to 50-ohm, so we set the + * same here or else the default '0' means highest IO impedance + * which is wrong. 
+ */ + dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2; + return 0; } #endif /* CONFIG_OF_MDIO */ diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 755220c6451fb0..7113925606f7c1 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -246,7 +246,7 @@ static int dp83869_set_wol(struct phy_device *phydev, { struct net_device *ndev = phydev->attached_dev; int val_rxcfg, val_micr; - u8 *mac; + const u8 *mac; int ret; val_rxcfg = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG); @@ -264,7 +264,7 @@ static int dp83869_set_wol(struct phy_device *phydev, if (wol->wolopts & WAKE_MAGIC || wol->wolopts & WAKE_MAGICSECURE) { - mac = (u8 *)ndev->dev_addr; + mac = (const u8 *)ndev->dev_addr; if (!is_valid_ether_addr(mac)) return -EINVAL; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bd310e8d5e43d1..b6fea119fe137e 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -22,6 +22,7 @@ * If both the fiber and copper ports are connected, the first to gain * link takes priority and the other port is completely locked out. */ +#include #include #include #include @@ -33,6 +34,8 @@ #define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe #define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa) +#define MV_VERSION(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d)) + enum { MV_PMA_FW_VER0 = 0xc011, MV_PMA_FW_VER1 = 0xc012, @@ -62,6 +65,15 @@ enum { MV_PCS_CSCR1_MDIX_MDIX = 0x0020, MV_PCS_CSCR1_MDIX_AUTO = 0x0060, + MV_PCS_DSC1 = 0x8003, + MV_PCS_DSC1_ENABLE = BIT(9), + MV_PCS_DSC1_10GBT = 0x01c0, + MV_PCS_DSC1_1GBR = 0x0038, + MV_PCS_DSC1_100BTX = 0x0007, + MV_PCS_DSC2 = 0x8004, + MV_PCS_DSC2_2P5G = 0xf000, + MV_PCS_DSC2_5G = 0x0f00, + MV_PCS_CSSR1 = 0x8008, MV_PCS_CSSR1_SPD1_MASK = 0xc000, MV_PCS_CSSR1_SPD1_SPD2 = 0xc000, @@ -125,6 +137,7 @@ enum { }; struct mv3310_chip { + bool (*has_downshift)(struct phy_device *phydev); void (*init_supported_interfaces)(unsigned long *mask); int (*get_mactype)(struct phy_device *phydev); int (*init_interface)(struct phy_device *phydev, int mactype); @@ -138,6 +151,7 @@ struct mv3310_priv { DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX); u32 firmware_ver; + bool has_downshift; bool rate_match; phy_interface_t const_interface; @@ -330,6 +344,71 @@ static int mv3310_reset(struct phy_device *phydev, u32 unit) 5000, 100000, true); } +static int mv3310_get_downshift(struct phy_device *phydev, u8 *ds) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + int val; + + if (!priv->has_downshift) + return -EOPNOTSUPP; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1); + if (val < 0) + return val; + + if (val & MV_PCS_DSC1_ENABLE) + /* assume that all fields are the same */ + *ds = 1 + FIELD_GET(MV_PCS_DSC1_10GBT, (u16)val); + else + *ds = DOWNSHIFT_DEV_DISABLE; + + return 0; +} + +static int mv3310_set_downshift(struct phy_device *phydev, u8 ds) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + u16 val; + int err; + + if (!priv->has_downshift) + return -EOPNOTSUPP; + + if (ds == DOWNSHIFT_DEV_DISABLE) + return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1, + MV_PCS_DSC1_ENABLE); + + /* DOWNSHIFT_DEV_DEFAULT_COUNT is confusing. It looks like it should + * set the default settings for the PHY. However, it is used for + * "ethtool --set-phy-tunable ethN downshift on". The intention is + * to enable downshift at a default number of retries. 
The default + * settings for 88x3310 are for two retries with downshift disabled. + * So let's use two retries with downshift enabled. + */ + if (ds == DOWNSHIFT_DEV_DEFAULT_COUNT) + ds = 2; + + if (ds > 8) + return -E2BIG; + + ds -= 1; + val = FIELD_PREP(MV_PCS_DSC2_2P5G, ds); + val |= FIELD_PREP(MV_PCS_DSC2_5G, ds); + err = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC2, + MV_PCS_DSC2_2P5G | MV_PCS_DSC2_5G, val); + if (err < 0) + return err; + + val = MV_PCS_DSC1_ENABLE; + val |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds); + val |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds); + val |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds); + + return phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1, + MV_PCS_DSC1_ENABLE | MV_PCS_DSC1_10GBT | + MV_PCS_DSC1_1GBR | MV_PCS_DSC1_100BTX, val); +} + static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd) { int val; @@ -448,6 +527,9 @@ static int mv3310_probe(struct phy_device *phydev) priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255, (priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255); + if (chip->has_downshift) + priv->has_downshift = chip->has_downshift(phydev); + /* Powering down the port when not in use saves about 600mW */ ret = mv3310_power_down(phydev); if (ret) @@ -616,7 +698,16 @@ static int mv3310_config_init(struct phy_device *phydev) } /* Enable EDPD mode - saving 600mW */ - return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); + err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); + if (err) + return err; + + /* Allow downshift */ + err = mv3310_set_downshift(phydev, DOWNSHIFT_DEV_DEFAULT_COUNT); + if (err && err != -EOPNOTSUPP) + return err; + + return 0; } static int mv3310_get_features(struct phy_device *phydev) @@ -886,6 +977,8 @@ static int mv3310_get_tunable(struct phy_device *phydev, struct ethtool_tunable *tuna, void *data) { switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return mv3310_get_downshift(phydev, data); case ETHTOOL_PHY_EDPD: return mv3310_get_edpd(phydev, data); default: @@ -897,6 +990,8 @@ static int mv3310_set_tunable(struct phy_device *phydev, struct ethtool_tunable *tuna, const void *data) { switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return mv3310_set_downshift(phydev, *(u8 *)data); case ETHTOOL_PHY_EDPD: return mv3310_set_edpd(phydev, *(u16 *)data); default: @@ -904,6 +999,14 @@ static int mv3310_set_tunable(struct phy_device *phydev, } } +static bool mv3310_has_downshift(struct phy_device *phydev) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + + /* Fails to downshift with firmware older than v0.3.5.0 */ + return priv->firmware_ver >= MV_VERSION(0,3,5,0); +} + static void mv3310_init_supported_interfaces(unsigned long *mask) { __set_bit(PHY_INTERFACE_MODE_SGMII, mask); @@ -943,6 +1046,7 @@ static void mv2111_init_supported_interfaces(unsigned long *mask) } static const struct mv3310_chip mv3310_type = { + .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3310_init_supported_interfaces, .get_mactype = mv3310_get_mactype, .init_interface = mv3310_init_interface, @@ -953,6 +1057,7 @@ static const struct mv3310_chip mv3310_type = { }; static const struct mv3310_chip mv3340_type = { + .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3340_init_supported_interfaces, .get_mactype = mv3310_get_mactype, .init_interface = mv3340_init_interface, diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6865d9319197f7..c204067f189026 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -936,6 
+936,28 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) } EXPORT_SYMBOL_GPL(mdiobus_modify); +/** + * mdiobus_modify_changed - Convenience function for modifying a given mdio + * device register and returning if it changed + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + */ +int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, + u16 mask, u16 set) +{ + int err; + + mutex_lock(&bus->mdio_lock); + err = __mdiobus_modify_changed(bus, addr, regnum, mask, set); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL_GPL(mdiobus_modify_changed); + /** * mdio_bus_match - determine if given MDIO driver supports the given * MDIO device @@ -949,8 +971,14 @@ EXPORT_SYMBOL_GPL(mdiobus_modify); */ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { + struct mdio_driver *mdiodrv = to_mdio_driver(drv); struct mdio_device *mdio = to_mdio_device(dev); + /* Both the driver and device must type-match */ + if (!(mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) != + !(mdio->flags & MDIO_DEVICE_FLAG_PHY)) + return 0; + if (of_driver_match_device(dev, drv)) return 1; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5c928f827173c3..44a24b99c89438 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -863,9 +863,9 @@ static int ksz9031_config_init(struct phy_device *phydev) MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, tx_data_skews, 4, &update); - if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII) + if (update && !phy_interface_is_rgmii(phydev)) phydev_warn(phydev, - "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n"); + "*-skew-ps values should be used only with RGMII PHY modes\n"); /* Silicon Errata Sheet (DS80000691D or DS80000692D): * When the device links in the 1000BASE-T slave mode only, @@ -1003,6 +1003,26 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev) txcdll_val); } +/* Silicon Errata DS80000693B + * + * When LEDs are configured in Individual Mode, LED1 is ON in a no-link + * condition. Workaround is to set register 0x1e, bit 9, this way LED1 behaves + * according to the datasheet (off if there is no link). 
+ */ +static int ksz9131_led_errata(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, 2, 0); + if (reg < 0) + return reg; + + if (!(reg & BIT(4))) + return 0; + + return phy_set_bits(phydev, 0x1e, BIT(9)); +} + static int ksz9131_config_init(struct phy_device *phydev) { struct device_node *of_node; @@ -1058,6 +1078,10 @@ static int ksz9131_config_init(struct phy_device *phydev) if (ret < 0) return ret; + ret = ksz9131_led_errata(phydev); + if (ret < 0) + return ret; + return 0; } @@ -1537,6 +1561,65 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev, return ret; } +#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16 +#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17 +#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000 + +#define LAN8804_ALIGN_SWAP 0x4a +#define LAN8804_ALIGN_TX_A_B_SWAP 0x1 +#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0) +#define LAN8814_CLOCK_MANAGEMENT 0xd +#define LAN8814_LINK_QUALITY 0x8e + +static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr) +{ + u32 data; + + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA); + + return data; +} + +static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr, + u16 val) +{ + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + + val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val); + if (val) { + phydev_err(phydev, "Error: phy_write has returned error %d\n", + val); + return val; + } + return 0; +} + +static int lan8804_config_init(struct phy_device *phydev) +{ + int val; + + /* MDI-X setting for swap A,B transmit */ + val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP); + val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK; + val |= LAN8804_ALIGN_TX_A_B_SWAP; + lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val); + + /* Make sure that the PHY will not stop generating the clock when the + * link partner goes down + */ + lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e); + lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY); + + return 0; +} + static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, @@ -1593,8 +1676,9 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, + /* No suspend/resume callbacks because of errata DS80000700A, + * receiver error following software power down. 
+ */ }, { .phy_id = PHY_ID_KSZ8041RNLI, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -1718,6 +1802,20 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = kszphy_resume, +}, { + .phy_id = PHY_ID_LAN8804, + .phy_id_mask = MICREL_PHY_ID_MASK, + .name = "Microchip LAN966X Gigabit PHY", + .config_init = lan8804_config_init, + .driver_data = &ksz9021_type, + .probe = kszphy_probe, + .soft_reset = genphy_soft_reset, + .read_status = ksz9031_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ9131, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -1794,6 +1892,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8814, MICREL_PHY_ID_MASK }, + { PHY_ID_LAN8804, MICREL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 4dc00bd5a8d22f..a4de3d2081c5c4 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -6,6 +6,8 @@ #include #include #include +#include +#include /* External Register Control Register */ #define LAN87XX_EXT_REG_CTL (0x14) @@ -35,8 +37,14 @@ #define PHYACC_ATTR_BANK_MISC 1 #define PHYACC_ATTR_BANK_PCS 2 #define PHYACC_ATTR_BANK_AFE 3 +#define PHYACC_ATTR_BANK_DSP 4 #define PHYACC_ATTR_BANK_MAX 7 +/* measurement defines */ +#define LAN87XX_CABLE_TEST_OK 0 +#define LAN87XX_CABLE_TEST_OPEN 1 +#define LAN87XX_CABLE_TEST_SAME_SHORT 2 + #define DRIVER_AUTHOR "Nisar Sayed " #define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" @@ -226,11 +234,240 @@ static int lan87xx_config_init(struct phy_device *phydev) return rc < 0 ? rc : 0; } +static int microchip_cable_test_start_common(struct phy_device *phydev) +{ + int bmcr, bmsr, ret; + + /* If auto-negotiation is enabled, but not complete, the cable + * test never completes. So disable auto-neg. 
+ */ + bmcr = phy_read(phydev, MII_BMCR); + if (bmcr < 0) + return bmcr; + + bmsr = phy_read(phydev, MII_BMSR); + + if (bmsr < 0) + return bmsr; + + if (bmcr & BMCR_ANENABLE) { + ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0); + if (ret < 0) + return ret; + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + } + + /* If the link is up, allow it some time to go down */ + if (bmsr & BMSR_LSTATUS) + msleep(1500); + + return 0; +} + +static int lan87xx_cable_test_start(struct phy_device *phydev) +{ + static const struct access_ereg_val cable_test[] = { + /* min wait */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 93, + 0, 0}, + /* max wait */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 94, + 10, 0}, + /* pulse cycle */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 95, + 90, 0}, + /* cable diag thresh */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 92, + 60, 0}, + /* max gain */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 79, + 31, 0}, + /* clock align for each iteration */ + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_DSP, 55, + 0, 0x0038}, + /* max cycle wait config */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 94, + 70, 0}, + /* start cable diag*/ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 90, + 1, 0}, + }; + int rc, i; + + rc = microchip_cable_test_start_common(phydev); + if (rc < 0) + return rc; + + /* start cable diag */ + /* check if part is alive - if not, return diagnostic error */ + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, + 0x00, 0); + if (rc < 0) + return rc; + + /* master/slave specific configs */ + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, + 0x0A, 0); + if (rc < 0) + return rc; + + if ((rc & 0x4000) != 0x4000) { + /* DUT is Slave */ + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_AFE, + 0x0E, 0x5, 0x7); + if (rc < 0) + return rc; + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, + 0x1A, 0x8, 0x8); + if (rc < 0) + return rc; + } else { + /* DUT is Master */ + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, + 0x10, 0x8, 0x40); + if (rc < 0) + return rc; + } + + for (i = 0; i < ARRAY_SIZE(cable_test); i++) { + if (cable_test[i].mode == PHYACC_ATTR_MODE_MODIFY) { + rc = access_ereg_modify_changed(phydev, + cable_test[i].bank, + cable_test[i].offset, + cable_test[i].val, + cable_test[i].mask); + /* wait 50ms */ + msleep(50); + } else { + rc = access_ereg(phydev, cable_test[i].mode, + cable_test[i].bank, + cable_test[i].offset, + cable_test[i].val); + } + if (rc < 0) + return rc; + } + /* cable diag started */ + + return 0; +} + +static int lan87xx_cable_test_report_trans(u32 result) +{ + switch (result) { + case LAN87XX_CABLE_TEST_OK: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case LAN87XX_CABLE_TEST_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case LAN87XX_CABLE_TEST_SAME_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + default: + /* DIAGNOSTIC_ERROR */ + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +static int lan87xx_cable_test_report(struct phy_device *phydev) +{ + int pos_peak_cycle = 0, pos_peak_in_phases = 0, pos_peak_phase = 0; + int neg_peak_cycle = 0, neg_peak_in_phases = 0, neg_peak_phase = 0; + int noise_margin = 20, time_margin = 89, jitter_var = 30; + int min_time_diff = 96, max_time_diff = 96 + time_margin; + bool fault = false, check_a = false, check_b = false; + int gain_idx = 0, pos_peak = 0, neg_peak = 0; + int pos_peak_time = 0, neg_peak_time = 0; + int pos_peak_in_phases_hybrid = 0; + int detect = 
-1; + + gain_idx = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, 151, 0); + /* read non-hybrid results */ + pos_peak = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, 153, 0); + neg_peak = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, 154, 0); + pos_peak_time = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, 156, 0); + neg_peak_time = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, 157, 0); + + pos_peak_cycle = (pos_peak_time >> 7) & 0x7F; + /* calculate non-hybrid values */ + pos_peak_phase = pos_peak_time & 0x7F; + pos_peak_in_phases = (pos_peak_cycle * 96) + pos_peak_phase; + neg_peak_cycle = (neg_peak_time >> 7) & 0x7F; + neg_peak_phase = neg_peak_time & 0x7F; + neg_peak_in_phases = (neg_peak_cycle * 96) + neg_peak_phase; + + /* process values */ + check_a = + ((pos_peak_in_phases - neg_peak_in_phases) >= min_time_diff) && + ((pos_peak_in_phases - neg_peak_in_phases) < max_time_diff) && + pos_peak_in_phases_hybrid < pos_peak_in_phases && + (pos_peak_in_phases_hybrid < (neg_peak_in_phases + jitter_var)); + check_b = + ((neg_peak_in_phases - pos_peak_in_phases) >= min_time_diff) && + ((neg_peak_in_phases - pos_peak_in_phases) < max_time_diff) && + pos_peak_in_phases_hybrid < neg_peak_in_phases && + (pos_peak_in_phases_hybrid < (pos_peak_in_phases + jitter_var)); + + if (pos_peak_in_phases > neg_peak_in_phases && check_a) + detect = 2; + else if ((neg_peak_in_phases > pos_peak_in_phases) && check_b) + detect = 1; + + if (pos_peak > noise_margin && neg_peak > noise_margin && + gain_idx >= 0) { + if (detect == 1 || detect == 2) + fault = true; + } + + if (!fault) + detect = 0; + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + lan87xx_cable_test_report_trans(detect)); + + return 0; +} + +static int lan87xx_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int rc = 0; + + *finished = false; + + /* check if cable diag was finished */ + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP, + 90, 0); + if (rc < 0) + return rc; + + if ((rc & 2) == 2) { + /* stop cable diag*/ + rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, + PHYACC_ATTR_BANK_DSP, + 90, 0); + if (rc < 0) + return rc; + + *finished = true; + + return lan87xx_cable_test_report(phydev); + } + + return 0; +} + static struct phy_driver microchip_t1_phy_driver[] = { { .phy_id = 0x0007c150, .phy_id_mask = 0xfffffff0, .name = "Microchip LAN87xx T1", + .flags = PHY_POLL_CABLE_TEST, .features = PHY_BASIC_T1_FEATURES, @@ -241,6 +478,8 @@ static struct phy_driver microchip_t1_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, + .cable_test_start = lan87xx_cable_test_start, + .cable_test_get_status = lan87xx_cable_test_get_status, } }; diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 6e32da28e138f5..ebfeeb3c67c1d9 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -273,12 +273,12 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count) static int vsc85xx_wol_set(struct phy_device *phydev, struct ethtool_wolinfo *wol) { + const u8 *mac_addr = phydev->attached_dev->dev_addr; int rc; u16 reg_val; u8 i; u16 pwd[3] = {0, 0, 0}; struct ethtool_wolinfo *wol_conf = wol; - u8 *mac_addr = phydev->attached_dev->dev_addr; mutex_lock(&phydev->lock); rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 
c617dbcad6ea73..db709d30bf8403 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -611,6 +611,41 @@ int genphy_c45_loopback(struct phy_device *phydev, bool enable) } EXPORT_SYMBOL_GPL(genphy_c45_loopback); +/** + * genphy_c45_fast_retrain - configure fast retrain registers + * @phydev: target phy_device struct + * @enable: enable fast retrain or not + * + * Description: If fast-retrain is enabled, we configure PHY as + * advertising fast retrain capable and THP Bypass Request, then + * enable fast retrain. If it is not enabled, we configure fast + * retrain disabled. + */ +int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable) +{ + int ret; + + if (!enable) + return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FSRT_CSR, + MDIO_PMA_10GBR_FSRT_ENABLE); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported)) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADVFSRT2_5G); + if (ret) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_CTRL2, + MDIO_AN_THP_BP2_5GT); + if (ret) + return ret; + } + + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FSRT_CSR, + MDIO_PMA_10GBR_FSRT_ENABLE); +} +EXPORT_SYMBOL_GPL(genphy_c45_fast_retrain); + struct phy_driver genphy_c45_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 4f9990b47a377d..74d8e1dc125f80 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3149,6 +3149,16 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) return -EINVAL; } + /* PHYLIB device drivers must not match using a DT compatible table + * as this bypasses our checks that the mdiodev that is being matched + * is backed by a struct phy_device. If such a case happens, we will + * make out-of-bounds accesses and lockup in phydev->lock. + */ + if (WARN(new_driver->mdiodrv.driver.of_match_table, + "%s: driver must not provide a DT match table\n", + new_driver->name)) + return -EINVAL; + new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; new_driver->mdiodrv.driver.name = new_driver->name; new_driver->mdiodrv.driver.bus = &mdio_bus_type; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0a0abe8e4be0b6..3ad7397b81198a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -132,6 +132,17 @@ void phylink_set_port_modes(unsigned long *mask) } EXPORT_SYMBOL_GPL(phylink_set_port_modes); +void phylink_set_10g_modes(unsigned long *mask) +{ + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); +} +EXPORT_SYMBOL_GPL(phylink_set_10g_modes); + static int phylink_is_empty_linkmode(const unsigned long *linkmode) { __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, }; @@ -155,9 +166,45 @@ static const char *phylink_an_mode_str(unsigned int mode) return mode < ARRAY_SIZE(modestr) ? 
modestr[mode] : "unknown"; } +static int phylink_validate_any(struct phylink *pl, unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, }; + __ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, }; + __ETHTOOL_DECLARE_LINK_MODE_MASK(s); + struct phylink_link_state t; + int intf; + + for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) { + if (test_bit(intf, pl->config->supported_interfaces)) { + linkmode_copy(s, supported); + + t = *state; + t.interface = intf; + pl->mac_ops->validate(pl->config, s, &t); + linkmode_or(all_s, all_s, s); + linkmode_or(all_adv, all_adv, t.advertising); + } + } + + linkmode_copy(supported, all_s); + linkmode_copy(state->advertising, all_adv); + + return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; +} + static int phylink_validate(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + if (!phy_interface_empty(pl->config->supported_interfaces)) { + if (state->interface == PHY_INTERFACE_MODE_NA) + return phylink_validate_any(pl, supported, state); + + if (!test_bit(state->interface, + pl->config->supported_interfaces)) + return -EINVAL; + } + pl->mac_ops->validate(pl->config, supported, state); return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; @@ -540,9 +587,15 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, linkmode_zero(state->lp_advertising); state->interface = pl->link_config.interface; state->an_enabled = pl->link_config.an_enabled; - state->speed = SPEED_UNKNOWN; - state->duplex = DUPLEX_UNKNOWN; - state->pause = MLO_PAUSE_NONE; + if (state->an_enabled) { + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = MLO_PAUSE_NONE; + } else { + state->speed = pl->link_config.speed; + state->duplex = pl->link_config.duplex; + state->pause = pl->link_config.pause; + } state->an_complete = 0; state->link = 1; @@ -1333,7 +1386,10 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) * but one would hope all packets have been sent. This * also means phylink_resolve() will do nothing. */ - netif_carrier_off(pl->netdev); + if (pl->netdev) + netif_carrier_off(pl->netdev); + else + pl->old_link_state = false; /* We do not call mac_link_down() here as we want the * link to remain up to receive the WoL packets. @@ -1598,20 +1654,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising, config.an_enabled); - /* Validate without changing the current supported mask. */ - linkmode_copy(support, pl->supported); - if (phylink_validate(pl, support, &config)) - return -EINVAL; - - /* If autonegotiation is enabled, we must have an advertisement */ - if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) - return -EINVAL; - /* If this link is with an SFP, ensure that changes to advertised modes * also cause the associated interface to be selected such that the * link can be configured correctly. */ - if (pl->sfp_port && pl->sfp_bus) { + if (pl->sfp_bus) { config.interface = sfp_select_interface(pl->sfp_bus, config.advertising); if (config.interface == PHY_INTERFACE_MODE_NA) { @@ -1631,8 +1678,17 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, __ETHTOOL_LINK_MODE_MASK_NBITS, support); return -EINVAL; } + } else { + /* Validate without changing the current supported mask. 
*/ + linkmode_copy(support, pl->supported); + if (phylink_validate(pl, support, &config)) + return -EINVAL; } + /* If autonegotiation is enabled, we must have an advertisement */ + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) + return -EINVAL; + mutex_lock(&pl->state_mutex); pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; @@ -1724,7 +1780,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, return -EOPNOTSUPP; if (!phylink_test(pl->supported, Asym_Pause) && - !pause->autoneg && pause->rx_pause != pause->tx_pause) + pause->rx_pause != pause->tx_pause) return -EINVAL; pause_state = 0; @@ -2522,12 +2578,10 @@ EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, struct phylink_link_state *state) { - struct mii_bus *bus = pcs->bus; - int addr = pcs->addr; int bmsr, lpa; - bmsr = mdiobus_read(bus, addr, MII_BMSR); - lpa = mdiobus_read(bus, addr, MII_LPA); + bmsr = mdiodev_read(pcs, MII_BMSR); + lpa = mdiodev_read(pcs, MII_LPA); if (bmsr < 0 || lpa < 0) { state->link = false; return; @@ -2535,7 +2589,10 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, state->link = !!(bmsr & BMSR_LSTATUS); state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); - if (!state->link) + /* If there is no link or autonegotiation is disabled, the LP advertisement + * data is not meaningful, so don't go any further. + */ + if (!state->link || !state->an_enabled) return; switch (state->interface) { @@ -2580,9 +2637,6 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, phy_interface_t interface, const unsigned long *advertising) { - struct mii_bus *bus = pcs->bus; - int addr = pcs->addr; - int val, ret; u16 adv; switch (interface) { @@ -2596,32 +2650,10 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, advertising)) adv |= ADVERTISE_1000XPSE_ASYM; - val = mdiobus_read(bus, addr, MII_ADVERTISE); - if (val < 0) - return val; - - if (val == adv) - return 0; - - ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv); - if (ret < 0) - return ret; - - return 1; + return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, adv); case PHY_INTERFACE_MODE_SGMII: - val = mdiobus_read(bus, addr, MII_ADVERTISE); - if (val < 0) - return val; - - if (val == 0x0001) - return 0; - - ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001); - if (ret < 0) - return ret; - - return 1; + return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, 0x0001); default: /* Nothing to do for other modes */ @@ -2658,9 +2690,13 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, changed = ret > 0; /* Ensure ISOLATE bit is disabled */ - bmcr = mode == MLO_AN_INBAND ? 
BMCR_ANENABLE : 0; - ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR, - BMCR_ANENABLE | BMCR_ISOLATE, bmcr); + if (mode == MLO_AN_INBAND && + linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) + bmcr = BMCR_ANENABLE; + else + bmcr = 0; + + ret = mdiodev_modify(pcs, MII_BMCR, BMCR_ANENABLE | BMCR_ISOLATE, bmcr); if (ret < 0) return ret; @@ -2681,14 +2717,12 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config); */ void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs) { - struct mii_bus *bus = pcs->bus; - int val, addr = pcs->addr; + int val = mdiodev_read(pcs, MII_BMCR); - val = mdiobus_read(bus, addr, MII_BMCR); if (val >= 0) { val |= BMCR_ANRESTART; - mdiobus_write(bus, addr, MII_BMCR, val); + mdiodev_write(pcs, MII_BMCR, val); } } EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 11be60333fa82a..a5671ab896b385 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -1023,6 +1023,14 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001cc942), + .name = "RTL8365MB-VC Gigabit Ethernet", + /* Interrupt handling analogous to RTL8366RB */ + .config_intr = genphy_no_config_intr, + .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 7362f8c3271c97..0c6c0d1843bce3 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -373,7 +373,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if (bus->sfp_quirk) bus->sfp_quirk->modes(id, modes); - bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_or(support, support, modes); phylink_set(support, Autoneg); phylink_set(support, Pause); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 82d6094017113f..0d491b4d666756 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -284,12 +284,16 @@ static const struct net_device_ops plip_netdev_ops = { static void plip_init_netdev(struct net_device *dev) { + static const u8 addr_init[ETH_ALEN] = { + 0xfc, 0xfc, 0xfc, + 0xfc, 0xfc, 0xfc, + }; struct net_local *nl = netdev_priv(dev); /* Then, override parts of it */ dev->tx_queue_len = 10; dev->flags = IFF_POINTOPOINT|IFF_NOARP; - memset(dev->dev_addr, 0xfc, ETH_ALEN); + eth_hw_addr_set(dev, addr_init); dev->netdev_ops = &plip_netdev_ops; dev->header_ops = &plip_header_ops; @@ -1109,7 +1113,7 @@ plip_open(struct net_device *dev) plip_init_dev(). 
*/ const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list); if (ifa != NULL) { - memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); + dev_addr_mod(dev, 2, &ifa->ifa_local, 4); } } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index fb52cd175b45d3..1180a0e2445fbf 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1161,7 +1161,7 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) if (!ifname_is_set) { while (1) { snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret); - if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name)) + if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name)) break; unit_put(&pn->units_idr, ret); ret = unit_get(&pn->units_idr, ppp, ret + 1); diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 2056d6ad04b57a..1a95f3beb784db 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -482,6 +482,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) { int rc = 0; struct rionet_private *rnet; + u8 addr[ETH_ALEN]; u16 device_id; const size_t rionet_active_bytes = sizeof(void *) * RIO_MAX_ROUTE_ENTRIES(mport->sys_size); @@ -501,12 +502,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) /* Set the default MAC address */ device_id = rio_local_get_device_id(mport); - ndev->dev_addr[0] = 0x00; - ndev->dev_addr[1] = 0x01; - ndev->dev_addr[2] = 0x00; - ndev->dev_addr[3] = 0x01; - ndev->dev_addr[4] = device_id >> 8; - ndev->dev_addr[5] = device_id & 0xff; + addr[0] = 0x00; + addr[1] = 0x01; + addr[2] = 0x00; + addr[3] = 0x01; + addr[4] = device_id >> 8; + addr[5] = device_id & 0xff; + eth_hw_addr_set(ndev, addr); ndev->netdev_ops = &rionet_netdev_ops; ndev->mtu = RIONET_MAX_MTU; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index f01c9db01b165e..57a6d598467b20 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -149,6 +149,7 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) unsigned short ioaddr[2], irq; unsigned int serial_number; int error = -ENODEV; + u8 addr[ETH_ALEN]; if (pnp_device_attach(pdev) < 0) return -ENODEV; @@ -203,10 +204,13 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) dev->netdev_ops = &sb1000_netdev_ops; /* hardware address is 0:0:serial_number */ - dev->dev_addr[2] = serial_number >> 24 & 0xff; - dev->dev_addr[3] = serial_number >> 16 & 0xff; - dev->dev_addr[4] = serial_number >> 8 & 0xff; - dev->dev_addr[5] = serial_number >> 0 & 0xff; + addr[0] = 0; + addr[1] = 0; + addr[2] = serial_number >> 24 & 0xff; + addr[3] = serial_number >> 16 & 0xff; + addr[4] = serial_number >> 8 & 0xff; + addr[5] = serial_number >> 0 & 0xff; + eth_hw_addr_set(dev, addr); pnp_set_drvdata(pdev, dev); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index dd7917cab2b124..8b2adc56b92a8e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1790,7 +1790,7 @@ static int team_set_mac_address(struct net_device *dev, void *p) if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev_addr_set(dev, addr->sa_data); mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) if (team->ops.port_change_dev_addr) diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 9a6a8353e19215..ff5d0e98a08816 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1202,17 +1202,19 @@ static void 
tbnet_generate_mac(struct net_device *dev) { const struct tbnet *net = netdev_priv(dev); const struct tb_xdomain *xd = net->xd; + u8 addr[ETH_ALEN]; u8 phy_port; u32 hash; phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); /* Unicast and locally administered MAC */ - dev->dev_addr[0] = phy_port << 4 | 0x02; + addr[0] = phy_port << 4 | 0x02; hash = jhash2((u32 *)xd->local_uuid, 4, 0); - memcpy(dev->dev_addr + 1, &hash, sizeof(hash)); + memcpy(addr + 1, &hash, sizeof(hash)); hash = jhash2((u32 *)xd->local_uuid, 4, hash); - dev->dev_addr[5] = hash & 0xff; + addr[5] = hash & 0xff; + eth_hw_addr_set(dev, addr); } static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 73b97f4cc1ec11..ea06d10e1c21a6 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -119,7 +119,7 @@ static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, } static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value, - u16 index, u16 size, void *data) + u16 index, u16 size, const void *data) { int ret; @@ -714,7 +714,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf) if (ret) goto out; - ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr); + eth_hw_addr_set(dev->net, dev->net->perm_addr); /* Set Rx urb size */ dev->rx_urb_size = URB_SIZE; diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 38cda590895cc0..42ba4af6809072 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -791,7 +791,7 @@ int asix_set_mac_address(struct net_device *net, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(net, addr->sa_data); /* We use the 20 byte dev->data * for our 6 byte mac buffer diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 30821f6a6d7ac6..4514d35ef4c48f 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -59,7 +59,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb) static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr) { if (is_valid_ether_addr(addr)) { - memcpy(dev->net->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev->net, addr); } else { netdev_info(dev->net, "invalid hw address, using random\n"); eth_hw_addr_random(dev->net); diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index d9777d9a7c5dfe..3777c7e2e6fc00 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -176,7 +176,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = -EIO; goto free; } - memcpy(dev->net->dev_addr, buf, ETH_ALEN); + eth_hw_addr_set(dev->net, buf); dev->net->netdev_ops = &ax88172a_netdev_ops; dev->net->ethtool_ops = &ax88172a_ethtool_ops; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index f25448a0887074..ea8aa8c33241c6 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -209,7 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, } static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, - u16 size, void *data, int in_pm) + u16 size, const void *data, int in_pm) { int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); @@ -272,7 +272,7 @@ static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, } static int 
ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, - u16 index, u16 size, void *data) + u16 index, u16 size, const void *data) { int ret; @@ -313,7 +313,7 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, } static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, - u16 size, void *data) + u16 size, const void *data) { int ret; @@ -463,7 +463,7 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm) u16 tmp16; u8 tmp8; int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *); - int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *); + int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *); if (!in_pm) { fnr = ax88179_read_cmd; @@ -1015,7 +1015,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(net, addr->sa_data); /* Set the MAC address */ ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, @@ -1310,7 +1310,7 @@ static void ax88179_get_mac_addr(struct usbnet *dev) } if (is_valid_ether_addr(mac)) { - memcpy(dev->net->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(dev->net, mac); } else { netdev_info(dev->net, "invalid MAC address, using random\n"); eth_hw_addr_random(dev->net); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 97ba67042d126f..e7fe9c0f63a9f0 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -615,7 +615,7 @@ static void catc_stats_timer(struct timer_list *t) * Receive modes. Broadcast, Multicast, Promisc. */ -static void catc_multicast(unsigned char *addr, u8 *multicast) +static void catc_multicast(const unsigned char *addr, u8 *multicast) { u32 crc; @@ -770,17 +770,23 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; - int pktsz, ret; + u8 *macbuf; + int pktsz, ret = -ENOMEM; + + macbuf = kmalloc(ETH_ALEN, GFP_KERNEL); + if (!macbuf) + goto error; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); - return -EIO; + ret = -EIO; + goto fail_mem;; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) - return -ENOMEM; + goto fail_mem; catc = netdev_priv(netdev); @@ -870,7 +876,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id dev_dbg(dev, "Getting MAC from SEEROM.\n"); - catc_get_mac(catc, netdev->dev_addr); + catc_get_mac(catc, macbuf); + eth_hw_addr_set(netdev, macbuf); dev_dbg(dev, "Setting MAC into registers.\n"); @@ -899,7 +906,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id } else { dev_dbg(dev, "Performing reset\n"); catc_reset(catc); - catc_get_mac(catc, netdev->dev_addr); + catc_get_mac(catc, macbuf); + eth_hw_addr_set(netdev, macbuf); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; @@ -917,6 +925,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id if (ret) goto fail_clear_intfdata; + kfree(macbuf); return 0; fail_clear_intfdata: @@ -927,6 +936,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); +fail_mem: + kfree(macbuf); +error: return ret; } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index e1da9102a540dd..ad5121e9cf5d6f 100644 --- 
a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -275,6 +275,8 @@ static const struct net_device_ops usbpn_ops = { static void usbpn_setup(struct net_device *dev) { + const u8 addr = PN_MEDIA_USB; + dev->features = 0; dev->netdev_ops = &usbpn_ops; dev->header_ops = &phonet_header_ops; @@ -284,8 +286,8 @@ static void usbpn_setup(struct net_device *dev) dev->min_mtu = PHONET_MIN_MTU; dev->max_mtu = PHONET_MAX_MTU; dev->hard_header_len = 1; - dev->dev_addr[0] = PN_MEDIA_USB; dev->addr_len = 1; + dev_addr_set(dev, &addr); dev->tx_queue_len = 3; dev->needs_free_netdev = true; diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c index d7f3b70d547754..f69d9b902da04a 100644 --- a/drivers/net/usb/ch9200.c +++ b/drivers/net/usb/ch9200.c @@ -336,6 +336,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) { int retval = 0; unsigned char data[2]; + u8 addr[ETH_ALEN]; retval = usbnet_get_endpoints(dev, intf); if (retval) @@ -383,7 +384,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02, CONTROL_TIMEOUT_MS); - retval = get_mac_address(dev, dev->net->dev_addr); + retval = get_mac_address(dev, addr); + eth_hw_addr_set(dev->net, addr); return retval; } diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index c4568a491dc4de..79a47e2fd4378b 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -146,6 +146,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) u8 link[3]; int timeout = 50; struct cx82310_priv *priv; + u8 addr[ETH_ALEN]; /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 @@ -202,12 +203,12 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) goto err; /* get the MAC address */ - ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, - dev->net->dev_addr, ETH_ALEN); + ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN); if (ret) { netdev_err(dev->net, "unable to read MAC address: %d\n", ret); goto err; } + eth_hw_addr_set(dev->net, addr); /* start (does not seem to have any effect?) */ ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 907f98b1eefee0..48d7d278631e91 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -93,7 +93,8 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) value, reg, NULL, 0); } -static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) +static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, + const void *data) { usbnet_write_cmd_async(dev, DM_WRITE_REGS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, @@ -331,7 +332,7 @@ static int dm9601_set_mac_address(struct net_device *net, void *p) return -EINVAL; } - memcpy(net->dev_addr, addr->sa_data, net->addr_len); + eth_hw_addr_set(net, addr->sa_data); __dm9601_set_mac_address(dev); return 0; @@ -391,7 +392,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) * Overwrite the auto-generated address only with good ones. 
*/ if (is_valid_ether_addr(mac)) - memcpy(dev->net->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(dev->net, mac); else { printk(KERN_WARNING "dm9601: No valid MAC address in EEPROM, using %pM\n", diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 06e2181e58108b..cd33955df0b65f 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -303,7 +303,7 @@ static int ipheth_get_macaddr(struct ipheth_device *dev) __func__, retval); retval = -EINVAL; } else { - memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN); + eth_hw_addr_set(net, dev->ctrl_buf); retval = 0; } diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index fc5895f85cee2f..9f2b70ef39aa1b 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -149,7 +149,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf) if (status) return status; - memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN); + eth_hw_addr_set(dev->net, ethernet_addr); return status; } diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 144c686b43330d..9b2bc1993ece20 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1044,8 +1044,7 @@ static int kaweth_probe( goto err_all_but_rxbuf; memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr)); - memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr, - sizeof(kaweth->configuration.hw_addr)); + eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr); netdev->netdev_ops = &kaweth_netdev_ops; netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 63cd72c5f580cc..f20376c1ef3fb1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1817,7 +1817,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); - ether_addr_copy(dev->net->dev_addr, addr); + eth_hw_addr_set(dev->net, addr); } /* MDIO read and write wrappers for phylib */ @@ -2416,7 +2416,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); addr_lo = netdev->dev_addr[0] | netdev->dev_addr[1] << 8 | diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 66866bef25df74..326cc4e749d804 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -132,7 +132,8 @@ static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr) return 0; } -static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr) +static int mcs7830_hif_set_mac_address(struct usbnet *dev, + const unsigned char *addr) { int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr); @@ -159,7 +160,7 @@ static int mcs7830_set_mac_address(struct net_device *netdev, void *p) return ret; /* it worked --> adopt it on netdev side */ - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); return 0; } @@ -472,17 +473,19 @@ static const struct net_device_ops mcs7830_netdev_ops = { static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) { struct net_device *net = dev->net; + u8 addr[ETH_ALEN]; int ret; int retry; /* Initial startup: Gather MAC address setting from EEPROM */ ret = -EINVAL; for (retry = 0; retry < 5 && ret; retry++) - ret = mcs7830_hif_get_mac_address(dev, net->dev_addr); + ret = 
mcs7830_hif_get_mac_address(dev, addr); if (ret) { dev_warn(&dev->udev->dev, "Cannot read MAC address\n"); goto out; } + eth_hw_addr_set(net, addr); mcs7830_data_set_multicast(net); diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6a92a3fef75e5f..c4cd40b090fd05 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -357,7 +357,7 @@ static void set_ethernet_addr(pegasus_t *pegasus) goto err; } - memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id)); + eth_hw_addr_set(pegasus->net, node_id); return; err: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 33ada2c59952ef..86b814e99224c5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -835,8 +835,11 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) /* make MAC addr easily distinguishable from an IP header */ if (possibly_iphdr(dev->net->dev_addr)) { - dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */ - dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ + u8 addr = dev->net->dev_addr[0]; + + addr |= 0x02; /* set local assignment bit */ + addr &= 0xbf; /* clear "IP" bit */ + dev_addr_mod(dev->net, 0, &addr, 1); } dev->net->netdev_ops = &qmi_wwan_netdev_ops; dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f329e39100a7dd..4a02f33f0643d7 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1571,7 +1571,7 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p, mutex_lock(&tp->control); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); @@ -1719,7 +1719,7 @@ static int set_ethernet_addr(struct r8152 *tp, bool in_resume) return ret; if (tp->version == RTL_VER_01) - ether_addr_copy(dev->dev_addr, sa.sa_data); + eth_hw_addr_set(dev, sa.sa_data); else ret = __rtl8152_set_mac_address(dev, &sa, in_resume); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 85a8b96e39a653..4a84f90e377c40 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -421,7 +421,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) if (bp[0] & 0x02) eth_hw_addr_random(net); else - ether_addr_copy(net->dev_addr, bp); + eth_hw_addr_set(net, bp); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 4a1b0e0fc3a3a1..3d2bf2acca942d 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -262,7 +262,7 @@ static void set_ethernet_addr(rtl8150_t *dev) ret = get_registers(dev, IDR, sizeof(node_id), node_id); if (!ret) { - ether_addr_copy(dev->netdev->dev_addr, node_id); + eth_hw_addr_set(dev->netdev, node_id); } else { eth_hw_addr_random(dev->netdev); netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", @@ -278,7 +278,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr); /* Set the IDR registers. 
*/ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 55025202dc4fa6..bb4cbe8fc846bd 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -669,6 +669,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; + u8 mod[2]; dev_dbg(&dev->udev->dev, "%s", __func__); @@ -698,8 +699,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->netdev_ops = &sierra_net_device_ops; /* change MAC addr to include, ifacenum, and to be unique */ - dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); - dev->net->dev_addr[ETH_ALEN-1] = ifacenum; + mod[0] = atomic_inc_return(&iface_counter); + mod[1] = ifacenum; + dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2); /* prepare shutdown message template */ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 76f7af1613139b..95de452ff4dad5 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -757,9 +757,10 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) static void smsc75xx_init_mac_address(struct usbnet *dev) { + u8 addr[ETH_ALEN]; + /* maybe the boot loader passed the MAC address in devicetree */ - if (!eth_platform_get_mac_address(&dev->udev->dev, - dev->net->dev_addr)) { + if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* device tree values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n"); @@ -768,8 +769,8 @@ static void smsc75xx_init_mac_address(struct usbnet *dev) } /* try reading mac address from EEPROM */ - if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - dev->net->dev_addr) == 0) { + if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) { + eth_hw_addr_set(dev->net, addr); if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 26b1bd8e845b43..20fe4cd8f7840c 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -755,9 +755,10 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) static void smsc95xx_init_mac_address(struct usbnet *dev) { + u8 addr[ETH_ALEN]; + /* maybe the boot loader passed the MAC address in devicetree */ - if (!eth_platform_get_mac_address(&dev->udev->dev, - dev->net->dev_addr)) { + if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* device tree values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n"); @@ -766,8 +767,8 @@ static void smsc95xx_init_mac_address(struct usbnet *dev) } /* try reading mac address from EEPROM */ - if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - dev->net->dev_addr) == 0) { + if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) { + eth_hw_addr_set(dev->net, addr); if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n"); diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c 
index 6516a37893e275..b658510cc9a42e 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -56,7 +56,8 @@ static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value) value, reg, NULL, 0); } -static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) +static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, + const void *data) { usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, length); @@ -296,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p) return -EINVAL; } - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); return 0; @@ -319,6 +320,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) { struct net_device *netdev; struct mii_if_info *mii; + u8 addr[ETH_ALEN]; int ret; ret = usbnet_get_endpoints(dev, intf); @@ -349,11 +351,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) * EEPROM automatically to PAR. In case there is no EEPROM externally, * a default MAC address is stored in PAR for making chip work properly. */ - if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) { + if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) { netdev_err(netdev, "Error reading MAC address\n"); ret = -ENODEV; goto out; } + eth_hw_addr_set(netdev, addr); /* power up and reset phy */ sr_write_reg(dev, SR_PRR, PRR_PHY_RST); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 576401c8b1beee..f5e19f3ef6cdd2 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -503,7 +503,7 @@ static int sr_set_mac_address(struct net_device *net, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(net, addr->sa_data); /* We use the 20 byte dev->data * for our 6 byte mac buffer @@ -731,6 +731,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) struct sr_data *data = (struct sr_data *)&dev->data; u16 led01_mux, led23_mux; int ret, embd_phy; + u8 addr[ETH_ALEN]; u32 phyid; u16 rx_ctl; @@ -754,12 +755,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) } /* Get the MAC address */ - ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, - dev->net->dev_addr); + ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr); if (ret < 0) { netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } + eth_hw_addr_set(dev->net, addr); netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr); /* Initialize MII structure */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index a33d7fb82a00bc..9a6450f796dcb3 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -165,12 +165,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints); int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) { + u8 addr[ETH_ALEN]; int tmp = -1, ret; unsigned char buf [13]; ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf); if (ret == 12) - tmp = hex2bin(dev->net->dev_addr, buf, 6); + tmp = hex2bin(addr, buf, 6); if (tmp < 0) { dev_dbg(&dev->udev->dev, "bad MAC string %d fetch, %d\n", iMACAddress, tmp); @@ -178,6 +179,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) ret = -EINVAL; return ret; } + eth_hw_addr_set(dev->net, addr); return 0; } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); @@ -1726,7 +1728,7 @@ usbnet_probe (struct usb_interface *udev, const 
struct usb_device_id *prod) dev->net = net; strscpy(net->name, "usb%d", sizeof(net->name)); - memcpy (net->dev_addr, node_id, sizeof node_id); + eth_hw_addr_set(net, node_id); /* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4ad25a8b0870c6..cc79343cd220ac 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -80,6 +80,7 @@ struct virtnet_sq_stats { u64 xdp_tx; u64 xdp_tx_drops; u64 kicks; + u64 tx_timeouts; }; struct virtnet_rq_stats { @@ -103,6 +104,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, { "kicks", VIRTNET_SQ_STAT(kicks) }, + { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { @@ -732,6 +734,12 @@ static struct sk_buff *receive_small(struct net_device *dev, dev->stats.rx_length_errors++; goto err_len; } + + if (likely(!vi->xdp_enabled)) { + xdp_prog = NULL; + goto skip_xdp; + } + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { @@ -814,6 +822,7 @@ static struct sk_buff *receive_small(struct net_device *dev, } rcu_read_unlock(); +skip_xdp: skb = build_skb(buf, buflen); if (!skb) { put_page(page); @@ -895,6 +904,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, dev->stats.rx_length_errors++; goto err_skb; } + + if (likely(!vi->xdp_enabled)) { + xdp_prog = NULL; + goto skip_xdp; + } + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { @@ -1022,6 +1037,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); +skip_xdp: head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, metasize, headroom); curr_skb = head_skb; @@ -1860,7 +1876,7 @@ static void virtnet_stats(struct net_device *dev, int i; for (i = 0; i < vi->max_queue_pairs; i++) { - u64 tpackets, tbytes, rpackets, rbytes, rdrops; + u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; struct receive_queue *rq = &vi->rq[i]; struct send_queue *sq = &vi->sq[i]; @@ -1868,6 +1884,7 @@ static void virtnet_stats(struct net_device *dev, start = u64_stats_fetch_begin_irq(&sq->stats.syncp); tpackets = sq->stats.packets; tbytes = sq->stats.bytes; + terrors = sq->stats.tx_timeouts; } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); do { @@ -1882,6 +1899,7 @@ static void virtnet_stats(struct net_device *dev, tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; tot->rx_dropped += rdrops; + tot->tx_errors += terrors; } tot->tx_dropped = dev->stats.tx_dropped; @@ -2534,8 +2552,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { - netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", - curr_qp + xdp_qp, vi->max_queue_pairs); + netdev_warn_once(dev, "XDP request %i queues but max is %i. 
XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", + curr_qp + xdp_qp, vi->max_queue_pairs); xdp_qp = 0; } @@ -2663,6 +2681,21 @@ static int virtnet_set_features(struct net_device *dev, return 0; } +static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct virtnet_info *priv = netdev_priv(dev); + struct send_queue *sq = &priv->sq[txqueue]; + struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.tx_timeouts++; + u64_stats_update_end(&sq->stats.syncp); + + netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", + txqueue, sq->name, sq->vq->index, sq->vq->name, + jiffies_to_usecs(jiffies - txq->trans_start)); +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -2678,6 +2711,7 @@ static const struct net_device_ops virtnet_netdev = { .ndo_features_check = passthru_features_check, .ndo_get_phys_port_name = virtnet_get_phys_port_name, .ndo_set_features = virtnet_set_features, + .ndo_tx_timeout = virtnet_tx_timeout, }; static void virtnet_config_changed_work(struct work_struct *work) @@ -3143,12 +3177,16 @@ static int virtnet_probe(struct virtio_device *vdev) dev->max_mtu = MAX_MTU; /* Configuration may specify what MAC to use. Otherwise random. */ - if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) + if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { + u8 addr[ETH_ALEN]; + virtio_cread_bytes(vdev, offsetof(struct virtio_net_config, mac), - dev->dev_addr, dev->addr_len); - else + addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); + } else { eth_hw_addr_random(dev); + } /* Set up our device-specific information */ vi = netdev_priv(dev); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 8799854bacb295..14fae317bc70f7 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); static int enable_mq = 1; static void -vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac); /* * Enable/Disable the given intr @@ -2806,7 +2806,7 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) static void -vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac) { u32 tmp; @@ -2824,7 +2824,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p) struct sockaddr *addr = p; struct vmxnet3_adapter *adapter = netdev_priv(netdev); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + dev_addr_set(netdev, addr->sa_data); vmxnet3_write_mac_addr(adapter, addr->sa_data); return 0; @@ -3638,7 +3638,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, #endif vmxnet3_read_mac_addr(adapter, mac); - memcpy(netdev->dev_addr, mac, netdev->addr_len); + dev_addr_set(netdev, mac); netdev->netdev_ops = &vmxnet3_netdev_ops; vmxnet3_set_ethtool_ops(netdev); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 5dd8360b21a02c..16f3a2057b9067 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1134,9 +1134,8 @@ static int vmxnet3_set_coalesce(struct net_device *netdev, } if (ec->use_adaptive_rx_coalesce != 0) { - if ((ec->rx_coalesce_usecs != 0) || - (ec->tx_max_coalesced_frames != 0) || - (ec->rx_max_coalesced_frames != 0)) { + if 
(ec->tx_max_coalesced_frames != 0 || + ec->rx_max_coalesced_frames != 0) { return -EINVAL; } memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); @@ -1146,11 +1145,6 @@ static int vmxnet3_set_coalesce(struct net_device *netdev, if ((ec->tx_max_coalesced_frames != 0) || (ec->rx_max_coalesced_frames != 0)) { - if ((ec->rx_coalesce_usecs != 0) || - (ec->use_adaptive_rx_coalesce != 0)) { - return -EINVAL; - } - if ((ec->tx_max_coalesced_frames > VMXNET3_COAL_STATIC_MAX_DEPTH) || (ec->rx_max_coalesced_frames > diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 662e261173539f..ccf677015d5bc7 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -35,6 +35,7 @@ #include #include #include +#include #define DRV_NAME "vrf" #define DRV_VERSION "1.1" @@ -424,12 +425,26 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; } +static void vrf_nf_set_untracked(struct sk_buff *skb) +{ + if (skb_get_nfct(skb) == 0) + nf_ct_set(skb, NULL, IP_CT_UNTRACKED); +} + +static void vrf_nf_reset_ct(struct sk_buff *skb) +{ + if (skb_get_nfct(skb) == IP_CT_UNTRACKED) + nf_reset_ct(skb); +} + #if IS_ENABLED(CONFIG_IPV6) static int vrf_ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) { int err; + vrf_nf_reset_ct(skb); + err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); @@ -508,6 +523,8 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk, { int err; + vrf_nf_reset_ct(skb); + err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); if (likely(err == 1)) @@ -626,8 +643,7 @@ static void vrf_finish_direct(struct sk_buff *skb) skb_pull(skb, ETH_HLEN); } - /* reset skb device */ - nf_reset_ct(skb); + vrf_nf_reset_ct(skb); } #if IS_ENABLED(CONFIG_IPV6) @@ -641,7 +657,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, struct neighbour *neigh; int ret; - nf_reset_ct(skb); + vrf_nf_reset_ct(skb); skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; @@ -752,6 +768,8 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; + vrf_nf_set_untracked(skb); + err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); @@ -858,7 +876,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s struct neighbour *neigh; bool is_v6gw = false; - nf_reset_ct(skb); + vrf_nf_reset_ct(skb); /* Be paranoid, rather than too clever. 
*/ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { @@ -980,6 +998,8 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; + vrf_nf_set_untracked(skb); + err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip_out_direct_finish); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 7637edce443ef9..81e72bc1891fa4 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1093,7 +1093,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); } else { - *(__be16 *)dev->dev_addr = htons(dlci); + __be16 addr = htons(dlci); + + dev_addr_set(dev, (u8 *)&addr); dlci_to_q922(dev->broadcast, dlci); } dev->netdev_ops = &pvc_ops; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 89d31adc3809b1..282192b82404bb 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -301,7 +301,7 @@ static int lapbeth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 49cc4b7ed5163a..0e9bad33fac855 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1772,9 +1772,8 @@ static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect SMCWUSBT-G2 */ - AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU */ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ - AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2f9be182fbfbb1..5935e0973d146a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2690,9 +2690,16 @@ static int ath10k_core_copy_target_iram(struct ath10k *ar) int i, ret; u32 len, remaining_len; - hw_mem = ath10k_coredump_get_mem_layout(ar); + /* the copy target iram feature must also work when + * ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so use + * _ath10k_coredump_get_mem_layout() to accomplish that + */ + hw_mem = _ath10k_coredump_get_mem_layout(ar); if (!hw_mem) - return -ENOMEM; + /* if CONFIG_DEV_COREDUMP is disabled we get NULL, then + * just silently disable the feature by doing nothing + */ + return 0; for (i = 0; i < hw_mem->region_table.size; i++) { tmp = &hw_mem->region_table.regions[i]; @@ -3224,7 +3231,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ath10k_debug_print_board_info(ar); } - device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr)); + device_get_mac_address(ar->dev, ar->mac_addr); ret = ath10k_core_init_firmware_features(ar); if (ret) { @@ -3520,13 +3527,10 @@ EXPORT_SYMBOL(ath10k_core_create); void ath10k_core_destroy(struct ath10k *ar) { - flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); - flush_workqueue(ar->workqueue_aux); destroy_workqueue(ar->workqueue_aux); - flush_workqueue(ar->workqueue_tx_complete);
destroy_workqueue(ar->workqueue_tx_complete); ath10k_debug_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 7eb72290a925ce..55e7e11d06d948 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1447,11 +1447,17 @@ static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar) { - int i; - if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) return NULL; + return _ath10k_coredump_get_mem_layout(ar); +} +EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); + +const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + int i; + if (WARN_ON(ar->target_version == 0)) return NULL; @@ -1464,7 +1470,6 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k return NULL; } -EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) { diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h index 42404e246e0e97..240d7051508889 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.h +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -176,6 +176,7 @@ int ath10k_coredump_register(struct ath10k *ar); void ath10k_coredump_unregister(struct ath10k *ar); void ath10k_coredump_destroy(struct ath10k *ar); +const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar); const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar); #else /* CONFIG_DEV_COREDUMP */ @@ -214,6 +215,12 @@ ath10k_coredump_get_mem_layout(struct ath10k *ar) return NULL; } +static inline const struct ath10k_hw_mem_layout * +_ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + return NULL; +} + #endif /* CONFIG_DEV_COREDUMP */ #endif /* _COREDUMP_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index c272b290fa73d1..1f73fbfee0c062 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -993,8 +993,12 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) ath10k_mac_vif_beacon_free(arvif); if (arvif->beacon_buf) { - dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, - arvif->beacon_buf, arvif->beacon_paddr); + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + kfree(arvif->beacon_buf); + else + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, + arvif->beacon_paddr); arvif->beacon_buf = NULL; } } @@ -1048,7 +1052,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.channel.min_power = 0; arg.channel.max_power = channel->max_power * 2; arg.channel.max_reg_power = channel->max_reg_power * 2; - arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + arg.channel.max_antenna_gain = channel->max_antenna_gain; reinit_completion(&ar->vdev_setup_done); reinit_completion(&ar->vdev_delete_done); @@ -1494,7 +1498,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, arg.channel.min_power = 0; arg.channel.max_power = chandef->chan->max_power * 2; arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; - arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { arg.ssid = arvif->u.ap.ssid; @@ -3422,7 +3426,7 @@ static int 
ath10k_update_channel_list(struct ath10k *ar) ch->min_power = 0; ch->max_power = channel->max_power * 2; ch->max_reg_power = channel->max_reg_power * 2; - ch->max_antenna_gain = channel->max_antenna_gain * 2; + ch->max_antenna_gain = channel->max_antenna_gain; ch->reg_class_id = 0; /* FIXME */ /* FIXME: why use only legacy modes, why not any @@ -5576,10 +5580,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_AP) { - arvif->beacon_buf = dma_alloc_coherent(ar->dev, - IEEE80211_MAX_FRAME_LEN, - &arvif->beacon_paddr, - GFP_ATOMIC); + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { + arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN, + GFP_KERNEL); + + /* Using a kernel pointer in place of a dma_addr_t + * token can lead to undefined behavior if that + * makes it into cache management functions. Use a + * known-invalid address token instead, which + * avoids the warning and makes it easier to catch + * bugs if it does end up getting used. + */ + arvif->beacon_paddr = DMA_MAPPING_ERROR; + } else { + arvif->beacon_buf = + dma_alloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); + } if (!arvif->beacon_buf) { ret = -ENOMEM; ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", @@ -5794,8 +5813,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, err: if (arvif->beacon_buf) { - dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, - arvif->beacon_buf, arvif->beacon_paddr); + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) + kfree(arvif->beacon_buf); + else + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, + arvif->beacon_paddr); arvif->beacon_buf = NULL; } diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 07e478f9a808c1..80fcb917fe4e11 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -864,7 +864,8 @@ static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi) ath10k_qmi_remove_msa_permission(qmi); ath10k_core_free_board_files(ar); - if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags)) + if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) && + !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags)) ath10k_snoc_fw_crashed_dump(ar); ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND); diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index b746052737e0bb..63e1c2d783c5fe 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1363,8 +1363,11 @@ static void ath10k_rx_indication_async_work(struct work_struct *work) ep->ep_ops.ep_rx_complete(ar, skb); } - if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) + if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { + local_bh_disable(); napi_schedule(&ar->napi); + local_bh_enable(); + } } static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state) @@ -2647,7 +2650,6 @@ static void ath10k_sdio_remove(struct sdio_func *func) ath10k_core_destroy(ar); - flush_workqueue(ar_sdio->workqueue); destroy_workqueue(ar_sdio->workqueue); } diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index ea00fbb1560153..9513ab696fff13 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -12,6 +12,7 @@ #include #include #include 
+#include #include #include @@ -1477,6 +1478,74 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar) mutex_unlock(&ar->dump_mutex); } +static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb); + struct ath10k *ar = ar_snoc->ar; + struct qcom_ssr_notify_data *notify_data = data; + + switch (action) { + case QCOM_SSR_BEFORE_POWERUP: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n"); + clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + break; + + case QCOM_SSR_AFTER_POWERUP: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n"); + break; + + case QCOM_SSR_BEFORE_SHUTDOWN: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n", + notify_data->crashed ? "crashed" : "stopping"); + if (!notify_data->crashed) + set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + else + clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags); + break; + + case QCOM_SSR_AFTER_SHUTDOWN: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n"); + break; + + default: + ath10k_err(ar, "received unrecognized event %lu\n", action); + break; + } + + return NOTIFY_OK; +} + +static int ath10k_modem_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + void *notifier; + int ret; + + ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify; + + notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb); + if (IS_ERR(notifier)) { + ret = PTR_ERR(notifier); + ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret); + return ret; + } + + ar_snoc->notifier = notifier; + + return 0; +} + +static void ath10k_modem_deinit(struct ath10k *ar) +{ + int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb); + if (ret) + ath10k_err(ar, "error %d unregistering notifier\n", ret); +} + static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size) { struct device *dev = ar->dev; @@ -1740,10 +1809,17 @@ static int ath10k_snoc_probe(struct platform_device *pdev) goto err_fw_deinit; } + ret = ath10k_modem_init(ar); + if (ret) + goto err_qmi_deinit; + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); return 0; +err_qmi_deinit: + ath10k_qmi_deinit(ar); + err_fw_deinit: ath10k_fw_deinit(ar); @@ -1771,6 +1847,7 @@ static int ath10k_snoc_free_resources(struct ath10k *ar) ath10k_fw_deinit(ar); ath10k_snoc_free_irq(ar); ath10k_snoc_release_resource(ar); + ath10k_modem_deinit(ar); ath10k_qmi_deinit(ar); ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 5095d1893681bb..d4bce170769608 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -6,6 +6,8 @@ #ifndef _SNOC_H_ #define _SNOC_H_ +#include + #include "hw.h" #include "ce.h" #include "qmi.h" @@ -45,6 +47,7 @@ struct ath10k_snoc_ce_irq { enum ath10k_snoc_flags { ATH10K_SNOC_FLAG_REGISTERED, ATH10K_SNOC_FLAG_UNREGISTERING, + ATH10K_SNOC_FLAG_MODEM_STOPPED, ATH10K_SNOC_FLAG_RECOVERY, ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, }; @@ -75,6 +78,8 @@ struct ath10k_snoc { struct clk_bulk_data *clks; size_t num_clks; struct ath10k_qmi *qmi; + struct notifier_block nb; + void *notifier; unsigned long flags; bool xo_cal_supported; u32 xo_cal_data; diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 19b9c27e30e209..3d98f19c6ec8aa 100644 --- 
a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -525,7 +525,7 @@ static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, - size, 2 * HZ); + size, 2000); if (ret < 0) { ath10k_warn(ar, "Failed to read usb control message: %d\n", @@ -853,6 +853,11 @@ static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } + + /* Ignore broken descriptors. */ + if (usb_endpoint_maxp(endpoint) == 0) + continue; + urbcount = 0; pipe_num = diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index b8a4bbfe10b874..7c1c2658cb5f8c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2610,6 +2610,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) if (ieee80211_is_beacon(hdr->frame_control)) ath10k_mac_handle_beacon(ar, skb); + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + status->boottime_ns = ktime_get_boottime_ns(); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", skb, skb->len, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 41c1a3d339c25a..01bfd09a9d88c6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -2066,7 +2066,9 @@ struct wmi_channel { union { __le32 reginfo1; struct { + /* note: power unit is 1 dBm */ u8 antenna_max; + /* note: power unit is 0.5 dBm */ u8 max_tx_power; } __packed; } __packed; @@ -2086,6 +2088,7 @@ struct wmi_channel_arg { u32 min_power; u32 max_power; u32 max_reg_power; + /* note: power unit is 1 dBm */ u32 max_antenna_gain; u32 reg_class_id; enum wmi_phy_mode mode; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 969bf1a590d990..b5a2af3ffc3e15 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -37,7 +37,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw = { .dir = "IPQ8074/hw2.0", .board_size = 256 * 1024, - .cal_size = 256 * 1024, + .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, @@ -58,8 +58,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, - .tcl_0_only = false, - .spectral_fft_sz = 2, + + .spectral = { + .fft_sz = 2, + /* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes. 
+ * so added pad size as 2 bytes to compensate the BIN size + */ + .fft_pad_sz = 2, + .summary_pad_sz = 0, + .fft_hdr_len = 16, + .max_fft_bins = 512, + }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -71,6 +80,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX, + .hal_params = &ath11k_hw_hal_params_ipq8074, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -78,7 +89,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw = { .dir = "IPQ6018/hw1.0", .board_size = 256 * 1024, - .cal_size = 256 * 1024, + .cal_offset = 128 * 1024, }, .max_radios = 2, .bdf_addr = 0x4ABC0000, @@ -99,8 +110,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, - .tcl_0_only = false, - .spectral_fft_sz = 4, + + .spectral = { + .fft_sz = 4, + .fft_pad_sz = 0, + .summary_pad_sz = 0, + .fft_hdr_len = 16, + .max_fft_bins = 512, + }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -112,6 +129,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX, + .hal_params = &ath11k_hw_hal_params_ipq8074, }, { .name = "qca6390 hw2.0", @@ -119,7 +138,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw = { .dir = "QCA6390/hw2.0", .board_size = 256 * 1024, - .cal_size = 256 * 1024, + .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, @@ -140,8 +159,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, - .tcl_0_only = true, - .spectral_fft_sz = 0, + + .spectral = { + .fft_sz = 0, + .fft_pad_sz = 0, + .summary_pad_sz = 0, + .fft_hdr_len = 0, + .max_fft_bins = 0, + }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), @@ -152,6 +177,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, + .hal_params = &ath11k_hw_hal_params_qca6390, }, { .name = "qcn9074 hw1.0", @@ -159,7 +186,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw = { .dir = "QCN9074/hw1.0", .board_size = 256 * 1024, - .cal_size = 256 * 1024, + .cal_offset = 128 * 1024, }, .max_radios = 1, .single_pdev_only = false, @@ -179,7 +206,15 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, - .tcl_0_only = false, + + .spectral = { + .fft_sz = 2, + .fft_pad_sz = 0, + .summary_pad_sz = 16, + .fft_hdr_len = 24, + .max_fft_bins = 1024, + }, + .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), @@ -190,6 +225,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .fix_l1ss = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX, + .hal_params = &ath11k_hw_hal_params_ipq8074, }, { .name = "wcn6855 hw2.0", @@ -197,7 +234,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw = { .dir = "WCN6855/hw2.0", .board_size = 256 * 1024, - .cal_size = 256 * 1024, + .cal_offset = 128 * 1024, }, .max_radios = 3, 
.bdf_addr = 0x4B0C0000, @@ -218,8 +255,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, - .tcl_0_only = true, - .spectral_fft_sz = 0, + + .spectral = { + .fft_sz = 0, + .fft_pad_sz = 0, + .summary_pad_sz = 0, + .fft_hdr_len = 0, + .max_fft_bins = 0, + }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), @@ -230,6 +273,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .fix_l1ss = false, + .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, + .hal_params = &ath11k_hw_hal_params_qca6390, }, }; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 018fb2385f2a37..31d234a51c79b8 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -93,6 +93,8 @@ struct ath11k_skb_rxcb { bool is_first_msdu; bool is_last_msdu; bool is_continuation; + bool is_mcbc; + bool is_eapol; struct hal_rx_desc *rx_desc; u8 err_rel_src; u8 err_code; @@ -100,6 +102,8 @@ struct ath11k_skb_rxcb { u8 unmapped; u8 is_frag; u8 tid; + u16 peer_id; + u16 seq_no; }; enum ath11k_hw_rev { @@ -193,7 +197,9 @@ enum ath11k_dev_flags { }; enum ath11k_monitor_flags { - ATH11K_FLAG_MONITOR_ENABLED, + ATH11K_FLAG_MONITOR_CONF_ENABLED, + ATH11K_FLAG_MONITOR_STARTED, + ATH11K_FLAG_MONITOR_VDEV_CREATED, }; struct ath11k_vif { @@ -362,6 +368,7 @@ struct ath11k_sta { enum hal_pn_type pn_type; struct work_struct update_wk; + struct work_struct set_4addr_wk; struct rate_info txrate; struct rate_info last_txrate; u64 rx_duration; @@ -374,12 +381,15 @@ struct ath11k_sta { /* protected by conf_mutex */ bool aggr_mode; #endif + + bool use_4addr_set; + u16 tcl_metadata; }; #define ATH11K_MIN_5G_FREQ 4150 -#define ATH11K_MIN_6G_FREQ 5945 +#define ATH11K_MIN_6G_FREQ 5925 #define ATH11K_MAX_6G_FREQ 7115 -#define ATH11K_NUM_CHANS 100 +#define ATH11K_NUM_CHANS 101 #define ATH11K_MAX_5G_CHAN 173 enum ath11k_state { @@ -484,7 +494,6 @@ struct ath11k { u32 chan_tx_pwr; u32 num_stations; u32 max_num_stations; - bool monitor_present; /* To synchronize concurrent synchronous mac80211 callback operations, * concurrent debugfs configuration and concurrent FW statistics events. 
*/ @@ -559,6 +568,7 @@ struct ath11k { struct ath11k_per_peer_tx_stats cached_stats; u32 last_ppdu_id; u32 cached_ppdu_id; + int monitor_vdev_id; #ifdef CONFIG_ATH11K_DEBUGFS struct ath11k_debug debug; #endif @@ -591,6 +601,8 @@ struct ath11k_pdev_cap { u32 tx_chain_mask_shift; u32 rx_chain_mask_shift; struct ath11k_band_cap band[NUM_NL80211_BANDS]; + bool nss_ratio_enabled; + u8 nss_ratio_info; }; struct ath11k_pdev { @@ -794,12 +806,15 @@ struct ath11k_fw_stats_pdev { s32 hw_reaped; /* Num underruns */ s32 underrun; + /* Num hw paused */ + u32 hw_paused; /* Num PPDUs cleaned up in TX abort */ s32 tx_abort; /* Num MPDUs requeued by SW */ s32 mpdus_requeued; /* excessive retries */ u32 tx_ko; + u32 tx_xretry; /* data hw rate code */ u32 data_rc; /* Scheduler self triggers */ @@ -820,6 +835,30 @@ struct ath11k_fw_stats_pdev { u32 phy_underrun; /* MPDU is more than txop limit */ u32 txop_ovf; + /* Num sequences posted */ + u32 seq_posted; + /* Num sequences failed in queueing */ + u32 seq_failed_queueing; + /* Num sequences completed */ + u32 seq_completed; + /* Num sequences restarted */ + u32 seq_restarted; + /* Num of MU sequences posted */ + u32 mu_seq_posted; + /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT + * (Reset, channel change) + */ + s32 mpdus_sw_flush; + /* Num MPDUs filtered by HW, all filter condition (TTL expired) */ + s32 mpdus_hw_filter; + /* Num MPDUs truncated by PDG (TXOP, TBTT, + * PPDU_duration based on rate, dyn_bw) + */ + s32 mpdus_truncated; + /* Num MPDUs that were tried but didn't receive ACK or BA */ + s32 mpdus_ack_failed; + /* Num MPDUs that were dropped due to expiry. */ + s32 mpdus_expired; /* PDEV RX stats */ /* Cnts any change in ring routing mid-ppdu */ @@ -845,6 +884,8 @@ struct ath11k_fw_stats_pdev { s32 phy_err_drop; /* Number of mpdu errors - FCS, MIC, ENC etc.
*/ s32 mpdu_errs; + /* Num overflow errors */ + s32 rx_ovfl_errs; }; struct ath11k_fw_stats_vdev { diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c index 5e1f5437b4185d..fd98ba5b1130b4 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.c +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -8,8 +8,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar, struct ath11k_dbring *ring, - struct ath11k_dbring_element *buff, - gfp_t gfp) + struct ath11k_dbring_element *buff) { struct ath11k_base *ab = ar->ab; struct hal_srng *srng; @@ -35,7 +34,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar, goto err; spin_lock_bh(&ring->idr_lock); - buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp); + buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC); spin_unlock_bh(&ring->idr_lock); if (buf_id < 0) { ret = -ENOBUFS; @@ -72,8 +71,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar, } static int ath11k_dbring_fill_bufs(struct ath11k *ar, - struct ath11k_dbring *ring, - gfp_t gfp) + struct ath11k_dbring *ring) { struct ath11k_dbring_element *buff; struct hal_srng *srng; @@ -92,11 +90,11 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar, size = sizeof(*buff) + ring->buf_sz + align - 1; while (num_remain > 0) { - buff = kzalloc(size, gfp); + buff = kzalloc(size, GFP_ATOMIC); if (!buff) break; - ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp); + ret = ath11k_dbring_bufs_replenish(ar, ring, buff); if (ret) { ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n", num_remain, req_entries); @@ -176,7 +174,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar, ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng); ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng); - ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL); + ret = ath11k_dbring_fill_bufs(ar, ring); return ret; } @@ -322,7 +320,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, } memset(buff, 0, size); - ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC); + ath11k_dbring_bufs_replenish(ar, ring, buff); } spin_unlock_bh(&srng->lock); diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 554feaf1ed5cd0..80afd35337a191 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -806,7 +806,7 @@ static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file, len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n"); len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n"); - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + for (i = 0; i < ab->hw_params.max_tx_ring; i++) len += scnprintf(buf + len, size - len, "ring%d: %u\n", i, soc_stats->tx_err.desc_na[i]); @@ -902,7 +902,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, struct htt_rx_ring_tlv_filter tlv_filter = {0}; u32 rx_filter = 0, ring_id, filter, mode; u8 buf[128] = {0}; - int i, ret; + int i, ret, rx_buf_sz = 0; ssize_t rc; mutex_lock(&ar->conf_mutex); @@ -940,6 +940,17 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, } } + /* Clear rx filter set for monitor mode and rx status */ + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, + HAL_RXDMA_MONITOR_STATUS, + rx_buf_sz, &tlv_filter); + if (ret) { + ath11k_warn(ar->ab, "failed to 
set rx filter for monitor status ring\n"); + goto out; + } + } #define HTT_RX_FILTER_TLV_LITE_MODE \ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ @@ -955,6 +966,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, HTT_RX_FILTER_TLV_FLAGS_MPDU_END | HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | HTT_RX_FILTER_TLV_FLAGS_ATTENTION; + rx_buf_sz = DP_RX_BUFFER_SIZE; } else if (mode == ATH11K_PKTLOG_MODE_LITE) { ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_PKTLOG); @@ -964,7 +976,12 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, } rx_filter = HTT_RX_FILTER_TLV_LITE_MODE; + rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; } else { + rx_buf_sz = DP_RX_BUFFER_SIZE; + tlv_filter = ath11k_mac_mon_status_filter_default; + rx_filter = tlv_filter.rx_filter; + ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_DEFAULT); if (ret) { @@ -988,7 +1005,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, ar->dp.mac_id + i, HAL_RXDMA_MONITOR_STATUS, - DP_RX_BUFFER_SIZE, &tlv_filter); + rx_buf_sz, &tlv_filter); if (ret) { ath11k_warn(ab, "failed to set rx filter for monitor status ring\n"); @@ -996,8 +1013,8 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, } } - ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n", - filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite")); + ath11k_info(ab, "pktlog mode %s\n", + ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite")); ar->debug.pktlog_filter = filter; ar->debug.pktlog_mode = mode; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h index e5346af71f2431..ec743a015dc7a5 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.h +++ b/drivers/net/wireless/ath/ath11k/debugfs.h @@ -38,6 +38,10 @@ enum ath11k_dbg_htt_ext_stats_type { ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22, ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24, + ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29, + ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31, + ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, + ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, /* keep this last */ ATH11K_DBG_HTT_NUM_EXT_STATS, diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c index 9e0c90da99d303..4484235bcda418 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c @@ -10,23 +10,28 @@ #include "debug.h" #include "debugfs_htt_stats.h" -#define HTT_DBG_OUT(buf, len, fmt, ...) 
\ - scnprintf(buf, len, fmt "\n", ##__VA_ARGS__) - -#define HTT_MAX_STRING_LEN 256 #define HTT_MAX_PRINT_CHAR_PER_ELEM 15 #define HTT_TLV_HDR_LEN 4 -#define ARRAY_TO_STRING(out, arr, len) \ +#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \ do { \ - int index = 0; u8 i; \ + int index = 0; u8 i; const char *str_val = str; \ + const char *new_line = newline; \ + if (str_val) { \ + index += scnprintf((out + buflen), \ + (ATH11K_HTT_STATS_BUF_SIZE - buflen), \ + "%s = ", str_val); \ + } \ for (i = 0; i < len; i++) { \ - index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \ - " %u:%u,", i, arr[i]); \ - if (index < 0 || index >= HTT_MAX_STRING_LEN) \ - break; \ + index += scnprintf((out + buflen) + index, \ + (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \ + " %u:%u,", i, arr[i]); \ } \ + index += scnprintf((out + buflen) + index, \ + (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \ + "%s", new_line); \ + buflen += index; \ } while (0) static inline void htt_print_stats_string_tlv(const void *tag_buf, @@ -38,22 +43,20 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i; - u16 index = 0; - char data[HTT_MAX_STRING_LEN] = {0}; tag_len = tag_len >> 2; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, + "data = "); for (i = 0; i < tag_len; i++) { - index += scnprintf(&data[index], - HTT_MAX_STRING_LEN - index, - "%.*s", 4, (char *)&(htt_stats_buf->data[i])); - if (index >= HTT_MAX_STRING_LEN) - break; + len += scnprintf(buf + len, + buf_len - len, + "%.*s", 4, (char *)&(htt_stats_buf->data[i])); } - - len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data); + /* New lines are added for better display */ + len += scnprintf(buf + len, buf_len - len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -71,107 +74,107 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u", - htt_stats_buf->hw_queued); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u", - htt_stats_buf->hw_reaped); - len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u", - htt_stats_buf->underrun); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u", - htt_stats_buf->hw_paused); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u", - htt_stats_buf->hw_flush); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u", - htt_stats_buf->hw_filt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u", - htt_stats_buf->tx_abort); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u", - htt_stats_buf->mpdu_requeued); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u", - htt_stats_buf->tx_xretry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u", - htt_stats_buf->data_rc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u", - htt_stats_buf->mpdu_dropped_xretry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u", - htt_stats_buf->illgl_rate_phy_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = 
%u", - htt_stats_buf->cont_xretry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u", - htt_stats_buf->tx_timeout); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u", - htt_stats_buf->pdev_resets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u", - htt_stats_buf->phy_underrun); - len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u", - htt_stats_buf->txop_ovf); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u", - htt_stats_buf->seq_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u", - htt_stats_buf->seq_failed_queueing); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u", - htt_stats_buf->seq_completed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u", - htt_stats_buf->seq_restarted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u", - htt_stats_buf->mu_seq_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u", - htt_stats_buf->seq_switch_hw_paused); - len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u", - htt_stats_buf->next_seq_posted_dsr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u", - htt_stats_buf->seq_posted_isr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u", - htt_stats_buf->seq_ctrl_cached); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u", - htt_stats_buf->mpdu_count_tqm); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u", - htt_stats_buf->msdu_count_tqm); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u", - htt_stats_buf->mpdu_removed_tqm); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u", - htt_stats_buf->msdu_removed_tqm); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u", - htt_stats_buf->mpdus_sw_flush); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u", - htt_stats_buf->mpdus_hw_filter); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u", - htt_stats_buf->mpdus_truncated); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u", - htt_stats_buf->mpdus_ack_failed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u", - htt_stats_buf->mpdus_expired); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u", - htt_stats_buf->mpdus_seq_hw_retry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u", - htt_stats_buf->ack_tlv_proc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u", - htt_stats_buf->coex_abort_mpdu_cnt_valid); - len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u", - htt_stats_buf->coex_abort_mpdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u", - htt_stats_buf->num_total_ppdus_tried_ota); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u", - htt_stats_buf->num_data_ppdus_tried_ota); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u", - htt_stats_buf->local_ctrl_mgmt_enqued); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u", - htt_stats_buf->local_ctrl_mgmt_freed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u", - htt_stats_buf->local_data_enqued); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u", - htt_stats_buf->local_data_freed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u", - 
htt_stats_buf->mpdu_tried); - len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u", - htt_stats_buf->isr_wait_seq_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u", - htt_stats_buf->tx_active_dur_us_low); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n", - htt_stats_buf->tx_active_dur_us_high); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", + htt_stats_buf->hw_queued); + len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", + htt_stats_buf->hw_reaped); + len += scnprintf(buf + len, buf_len - len, "underrun = %u\n", + htt_stats_buf->underrun); + len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n", + htt_stats_buf->hw_paused); + len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n", + htt_stats_buf->hw_flush); + len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n", + htt_stats_buf->hw_filt); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + htt_stats_buf->tx_abort); + len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n", + htt_stats_buf->mpdu_requeued); + len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n", + htt_stats_buf->tx_xretry); + len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n", + htt_stats_buf->data_rc); + len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n", + htt_stats_buf->mpdu_dropped_xretry); + len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n", + htt_stats_buf->illgl_rate_phy_err); + len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n", + htt_stats_buf->cont_xretry); + len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n", + htt_stats_buf->tx_timeout); + len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n", + htt_stats_buf->pdev_resets); + len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n", + htt_stats_buf->phy_underrun); + len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n", + htt_stats_buf->txop_ovf); + len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n", + htt_stats_buf->seq_posted); + len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n", + htt_stats_buf->seq_failed_queueing); + len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n", + htt_stats_buf->seq_completed); + len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n", + htt_stats_buf->seq_restarted); + len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n", + htt_stats_buf->mu_seq_posted); + len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n", + htt_stats_buf->seq_switch_hw_paused); + len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n", + htt_stats_buf->next_seq_posted_dsr); + len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n", + htt_stats_buf->seq_posted_isr); + len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n", + htt_stats_buf->seq_ctrl_cached); + len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n", + htt_stats_buf->mpdu_count_tqm); + len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n", + htt_stats_buf->msdu_count_tqm); + len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n", + htt_stats_buf->mpdu_removed_tqm); + len += scnprintf(buf + len, buf_len - len, 
"msdu_removed_tqm = %u\n", + htt_stats_buf->msdu_removed_tqm); + len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n", + htt_stats_buf->mpdus_sw_flush); + len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", + htt_stats_buf->mpdus_hw_filter); + len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n", + htt_stats_buf->mpdus_truncated); + len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n", + htt_stats_buf->mpdus_ack_failed); + len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n", + htt_stats_buf->mpdus_expired); + len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n", + htt_stats_buf->mpdus_seq_hw_retry); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + htt_stats_buf->ack_tlv_proc); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n", + htt_stats_buf->coex_abort_mpdu_cnt_valid); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n", + htt_stats_buf->coex_abort_mpdu_cnt); + len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n", + htt_stats_buf->num_total_ppdus_tried_ota); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n", + htt_stats_buf->num_data_ppdus_tried_ota); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n", + htt_stats_buf->local_ctrl_mgmt_enqued); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n", + htt_stats_buf->local_ctrl_mgmt_freed); + len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n", + htt_stats_buf->local_data_enqued); + len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n", + htt_stats_buf->local_data_freed); + len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n", + htt_stats_buf->mpdu_tried); + len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n", + htt_stats_buf->isr_wait_seq_posted); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n", + htt_stats_buf->tx_active_dur_us_low); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n", + htt_stats_buf->tx_active_dur_us_high); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -190,13 +193,12 @@ htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char urrn_stats[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n"); - ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -215,13 +217,12 @@ htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char flush_errs[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n"); - ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - 
len, "flush_errs = %s\n", flush_errs); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -240,14 +241,12 @@ htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sifs_status[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n"); - ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n", - sifs_status); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -266,13 +265,12 @@ htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char phy_errs[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n"); - ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -291,15 +289,13 @@ htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sifs_hist_status[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n"); - ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n", - sifs_hist_status); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status, + "sifs_hist_status", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -318,23 +314,23 @@ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u", - htt_stats_buf->num_data_ppdus_legacy_su); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n", + htt_stats_buf->num_data_ppdus_legacy_su); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u", - htt_stats_buf->num_data_ppdus_ac_su); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n", + htt_stats_buf->num_data_ppdus_ac_su); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u", - htt_stats_buf->num_data_ppdus_ax_su); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n", + htt_stats_buf->num_data_ppdus_ax_su); - len += 
HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u", - htt_stats_buf->num_data_ppdus_ac_su_txbf); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n", + htt_stats_buf->num_data_ppdus_ac_su_txbf); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n", - htt_stats_buf->num_data_ppdus_ax_su_txbf); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n", + htt_stats_buf->num_data_ppdus_ax_su_txbf); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -353,25 +349,15 @@ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0}; u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2); - u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements; - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u", - htt_stats_buf->hist_bin_size); - - if (required_buffer_size < HTT_MAX_STRING_LEN) { - ARRAY_TO_STRING(tried_mpdu_cnt_hist, - htt_stats_buf->tried_mpdu_cnt_hist, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n", - tried_mpdu_cnt_hist); - } else { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "INSUFFICIENT PRINT BUFFER\n"); - } + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n", + htt_stats_buf->hist_bin_size); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist, + "tried_mpdu_cnt_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -390,14 +376,14 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n"); memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]), HTT_STATS_MAX_HW_INTR_NAME_LEN); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u", - htt_stats_buf->mask); - len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n", - htt_stats_buf->count); + len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name); + len += scnprintf(buf + len, buf_len - len, "mask = %u\n", + htt_stats_buf->mask); + len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", + htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -417,13 +403,13 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n"); memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]), HTT_STATS_MAX_HW_MODULE_NAME_LEN); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ", - hw_module_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u", - htt_stats_buf->count); + len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n", + 
hw_module_name); + len += scnprintf(buf + len, buf_len - len, "count = %u\n", + htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -441,29 +427,29 @@ static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u", - htt_stats_buf->tx_abort); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u", - htt_stats_buf->tx_abort_fail_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u", - htt_stats_buf->rx_abort); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u", - htt_stats_buf->rx_abort_fail_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u", - htt_stats_buf->warm_reset); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u", - htt_stats_buf->cold_reset); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u", - htt_stats_buf->tx_flush); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u", - htt_stats_buf->tx_glb_reset); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u", - htt_stats_buf->tx_txq_reset); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n", - htt_stats_buf->rx_timeout_reset); + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + htt_stats_buf->tx_abort); + len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n", + htt_stats_buf->tx_abort_fail_count); + len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n", + htt_stats_buf->rx_abort); + len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n", + htt_stats_buf->rx_abort_fail_count); + len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n", + htt_stats_buf->warm_reset); + len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n", + htt_stats_buf->cold_reset); + len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n", + htt_stats_buf->tx_flush); + len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n", + htt_stats_buf->tx_glb_reset); + len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n", + htt_stats_buf->tx_txq_reset); + len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n", + htt_stats_buf->rx_timeout_reset); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -481,35 +467,36 @@ static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u", - htt_stats_buf->last_update_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u", - htt_stats_buf->last_add_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u", - htt_stats_buf->last_remove_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u", - htt_stats_buf->total_processed_msdu_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u", - 
htt_stats_buf->cur_msdu_count_in_flowq); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u", - htt_stats_buf->sw_peer_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u", - htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u", - (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >> - 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u", - (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >> - 20); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u", - htt_stats_buf->last_cycle_enqueue_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u", - htt_stats_buf->last_cycle_dequeue_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u", - htt_stats_buf->last_cycle_drop_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n", - htt_stats_buf->current_drop_th); + len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n", + htt_stats_buf->last_update_timestamp); + len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n", + htt_stats_buf->last_add_timestamp); + len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n", + htt_stats_buf->last_remove_timestamp); + len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n", + htt_stats_buf->total_processed_msdu_count); + len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n", + htt_stats_buf->cur_msdu_count_in_flowq); + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", + htt_stats_buf->sw_peer_id); + len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n", + FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO, + htt_stats_buf->tx_flow_no__tid_num__drop_rule)); + len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", + FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM, + htt_stats_buf->tx_flow_no__tid_num__drop_rule)); + len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n", + FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE, + htt_stats_buf->tx_flow_no__tid_num__drop_rule)); + len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n", + htt_stats_buf->last_cycle_enqueue_count); + len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n", + htt_stats_buf->last_cycle_dequeue_count); + len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n", + htt_stats_buf->last_cycle_drop_count); + len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n", + htt_stats_buf->current_drop_th); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -528,38 +515,41 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; char tid_name[MAX_HTT_TID_NAME + 1] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n"); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u", - htt_stats_buf->sw_peer_id__tid_num & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u", - (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u", - 
htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u", - (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & - 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x", - htt_stats_buf->tid_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u", - htt_stats_buf->hw_queued); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u", - htt_stats_buf->hw_reaped); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u", - htt_stats_buf->mpdus_hw_filter); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u", - htt_stats_buf->qdepth_bytes); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u", - htt_stats_buf->qdepth_num_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u", - htt_stats_buf->qdepth_num_mpdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u", - htt_stats_buf->last_scheduled_tsmp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u", - htt_stats_buf->pause_module_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n", - htt_stats_buf->block_module_id); + len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID, + htt_stats_buf->sw_peer_id__tid_num)); + len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_TID_NUM, + htt_stats_buf->sw_peer_id__tid_num)); + len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING, + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); + len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ, + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); + len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n", + htt_stats_buf->tid_flags); + len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", + htt_stats_buf->hw_queued); + len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", + htt_stats_buf->hw_reaped); + len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", + htt_stats_buf->mpdus_hw_filter); + len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n", + htt_stats_buf->qdepth_bytes); + len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n", + htt_stats_buf->qdepth_num_msdu); + len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n", + htt_stats_buf->qdepth_num_mpdu); + len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n", + htt_stats_buf->last_scheduled_tsmp); + len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n", + htt_stats_buf->pause_module_id); + len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n", + htt_stats_buf->block_module_id); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -578,42 +568,45 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; char tid_name[MAX_HTT_TID_NAME + 1] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n"); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = 
%u", - htt_stats_buf->sw_peer_id__tid_num & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u", - (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u", - htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u", - (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & - 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x", - htt_stats_buf->tid_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u", - htt_stats_buf->max_qdepth_bytes); - len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u", - htt_stats_buf->max_qdepth_n_msdus); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u", - htt_stats_buf->rsvd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u", - htt_stats_buf->qdepth_bytes); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u", - htt_stats_buf->qdepth_num_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u", - htt_stats_buf->qdepth_num_mpdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u", - htt_stats_buf->last_scheduled_tsmp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u", - htt_stats_buf->pause_module_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u", - htt_stats_buf->block_module_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x", - htt_stats_buf->allow_n_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n", - htt_stats_buf->sendn_frms_allowed); + len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID, + htt_stats_buf->sw_peer_id__tid_num)); + len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM, + htt_stats_buf->sw_peer_id__tid_num)); + len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING, + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); + len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n", + FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ, + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); + len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n", + htt_stats_buf->tid_flags); + len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n", + htt_stats_buf->max_qdepth_bytes); + len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n", + htt_stats_buf->max_qdepth_n_msdus); + len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n", + htt_stats_buf->rsvd); + len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n", + htt_stats_buf->qdepth_bytes); + len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n", + htt_stats_buf->qdepth_num_msdu); + len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n", + htt_stats_buf->qdepth_num_mpdu); + len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n", + htt_stats_buf->last_scheduled_tsmp); + len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n", + htt_stats_buf->pause_module_id); + len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n", + htt_stats_buf->block_module_id); + len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n", + 
htt_stats_buf->allow_n_flags); + len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n", + htt_stats_buf->sendn_frms_allowed); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -632,21 +625,23 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; char tid_name[MAX_HTT_TID_NAME + 1] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u", - htt_stats_buf->sw_peer_id__tid_num & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u", - (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", + FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID, + htt_stats_buf->sw_peer_id__tid_num)); + len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", + FIELD_GET(HTT_RX_TID_STATS_TID_NUM, + htt_stats_buf->sw_peer_id__tid_num)); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u", - htt_stats_buf->dup_in_reorder); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u", - htt_stats_buf->dup_past_outside_window); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u", - htt_stats_buf->dup_past_within_window); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n", - htt_stats_buf->rxdesc_err_decrypt); + len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); + len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n", + htt_stats_buf->dup_in_reorder); + len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n", + htt_stats_buf->dup_past_outside_window); + len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n", + htt_stats_buf->dup_past_within_window); + len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n", + htt_stats_buf->rxdesc_err_decrypt); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -663,16 +658,14 @@ static inline void htt_print_counter_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char counter_name[HTT_MAX_STRING_LEN] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:"); + len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n"); - ARRAY_TO_STRING(counter_name, - htt_stats_buf->counter_name, - HTT_MAX_COUNTER_NAME); - len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name); - len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n", - htt_stats_buf->count); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name, + "counter_name", + HTT_MAX_COUNTER_NAME, "\n"); + len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", + htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -690,35 +683,35 @@ static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u", - htt_stats_buf->ppdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u", - htt_stats_buf->mpdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt 
= %u", - htt_stats_buf->msdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u", - htt_stats_buf->pause_bitmap); - len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u", - htt_stats_buf->block_bitmap); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d", - htt_stats_buf->rssi); - len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu", - htt_stats_buf->peer_enqueued_count_low | - ((u64)htt_stats_buf->peer_enqueued_count_high << 32)); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu", - htt_stats_buf->peer_dequeued_count_low | - ((u64)htt_stats_buf->peer_dequeued_count_high << 32)); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu", - htt_stats_buf->peer_dropped_count_low | - ((u64)htt_stats_buf->peer_dropped_count_high << 32)); - len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu", - htt_stats_buf->ppdu_transmitted_bytes_low | - ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32)); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u", - htt_stats_buf->peer_ttl_removed_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n", - htt_stats_buf->inactive_time); + len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n", + htt_stats_buf->ppdu_cnt); + len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n", + htt_stats_buf->mpdu_cnt); + len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n", + htt_stats_buf->msdu_cnt); + len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n", + htt_stats_buf->pause_bitmap); + len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n", + htt_stats_buf->block_bitmap); + len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n", + htt_stats_buf->rssi); + len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n", + htt_stats_buf->peer_enqueued_count_low | + ((u64)htt_stats_buf->peer_enqueued_count_high << 32)); + len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n", + htt_stats_buf->peer_dequeued_count_low | + ((u64)htt_stats_buf->peer_dequeued_count_high << 32)); + len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n", + htt_stats_buf->peer_dropped_count_low | + ((u64)htt_stats_buf->peer_dropped_count_high << 32)); + len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n", + htt_stats_buf->ppdu_transmitted_bytes_low | + ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32)); + len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n", + htt_stats_buf->peer_ttl_removed_count); + len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n", + htt_stats_buf->inactive_time); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -736,29 +729,38 @@ static inline void htt_print_peer_details_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u", - htt_stats_buf->peer_type); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u", - htt_stats_buf->sw_peer_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u", - htt_stats_buf->vdev_pdev_ast_idx & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u", - (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u", - 
(htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x", - htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF, - (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8, - (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16, - (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24, - (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF), - (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x", - htt_stats_buf->peer_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n", - htt_stats_buf->qpeer_flags); + len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n", + htt_stats_buf->peer_type); + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", + htt_stats_buf->sw_peer_id); + len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n", + FIELD_GET(HTT_PEER_DETAILS_VDEV_ID, + htt_stats_buf->vdev_pdev_ast_idx)); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n", + FIELD_GET(HTT_PEER_DETAILS_PDEV_ID, + htt_stats_buf->vdev_pdev_ast_idx)); + len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n", + FIELD_GET(HTT_PEER_DETAILS_AST_IDX, + htt_stats_buf->vdev_pdev_ast_idx)); + len += scnprintf(buf + len, buf_len - len, + "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n", + FIELD_GET(HTT_MAC_ADDR_L32_0, + htt_stats_buf->mac_addr.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_1, + htt_stats_buf->mac_addr.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_2, + htt_stats_buf->mac_addr.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_3, + htt_stats_buf->mac_addr.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_H16_0, + htt_stats_buf->mac_addr.mac_addr_h16), + FIELD_GET(HTT_MAC_ADDR_H16_1, + htt_stats_buf->mac_addr.mac_addr_h16)); + len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n", + htt_stats_buf->peer_flags); + len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n", + htt_stats_buf->qpeer_flags); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -775,74 +777,40 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char str_buf[HTT_MAX_STRING_LEN] = {0}; - char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL}; u8 j; - for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { - tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!tx_gi[j]) - goto fail; - } - - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u", - htt_stats_buf->tx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u", - htt_stats_buf->rts_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u", - htt_stats_buf->ack_rssi); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, 
"tx_mu_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, - htt_stats_buf->tx_nss, - HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, - htt_stats_buf->tx_bw, - HTT_TX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream, - HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n", + htt_stats_buf->tx_ldpc); + len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", + htt_stats_buf->rts_cnt); + len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n", + htt_stats_buf->ack_rssi); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss", + HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw", + HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream", + HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(tx_gi[j], - htt_stats_buf->tx_gi[j], - HTT_TX_PEER_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ", - j, tx_gi[j]); + len += scnprintf(buf + len, buf_len - len, + "tx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL, + HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n"); } - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, - htt_stats_buf->tx_dcm, - HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm", + HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -850,10 +818,6 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf, buf[len] = 0; stats_req->buf_len = len; - -fail: - for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) - kfree(tx_gi[j]); } static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf, @@ -864,79 +828,48 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 j; - char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL}; - char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL}; - char str_buf[HTT_MAX_STRING_LEN] = {0}; - - for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) { - rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if 
(!rssi_chain[j]) - goto fail; - } - - for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) { - rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!rx_gi[j]) - goto fail; - } - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u", - htt_stats_buf->nsts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u", - htt_stats_buf->rx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u", - htt_stats_buf->rts_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u", - htt_stats_buf->rssi_mgmt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u", - htt_stats_buf->rssi_data); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u", - htt_stats_buf->rssi_comb); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs, - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss, - HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm, - HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc, - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw, - HTT_RX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "nsts = %u\n", + htt_stats_buf->nsts); + len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n", + htt_stats_buf->rx_ldpc); + len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", + htt_stats_buf->rts_cnt); + len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n", + htt_stats_buf->rssi_mgmt); + len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n", + htt_stats_buf->rssi_data); + len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n", + htt_stats_buf->rssi_comb); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs", + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss", + HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm", + HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc", + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw", + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) { - ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j], - HTT_RX_PEER_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ", - j, rssi_chain[j]); + len += scnprintf(buf + len, (buf_len - len), + "rssi_chain[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL, + HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n"); } for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(rx_gi[j], 
htt_stats_buf->rx_gi[j], - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ", - j, rx_gi[j]); + len += scnprintf(buf + len, (buf_len - len), + "rx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL, + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream, - HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream", + HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -944,13 +877,6 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf, buf[len] = 0; stats_req->buf_len = len; - -fail: - for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) - kfree(rssi_chain[j]); - - for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) - kfree(rx_gi[j]); } static inline void @@ -962,13 +888,13 @@ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u", - htt_stats_buf->mu_mimo_sch_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u", - htt_stats_buf->mu_mimo_sch_failed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n", - htt_stats_buf->mu_mimo_ppdu_posted); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n", + htt_stats_buf->mu_mimo_sch_posted); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", + htt_stats_buf->mu_mimo_sch_failed); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n", + htt_stats_buf->mu_mimo_ppdu_posted); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -987,22 +913,22 @@ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u", - htt_stats_buf->mu_mimo_mpdus_queued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u", - htt_stats_buf->mu_mimo_mpdus_tried_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u", - htt_stats_buf->mu_mimo_mpdus_failed_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u", - htt_stats_buf->mu_mimo_mpdus_requeued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u", - htt_stats_buf->mu_mimo_err_no_ba_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u", - htt_stats_buf->mu_mimo_mpdu_underrun_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n", - htt_stats_buf->mu_mimo_ampdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n", + htt_stats_buf->mu_mimo_mpdus_queued_usr); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n", + htt_stats_buf->mu_mimo_mpdus_tried_usr); + len += scnprintf(buf + len, buf_len 
- len, "mu_mimo_mpdus_failed_usr = %u\n", + htt_stats_buf->mu_mimo_mpdus_failed_usr); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n", + htt_stats_buf->mu_mimo_mpdus_requeued_usr); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n", + htt_stats_buf->mu_mimo_err_no_ba_usr); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n", + htt_stats_buf->mu_mimo_mpdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n", + htt_stats_buf->mu_mimo_ampdu_underrun_usr); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1021,11 +947,13 @@ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__hwq_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n", - (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID, + htt_stats_buf->mac_id__hwq_id__word)); + len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n", + FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID, + htt_stats_buf->mac_id__hwq_id__word)); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1044,51 +972,53 @@ htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req * u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; /* TODO: HKDBG */ - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__hwq_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u", - (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u", - htt_stats_buf->xretry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u", - htt_stats_buf->underrun_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u", - htt_stats_buf->flush_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u", - htt_stats_buf->filt_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u", - htt_stats_buf->null_mpdu_bmap); - len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u", - htt_stats_buf->user_ack_failure); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u", - htt_stats_buf->ack_tlv_proc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u", - htt_stats_buf->sched_id_proc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u", - htt_stats_buf->null_mpdu_tx_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u", - htt_stats_buf->mpdu_bmap_not_recvd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u", - htt_stats_buf->num_bar); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u", - htt_stats_buf->rts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u", - htt_stats_buf->cts2self); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u", - htt_stats_buf->qos_null); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u", - htt_stats_buf->mpdu_tried_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u", - 
htt_stats_buf->mpdu_queued_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u", - htt_stats_buf->mpdu_ack_fail_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u", - htt_stats_buf->mpdu_filt_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u", - htt_stats_buf->false_mpdu_ack_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n", - htt_stats_buf->txq_timeout); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID, + htt_stats_buf->mac_id__hwq_id__word)); + len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n", + FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID, + htt_stats_buf->mac_id__hwq_id__word)); + len += scnprintf(buf + len, buf_len - len, "xretry = %u\n", + htt_stats_buf->xretry); + len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n", + htt_stats_buf->underrun_cnt); + len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n", + htt_stats_buf->flush_cnt); + len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n", + htt_stats_buf->filt_cnt); + len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n", + htt_stats_buf->null_mpdu_bmap); + len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n", + htt_stats_buf->user_ack_failure); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + htt_stats_buf->ack_tlv_proc); + len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n", + htt_stats_buf->sched_id_proc); + len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n", + htt_stats_buf->null_mpdu_tx_count); + len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n", + htt_stats_buf->mpdu_bmap_not_recvd); + len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n", + htt_stats_buf->num_bar); + len += scnprintf(buf + len, buf_len - len, "rts = %u\n", + htt_stats_buf->rts); + len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", + htt_stats_buf->cts2self); + len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", + htt_stats_buf->qos_null); + len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n", + htt_stats_buf->mpdu_tried_cnt); + len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n", + htt_stats_buf->mpdu_queued_cnt); + len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n", + htt_stats_buf->mpdu_ack_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n", + htt_stats_buf->mpdu_filt_cnt); + len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n", + htt_stats_buf->false_mpdu_ack_count); + len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n", + htt_stats_buf->txq_timeout); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1108,17 +1038,14 @@ htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS); - char difs_latency_hist[HTT_MAX_STRING_LEN] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u", - htt_stats_buf->hist_intvl); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n", + htt_stats_buf->hist_intvl); - 
ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist, - data_len); - len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n", - difs_latency_hist); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist, + "difs_latency_hist", data_len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1138,16 +1065,14 @@ htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 data_len; - char cmd_result[HTT_MAX_STRING_LEN] = {0}; data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:"); - - ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result", + data_len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1167,15 +1092,13 @@ htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems; - char cmd_stall_status[HTT_MAX_STRING_LEN] = {0}; num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n"); - ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n", - cmd_stall_status); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status, + "cmd_stall_status", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1195,15 +1118,14 @@ htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems; - char fes_result[HTT_MAX_STRING_LEN] = {0}; num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n"); - ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1222,27 +1144,16 @@ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0}; u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2); - u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements; - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u", - htt_stats_buf->hist_bin_size); - - if (required_buffer_size < HTT_MAX_STRING_LEN) { - ARRAY_TO_STRING(tried_mpdu_cnt_hist, - htt_stats_buf->tried_mpdu_cnt_hist, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "tried_mpdu_cnt_hist = %s\n", - tried_mpdu_cnt_hist); - } else { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "INSUFFICIENT PRINT BUFFER "); 
- } + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n", + htt_stats_buf->hist_bin_size); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist, + "tried_mpdu_cnt_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1261,23 +1172,14 @@ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0}; u32 num_elements = tag_len >> 2; - u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist, + "txop_used_cnt_hist", num_elements, "\n\n"); - if (required_buffer_size < HTT_MAX_STRING_LEN) { - ARRAY_TO_STRING(txop_used_cnt_hist, - htt_stats_buf->txop_used_cnt_hist, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n", - txop_used_cnt_hist); - } else { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "INSUFFICIENT PRINT BUFFER "); - } if (len >= buf_len) buf[buf_len - 1] = 0; else @@ -1300,86 +1202,86 @@ static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf, const u32 *cbf_160 = htt_stats_buf->cbf_160; if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n"); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ", - cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n"); + len += scnprintf(buf + len, buf_len - len, + "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + 
cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ", - i, - htt_stats_buf->sounding[0], - htt_stats_buf->sounding[1], - htt_stats_buf->sounding[2], - htt_stats_buf->sounding[3]); + len += scnprintf(buf + len, buf_len - len, + "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n", + i, + htt_stats_buf->sounding[0], + htt_stats_buf->sounding[1], + htt_stats_buf->sounding[2], + htt_stats_buf->sounding[3]); } } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n"); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ", - cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u", - cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], - cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + 
cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", + cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], + cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ", - i, - htt_stats_buf->sounding[0], - htt_stats_buf->sounding[1], - htt_stats_buf->sounding[2], - htt_stats_buf->sounding[3]); + len += scnprintf(buf + len, buf_len - len, + "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n", + i, + htt_stats_buf->sounding[0], + htt_stats_buf->sounding[1], + htt_stats_buf->sounding[2], + htt_stats_buf->sounding[3]); } } @@ -1400,31 +1302,31 @@ htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u", - htt_stats_buf->su_bar); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u", - htt_stats_buf->rts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u", - htt_stats_buf->cts2self); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u", - htt_stats_buf->qos_null); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u", - htt_stats_buf->delayed_bar_1); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u", - htt_stats_buf->delayed_bar_2); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u", - htt_stats_buf->delayed_bar_3); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u", - htt_stats_buf->delayed_bar_4); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u", - htt_stats_buf->delayed_bar_5); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u", - htt_stats_buf->delayed_bar_6); - len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n", - htt_stats_buf->delayed_bar_7); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n", 
+ htt_stats_buf->su_bar); + len += scnprintf(buf + len, buf_len - len, "rts = %u\n", + htt_stats_buf->rts); + len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", + htt_stats_buf->cts2self); + len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", + htt_stats_buf->qos_null); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n", + htt_stats_buf->delayed_bar_1); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n", + htt_stats_buf->delayed_bar_2); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n", + htt_stats_buf->delayed_bar_3); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n", + htt_stats_buf->delayed_bar_4); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n", + htt_stats_buf->delayed_bar_5); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n", + htt_stats_buf->delayed_bar_6); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n", + htt_stats_buf->delayed_bar_7); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1443,21 +1345,21 @@ htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u", - htt_stats_buf->ac_su_ndpa); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u", - htt_stats_buf->ac_su_ndp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u", - htt_stats_buf->ac_mu_mimo_ndpa); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u", - htt_stats_buf->ac_mu_mimo_ndp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u", - htt_stats_buf->ac_mu_mimo_brpoll_1); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u", - htt_stats_buf->ac_mu_mimo_brpoll_2); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n", - htt_stats_buf->ac_mu_mimo_brpoll_3); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n", + htt_stats_buf->ac_su_ndpa); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n", + htt_stats_buf->ac_su_ndp); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n", + htt_stats_buf->ac_mu_mimo_ndpa); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n", + htt_stats_buf->ac_mu_mimo_ndp); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n", + htt_stats_buf->ac_mu_mimo_brpoll_1); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n", + htt_stats_buf->ac_mu_mimo_brpoll_2); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n", + htt_stats_buf->ac_mu_mimo_brpoll_3); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1476,37 +1378,37 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u", - htt_stats_buf->ax_su_ndpa); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u", - htt_stats_buf->ax_su_ndp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u", - htt_stats_buf->ax_mu_mimo_ndpa); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u", - htt_stats_buf->ax_mu_mimo_ndp); - len += HTT_DBG_OUT(buf + len, 
buf_len - len, "ax_mu_mimo_brpoll_1 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_1); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_2); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_3); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_4); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_5); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_6); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u", - htt_stats_buf->ax_mu_mimo_brpoll_7); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u", - htt_stats_buf->ax_basic_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u", - htt_stats_buf->ax_bsr_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u", - htt_stats_buf->ax_mu_bar_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n", - htt_stats_buf->ax_mu_rts_trigger); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n", + htt_stats_buf->ax_su_ndpa); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n", + htt_stats_buf->ax_su_ndp); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n", + htt_stats_buf->ax_mu_mimo_ndpa); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n", + htt_stats_buf->ax_mu_mimo_ndp); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_1); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_2); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_3); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_4); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_5); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_6); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n", + htt_stats_buf->ax_mu_mimo_brpoll_7); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n", + htt_stats_buf->ax_basic_trigger); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n", + htt_stats_buf->ax_bsr_trigger); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n", + htt_stats_buf->ax_mu_bar_trigger); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n", + htt_stats_buf->ax_mu_rts_trigger); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1525,21 +1427,21 @@ htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u", - htt_stats_buf->ac_su_ndp_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u", - htt_stats_buf->ac_su_ndpa_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u", - htt_stats_buf->ac_mu_mimo_ndpa_err); - len += HTT_DBG_OUT(buf + len, buf_len - 
len, "ac_mu_mimo_ndp_err = %u", - htt_stats_buf->ac_mu_mimo_ndp_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u", - htt_stats_buf->ac_mu_mimo_brp1_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u", - htt_stats_buf->ac_mu_mimo_brp2_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n", - htt_stats_buf->ac_mu_mimo_brp3_err); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n", + htt_stats_buf->ac_su_ndp_err); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n", + htt_stats_buf->ac_su_ndpa_err); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n", + htt_stats_buf->ac_mu_mimo_ndpa_err); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n", + htt_stats_buf->ac_mu_mimo_ndp_err); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n", + htt_stats_buf->ac_mu_mimo_brp1_err); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n", + htt_stats_buf->ac_mu_mimo_brp2_err); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n", + htt_stats_buf->ac_mu_mimo_brp3_err); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1558,37 +1460,37 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u", - htt_stats_buf->ax_su_ndp_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u", - htt_stats_buf->ax_su_ndpa_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u", - htt_stats_buf->ax_mu_mimo_ndpa_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u", - htt_stats_buf->ax_mu_mimo_ndp_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u", - htt_stats_buf->ax_mu_mimo_brp1_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u", - htt_stats_buf->ax_mu_mimo_brp2_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u", - htt_stats_buf->ax_mu_mimo_brp3_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u", - htt_stats_buf->ax_mu_mimo_brp4_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u", - htt_stats_buf->ax_mu_mimo_brp5_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u", - htt_stats_buf->ax_mu_mimo_brp6_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u", - htt_stats_buf->ax_mu_mimo_brp7_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u", - htt_stats_buf->ax_basic_trigger_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u", - htt_stats_buf->ax_bsr_trigger_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u", - htt_stats_buf->ax_mu_bar_trigger_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n", - htt_stats_buf->ax_mu_rts_trigger_err); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n", + htt_stats_buf->ax_su_ndp_err); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n", + htt_stats_buf->ax_su_ndpa_err); + len += 
scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n", + htt_stats_buf->ax_mu_mimo_ndpa_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n", + htt_stats_buf->ax_mu_mimo_ndp_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp1_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp2_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp3_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp4_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp5_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp6_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n", + htt_stats_buf->ax_mu_mimo_brp7_err); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n", + htt_stats_buf->ax_basic_trigger_err); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n", + htt_stats_buf->ax_bsr_trigger_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n", + htt_stats_buf->ax_mu_bar_trigger_err); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n", + htt_stats_buf->ax_mu_rts_trigger_err); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1608,35 +1510,35 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u", - htt_stats_buf->mu_mimo_sch_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u", - htt_stats_buf->mu_mimo_sch_failed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n", - htt_stats_buf->mu_mimo_ppdu_posted); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n", + htt_stats_buf->mu_mimo_sch_posted); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", + htt_stats_buf->mu_mimo_sch_failed); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n", + htt_stats_buf->mu_mimo_ppdu_posted); - len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:"); + len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_sch_nusers_%u = %u", - i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_sch_nusers_%u = %u\n", + i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:"); + len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_sch_nusers_%u = %u", - i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_sch_nusers_%u = %u\n", + i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:"); + len += 
scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_ofdma_sch_nusers_%u = %u", - i, htt_stats_buf->ax_ofdma_sch_nusers[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_sch_nusers_%u = %u\n", + i, htt_stats_buf->ax_ofdma_sch_nusers[i]); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1657,114 +1559,114 @@ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) { if (!htt_stats_buf->user_index) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_mpdus_queued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_queued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_mpdus_tried_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_tried_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_mpdus_failed_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_failed_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_mpdus_requeued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_requeued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_err_no_ba_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->err_no_ba_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_mpdu_underrun_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdu_underrun_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n", - htt_stats_buf->user_index, - htt_stats_buf->ampdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_queued_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_queued_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_tried_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_failed_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_err_no_ba_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); } } if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) { if (!htt_stats_buf->user_index) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_mpdus_queued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_queued_usr); - len += 
HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_mpdus_tried_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_tried_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_mpdus_failed_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_failed_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_mpdus_requeued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_requeued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_err_no_ba_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->err_no_ba_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_mpdu_underrun_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdu_underrun_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n", - htt_stats_buf->user_index, - htt_stats_buf->ampdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_queued_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_queued_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_tried_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_failed_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_err_no_ba_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); } } if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) { if (!htt_stats_buf->user_index) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_mpdus_queued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_queued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_mpdus_tried_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_tried_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_mpdus_failed_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_failed_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_mpdus_requeued_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdus_requeued_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_err_no_ba_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->err_no_ba_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_mpdu_underrun_usr_%u = %u", - htt_stats_buf->user_index, - htt_stats_buf->mpdu_underrun_usr); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n", - htt_stats_buf->user_index, - htt_stats_buf->ampdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n", + htt_stats_buf->user_index, + 
htt_stats_buf->mpdus_queued_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_err_no_ba_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); } } @@ -1785,15 +1687,12 @@ htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0}; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n"); - ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n", - sched_cmd_posted); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted, + "sched_cmd_posted", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1812,15 +1711,12 @@ htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0}; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n"); - ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n", - sched_cmd_reaped); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped, + "sched_cmd_reaped", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1839,18 +1735,15 @@ htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sched_order_su[HTT_MAX_STRING_LEN] = {0}; /* each entry is u32, i.e. 
4 bytes */ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n"); - ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su, - sched_order_su_num_entries); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n", - sched_order_su); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su", + sched_order_su_num_entries, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1869,17 +1762,15 @@ htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char sched_ineligibility[HTT_MAX_STRING_LEN] = {0}; /* each entry is u32, i.e. 4 bytes */ u32 sched_ineligibility_num_entries = tag_len >> 2; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n"); - ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility, - sched_ineligibility_num_entries); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n", - sched_ineligibility); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility, + "sched_ineligibility", sched_ineligibility_num_entries, + "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1898,54 +1789,56 @@ htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__txq_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u", - (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u", - htt_stats_buf->sched_policy); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "last_sched_cmd_posted_timestamp = %u", - htt_stats_buf->last_sched_cmd_posted_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "last_sched_cmd_compl_timestamp = %u", - htt_stats_buf->last_sched_cmd_compl_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u", - htt_stats_buf->sched_2_tac_lwm_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u", - htt_stats_buf->sched_2_tac_ring_full); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u", - htt_stats_buf->sched_cmd_post_failure); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u", - htt_stats_buf->num_active_tids); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u", - htt_stats_buf->num_ps_schedules); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u", - htt_stats_buf->sched_cmds_pending); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u", - htt_stats_buf->num_tid_register); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u", - htt_stats_buf->num_tid_unregister); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u", - htt_stats_buf->num_qstats_queried); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u", - htt_stats_buf->qstats_update_pending); - len += HTT_DBG_OUT(buf + len, 
buf_len - len, "last_qstats_query_timestamp = %u", - htt_stats_buf->last_qstats_query_timestamp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u", - htt_stats_buf->num_tqm_cmdq_full); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u", - htt_stats_buf->num_de_sched_algo_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u", - htt_stats_buf->num_rt_sched_algo_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u", - htt_stats_buf->num_tqm_sched_algo_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u\n", - htt_stats_buf->notify_sched); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n", - htt_stats_buf->dur_based_sendn_term); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID, + htt_stats_buf->mac_id__txq_id__word)); + len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n", + FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID, + htt_stats_buf->mac_id__txq_id__word)); + len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n", + htt_stats_buf->sched_policy); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_posted_timestamp = %u\n", + htt_stats_buf->last_sched_cmd_posted_timestamp); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_compl_timestamp = %u\n", + htt_stats_buf->last_sched_cmd_compl_timestamp); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n", + htt_stats_buf->sched_2_tac_lwm_count); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n", + htt_stats_buf->sched_2_tac_ring_full); + len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n", + htt_stats_buf->sched_cmd_post_failure); + len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n", + htt_stats_buf->num_active_tids); + len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n", + htt_stats_buf->num_ps_schedules); + len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n", + htt_stats_buf->sched_cmds_pending); + len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n", + htt_stats_buf->num_tid_register); + len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n", + htt_stats_buf->num_tid_unregister); + len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n", + htt_stats_buf->num_qstats_queried); + len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n", + htt_stats_buf->qstats_update_pending); + len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n", + htt_stats_buf->last_qstats_query_timestamp); + len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n", + htt_stats_buf->num_tqm_cmdq_full); + len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n", + htt_stats_buf->num_de_sched_algo_trigger); + len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n", + htt_stats_buf->num_rt_sched_algo_trigger); + len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n", + htt_stats_buf->num_tqm_sched_algo_trigger); + len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n", + htt_stats_buf->notify_sched); + len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n", + 
htt_stats_buf->dur_based_sendn_term); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1963,11 +1856,11 @@ static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n", - htt_stats_buf->current_timestamp); + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n", + htt_stats_buf->current_timestamp); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -1986,16 +1879,13 @@ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0}; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n"); - ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n", - gen_mpdu_end_reason); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason, + "gen_mpdu_end_reason", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2014,16 +1904,14 @@ htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason, + "list_mpdu_end_reason", num_elems, "\n\n"); - ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n", - list_mpdu_end_reason); if (len >= buf_len) buf[buf_len - 1] = 0; else @@ -2041,16 +1929,13 @@ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n"); - ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n", - list_mpdu_cnt_hist); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist, + "list_mpdu_cnt_hist", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2069,69 +1954,69 @@ htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = 
ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u", - htt_stats_buf->msdu_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u", - htt_stats_buf->mpdu_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u", - htt_stats_buf->remove_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u", - htt_stats_buf->remove_mpdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u", - htt_stats_buf->remove_msdu_ttl); - len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u", - htt_stats_buf->send_bar); - len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u", - htt_stats_buf->bar_sync); - len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u", - htt_stats_buf->notify_mpdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u", - htt_stats_buf->sync_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u", - htt_stats_buf->write_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u", - htt_stats_buf->hwsch_trigger); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u", - htt_stats_buf->ack_tlv_proc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u", - htt_stats_buf->gen_mpdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u", - htt_stats_buf->gen_list_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u", - htt_stats_buf->remove_mpdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u", - htt_stats_buf->remove_mpdu_tried_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u", - htt_stats_buf->mpdu_queue_stats_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u", - htt_stats_buf->mpdu_head_info_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u", - htt_stats_buf->msdu_flow_stats_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u", - htt_stats_buf->remove_msdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u", - htt_stats_buf->remove_msdu_ttl_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u", - htt_stats_buf->flush_cache_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u", - htt_stats_buf->update_mpduq_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u", - htt_stats_buf->enqueue); - len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u", - htt_stats_buf->enqueue_notify); - len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u", - htt_stats_buf->notify_mpdu_at_head); - len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u", - htt_stats_buf->notify_mpdu_state_valid); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u", - htt_stats_buf->sched_udp_notify1); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u", - htt_stats_buf->sched_udp_notify2); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u", - htt_stats_buf->sched_nonudp_notify1); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n", - htt_stats_buf->sched_nonudp_notify2); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n", + htt_stats_buf->msdu_count); + len += scnprintf(buf 
+ len, buf_len - len, "mpdu_count = %u\n", + htt_stats_buf->mpdu_count); + len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n", + htt_stats_buf->remove_msdu); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n", + htt_stats_buf->remove_mpdu); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n", + htt_stats_buf->remove_msdu_ttl); + len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n", + htt_stats_buf->send_bar); + len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n", + htt_stats_buf->bar_sync); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n", + htt_stats_buf->notify_mpdu); + len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", + htt_stats_buf->sync_cmd); + len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", + htt_stats_buf->write_cmd); + len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n", + htt_stats_buf->hwsch_trigger); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + htt_stats_buf->ack_tlv_proc); + len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", + htt_stats_buf->gen_mpdu_cmd); + len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n", + htt_stats_buf->gen_list_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", + htt_stats_buf->remove_mpdu_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n", + htt_stats_buf->remove_mpdu_tried_cmd); + len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", + htt_stats_buf->mpdu_queue_stats_cmd); + len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", + htt_stats_buf->mpdu_head_info_cmd); + len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", + htt_stats_buf->msdu_flow_stats_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", + htt_stats_buf->remove_msdu_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n", + htt_stats_buf->remove_msdu_ttl_cmd); + len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", + htt_stats_buf->flush_cache_cmd); + len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", + htt_stats_buf->update_mpduq_cmd); + len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n", + htt_stats_buf->enqueue); + len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n", + htt_stats_buf->enqueue_notify); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n", + htt_stats_buf->notify_mpdu_at_head); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n", + htt_stats_buf->notify_mpdu_state_valid); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n", + htt_stats_buf->sched_udp_notify1); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n", + htt_stats_buf->sched_udp_notify2); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n", + htt_stats_buf->sched_nonudp_notify1); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n", + htt_stats_buf->sched_nonudp_notify2); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2149,23 +2034,23 @@ static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len 
- len, "max_cmdq_id = %u", - htt_stats_buf->max_cmdq_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u", - htt_stats_buf->list_mpdu_cnt_hist_intvl); - len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u", - htt_stats_buf->add_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u", - htt_stats_buf->q_empty); - len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u", - htt_stats_buf->q_not_empty); - len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u", - htt_stats_buf->drop_notification); - len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n", - htt_stats_buf->desc_threshold); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n", + htt_stats_buf->max_cmdq_id); + len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n", + htt_stats_buf->list_mpdu_cnt_hist_intvl); + len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n", + htt_stats_buf->add_msdu); + len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n", + htt_stats_buf->q_empty); + len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n", + htt_stats_buf->q_not_empty); + len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n", + htt_stats_buf->drop_notification); + len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n", + htt_stats_buf->desc_threshold); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2183,13 +2068,13 @@ static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u", - htt_stats_buf->q_empty_failure); - len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u", - htt_stats_buf->q_not_empty_failure); - len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n", - htt_stats_buf->add_msdu_failure); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n", + htt_stats_buf->q_empty_failure); + len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n", + htt_stats_buf->q_not_empty_failure); + len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n", + htt_stats_buf->add_msdu_failure); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2207,33 +2092,35 @@ static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__cmdq_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u\n", - (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u", - htt_stats_buf->sync_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u", - htt_stats_buf->write_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u", - htt_stats_buf->gen_mpdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u", - htt_stats_buf->mpdu_queue_stats_cmd); - len 
+= HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u", - htt_stats_buf->mpdu_head_info_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u", - htt_stats_buf->msdu_flow_stats_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u", - htt_stats_buf->remove_mpdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u", - htt_stats_buf->remove_msdu_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u", - htt_stats_buf->flush_cache_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u", - htt_stats_buf->update_mpduq_cmd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n", - htt_stats_buf->update_msduq_cmd); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID, + htt_stats_buf->mac_id__cmdq_id__word)); + len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n", + FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID, + htt_stats_buf->mac_id__cmdq_id__word)); + len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", + htt_stats_buf->sync_cmd); + len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", + htt_stats_buf->write_cmd); + len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", + htt_stats_buf->gen_mpdu_cmd); + len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", + htt_stats_buf->mpdu_queue_stats_cmd); + len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", + htt_stats_buf->mpdu_head_info_cmd); + len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", + htt_stats_buf->msdu_flow_stats_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", + htt_stats_buf->remove_mpdu_cmd); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", + htt_stats_buf->remove_msdu_cmd); + len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", + htt_stats_buf->flush_cache_cmd); + len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", + htt_stats_buf->update_mpduq_cmd); + len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n", + htt_stats_buf->update_msduq_cmd); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2252,20 +2139,20 @@ htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u", - htt_stats_buf->m1_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u", - htt_stats_buf->m2_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u", - htt_stats_buf->m3_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u", - htt_stats_buf->m4_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u", - htt_stats_buf->g1_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n", - htt_stats_buf->g2_packets); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n", + htt_stats_buf->m1_packets); + len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n", + htt_stats_buf->m2_packets); + len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n", + htt_stats_buf->m3_packets); + len += 
scnprintf(buf + len, buf_len - len, "m4_packets = %u\n", + htt_stats_buf->m4_packets); + len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n", + htt_stats_buf->g1_packets); + len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n", + htt_stats_buf->g2_packets); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2284,34 +2171,34 @@ htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u", - htt_stats_buf->ap_bss_peer_not_found); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u", - htt_stats_buf->ap_bcast_mcast_no_peer); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u", - htt_stats_buf->sta_delete_in_progress); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u", - htt_stats_buf->ibss_no_bss_peer); - len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u", - htt_stats_buf->invalid_vdev_type); - len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u", - htt_stats_buf->invalid_ast_peer_entry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u", - htt_stats_buf->peer_entry_invalid); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u", - htt_stats_buf->ethertype_not_ip); - len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u", - htt_stats_buf->eapol_lookup_failed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u", - htt_stats_buf->qpeer_not_allow_data); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u", - htt_stats_buf->fse_tid_override); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u", - htt_stats_buf->ipv6_jumbogram_zero_length); - len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n", - htt_stats_buf->qos_to_non_qos_in_prog); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n", + htt_stats_buf->ap_bss_peer_not_found); + len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n", + htt_stats_buf->ap_bcast_mcast_no_peer); + len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n", + htt_stats_buf->sta_delete_in_progress); + len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n", + htt_stats_buf->ibss_no_bss_peer); + len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n", + htt_stats_buf->invalid_vdev_type); + len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n", + htt_stats_buf->invalid_ast_peer_entry); + len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n", + htt_stats_buf->peer_entry_invalid); + len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n", + htt_stats_buf->ethertype_not_ip); + len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n", + htt_stats_buf->eapol_lookup_failed); + len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n", + htt_stats_buf->qpeer_not_allow_data); + len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n", + htt_stats_buf->fse_tid_override); + len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n", + htt_stats_buf->ipv6_jumbogram_zero_length); + len 
+= scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n", + htt_stats_buf->qos_to_non_qos_in_prog); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2330,73 +2217,73 @@ htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u", - htt_stats_buf->arp_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u", - htt_stats_buf->igmp_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u", - htt_stats_buf->dhcp_packets); - len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u", - htt_stats_buf->host_inspected); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u", - htt_stats_buf->htt_included); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u", - htt_stats_buf->htt_valid_mcs); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u", - htt_stats_buf->htt_valid_nss); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u", - htt_stats_buf->htt_valid_preamble_type); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u", - htt_stats_buf->htt_valid_chainmask); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u", - htt_stats_buf->htt_valid_guard_interval); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u", - htt_stats_buf->htt_valid_retries); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u", - htt_stats_buf->htt_valid_bw_info); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u", - htt_stats_buf->htt_valid_power); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x", - htt_stats_buf->htt_valid_key_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u", - htt_stats_buf->htt_valid_no_encryption); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u", - htt_stats_buf->fse_entry_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u", - htt_stats_buf->fse_priority_be); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u", - htt_stats_buf->fse_priority_high); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u", - htt_stats_buf->fse_priority_low); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u", - htt_stats_buf->fse_traffic_ptrn_be); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u", - htt_stats_buf->fse_traffic_ptrn_over_sub); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u", - htt_stats_buf->fse_traffic_ptrn_bursty); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u", - htt_stats_buf->fse_traffic_ptrn_interactive); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u", - htt_stats_buf->fse_traffic_ptrn_periodic); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u", - htt_stats_buf->fse_hwqueue_alloc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u", - htt_stats_buf->fse_hwqueue_created); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u", - htt_stats_buf->fse_hwqueue_send_to_host); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u", - htt_stats_buf->mcast_entry); - len += HTT_DBG_OUT(buf + len, 
buf_len - len, "bcast_entry = %u", - htt_stats_buf->bcast_entry); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u", - htt_stats_buf->htt_update_peer_cache); - len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u", - htt_stats_buf->htt_learning_frame); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u", - htt_stats_buf->fse_invalid_peer); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n", - htt_stats_buf->mec_notify); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n", + htt_stats_buf->arp_packets); + len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n", + htt_stats_buf->igmp_packets); + len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n", + htt_stats_buf->dhcp_packets); + len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n", + htt_stats_buf->host_inspected); + len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n", + htt_stats_buf->htt_included); + len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n", + htt_stats_buf->htt_valid_mcs); + len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n", + htt_stats_buf->htt_valid_nss); + len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n", + htt_stats_buf->htt_valid_preamble_type); + len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n", + htt_stats_buf->htt_valid_chainmask); + len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n", + htt_stats_buf->htt_valid_guard_interval); + len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n", + htt_stats_buf->htt_valid_retries); + len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n", + htt_stats_buf->htt_valid_bw_info); + len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n", + htt_stats_buf->htt_valid_power); + len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n", + htt_stats_buf->htt_valid_key_flags); + len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n", + htt_stats_buf->htt_valid_no_encryption); + len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n", + htt_stats_buf->fse_entry_count); + len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n", + htt_stats_buf->fse_priority_be); + len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n", + htt_stats_buf->fse_priority_high); + len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n", + htt_stats_buf->fse_priority_low); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n", + htt_stats_buf->fse_traffic_ptrn_be); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n", + htt_stats_buf->fse_traffic_ptrn_over_sub); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n", + htt_stats_buf->fse_traffic_ptrn_bursty); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n", + htt_stats_buf->fse_traffic_ptrn_interactive); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n", + htt_stats_buf->fse_traffic_ptrn_periodic); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n", + htt_stats_buf->fse_hwqueue_alloc); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n", + htt_stats_buf->fse_hwqueue_created); + len += scnprintf(buf + len, buf_len - len, 
"fse_hwqueue_send_to_host = %u\n", + htt_stats_buf->fse_hwqueue_send_to_host); + len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n", + htt_stats_buf->mcast_entry); + len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n", + htt_stats_buf->bcast_entry); + len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n", + htt_stats_buf->htt_update_peer_cache); + len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n", + htt_stats_buf->htt_learning_frame); + len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n", + htt_stats_buf->fse_invalid_peer); + len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n", + htt_stats_buf->mec_notify); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2415,24 +2302,24 @@ htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u", - htt_stats_buf->eok); - len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u", - htt_stats_buf->classify_done); - len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u", - htt_stats_buf->lookup_failed); - len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u", - htt_stats_buf->send_host_dhcp); - len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u", - htt_stats_buf->send_host_mcast); - len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u", - htt_stats_buf->send_host_unknown_dest); - len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u", - htt_stats_buf->send_host); - len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n", - htt_stats_buf->status_invalid); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "eok = %u\n", + htt_stats_buf->eok); + len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n", + htt_stats_buf->classify_done); + len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n", + htt_stats_buf->lookup_failed); + len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n", + htt_stats_buf->send_host_dhcp); + len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n", + htt_stats_buf->send_host_mcast); + len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n", + htt_stats_buf->send_host_unknown_dest); + len += scnprintf(buf + len, buf_len - len, "send_host = %u\n", + htt_stats_buf->send_host); + len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n", + htt_stats_buf->status_invalid); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2451,14 +2338,14 @@ htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u", - htt_stats_buf->enqueued_pkts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u", - htt_stats_buf->to_tqm); - len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n", - htt_stats_buf->to_tqm_bypass); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n", + htt_stats_buf->enqueued_pkts); + len += scnprintf(buf + len, buf_len - len, "to_tqm = 
%u\n", + htt_stats_buf->to_tqm); + len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n", + htt_stats_buf->to_tqm_bypass); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2477,14 +2364,14 @@ htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u", - htt_stats_buf->discarded_pkts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u", - htt_stats_buf->local_frames); - len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n", - htt_stats_buf->is_ext_msdu); + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n", + htt_stats_buf->discarded_pkts); + len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n", + htt_stats_buf->local_frames); + len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n", + htt_stats_buf->is_ext_msdu); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2502,17 +2389,17 @@ static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u", - htt_stats_buf->tcl_dummy_frame); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u", - htt_stats_buf->tqm_dummy_frame); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u", - htt_stats_buf->tqm_notify_frame); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u", - htt_stats_buf->fw2wbm_enq); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n", - htt_stats_buf->tqm_bypass_frame); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n", + htt_stats_buf->tcl_dummy_frame); + len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n", + htt_stats_buf->tqm_dummy_frame); + len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n", + htt_stats_buf->tqm_notify_frame); + len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n", + htt_stats_buf->fw2wbm_enq); + len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n", + htt_stats_buf->tqm_bypass_frame); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2531,24 +2418,13 @@ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0}; u16 num_elements = tag_len >> 2; - u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements; - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV"); - - if (required_buffer_size < HTT_MAX_STRING_LEN) { - ARRAY_TO_STRING(fw2wbm_ring_full_hist, - htt_stats_buf->fw2wbm_ring_full_hist, - num_elements); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "fw2wbm_ring_full_hist = %s\n", - fw2wbm_ring_full_hist); - } else { - len += HTT_DBG_OUT(buf + len, buf_len - len, - "INSUFFICIENT PRINT BUFFER "); - } + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist, + 
"fw2wbm_ring_full_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2566,21 +2442,21 @@ htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *s u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u", - htt_stats_buf->tcl2fw_entry_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u", - htt_stats_buf->not_to_fw); - len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u", - htt_stats_buf->invalid_pdev_vdev_peer); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u", - htt_stats_buf->tcl_res_invalid_addrx); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u", - htt_stats_buf->wbm2fw_entry_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n", - htt_stats_buf->invalid_pdev); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n", + htt_stats_buf->tcl2fw_entry_count); + len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n", + htt_stats_buf->not_to_fw); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n", + htt_stats_buf->invalid_pdev_vdev_peer); + len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n", + htt_stats_buf->tcl_res_invalid_addrx); + len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n", + htt_stats_buf->wbm2fw_entry_count); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n", + htt_stats_buf->invalid_pdev); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2597,52 +2473,51 @@ static inline void htt_print_ring_if_stats_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0}; - char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0}; - - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u", - htt_stats_buf->base_addr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u", - htt_stats_buf->elem_size); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u", - htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u", - (htt_stats_buf->num_elems__prefetch_tail_idx & - 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u", - htt_stats_buf->head_idx__tail_idx & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u", - (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u", - htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u", - (htt_stats_buf->shadow_head_idx__shadow_tail_idx & - 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u", - htt_stats_buf->num_tail_incr); - len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u", - htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF); 
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u", - (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u", - htt_stats_buf->overrun_hit_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u", - htt_stats_buf->underrun_hit_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u", - htt_stats_buf->prod_blockwait_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u", - htt_stats_buf->cons_blockwait_count); - - ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count, - HTT_STATS_LOW_WM_BINS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ", - low_wm_hit_count); - - ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count, - HTT_STATS_HIGH_WM_BINS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n", - high_wm_hit_count); + + len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n", + htt_stats_buf->base_addr); + len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", + htt_stats_buf->elem_size); + len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS, + htt_stats_buf->num_elems__prefetch_tail_idx)); + len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX, + htt_stats_buf->num_elems__prefetch_tail_idx)); + len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX, + htt_stats_buf->head_idx__tail_idx)); + len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX, + htt_stats_buf->head_idx__tail_idx)); + len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX, + htt_stats_buf->shadow_head_idx__shadow_tail_idx)); + len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX, + htt_stats_buf->shadow_head_idx__shadow_tail_idx)); + len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n", + htt_stats_buf->num_tail_incr); + len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH, + htt_stats_buf->lwm_thresh__hwm_thresh)); + len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n", + FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH, + htt_stats_buf->lwm_thresh__hwm_thresh)); + len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n", + htt_stats_buf->overrun_hit_count); + len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n", + htt_stats_buf->underrun_hit_count); + len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n", + htt_stats_buf->prod_blockwait_count); + len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n", + htt_stats_buf->cons_blockwait_count); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count, + "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count, + "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2660,11 +2535,11 @@ static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, 
"HTT_RING_IF_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n", - htt_stats_buf->num_records); + len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", + htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2682,16 +2557,12 @@ static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = tag_len >> 2; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n"); - ARRAY_TO_STRING(dwords_used_by_user_n, - htt_stats_buf->dwords_used_by_user_n, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n", - dwords_used_by_user_n); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n, + "dwords_used_by_user_n", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2709,21 +2580,21 @@ static inline void htt_print_sfm_client_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u", - htt_stats_buf->client_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u", - htt_stats_buf->buf_min); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u", - htt_stats_buf->buf_max); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u", - htt_stats_buf->buf_busy); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u", - htt_stats_buf->buf_alloc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u", - htt_stats_buf->buf_avail); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n", - htt_stats_buf->num_users); + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "client_id = %u\n", + htt_stats_buf->client_id); + len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n", + htt_stats_buf->buf_min); + len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n", + htt_stats_buf->buf_max); + len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n", + htt_stats_buf->buf_busy); + len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n", + htt_stats_buf->buf_alloc); + len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n", + htt_stats_buf->buf_avail); + len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n", + htt_stats_buf->num_users); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2741,17 +2612,17 @@ static inline void htt_print_sfm_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u", - htt_stats_buf->buf_total); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u", - htt_stats_buf->mem_empty); - len += HTT_DBG_OUT(buf + len, buf_len 
- len, "deallocate_bufs = %u", - htt_stats_buf->deallocate_bufs); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n", - htt_stats_buf->num_records); + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n", + htt_stats_buf->buf_total); + len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n", + htt_stats_buf->mem_empty); + len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n", + htt_stats_buf->deallocate_bufs); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", + htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2769,42 +2640,51 @@ static inline void htt_print_sring_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u", - (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u", - (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u", - (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24); - len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x", - htt_stats_buf->base_addr_lsb); - len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x", - htt_stats_buf->base_addr_msb); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u", - htt_stats_buf->ring_size); - len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u", - htt_stats_buf->elem_size); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u", - htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u", - (htt_stats_buf->num_avail_words__num_valid_words & - 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u", - htt_stats_buf->head_ptr__tail_ptr & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u", - (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u", - htt_stats_buf->consumer_empty__producer_full & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u", - (htt_stats_buf->consumer_empty__producer_full & - 0xFFFF0000) >> 16); - len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u", - htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n", - (htt_stats_buf->prefetch_count__internal_tail_ptr & - 0xFFFF0000) >> 16); + len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_SRING_STATS_MAC_ID, + htt_stats_buf->mac_id__ring_id__arena__ep)); + len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n", + FIELD_GET(HTT_SRING_STATS_RING_ID, + htt_stats_buf->mac_id__ring_id__arena__ep)); + len += scnprintf(buf + len, buf_len - len, "arena = %lu\n", + FIELD_GET(HTT_SRING_STATS_ARENA, + htt_stats_buf->mac_id__ring_id__arena__ep)); + len += scnprintf(buf + len, buf_len - len, "ep = %lu\n", + 
FIELD_GET(HTT_SRING_STATS_EP, + htt_stats_buf->mac_id__ring_id__arena__ep)); + len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n", + htt_stats_buf->base_addr_lsb); + len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n", + htt_stats_buf->base_addr_msb); + len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n", + htt_stats_buf->ring_size); + len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", + htt_stats_buf->elem_size); + len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n", + FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS, + htt_stats_buf->num_avail_words__num_valid_words)); + len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n", + FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS, + htt_stats_buf->num_avail_words__num_valid_words)); + len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n", + FIELD_GET(HTT_SRING_STATS_HEAD_PTR, + htt_stats_buf->head_ptr__tail_ptr)); + len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n", + FIELD_GET(HTT_SRING_STATS_TAIL_PTR, + htt_stats_buf->head_ptr__tail_ptr)); + len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n", + FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY, + htt_stats_buf->consumer_empty__producer_full)); + len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n", + FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL, + htt_stats_buf->consumer_empty__producer_full)); + len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n", + FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT, + htt_stats_buf->prefetch_count__internal_tail_ptr)); + len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n", + FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR, + htt_stats_buf->prefetch_count__internal_tail_ptr)); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2822,9 +2702,9 @@ static inline void htt_print_sring_cmn_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n", - htt_stats_buf->num_records); + len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", + htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -2842,165 +2722,115 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 j; - char str_buf[HTT_MAX_STRING_LEN] = {0}; - char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL}; - - for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { - tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!tx_gi[j]) - goto fail; - } - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u", - htt_stats_buf->tx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u", - htt_stats_buf->ac_mu_mimo_tx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u", - htt_stats_buf->ax_mu_mimo_tx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u", - htt_stats_buf->ofdma_tx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u", - htt_stats_buf->rts_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u", - 
htt_stats_buf->rts_success); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u", - htt_stats_buf->ack_rssi); - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u", - htt_stats_buf->tx_legacy_cck_rate[0], - htt_stats_buf->tx_legacy_cck_rate[1], - htt_stats_buf->tx_legacy_cck_rate[2], - htt_stats_buf->tx_legacy_cck_rate[3]); - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n" - " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u", - htt_stats_buf->tx_legacy_ofdm_rate[0], - htt_stats_buf->tx_legacy_ofdm_rate[1], - htt_stats_buf->tx_legacy_ofdm_rate[2], - htt_stats_buf->tx_legacy_ofdm_rate[3], - htt_stats_buf->tx_legacy_ofdm_rate[4], - htt_stats_buf->tx_legacy_ofdm_rate[5], - htt_stats_buf->tx_legacy_ofdm_rate[6], - htt_stats_buf->tx_legacy_ofdm_rate[7]); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss, - HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss, - HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss, - HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss, - HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw, - HTT_TX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw, - HTT_TX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw, - HTT_TX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw, - HTT_TX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, 
"ofdma_tx_bw = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc, - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream, - HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf); - - len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u", - htt_stats_buf->tx_he_ltf[1], - htt_stats_buf->tx_he_ltf[2], - htt_stats_buf->tx_he_ltf[3]); + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n", + htt_stats_buf->tx_ldpc); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n", + htt_stats_buf->ac_mu_mimo_tx_ldpc); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n", + htt_stats_buf->ax_mu_mimo_tx_ldpc); + len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n", + htt_stats_buf->ofdma_tx_ldpc); + len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", + htt_stats_buf->rts_cnt); + len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n", + htt_stats_buf->rts_success); + len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n", + htt_stats_buf->ack_rssi); + + len += scnprintf(buf + len, buf_len - len, + "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n", + htt_stats_buf->tx_legacy_cck_rate[0], + htt_stats_buf->tx_legacy_cck_rate[1], + htt_stats_buf->tx_legacy_cck_rate[2], + htt_stats_buf->tx_legacy_cck_rate[3]); + + len += scnprintf(buf + len, buf_len - len, + "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n" + " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n", + htt_stats_buf->tx_legacy_ofdm_rate[0], + htt_stats_buf->tx_legacy_ofdm_rate[1], + htt_stats_buf->tx_legacy_ofdm_rate[2], + htt_stats_buf->tx_legacy_ofdm_rate[3], + htt_stats_buf->tx_legacy_ofdm_rate[4], + htt_stats_buf->tx_legacy_ofdm_rate[5], + htt_stats_buf->tx_legacy_ofdm_rate[6], + htt_stats_buf->tx_legacy_ofdm_rate[7]); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs, + "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs, + "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss", + HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss, + "ac_mu_mimo_tx_nss", + HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss, + "ax_mu_mimo_tx_nss", + HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss", + HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw", + HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw, + "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, 
"\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw, + "ax_mu_mimo_tx_bw", + HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw", + HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc", + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream", + HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); + + len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n", + htt_stats_buf->tx_he_ltf[1], + htt_stats_buf->tx_he_ltf[2], + htt_stats_buf->tx_he_ltf[3]); /* SU GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j], - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ", - j, tx_gi[j]); + len += scnprintf(buf + len, (buf_len - len), + "tx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL, + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* AC MU-MIMO GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j], - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ac_mu_mimo_tx_gi[%u] = %s ", - j, tx_gi[j]); + len += scnprintf(buf + len, (buf_len - len), + "ac_mu_mimo_tx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j], + NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* AX MU-MIMO GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j], - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ax_mu_mimo_tx_gi[%u] = %s ", - j, tx_gi[j]); + len += scnprintf(buf + len, (buf_len - len), + "ax_mu_mimo_tx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j], + NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* DL OFDMA GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j], - HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ", - j, tx_gi[j]); + len += scnprintf(buf + len, (buf_len - len), + "ofdma_tx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL, + HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm, - HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm", + HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3008,9 +2838,6 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, buf[len] = 0; stats_req->buf_len = len; -fail: - for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) - kfree(tx_gi[j]); } static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, @@ -3021,226 +2848,168 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i, j; - u16 index = 0; - char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL}; - char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL}; - char str_buf[HTT_MAX_STRING_LEN] = {0}; - char 
*rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL}; - for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!rssi_chain[j]) - goto fail; - } - - for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!rx_gi[j]) - goto fail; - } - - for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC); - if (!rx_pilot_evm_db[j]) - goto fail; - } - - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u", - htt_stats_buf->nsts); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u", - htt_stats_buf->rx_ldpc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u", - htt_stats_buf->rts_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u", - htt_stats_buf->rssi_mgmt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u", - htt_stats_buf->rssi_data); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u", - htt_stats_buf->rssi_comb); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d", - htt_stats_buf->rssi_in_dbm); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs, - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss, - HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm, - HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc, - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw, - HTT_RX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u", - htt_stats_buf->nss_count); - - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u", - htt_stats_buf->pilot_count); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "nsts = %u\n", + htt_stats_buf->nsts); + len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n", + htt_stats_buf->rx_ldpc); + len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", + htt_stats_buf->rts_cnt); + len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n", + htt_stats_buf->rssi_mgmt); + len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n", + htt_stats_buf->rssi_data); + len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n", + htt_stats_buf->rssi_comb); + len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n", + htt_stats_buf->rssi_in_dbm); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs", + 
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss", + HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm", + HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc", + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw", + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + + len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n", + htt_stats_buf->nss_count); + + len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n", + htt_stats_buf->pilot_count); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - index = 0; - + len += scnprintf(buf + len, buf_len - len, + "pilot_evm_db[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++) - index += scnprintf(&rx_pilot_evm_db[j][index], - HTT_MAX_STRING_LEN - index, - " %u:%d,", - i, - htt_stats_buf->rx_pilot_evm_db[j][i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ", - j, rx_pilot_evm_db[j]); + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", + i, + htt_stats_buf->rx_pilot_evm_db[j][i]); + len += scnprintf(buf + len, buf_len - len, "\n"); } - index = 0; - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); + len += scnprintf(buf + len, buf_len - len, + "pilot_evm_db_mean = "); for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) - index += scnprintf(&str_buf[index], - HTT_MAX_STRING_LEN - index, - " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf); + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", i, + htt_stats_buf->rx_pilot_evm_db_mean[i]); + len += scnprintf(buf + len, buf_len - len, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j], - HTT_RX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ", - j, rssi_chain[j]); + len += scnprintf(buf + len, buf_len - len, + "rssi_chain[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL, + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); } for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j], - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ", - j, rx_gi[j]); + len += scnprintf(buf + len, buf_len - len, + "rx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL, + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream, - HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf); - - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u", - htt_stats_buf->rx_11ax_su_ext); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u", - htt_stats_buf->rx_11ac_mumimo); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u", - htt_stats_buf->rx_11ax_mumimo); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u", - htt_stats_buf->rx_11ax_ofdma); - len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u", - htt_stats_buf->txbf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate, - 
HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ", - str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate, - HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ", - str_buf); - - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u", - htt_stats_buf->rx_active_dur_us_low); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u", - htt_stats_buf->rx_active_dur_us_high); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u", - htt_stats_buf->rx_11ax_ul_ofdma); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs, - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream", + HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); + + len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n", + htt_stats_buf->rx_11ax_su_ext); + len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n", + htt_stats_buf->rx_11ac_mumimo); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n", + htt_stats_buf->rx_11ax_mumimo); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n", + htt_stats_buf->rx_11ax_ofdma); + len += scnprintf(buf + len, buf_len - len, "txbf = %u\n", + htt_stats_buf->txbf); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate, + "rx_legacy_cck_rate", + HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate, + "rx_legacy_ofdm_rate", + HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n"); + + len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n", + htt_stats_buf->rx_active_dur_us_low); + len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n", + htt_stats_buf->rx_active_dur_us_high); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n", + htt_stats_buf->rx_11ax_ul_ofdma); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs, + "ul_ofdma_rx_mcs", + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { - ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j], - HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ", - j, rx_gi[j]); + len += scnprintf(buf + len, buf_len - len, + "ul_ofdma_rx_gi[%u] = ", j); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL, + HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss, - HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss, + "ul_ofdma_rx_nss", + HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw, - HTT_RX_PDEV_STATS_NUM_BW_COUNTERS); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw", + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u", + len += 
scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n", htt_stats_buf->ul_ofdma_rx_stbc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u", + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n", htt_stats_buf->ul_ofdma_rx_ldpc); - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu, - HTT_RX_PDEV_MAX_OFDMA_NUM_USER); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ", - str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu, - HTT_RX_PDEV_MAX_OFDMA_NUM_USER); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ", - str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok, - HTT_RX_PDEV_MAX_OFDMA_NUM_USER); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf); - - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); - ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail, - HTT_RX_PDEV_MAX_OFDMA_NUM_USER); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s", - str_buf); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu, + "rx_ulofdma_non_data_ppdu", + HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu, + "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok, + "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail, + "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - index = 0; - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); + len += scnprintf(buf + len, buf_len - len, + "rx_ul_fd_rssi: nss[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) - index += scnprintf(&str_buf[index], - HTT_MAX_STRING_LEN - index, - " %u:%d,", - i, htt_stats_buf->rx_ul_fd_rssi[j][i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf); + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", + i, htt_stats_buf->rx_ul_fd_rssi[j][i]); + len += scnprintf(buf + len, buf_len - len, "\n"); } - len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x", - htt_stats_buf->per_chain_rssi_pkt_type); + len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n", + htt_stats_buf->per_chain_rssi_pkt_type); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { - index = 0; - memset(str_buf, 0x0, HTT_MAX_STRING_LEN); + len += scnprintf(buf + len, buf_len - len, + "rx_per_chain_rssi_in_dbm[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) - index += scnprintf(&str_buf[index], - HTT_MAX_STRING_LEN - index, - " %u:%d,", - i, - htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf); + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", + i, + htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]); + len += scnprintf(buf + len, buf_len - len, "\n"); } - len += HTT_DBG_OUT(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3248,16 +3017,6 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, buf[len] = 0; 
stats_req->buf_len = len; - -fail: - for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) - kfree(rssi_chain[j]); - - for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) - kfree(rx_pilot_evm_db[j]); - - for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++) - kfree(rx_gi[i]); } static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf, @@ -3268,34 +3027,34 @@ static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u", - htt_stats_buf->fw_reo_ring_data_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u", - htt_stats_buf->fw_to_host_data_msdu_bcmc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u", - htt_stats_buf->fw_to_host_data_msdu_uc); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ofld_remote_data_buf_recycle_cnt = %u", - htt_stats_buf->ofld_remote_data_buf_recycle_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ofld_remote_free_buf_indication_cnt = %u", - htt_stats_buf->ofld_remote_free_buf_indication_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ofld_buf_to_host_data_msdu_uc = %u", - htt_stats_buf->ofld_buf_to_host_data_msdu_uc); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "reo_fw_ring_to_host_data_msdu_uc = %u", - htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u", - htt_stats_buf->wbm_sw_ring_reap); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u", - htt_stats_buf->wbm_forward_to_host_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u", - htt_stats_buf->wbm_target_recycle_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "target_refill_ring_recycle_cnt = %u", - htt_stats_buf->target_refill_ring_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n", + htt_stats_buf->fw_reo_ring_data_msdu); + len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n", + htt_stats_buf->fw_to_host_data_msdu_bcmc); + len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n", + htt_stats_buf->fw_to_host_data_msdu_uc); + len += scnprintf(buf + len, buf_len - len, + "ofld_remote_data_buf_recycle_cnt = %u\n", + htt_stats_buf->ofld_remote_data_buf_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, + "ofld_remote_free_buf_indication_cnt = %u\n", + htt_stats_buf->ofld_remote_free_buf_indication_cnt); + len += scnprintf(buf + len, buf_len - len, + "ofld_buf_to_host_data_msdu_uc = %u\n", + htt_stats_buf->ofld_buf_to_host_data_msdu_uc); + len += scnprintf(buf + len, buf_len - len, + "reo_fw_ring_to_host_data_msdu_uc = %u\n", + htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc); + len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n", + htt_stats_buf->wbm_sw_ring_reap); + len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n", + htt_stats_buf->wbm_forward_to_host_cnt); + len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n", + htt_stats_buf->wbm_target_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, + "target_refill_ring_recycle_cnt = %u\n", + htt_stats_buf->target_refill_ring_recycle_cnt); if (len >= buf_len) 
buf[buf_len - 1] = 0; @@ -3314,17 +3073,13 @@ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n"); - ARRAY_TO_STRING(refill_ring_empty_cnt, - htt_stats_buf->refill_ring_empty_cnt, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n", - refill_ring_empty_cnt); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt, + "refill_ring_empty_cnt", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3344,17 +3099,13 @@ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n"); - ARRAY_TO_STRING(rxdma_err_cnt, - htt_stats_buf->rxdma_err, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n", - rxdma_err_cnt); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3373,17 +3124,13 @@ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char reo_err_cnt[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n"); - ARRAY_TO_STRING(reo_err_cnt, - htt_stats_buf->reo_err, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n", - reo_err_cnt); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3402,27 +3149,27 @@ htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u", - htt_stats_buf->sample_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u", - htt_stats_buf->total_max); - len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u", - htt_stats_buf->total_avg); - len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u", - htt_stats_buf->total_sample); - len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u", - htt_stats_buf->non_zeros_avg); - len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u", - htt_stats_buf->non_zeros_sample); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u", - htt_stats_buf->last_non_zeros_max); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u", - htt_stats_buf->last_non_zeros_min); - len += HTT_DBG_OUT(buf + len, 
buf_len - len, "last_non_zeros_avg %u", - htt_stats_buf->last_non_zeros_avg); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n", - htt_stats_buf->last_non_zeros_sample); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n", + htt_stats_buf->sample_id); + len += scnprintf(buf + len, buf_len - len, "total_max = %u\n", + htt_stats_buf->total_max); + len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n", + htt_stats_buf->total_avg); + len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n", + htt_stats_buf->total_sample); + len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n", + htt_stats_buf->non_zeros_avg); + len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n", + htt_stats_buf->non_zeros_sample); + len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n", + htt_stats_buf->last_non_zeros_max); + len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n", + htt_stats_buf->last_non_zeros_min); + len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n", + htt_stats_buf->last_non_zeros_avg); + len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n", + htt_stats_buf->last_non_zeros_sample); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3441,17 +3188,13 @@ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n"); - ARRAY_TO_STRING(refill_ring_num_refill, - htt_stats_buf->refill_ring_num_refill, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n", - refill_ring_num_refill); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill, + "refill_ring_num_refill", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3468,113 +3211,106 @@ static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0}; - char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0}; - - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u", - htt_stats_buf->ppdu_recvd); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u", - htt_stats_buf->mpdu_cnt_fcs_ok); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u", - htt_stats_buf->mpdu_cnt_fcs_err); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u", - htt_stats_buf->tcp_msdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u", - htt_stats_buf->tcp_ack_msdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u", - htt_stats_buf->udp_msdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u", - htt_stats_buf->other_msdu_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u", - 
htt_stats_buf->fw_ring_mpdu_ind); - - ARRAY_TO_STRING(fw_ring_mgmt_subtype, - htt_stats_buf->fw_ring_mgmt_subtype, - HTT_STATS_SUBTYPE_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ", - fw_ring_mgmt_subtype); - - ARRAY_TO_STRING(fw_ring_ctrl_subtype, - htt_stats_buf->fw_ring_ctrl_subtype, - HTT_STATS_SUBTYPE_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ", - fw_ring_ctrl_subtype); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u", - htt_stats_buf->fw_ring_mcast_data_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u", - htt_stats_buf->fw_ring_bcast_data_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u", - htt_stats_buf->fw_ring_ucast_data_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u", - htt_stats_buf->fw_ring_null_data_msdu); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u", - htt_stats_buf->fw_ring_mpdu_drop); - len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u", - htt_stats_buf->ofld_local_data_ind_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "ofld_local_data_buf_recycle_cnt = %u", - htt_stats_buf->ofld_local_data_buf_recycle_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u", - htt_stats_buf->drx_local_data_ind_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "drx_local_data_buf_recycle_cnt = %u", - htt_stats_buf->drx_local_data_buf_recycle_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u", - htt_stats_buf->local_nondata_ind_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u", - htt_stats_buf->local_nondata_buf_recycle_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u", - htt_stats_buf->fw_status_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u", - htt_stats_buf->fw_status_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u", - htt_stats_buf->fw_pkt_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u", - htt_stats_buf->fw_pkt_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u", - htt_stats_buf->fw_link_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u", - htt_stats_buf->fw_link_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u", - htt_stats_buf->host_pkt_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u", - htt_stats_buf->host_pkt_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u", - htt_stats_buf->mon_pkt_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u", - htt_stats_buf->mon_pkt_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "mon_status_buf_ring_refill_cnt = %u", - htt_stats_buf->mon_status_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u", - htt_stats_buf->mon_status_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u", - htt_stats_buf->mon_desc_buf_ring_refill_cnt); - len += HTT_DBG_OUT(buf + len, 
buf_len - len, "mon_desc_buf_ring_empty_cnt = %u", - htt_stats_buf->mon_desc_buf_ring_empty_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u", - htt_stats_buf->mon_dest_ring_update_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u", - htt_stats_buf->mon_dest_ring_full_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u", - htt_stats_buf->rx_suspend_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u", - htt_stats_buf->rx_suspend_fail_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u", - htt_stats_buf->rx_resume_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u", - htt_stats_buf->rx_resume_fail_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u", - htt_stats_buf->rx_ring_switch_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u", - htt_stats_buf->rx_ring_restore_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u", - htt_stats_buf->rx_flush_cnt); - len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n", - htt_stats_buf->rx_recovery_reset_cnt); + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n", + htt_stats_buf->ppdu_recvd); + len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n", + htt_stats_buf->mpdu_cnt_fcs_ok); + len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n", + htt_stats_buf->mpdu_cnt_fcs_err); + len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n", + htt_stats_buf->tcp_msdu_cnt); + len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n", + htt_stats_buf->tcp_ack_msdu_cnt); + len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n", + htt_stats_buf->udp_msdu_cnt); + len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n", + htt_stats_buf->other_msdu_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n", + htt_stats_buf->fw_ring_mpdu_ind); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype, + "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n"); + + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype, + "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n"); + + len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n", + htt_stats_buf->fw_ring_mcast_data_msdu); + len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n", + htt_stats_buf->fw_ring_bcast_data_msdu); + len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n", + htt_stats_buf->fw_ring_ucast_data_msdu); + len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n", + htt_stats_buf->fw_ring_null_data_msdu); + len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n", + htt_stats_buf->fw_ring_mpdu_drop); + len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n", + htt_stats_buf->ofld_local_data_ind_cnt); + len += scnprintf(buf + len, buf_len - len, + "ofld_local_data_buf_recycle_cnt = %u\n", + htt_stats_buf->ofld_local_data_buf_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n", + htt_stats_buf->drx_local_data_ind_cnt); + len += scnprintf(buf + len, buf_len - len, + 
"drx_local_data_buf_recycle_cnt = %u\n", + htt_stats_buf->drx_local_data_buf_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n", + htt_stats_buf->local_nondata_ind_cnt); + len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n", + htt_stats_buf->local_nondata_buf_recycle_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n", + htt_stats_buf->fw_status_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n", + htt_stats_buf->fw_status_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n", + htt_stats_buf->fw_pkt_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n", + htt_stats_buf->fw_pkt_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n", + htt_stats_buf->fw_link_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n", + htt_stats_buf->fw_link_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n", + htt_stats_buf->host_pkt_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n", + htt_stats_buf->host_pkt_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n", + htt_stats_buf->mon_pkt_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n", + htt_stats_buf->mon_pkt_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, + "mon_status_buf_ring_refill_cnt = %u\n", + htt_stats_buf->mon_status_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n", + htt_stats_buf->mon_status_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n", + htt_stats_buf->mon_desc_buf_ring_refill_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n", + htt_stats_buf->mon_desc_buf_ring_empty_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n", + htt_stats_buf->mon_dest_ring_update_cnt); + len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n", + htt_stats_buf->mon_dest_ring_full_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n", + htt_stats_buf->rx_suspend_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n", + htt_stats_buf->rx_suspend_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n", + htt_stats_buf->rx_resume_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n", + htt_stats_buf->rx_resume_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n", + htt_stats_buf->rx_ring_switch_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n", + htt_stats_buf->rx_ring_restore_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n", + htt_stats_buf->rx_flush_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n", + htt_stats_buf->rx_recovery_reset_cnt); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3592,16 +3328,12 @@ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char 
fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n"); - ARRAY_TO_STRING(fw_ring_mpdu_err, - htt_stats_buf->fw_ring_mpdu_err, - HTT_RX_STATS_RXDMA_MAX_ERR); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n", - fw_ring_mpdu_err); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err, + "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3620,15 +3352,12 @@ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0}; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:"); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n"); - ARRAY_TO_STRING(fw_mpdu_drop, - htt_stats_buf->fw_mpdu_drop, - num_elems); - len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop", + num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3646,18 +3375,15 @@ htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf, u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char phy_errs[HTT_MAX_STRING_LEN] = {0}; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u", - htt_stats_buf->mac_id__word); - len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u", - htt_stats_buf->total_phy_err_cnt); + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n", + htt_stats_buf->mac_id__word); + len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n", + htt_stats_buf->total_phy_err_cnt); - ARRAY_TO_STRING(phy_errs, - htt_stats_buf->phy_err, - HTT_STATS_PHY_ERR_MAX); - len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs); + PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs", + HTT_STATS_PHY_ERR_MAX, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3676,20 +3402,20 @@ htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u", - htt_stats_buf->chan_num); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u", - htt_stats_buf->num_records); - len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x", - htt_stats_buf->valid_cca_counters_bitmap); - len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n", - htt_stats_buf->collection_interval); - - len += HTT_DBG_OUT(buf + len, buf_len - len, - "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)"); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|"); + len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", + htt_stats_buf->chan_num); + 
len += scnprintf(buf + len, buf_len - len, "num_records = %u\n", + htt_stats_buf->num_records); + len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n", + htt_stats_buf->valid_cca_counters_bitmap); + len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n", + htt_stats_buf->collection_interval); + + len += scnprintf(buf + len, buf_len - len, + "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n"); + len += scnprintf(buf + len, buf_len - len, + "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n"); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3708,16 +3434,16 @@ htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, - "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|", - htt_stats_buf->tx_frame_usec, - htt_stats_buf->rx_frame_usec, - htt_stats_buf->rx_clear_usec, - htt_stats_buf->my_rx_frame_usec, - htt_stats_buf->usec_cnt, - htt_stats_buf->med_rx_idle_usec, - htt_stats_buf->med_tx_idle_global_usec, - htt_stats_buf->cca_obss_usec); + len += scnprintf(buf + len, buf_len - len, + "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n", + htt_stats_buf->tx_frame_usec, + htt_stats_buf->rx_frame_usec, + htt_stats_buf->rx_clear_usec, + htt_stats_buf->my_rx_frame_usec, + htt_stats_buf->usec_cnt, + htt_stats_buf->med_rx_idle_usec, + htt_stats_buf->med_tx_idle_global_usec, + htt_stats_buf->cca_obss_usec); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3735,32 +3461,32 @@ static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u", - htt_stats_buf->mac_id__word & 0xFF); - len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u", - htt_stats_buf->last_unpause_ppdu_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u", - htt_stats_buf->hwsch_unpause_wait_tqm_write); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u", - htt_stats_buf->hwsch_dummy_tlv_skipped); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "hwsch_misaligned_offset_received = %u", - htt_stats_buf->hwsch_misaligned_offset_received); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u", - htt_stats_buf->hwsch_reset_count); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u", - htt_stats_buf->hwsch_dev_reset_war); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u", - htt_stats_buf->hwsch_delayed_pause); - len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u", - htt_stats_buf->hwsch_long_delayed_pause); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u", - htt_stats_buf->sch_rx_ppdu_no_response); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u", - htt_stats_buf->sch_selfgen_response); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n", - htt_stats_buf->sch_rx_sifs_resp_trigger); + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", + FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); + len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n", + 
htt_stats_buf->last_unpause_ppdu_id); + len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n", + htt_stats_buf->hwsch_unpause_wait_tqm_write); + len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n", + htt_stats_buf->hwsch_dummy_tlv_skipped); + len += scnprintf(buf + len, buf_len - len, + "hwsch_misaligned_offset_received = %u\n", + htt_stats_buf->hwsch_misaligned_offset_received); + len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n", + htt_stats_buf->hwsch_reset_count); + len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n", + htt_stats_buf->hwsch_dev_reset_war); + len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n", + htt_stats_buf->hwsch_delayed_pause); + len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n", + htt_stats_buf->hwsch_long_delayed_pause); + len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n", + htt_stats_buf->sch_rx_ppdu_no_response); + len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n", + htt_stats_buf->sch_selfgen_response); + len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n", + htt_stats_buf->sch_rx_sifs_resp_trigger); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3779,11 +3505,11 @@ htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u", - htt_stats_buf->pdev_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n", - htt_stats_buf->num_sessions); + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + htt_stats_buf->pdev_id); + len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n", + htt_stats_buf->num_sessions); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3802,27 +3528,33 @@ htt_print_pdev_stats_twt_session_tlv(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:"); - len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u", - htt_stats_buf->vdev_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x", - htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF, - (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8, - (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16, - (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24, - (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF), - (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8); - len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u", - htt_stats_buf->flow_id_flags); - len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u", - htt_stats_buf->dialog_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u", - htt_stats_buf->wake_dura_us); - len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u", - htt_stats_buf->wake_intvl_us); - len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n", - htt_stats_buf->sp_offset_us); + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n", + htt_stats_buf->vdev_id); + len += scnprintf(buf + len, buf_len - len, + "peer_mac = 
%02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n", + FIELD_GET(HTT_MAC_ADDR_L32_0, + htt_stats_buf->peer_mac.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_1, + htt_stats_buf->peer_mac.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_2, + htt_stats_buf->peer_mac.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_L32_3, + htt_stats_buf->peer_mac.mac_addr_l32), + FIELD_GET(HTT_MAC_ADDR_H16_0, + htt_stats_buf->peer_mac.mac_addr_h16), + FIELD_GET(HTT_MAC_ADDR_H16_1, + htt_stats_buf->peer_mac.mac_addr_h16)); + len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n", + htt_stats_buf->flow_id_flags); + len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n", + htt_stats_buf->dialog_id); + len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n", + htt_stats_buf->wake_dura_us); + len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n", + htt_stats_buf->wake_intvl_us); + len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n", + htt_stats_buf->sp_offset_us); if (len >= buf_len) buf[buf_len - 1] = 0; @@ -3841,21 +3573,21 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u", + len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n", htt_stats_buf->num_obss_tx_ppdu_success); - len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n", + len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n", htt_stats_buf->num_obss_tx_ppdu_failure); - len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n", + len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n", htt_stats_buf->num_non_srg_opportunities); - len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n", + len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_tried); - len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n", + len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_success); - len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n", + len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n", htt_stats_buf->num_srg_opportunities); - len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n", + len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n", htt_stats_buf->num_srg_ppdu_tried); - len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n", + len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n", htt_stats_buf->num_srg_ppdu_success); if (len >= buf_len) @@ -3878,25 +3610,25 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf, u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u", - htt_stats_buf->pdev_id); - len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u", - htt_stats_buf->current_head_idx); - len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u", - htt_stats_buf->current_tail_idx); - len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u", - htt_stats_buf->num_htt_msgs_sent); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "backpressure_time_ms = %u", - htt_stats_buf->backpressure_time_ms); + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", + htt_stats_buf->pdev_id); + len += scnprintf(buf + len, buf_len - 
len, "current_head_idx = %u\n", + htt_stats_buf->current_head_idx); + len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n", + htt_stats_buf->current_tail_idx); + len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n", + htt_stats_buf->num_htt_msgs_sent); + len += scnprintf(buf + len, buf_len - len, + "backpressure_time_ms = %u\n", + htt_stats_buf->backpressure_time_ms); for (i = 0; i < 5; i++) - len += HTT_DBG_OUT(buf + len, buf_len - len, - "backpressure_hist_%u = %u", - i + 1, htt_stats_buf->backpressure_hist[i]); + len += scnprintf(buf + len, buf_len - len, + "backpressure_hist_%u = %u\n", + i + 1, htt_stats_buf->backpressure_hist[i]); - len += HTT_DBG_OUT(buf + len, buf_len - len, - "============================"); + len += scnprintf(buf + len, buf_len - len, + "============================\n"); if (len >= buf_len) { buf[buf_len - 1] = 0; @@ -3907,6 +3639,334 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf, } } +static inline +void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, + "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n"); + + len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = "); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = "); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs ="); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = "); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = "); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = "); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = "); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = "); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = "); + for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, + "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]); + len--; + + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = 
len; +} + +static inline +void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndpa_queued_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndpa_queued[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndpa_tried_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndpa_tried[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndpa_flushed_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndpa_err_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndpa_err[i]); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + stats_req->buf_len = len; +} + +static inline +void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndp_queued_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndp_queued[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndp_tried_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndp_tried[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndp_flushed_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndp_flushed[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_ndp_err_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_ndp_err[i]); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + stats_req->buf_len = len; +} + +static inline +void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_brpoll_queued_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_brpoll_queued[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_brpoll_tried_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_brpoll_tried[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_brpoll_flushed_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_brp_err_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_brp_err[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + stats_req->buf_len = len; +} + +static inline +void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf; + 
u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n"); + + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_num_ppdu_steer_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_num_ppdu_ol_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_num_usrs_prefetch_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_num_usrs_sound_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]); + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_num_usrs_force_sound_user%d = %u\n", + i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + stats_req->buf_len = len; +} + +static inline +void htt_print_phy_counters_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n", + htt_stats_buf->rx_ofdma_timing_err_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n", + htt_stats_buf->rx_cck_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n", + htt_stats_buf->mactx_abort_cnt); + len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n", + htt_stats_buf->macrx_abort_cnt); + len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n", + htt_stats_buf->phytx_abort_cnt); + len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n", + htt_stats_buf->phyrx_abort_cnt); + len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n", + htt_stats_buf->phyrx_defer_abort_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n", + htt_stats_buf->rx_gain_adj_lstf_event_cnt); + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n", + htt_stats_buf->rx_gain_adj_non_legacy_cnt); + + for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++) + len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n", + i, htt_stats_buf->rx_pkt_cnt[i]); + + for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++) + len += scnprintf(buf + len, buf_len - len, + "rx_pkt_crc_pass_cnt[%d] = %u\n", + i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]); + + for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++) + len += scnprintf(buf + len, buf_len - len, + "per_blk_err_cnt[%d] = %u\n", + i, htt_stats_buf->per_blk_err_cnt[i]); + + for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++) + len += scnprintf(buf + len, buf_len - len, + "rx_ota_err_cnt[%d] = %u\n", + i, htt_stats_buf->rx_ota_err_cnt[i]); + + stats_req->buf_len = len; +} + +static inline +void htt_print_phy_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n"); + + for (i = 0; i < HTT_STATS_MAX_CHAINS; i++) + len += 
scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n", + i, htt_stats_buf->nf_chain[i]); + + len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n", + htt_stats_buf->false_radar_cnt); + len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n", + htt_stats_buf->radar_cs_cnt); + len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n", + htt_stats_buf->ani_level); + len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n", + htt_stats_buf->fw_run_time); + + stats_req->buf_len = len; +} + +static inline +void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; + int i; + const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = { + "assoc_req", "assoc_resp", + "reassoc_req", "reassoc_resp", + "probe_req", "probe_resp", + "timing_advertisement", "reserved", + "beacon", "atim", "disassoc", + "auth", "deauth", "action", "action_no_ack"}; + + len += scnprintf(buf + len, buf_len - len, + "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n"); + len += scnprintf(buf + len, buf_len - len, + "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", + htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1], + htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3], + htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]); + + len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n"); + for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++) + len += scnprintf(buf + len, buf_len - len, "%s:%u\n", + mgmt_frm_type[i], + htt_stat_buf->peer_rx_mgmt_subtype[i]); + + len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n"); + for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++) + len += scnprintf(buf + len, buf_len - len, "%s:%u\n", + mgmt_frm_type[i], + htt_stat_buf->peer_rx_mgmt_subtype[i]); + + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = len; +} + static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *tag_buf, void *user_data) @@ -4258,6 +4318,30 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab, case HTT_STATS_RING_BACKPRESSURE_STATS_TAG: htt_print_backpressure_stats_tlv_v(tag_buf, user_data); break; + case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: + htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG: + htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG: + htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG: + htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG: + htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_PHY_COUNTERS_TAG: + htt_print_phy_counters_tlv(tag_buf, stats_req); + break; + case HTT_STATS_PHY_STATS_TAG: + htt_print_phy_stats_tlv(tag_buf, stats_req); + break; + case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG: + htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req); + break; default: break; } @@ -4345,8 +4429,7 @@ static ssize_t ath11k_write_htt_stats_type(struct file *file, if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS) return -E2BIG; - if (type == ATH11K_DBG_HTT_EXT_STATS_RESET || - type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO) + if (type == 
ATH11K_DBG_HTT_EXT_STATS_RESET) return -EPERM; ar->debug.htt_stats.type = type; @@ -4407,6 +4490,15 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type, case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS; break; + case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS: + cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR; + cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]); + cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]); + cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]); + cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]); + cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]); + cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]); + break; default: break; } @@ -4464,7 +4556,9 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file) u8 type = ar->debug.htt_stats.type; int ret; - if (type == ATH11K_DBG_HTT_EXT_STATS_RESET) + if (type == ATH11K_DBG_HTT_EXT_STATS_RESET || + type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO || + type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) return -EPERM; mutex_lock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h index d428f52003a427..dc210c54d1310a 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h +++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h @@ -102,6 +102,14 @@ enum htt_tlv_tag_t { HTT_STATS_PDEV_OBSS_PD_TAG = 88, HTT_STATS_HW_WAR_TAG = 89, HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90, + HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101, + HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108, + HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113, + HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114, + HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115, + HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116, + HTT_STATS_PHY_COUNTERS_TAG = 121, + HTT_STATS_PHY_STATS_TAG = 122, HTT_STATS_MAX_TAG, }; @@ -137,6 +145,8 @@ struct htt_stats_string_tlv { u32 data[0]; /* Can be variable length */ } __packed; +#define HTT_STATS_MAC_ID GENMASK(7, 0) + /* == TX PDEV STATS == */ struct htt_tx_pdev_stats_cmn_tlv { u32 mac_id__word; @@ -290,6 +300,10 @@ struct htt_hw_stats_whal_tx_tlv { }; /* ============ PEER STATS ============ */ +#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0) +#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16) +#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20) + struct htt_msdu_flow_stats_tlv { u32 last_update_timestamp; u32 last_add_timestamp; @@ -306,6 +320,11 @@ struct htt_msdu_flow_stats_tlv { #define MAX_HTT_TID_NAME 8 +#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0) +#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16) +#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0) +#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8) + /* Tidq stats */ struct htt_tx_tid_stats_tlv { /* Stored as little endian */ @@ -326,6 +345,11 @@ struct htt_tx_tid_stats_tlv { u32 tid_tx_airtime; }; +#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0) +#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16) +#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0) +#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8) + /* Tidq stats */ struct htt_tx_tid_stats_v1_tlv { /* Stored as little endian */ @@ -348,6 +372,9 @@ struct htt_tx_tid_stats_v1_tlv { u32 sendn_frms_allowed; }; +#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0) +#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16) + struct htt_rx_tid_stats_tlv { u32 sw_peer_id__tid_num; u8 
tid_name[MAX_HTT_TID_NAME]; @@ -386,6 +413,10 @@ struct htt_peer_stats_cmn_tlv { u32 inactive_time; }; +#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0) +#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8) +#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16) + struct htt_peer_details_tlv { u32 peer_type; u32 sw_peer_id; @@ -510,6 +541,9 @@ struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv { u32 mu_mimo_ampdu_underrun_usr; }; +#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0) +#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8) + struct htt_tx_hwq_mu_mimo_cmn_stats_tlv { u32 mac_id__hwq_id__word; }; @@ -789,6 +823,9 @@ struct htt_sched_txq_sched_ineligibility_tlv_v { u32 sched_ineligibility[0]; }; +#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) +#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) + struct htt_tx_pdev_stats_sched_per_txq_tlv { u32 mac_id__txq_id__word; u32 sched_policy; @@ -910,6 +947,9 @@ struct htt_tx_tqm_error_stats_tlv { }; /* == TQM CMDQ stats == */ +#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0) +#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8) + struct htt_tx_tqm_cmdq_status_tlv { u32 mac_id__cmdq_id__word; u32 sync_cmd; @@ -1055,6 +1095,15 @@ struct htt_tx_de_cmn_stats_tlv { #define HTT_STATS_LOW_WM_BINS 5 #define HTT_STATS_HIGH_WM_BINS 5 +#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0) +#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16) +#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0) +#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16) +#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0) +#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16) +#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0) +#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16) + struct htt_ring_if_stats_tlv { u32 base_addr; /* DWORD aligned base memory address of the ring */ u32 elem_size; @@ -1117,6 +1166,19 @@ struct htt_sfm_cmn_tlv { }; /* == SRNG STATS == */ +#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0) +#define HTT_SRING_STATS_RING_ID GENMASK(15, 8) +#define HTT_SRING_STATS_ARENA GENMASK(23, 16) +#define HTT_SRING_STATS_EP BIT(24) +#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0) +#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16) +#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0) +#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16) +#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0) +#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16) +#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0) +#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16) + struct htt_sring_stats_tlv { u32 mac_id__ring_id__arena__ep; u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */ @@ -1696,6 +1758,170 @@ struct htt_ring_backpressure_stats_tlv { u32 backpressure_hist[5]; }; +#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14 +#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5 +#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 + +struct htt_pdev_txrate_txbf_stats_tlv { + /* SU TxBF TX MCS stats */ + u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS]; + /* Implicit BF TX MCS stats */ + u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS]; + /* Open loop TX MCS stats */ + u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS]; + /* SU TxBF TX NSS stats */ + u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + /* Implicit BF TX NSS stats */ + u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + /* Open loop TX NSS stats */ + u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + /* SU TxBF TX BW stats */ + u32 
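Aside (illustrative sketch, not part of the patch): the GENMASK() definitions added above record how two values share a single 32-bit word, for example HTT_TX_TID_STATS_SW_PEER_ID in bits 15:0 and HTT_TX_TID_STATS_TID_NUM in bits 31:16 of sw_peer_id__tid_num; consumers recover them with FIELD_GET(). Standalone equivalent with explicit masks and shifts:

#include <stdint.h>
#include <stdio.h>

#define SW_PEER_ID_MASK  0x0000ffffu	/* GENMASK(15, 0)  */
#define TID_NUM_MASK     0xffff0000u	/* GENMASK(31, 16) */
#define TID_NUM_SHIFT    16

int main(void)
{
	uint32_t sw_peer_id__tid_num = 0x00050123;	/* tid 5, sw peer id 0x123 */

	uint32_t peer_id = sw_peer_id__tid_num & SW_PEER_ID_MASK;
	uint32_t tid = (sw_peer_id__tid_num & TID_NUM_MASK) >> TID_NUM_SHIFT;

	printf("sw_peer_id=%u tid=%u\n", peer_id, tid);
	return 0;
}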
tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS]; + /* Implicit BF TX BW stats */ + u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS]; + /* Open loop TX BW stats */ + u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS]; +}; + +struct htt_txbf_ofdma_ndpa_stats_tlv { + /* 11AX HE OFDMA NDPA frame queued to the HW */ + u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame sent over the air */ + u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame flushed by HW */ + u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame completed with error(s) */ + u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; +}; + +struct htt_txbf_ofdma_ndp_stats_tlv { + /* 11AX HE OFDMA NDP frame queued to the HW */ + u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame sent over the air */ + u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame flushed by HW */ + u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA NDPA frame completed with error(s) */ + u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; +}; + +struct htt_txbf_ofdma_brp_stats_tlv { + /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */ + u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA MU BRPOLL frame sent over the air */ + u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */ + u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */ + u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame + * completed with error(s). + */ + u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1]; +}; + +struct htt_txbf_ofdma_steer_stats_tlv { + /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */ + u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */ + u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA number of users for which CBF prefetch was + * initiated to PHY HW during TX. 
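Aside (illustrative sketch, not part of the patch): each htt_txbf_ofdma_*_stats_tlv above is a set of fixed-size u32 arrays indexed by OFDMA user position, and the matching debugfs printers walk them with scnprintf() loops like the ones earlier in this file. A standalone sketch of that per-slot formatting; the array length here is a placeholder, since HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS is defined elsewhere in the driver:

#include <stdint.h>
#include <stdio.h>

#define NUM_OFDMA_USER_STATS 8	/* placeholder; the real count is a FW interface constant */

/* append "name[i] = value" lines for one counter array, scnprintf()-style */
static size_t print_u32_array(char *buf, size_t buf_len, size_t len,
			      const char *name, const uint32_t *vals, int n)
{
	int i;

	for (i = 0; i < n && len < buf_len; i++)
		len += snprintf(buf + len, buf_len - len,
				"%s[%d] = %u\n", name, i, vals[i]);
	return len;
}

int main(void)
{
	uint32_t ndpa_queued[NUM_OFDMA_USER_STATS] = { 10, 4, 1 };
	char buf[512];

	print_u32_array(buf, sizeof(buf), 0, "ax_ofdma_ndpa_queued",
			ndpa_queued, NUM_OFDMA_USER_STATS);
	printf("%s", buf);
	return 0;
}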
+ */ + u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA number of users for which sounding was initiated during TX */ + u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; + /* 11AX HE OFDMA number of users for which sounding was forced during TX */ + u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS]; +}; + +#define HTT_MAX_RX_PKT_CNT 8 +#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8 +#define HTT_MAX_PER_BLK_ERR_CNT 20 +#define HTT_MAX_RX_OTA_ERR_CNT 14 +#define HTT_STATS_MAX_CHAINS 8 +#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16 + +struct htt_phy_counters_tlv { + /* number of RXTD OFDMA OTA error counts except power surge and drop */ + u32 rx_ofdma_timing_err_cnt; + /* rx_cck_fail_cnt: + * number of cck error counts due to rx reception failure because of + * timing error in cck + */ + u32 rx_cck_fail_cnt; + /* number of times tx abort initiated by mac */ + u32 mactx_abort_cnt; + /* number of times rx abort initiated by mac */ + u32 macrx_abort_cnt; + /* number of times tx abort initiated by phy */ + u32 phytx_abort_cnt; + /* number of times rx abort initiated by phy */ + u32 phyrx_abort_cnt; + /* number of rx defered count initiated by phy */ + u32 phyrx_defer_abort_cnt; + /* number of sizing events generated at LSTF */ + u32 rx_gain_adj_lstf_event_cnt; + /* number of sizing events generated at non-legacy LTF */ + u32 rx_gain_adj_non_legacy_cnt; + /* rx_pkt_cnt - + * Received EOP (end-of-packet) count per packet type; + * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF + * [6-7]=RSVD + */ + u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT]; + /* rx_pkt_crc_pass_cnt - + * Received EOP (end-of-packet) count per packet type; + * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF + * [6-7]=RSVD + */ + u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT]; + /* per_blk_err_cnt - + * Error count per error source; + * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG; + * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE; + * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF + * [13-19]=RSVD + */ + u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT]; + /* rx_ota_err_cnt - + * RXTD OTA (over-the-air) error count per error reason; + * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail; + * [3] = cck fail; [4] = power surge; [5] = power drop; + * [6] = btcf timing timeout error; [7] = btcf packet detect error; + * [8] = coarse timing timeout error + * [9-13]=RSVD + */ + u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT]; +}; + +struct htt_phy_stats_tlv { + /* per chain hw noise floor values in dBm */ + s32 nf_chain[HTT_STATS_MAX_CHAINS]; + /* number of false radars detected */ + u32 false_radar_cnt; + /* number of channel switches happened due to radar detection */ + u32 radar_cs_cnt; + /* ani_level - + * ANI level (noise interference) corresponds to the channel + * the desense levels range from -5 to 15 in dB units, + * higher values indicating more noise interference. 
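Aside (illustrative sketch, not part of the patch): the rx_pkt_cnt[], per_blk_err_cnt[] and rx_ota_err_cnt[] comments above give each array index a fixed meaning (for rx_pkt_cnt: 0 = 11a, 1 = 11b, 2 = 11n, 3 = 11ac, 4 = 11ax, 5 = greenfield, 6-7 reserved). A consumer of these counters can pair the slots with a name table when formatting:

#include <stdint.h>
#include <stdio.h>

#define MAX_RX_PKT_CNT 8

static const char * const rx_pkt_type[MAX_RX_PKT_CNT] = {
	"11a", "11b", "11n", "11ac", "11ax", "greenfield", "rsvd6", "rsvd7",
};

int main(void)
{
	/* sample counter values only, for demonstration */
	uint32_t rx_pkt_cnt[MAX_RX_PKT_CNT] = { 12, 0, 340, 1290, 5021, 0, 0, 0 };
	int i;

	for (i = 0; i < MAX_RX_PKT_CNT; i++)
		printf("rx_pkt_cnt[%s] = %u\n", rx_pkt_type[i], rx_pkt_cnt[i]);
	return 0;
}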
+ */ + s32 ani_level; + /* running time in minutes since FW boot */ + u32 fw_run_time; +}; + +struct htt_peer_ctrl_path_txrx_stats_tlv { + /* peer mac address */ + u8 peer_mac_addr[ETH_ALEN]; + u8 rsvd[2]; + /* Num of tx mgmt frames with subtype on peer level */ + u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX]; + /* Num of rx mgmt frames with subtype on peer level */ + u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX]; +}; + #ifdef CONFIG_ATH11K_DEBUGFS void ath11k_debugfs_htt_stats_init(struct ath11k *ar); diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c index 270c0edbb10f9f..fecd9718f5ce04 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c @@ -419,15 +419,21 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; struct ath11k *ar = arsta->arvif->ar; struct debug_htt_stats_req *stats_req; + int type = ar->debug.htt_stats.type; int ret; + if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO && + type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) || + type == ATH11K_DBG_HTT_EXT_STATS_RESET) + return -EPERM; + stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE); if (!stats_req) return -ENOMEM; mutex_lock(&ar->conf_mutex); ar->debug.htt_stats.stats_req = stats_req; - stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO; + stats_req->type = type; memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN); ret = ath11k_debugfs_htt_stats_req(ar); mutex_unlock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index b0c8f62900997e..8baaeeb8cf821f 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -311,7 +311,7 @@ void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab) if (!ab->hw_params.supports_shadow_regs) return; - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + for (i = 0; i < ab->hw_params.max_tx_ring; i++) ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]); ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer); @@ -326,7 +326,7 @@ static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab) ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring); - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) { + for (i = 0; i < ab->hw_params.max_tx_ring; i++) { ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); } @@ -366,7 +366,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) goto err; } - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) { + for (i = 0; i < ab->hw_params.max_tx_ring; i++) { ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring, HAL_TCL_DATA, i, 0, DP_TCL_DATA_RING_SIZE); @@ -739,6 +739,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, int budget) { struct napi_struct *napi = &irq_grp->napi; + const struct ath11k_hw_hal_params *hal_params; int grp_id = irq_grp->grp_id; int work_done = 0; int i = 0, j; @@ -821,8 +822,9 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + hal_params = ab->hw_params.hal_params; ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0, - HAL_RX_BUF_RBM_SW3_BM); + hal_params->rx_buf_rbm); } } } @@ -996,7 +998,7 @@ void ath11k_dp_free(struct ath11k_base *ab) 
ath11k_dp_reo_cmd_list_cleanup(ab); - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) { + for (i = 0; i < ab->hw_params.max_tx_ring; i++) { spin_lock_bh(&dp->tx_ring[i].tx_idr_lock); idr_for_each(&dp->tx_ring[i].txbuf_idr, ath11k_dp_tx_pending_cleanup, ab); @@ -1046,7 +1048,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab) size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE; - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) { + for (i = 0; i < ab->hw_params.max_tx_ring; i++) { idr_init(&dp->tx_ring[i].txbuf_idr); spin_lock_init(&dp->tx_ring[i].tx_idr_lock); dp->tx_ring[i].tcl_data_ring_id = i; diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index ee768ccce46e1e..4794ca04f21363 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -170,6 +170,7 @@ struct ath11k_pdev_dp { #define DP_BA_WIN_SZ_MAX 256 #define DP_TCL_NUM_RING_MAX 3 +#define DP_TCL_NUM_RING_MAX_QCA6390 1 #define DP_IDLE_SCATTER_BUFS_MAX 16 @@ -195,6 +196,7 @@ struct ath11k_pdev_dp { #define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096 #define DP_RX_BUFFER_SIZE 2048 +#define DP_RX_BUFFER_SIZE_LITE 1024 #define DP_RX_BUFFER_ALIGN_SIZE 128 #define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0) @@ -1592,6 +1594,13 @@ struct ath11k_htt_extd_stats_msg { u8 data[0]; } __packed; +#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0) +#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8) +#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16) +#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24) +#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0) +#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8) + struct htt_mac_addr { u32 mac_addr_l32; u32 mac_addr_h16; diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 9a224817630ae5..c5320847b80a7b 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -142,6 +142,18 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) return errmap; } +static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, + struct hal_rx_desc *desc) +{ + struct rx_attention *rx_attention; + u32 errmap; + + rx_attention = ath11k_dp_rx_get_attention(ab, desc); + errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); + + return errmap & DP_RX_MPDU_ERR_MSDU_LEN; +} + static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, struct hal_rx_desc *desc) { @@ -270,6 +282,18 @@ static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab, __le32_to_cpu(attn->info1))); } +static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc); +} + +static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab, + struct hal_rx_desc *desc) +{ + return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc); +} + static void ath11k_dp_service_mon_ring(struct timer_list *t) { struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); @@ -475,7 +499,7 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, rx_ring->bufs_max = num_entries; ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, - HAL_RX_BUF_RBM_SW3_BM); + ar->ab->hw_params.hal_params->rx_buf_rbm); return 0; } @@ -2156,6 +2180,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, { u8 *first_hdr; u8 decap; + struct ethhdr *ehdr; first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); @@ -2170,9 +2195,22 @@ static void 
ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, decrypted); break; case DP_RX_DECAP_TYPE_ETHERNET2_DIX: - /* TODO undecap support for middle/last msdu's of amsdu */ - ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, - enctype, status); + ehdr = (struct ethhdr *)msdu->data; + + /* mac80211 allows fast path only for authorized STA */ + if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { + ATH11K_SKB_RXCB(msdu)->is_eapol = true; + ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, + enctype, status); + break; + } + + /* PN for mcast packets will be validated in mac80211; + * remove eth header and add 802.11 header. + */ + if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) + ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, + enctype, status); break; case DP_RX_DECAP_TYPE_8023: /* TODO: Handle undecap for these formats */ @@ -2180,35 +2218,62 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, } } +static struct ath11k_peer * +ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) +{ + struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + struct hal_rx_desc *rx_desc = rxcb->rx_desc; + struct ath11k_peer *peer = NULL; + + lockdep_assert_held(&ab->base_lock); + + if (rxcb->peer_id) + peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); + + if (peer) + return peer; + + if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) + return NULL; + + peer = ath11k_peer_find_by_addr(ab, + ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); + return peer; +} + static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { - bool fill_crypto_hdr, mcast; + bool fill_crypto_hdr; enum hal_encrypt_type enctype; bool is_decrypted = false; + struct ath11k_skb_rxcb *rxcb; struct ieee80211_hdr *hdr; struct ath11k_peer *peer; struct rx_attention *rx_attention; u32 err_bitmap; - hdr = (struct ieee80211_hdr *)msdu->data; - /* PN for multicast packets will be checked in mac80211 */ + rxcb = ATH11K_SKB_RXCB(msdu); + fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); + rxcb->is_mcbc = fill_crypto_hdr; - mcast = is_multicast_ether_addr(hdr->addr1); - fill_crypto_hdr = mcast; + if (rxcb->is_mcbc) { + rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); + rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); + } spin_lock_bh(&ar->ab->base_lock); - peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); + peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); if (peer) { - if (mcast) + if (rxcb->is_mcbc) enctype = peer->sec_type_grp; else enctype = peer->sec_type; } else { - enctype = HAL_ENCRYPT_TYPE_OPEN; + enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); } spin_unlock_bh(&ar->ab->base_lock); @@ -2247,8 +2312,11 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, if (!is_decrypted || fill_crypto_hdr) return; - hdr = (void *)msdu->data; - hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != + DP_RX_DECAP_TYPE_ETHERNET2_DIX) { + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } } static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, @@ -2337,8 +2405,10 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, channel_num = meta_data; center_freq = meta_data >> 16; - if (center_freq >= 5935 && center_freq <= 7105) { + if (center_freq >= ATH11K_MIN_6G_FREQ && + center_freq <= 
ATH11K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; + rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { @@ -2356,57 +2426,56 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, rx_desc, sizeof(struct hal_rx_desc)); } - rx_status->freq = ieee80211_channel_to_frequency(channel_num, - rx_status->band); + if (rx_status->band != NL80211_BAND_6GHZ) + rx_status->freq = ieee80211_channel_to_frequency(channel_num, + rx_status->band); ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); } -static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, - size_t size) -{ - u8 *qc; - int tid; - - if (!ieee80211_is_data_qos(hdr->frame_control)) - return ""; - - qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - snprintf(out, size, "tid %d", tid); - - return out; -} - static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, - struct sk_buff *msdu) + struct sk_buff *msdu, + struct ieee80211_rx_status *status) { static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), }; - struct ieee80211_rx_status *status; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + struct ieee80211_rx_status *rx_status; struct ieee80211_radiotap_he *he = NULL; - char tid[32]; + struct ieee80211_sta *pubsta = NULL; + struct ath11k_peer *peer; + struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); + u8 decap = DP_RX_DECAP_TYPE_RAW; + bool is_mcbc = rxcb->is_mcbc; + bool is_eapol = rxcb->is_eapol; - status = IEEE80211_SKB_RXCB(msdu); - if (status->encoding == RX_ENC_HE) { + if (status->encoding == RX_ENC_HE && + !(status->flag & RX_FLAG_RADIOTAP_HE) && + !(status->flag & RX_FLAG_SKIP_MONITOR)) { he = skb_push(msdu, sizeof(known)); memcpy(he, &known, sizeof(known)); status->flag |= RX_FLAG_RADIOTAP_HE; } + if (!(status->flag & RX_FLAG_ONLY_MONITOR)) + decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc); + + spin_lock_bh(&ar->ab->base_lock); + peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); + if (peer && peer->sta) + pubsta = peer->sta; + spin_unlock_bh(&ar->ab->base_lock); + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, - "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, - ieee80211_get_SA(hdr), - ath11k_print_get_tid(hdr, tid, sizeof(tid)), - is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? - "mcast" : "ucast", - (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, + peer ? peer->addr : NULL, + rxcb->tid, + is_mcbc ? "mcast" : "ucast", + rxcb->seq_no, (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", (status->encoding == RX_ENC_HT) ? "ht" : "", (status->encoding == RX_ENC_VHT) ? 
"vht" : "", @@ -2426,22 +2495,32 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", msdu->data, msdu->len); + rx_status = IEEE80211_SKB_RXCB(msdu); + *rx_status = *status; + /* TODO: trace rx packet */ - ieee80211_rx_napi(ar->hw, NULL, msdu, napi); + /* PN for multicast packets are not validate in HW, + * so skip 802.3 rx path + * Also, fast_rx expectes the STA to be authorized, hence + * eapol packets are sent in slow path. + */ + if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && + !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) + rx_status->flag |= RX_FLAG_8023; + + ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); } static int ath11k_dp_rx_process_msdu(struct ath11k *ar, struct sk_buff *msdu, - struct sk_buff_head *msdu_list) + struct sk_buff_head *msdu_list, + struct ieee80211_rx_status *rx_status) { struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc, *lrx_desc; struct rx_attention *rx_attention; - struct ieee80211_rx_status rx_status = {0}; - struct ieee80211_rx_status *status; struct ath11k_skb_rxcb *rxcb; - struct ieee80211_hdr *hdr; struct sk_buff *last_buf; u8 l3_pad_bytes; u8 *hdr_status; @@ -2458,6 +2537,12 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, } rx_desc = (struct hal_rx_desc *)msdu->data; + if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { + ath11k_warn(ar->ab, "msdu len not valid\n"); + ret = -EIO; + goto free_out; + } + lrx_desc = (struct hal_rx_desc *)last_buf->data; rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { @@ -2497,19 +2582,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar, } } - hdr = (struct ieee80211_hdr *)msdu->data; - - /* Process only data frames */ - if (!ieee80211_is_data(hdr->frame_control)) - return -EINVAL; - - ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); - ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); + ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); + ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); - rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; + rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; - status = IEEE80211_SKB_RXCB(msdu); - *status = rx_status; return 0; free_out: @@ -2524,6 +2601,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, struct ath11k_skb_rxcb *rxcb; struct sk_buff *msdu; struct ath11k *ar; + struct ieee80211_rx_status rx_status = {0}; u8 mac_id; int ret; @@ -2546,7 +2624,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, continue; } - ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); + ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); if (ret) { ath11k_dbg(ab, ATH11K_DBG_DATA, "Unable to process msdu %d", ret); @@ -2554,7 +2632,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, continue; } - ath11k_dp_rx_deliver_msdu(ar, napi, msdu); + ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); (*quota)--; } @@ -2636,10 +2714,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); - rxcb->mac_id = mac_id; + rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, + desc.rx_mpdu_info.meta_data); + rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, + desc.rx_mpdu_info.info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 
desc.info0); + rxcb->mac_id = mac_id; __skb_queue_tail(&msdu_list, msdu); if (total_msdu_reaped >= quota && !rxcb->is_continuation) { @@ -2674,7 +2756,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM); + ab->hw_params.hal_params->rx_buf_rbm); } ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, @@ -2867,6 +2949,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, int *budget, struct sk_buff_head *skb_list) { struct ath11k *ar; + const struct ath11k_hw_hal_params *hal_params; struct ath11k_pdev_dp *dp; struct dp_rxdma_ring *rx_ring; struct hal_srng *srng; @@ -2937,8 +3020,9 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, &buf_id); if (!skb) { + hal_params = ab->hw_params.hal_params; ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, - HAL_RX_BUF_RBM_SW3_BM); + hal_params->rx_buf_rbm); num_buffs_reaped++; break; } @@ -2948,7 +3032,8 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, - cookie, HAL_RX_BUF_RBM_SW3_BM); + cookie, + ab->hw_params.hal_params->rx_buf_rbm); ath11k_hal_srng_src_get_next_entry(ab, srng); num_buffs_reaped++; } @@ -2969,6 +3054,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, struct ath11k_peer *peer; struct ath11k_sta *arsta; int num_buffs_reaped = 0; + u32 rx_buf_sz; + u16 log_type = 0; __skb_queue_head_init(&skb_list); @@ -2981,8 +3068,16 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, memset(&ppdu_info, 0, sizeof(ppdu_info)); ppdu_info.peer_id = HAL_INVALID_PEERID; - if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) - trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); + if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { + log_type = ATH11K_PKTLOG_TYPE_LITE_RX; + rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; + } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { + log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; + rx_buf_sz = DP_RX_BUFFER_SIZE; + } + + if (log_type) + trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); @@ -3010,7 +3105,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) - trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); + trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); @@ -3310,7 +3405,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, paddr)) return -ENOMEM; @@ -3327,7 +3422,8 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); - ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); + ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, + ab->hw_params.hal_params->rx_buf_rbm); /* Fill mpdu details into reo entrace ring */ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; @@ 
-3375,7 +3471,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti spin_unlock_bh(&rx_refill_ring->idr_lock); err_unmap_dma: dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return ret; } @@ -3704,7 +3800,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && - rbm != HAL_RX_BUF_RBM_SW3_BM) { + rbm != ab->hw_params.hal_params->rx_buf_rbm) { ab->soc_stats.invalid_rbm++; ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); ath11k_dp_rx_link_desc_return(ab, desc, @@ -3760,7 +3856,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM); + ab->hw_params.hal_params->rx_buf_rbm); } return tot_n_bufs_reaped; @@ -3941,7 +4037,6 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar, { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_rx_status rxs = {0}; - struct ieee80211_rx_status *status; bool drop = true; switch (rxcb->err_rel_src) { @@ -3961,10 +4056,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar, return; } - status = IEEE80211_SKB_RXCB(msdu); - *status = rxs; - - ath11k_dp_rx_deliver_msdu(ar, napi, msdu); + ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); } int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, @@ -4060,7 +4152,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM); + ab->hw_params.hal_params->rx_buf_rbm); } rcu_read_lock(); @@ -4169,7 +4261,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) if (num_buf_freed) ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, - HAL_RX_BUF_RBM_SW3_BM); + ab->hw_params.hal_params->rx_buf_rbm); return budget - quota; } @@ -4740,7 +4832,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, struct ieee80211_rx_status *rxs) { struct ath11k_base *ab = ar->ab; - struct sk_buff *msdu, *mpdu_buf, *prev_buf; + struct sk_buff *msdu, *prev_buf; u32 wifi_hdr_len; struct hal_rx_desc *rx_desc; char *hdr_desc; @@ -4748,8 +4840,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, struct ieee80211_hdr_3addr *wh; struct rx_attention *rx_attention; - mpdu_buf = NULL; - if (!head_msdu) goto err_merge_fail; @@ -4832,12 +4922,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, return head_msdu; err_merge_fail: - if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { - ath11k_dbg(ab, ATH11K_DBG_DATA, - "err_merge_fail mpdu_buf %pK", mpdu_buf); - /* Free the head buffer */ - dev_kfree_skb_any(mpdu_buf); - } return NULL; } @@ -4848,7 +4932,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, { struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; - struct ieee80211_rx_status *rxs = &dp->rx_status, *status; + struct ieee80211_rx_status *rxs = &dp->rx_status; mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, tail_msdu, rxs); @@ -4874,10 +4958,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, } rxs->flag |= RX_FLAG_ONLY_MONITOR; - status = IEEE80211_SKB_RXCB(mon_skb); - *status = *rxs; - - ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); + ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, 
rxs); mon_skb = skb_next; } while (mon_skb); rxs->flag = 0; @@ -4899,6 +4980,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + const struct ath11k_hw_hal_params *hal_params; void *ring_entry; void *mon_dst_srng; u32 ppdu_id; @@ -4962,16 +5044,18 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, if (rx_bufs_used) { rx_mon_stats->dest_ppdu_done++; + hal_params = ar->ab->hw_params.hal_params; + if (ar->ab->hw_params.rxdma1_enable) ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rxdma_mon_buf_ring, rx_bufs_used, - HAL_RX_BUF_RBM_SW3_BM); + hal_params->rx_buf_rbm); else ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rx_refill_buf_ring, rx_bufs_used, - HAL_RX_BUF_RBM_SW3_BM); + hal_params->rx_buf_rbm); } } @@ -5029,7 +5113,7 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); int ret = 0; - if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); else ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 8bba5234f81fc4..879fb2a9dc0c65 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -78,7 +78,7 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher) } int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, - struct sk_buff *skb) + struct ath11k_sta *arsta, struct sk_buff *skb) { struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; @@ -115,11 +115,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, tcl_ring_sel: tcl_ring_retry = false; - /* For some chip, it can only use tcl0 to tx */ - if (ar->ab->hw_params.tcl_0_only) - ti.ring_id = 0; - else - ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX; + + ti.ring_id = ring_selector % ab->hw_params.max_tx_ring; ring_map |= BIT(ti.ring_id); @@ -131,7 +128,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, spin_unlock_bh(&tx_ring->tx_idr_lock); if (ret < 0) { - if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) { + if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); return -ENOSPC; } @@ -145,7 +142,15 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) | FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id); ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb); - ti.meta_data_flags = arvif->tcl_metadata; + + if (ieee80211_has_a4(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr3) && arsta && + arsta->use_4addr_set) { + ti.meta_data_flags = arsta->tcl_metadata; + ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1); + } else { + ti.meta_data_flags = arvif->tcl_metadata; + } if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) { if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { @@ -240,8 +245,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
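Aside (illustrative sketch, not part of the patch): the TCL ring selection above picks ring_selector % max_tx_ring, marks every ring it has tried in a bitmask, and moves on to the next ring until all available rings have been checked once. A standalone model of that retry loop, with a stubbed try_ring() callback standing in for the descriptor allocation:

#include <stdio.h>

static int pick_tx_ring(unsigned int *ring_selector, unsigned int max_tx_ring,
			int (*try_ring)(unsigned int ring_id))
{
	unsigned int ring_map = 0;

	while (ring_map != (1u << max_tx_ring) - 1) {
		unsigned int ring_id = *ring_selector % max_tx_ring;

		ring_map |= 1u << ring_id;
		(*ring_selector)++;

		if (try_ring(ring_id))
			return (int)ring_id;
		/* this ring is full; fall through and try the next one */
	}
	return -1;	/* every ring was checked and none had room */
}

static int ring_full_except_2(unsigned int ring_id)
{
	return ring_id == 2;
}

int main(void)
{
	unsigned int selector = 0;

	printf("selected ring %d\n", pick_tx_ring(&selector, 3, ring_full_except_2));
	return 0;
}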
*/ - if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) && - !ar->ab->hw_params.tcl_0_only) { + if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) && + ab->hw_params.max_tx_ring > 1) { tcl_ring_retry = true; ring_selector++; } @@ -614,6 +619,9 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid, struct hal_srng *cmd_ring; int cmd_num; + if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) + return -ESHUTDOWN; + cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd); @@ -1068,12 +1076,16 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; - if (!reset) + if (!reset) { tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING; - else + } else { tlv_filter = ath11k_mac_mon_status_filter_default; + if (ath11k_debugfs_is_extd_rx_stats_enabled(ar)) + tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); + } + ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, dp->mac_id + i, HAL_RXDMA_MONITOR_STATUS, diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h index f8a9f9c8e444e1..698b907b878d7e 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.h +++ b/drivers/net/wireless/ath/ath11k/dp_tx.h @@ -17,7 +17,7 @@ struct ath11k_dp_htt_wbm_tx_status { int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab); int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, - struct sk_buff *skb); + struct ath11k_sta *arsta, struct sk_buff *skb); void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id); int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid, enum hal_reo_cmd_type type, diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index d54ec6aa6281fb..00b595b84939f7 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -496,6 +496,8 @@ struct hal_tlv_hdr { #define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29) #define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30) +#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) + struct rx_mpdu_desc { u32 info0; /* %RX_MPDU_DESC_INFO */ u32 meta_data; diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index 325055ca41abd1..329c404cfa80dc 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -356,6 +356,7 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, struct hal_wbm_release_ring *wbm_desc = desc; enum hal_wbm_rel_desc_type type; enum hal_wbm_rel_src_module rel_src; + enum hal_rx_buf_return_buf_manager ret_buf_mgr; type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE, wbm_desc->info0); @@ -371,8 +372,9 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, rel_src != HAL_WBM_REL_SRC_MODULE_REO) return -EINVAL; - if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, - wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) { + ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, + wbm_desc->buf_addr_info.info1); + if (ret_buf_mgr != ab->hw_params.hal_params->rx_buf_rbm) { ab->soc_stats.invalid_rbm++; return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index d9596903b0a588..da35fcf5bc5603 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ 
b/drivers/net/wireless/ath/ath11k/hw.c @@ -7,10 +7,11 @@ #include #include -#include "hw.h" #include "core.h" #include "ce.h" #include "hif.h" +#include "hal.h" +#include "hw.h" /* Map from pdev index to hw mac index */ static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx) @@ -97,6 +98,7 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab, config->num_multicast_filter_entries = 0x20; config->num_wow_filters = 0x16; config->num_keep_alive_pattern = 0; + config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64; } static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab) @@ -197,6 +199,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab, config->peer_map_unmap_v2_support = 1; config->twt_ap_pdev_count = ab->num_radios; config->twt_ap_sta_count = 1000; + config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64; } static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, @@ -372,6 +375,17 @@ static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info); } +static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) & + RX_MPDU_START_INFO1_MAC_ADDR2_VALID; +} + +static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.ipq8074.mpdu_start.addr2; +} + static struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc) { @@ -543,6 +557,17 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) return &desc->u.qcn9074.msdu_payload[0]; } +static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) & + RX_MPDU_START_INFO11_MAC_ADDR2_VALID; +} + +static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.qcn9074.mpdu_start.addr2; +} + static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc) { return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855, @@ -703,6 +728,17 @@ static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) return &desc->u.wcn6855.msdu_payload[0]; } +static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) & + RX_MPDU_START_INFO1_MAC_ADDR2_VALID; +} + +static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) +{ + return desc->u.wcn6855.mpdu_start.addr2; +} + static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab) { u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; @@ -799,6 +835,8 @@ const struct ath11k_hw_ops ipq8074_ops = { .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, .reo_setup = ath11k_hw_ipq8074_reo_setup, .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2, }; const struct ath11k_hw_ops ipq6018_ops = { @@ -835,6 +873,8 @@ const struct ath11k_hw_ops ipq6018_ops = { .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, .reo_setup = ath11k_hw_ipq8074_reo_setup, .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2, }; const struct ath11k_hw_ops qca6390_ops = 
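Aside (illustrative sketch, not part of the patch): the rx_desc_mac_addr2_valid / rx_desc_mpdu_start_addr2 hooks added below extend the per-chip ops table so common RX code can read descriptor fields whose layout differs between ipq8074, qcn9074 and wcn6855. A standalone model of that kind of ops table, using two invented descriptor layouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* pretend descriptor with addr2 copies for two hardware generations */
struct rx_desc {
	uint32_t info1;
	uint8_t addr2_v1[6];
	uint8_t addr2_v2[6];
};

struct hw_ops {
	bool (*mac_addr2_valid)(const struct rx_desc *desc);
	const uint8_t *(*mpdu_start_addr2)(const struct rx_desc *desc);
};

static bool v1_addr2_valid(const struct rx_desc *d) { return d->info1 & 0x1; }
static const uint8_t *v1_addr2(const struct rx_desc *d) { return d->addr2_v1; }
static bool v2_addr2_valid(const struct rx_desc *d) { return d->info1 & 0x2; }
static const uint8_t *v2_addr2(const struct rx_desc *d) { return d->addr2_v2; }

static const struct hw_ops ops_v1 = { v1_addr2_valid, v1_addr2 };
static const struct hw_ops ops_v2 = { v2_addr2_valid, v2_addr2 };

static void show_peer_addr(const struct hw_ops *ops, const struct rx_desc *desc)
{
	const uint8_t *a;

	if (!ops->mac_addr2_valid(desc)) {
		printf("addr2 not reported by hardware\n");
		return;
	}
	a = ops->mpdu_start_addr2(desc);
	printf("addr2 %02x:%02x:%02x:%02x:%02x:%02x\n",
	       a[0], a[1], a[2], a[3], a[4], a[5]);
}

int main(void)
{
	struct rx_desc d = {
		.info1 = 0x1,
		.addr2_v1 = { 0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc },
	};

	show_peer_addr(&ops_v1, &d);	/* valid bit set for layout v1 */
	show_peer_addr(&ops_v2, &d);	/* layout v2 checks a different bit */
	return 0;
}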
{ @@ -871,6 +911,8 @@ const struct ath11k_hw_ops qca6390_ops = { .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload, .reo_setup = ath11k_hw_ipq8074_reo_setup, .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2, }; const struct ath11k_hw_ops qcn9074_ops = { @@ -907,6 +949,8 @@ const struct ath11k_hw_ops qcn9074_ops = { .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload, .reo_setup = ath11k_hw_ipq8074_reo_setup, .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2, }; const struct ath11k_hw_ops wcn6855_ops = { @@ -943,6 +987,8 @@ const struct ath11k_hw_ops wcn6855_ops = { .rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload, .reo_setup = ath11k_hw_wcn6855_reo_setup, .mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2, }; #define ATH11K_TX_RING_MASK_0 0x1 @@ -2079,3 +2125,11 @@ const struct ath11k_hw_regs wcn6855_regs = { .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac, .pcie_pcs_osc_dtct_config_base = 0x01e0c628, }; + +const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM, +}; + +const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM, +}; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 62f5978b30055a..19223d36846e81 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -6,6 +6,7 @@ #ifndef ATH11K_HW_H #define ATH11K_HW_H +#include "hal.h" #include "wmi.h" /* Target configuration defines */ @@ -119,6 +120,10 @@ struct ath11k_hw_ring_mask { u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX]; }; +struct ath11k_hw_hal_params { + enum hal_rx_buf_return_buf_manager rx_buf_rbm; +}; + struct ath11k_hw_params { const char *name; u16 hw_rev; @@ -128,7 +133,7 @@ struct ath11k_hw_params { struct { const char *dir; size_t board_size; - size_t cal_size; + size_t cal_offset; } fw; const struct ath11k_hw_ops *hw_ops; @@ -152,8 +157,14 @@ struct ath11k_hw_params { bool rx_mac_buf_ring; bool vdev_start_delay; bool htt_peer_map_v2; - bool tcl_0_only; - u8 spectral_fft_sz; + + struct { + u8 fft_sz; + u8 fft_pad_sz; + u8 summary_pad_sz; + u8 fft_hdr_len; + u16 max_fft_bins; + } spectral; u16 interface_modes; bool supports_monitor; @@ -163,6 +174,8 @@ struct ath11k_hw_params { bool supports_suspend; u32 hal_desc_sz; bool fix_l1ss; + u8 max_tx_ring; + const struct ath11k_hw_hal_params *hal_params; }; struct ath11k_hw_ops { @@ -202,6 +215,8 @@ struct ath11k_hw_ops { u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc); void (*reo_setup)(struct ath11k_base *ab); u16 (*mpdu_info_get_peerid)(u8 *tlv_data); + bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc); + u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc); }; extern const struct ath11k_hw_ops ipq8074_ops; @@ -214,6 +229,9 @@ extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074; +extern const struct 
ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074; +extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390; + static inline int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw, int pdev_idx) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index e9b3689331ec2a..1cc55602787bbb 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -150,6 +150,9 @@ static const struct ieee80211_channel ath11k_6ghz_channels[] = { CHAN6G(225, 7075, 0), CHAN6G(229, 7095, 0), CHAN6G(233, 7115, 0), + + /* new addition in IEEE Std 802.11ax-2021 */ + CHAN6G(2, 5935, 0), }; static struct ieee80211_rate ath11k_legacy_rates[] = { @@ -354,6 +357,18 @@ ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) return 1; } +static u32 +ath11k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX]) +{ + int nss; + + for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--) + if (he_mcs_mask[nss]) + return nss + 1; + + return 1; +} + static u8 ath11k_parse_mpdudensity(u8 mpdudensity) { /* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": @@ -488,7 +503,8 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); - if (pdev && pdev->ar) { + if (pdev && pdev->ar && + (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) { arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id); if (arvif) return arvif; @@ -715,32 +731,386 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar) ar->num_stations = 0; } -static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id) +static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar) { - int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + return -ESHUTDOWN; + + if (!wait_for_completion_timeout(&ar->vdev_setup_done, + ATH11K_VDEV_SETUP_TIMEOUT_HZ)) + return -ETIMEDOUT; + + return ar->last_wmi_vdev_start_status ? 
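Aside (illustrative sketch, not part of the patch): with hal_params and max_tx_ring in ath11k_hw_params, values such as the RX return-buffer manager (SW3_BM on ipq8074-class chips, SW1_BM on qca6390 per the definitions above) and the usable TCL ring count (3 vs. 1, per the dp.h defines earlier in this patch) become per-device data rather than compile-time constants. Standalone model of that lookup:

#include <stdio.h>

enum rx_buf_rbm { RBM_SW1_BM, RBM_SW3_BM };

struct hw_hal_params {
	enum rx_buf_rbm rx_buf_rbm;
};

struct hw_params {
	const char *name;
	unsigned int max_tx_ring;
	const struct hw_hal_params *hal_params;
};

static const struct hw_hal_params hal_params_ipq8074 = { .rx_buf_rbm = RBM_SW3_BM };
static const struct hw_hal_params hal_params_qca6390 = { .rx_buf_rbm = RBM_SW1_BM };

static const struct hw_params hw_table[] = {
	{ .name = "ipq8074", .max_tx_ring = 3, .hal_params = &hal_params_ipq8074 },
	{ .name = "qca6390", .max_tx_ring = 1, .hal_params = &hal_params_qca6390 },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(hw_table) / sizeof(hw_table[0]); i++)
		printf("%s: tx rings %u, rbm %d\n", hw_table[i].name,
		       hw_table[i].max_tx_ring, hw_table[i].hal_params->rx_buf_rbm);
	return 0;
}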
-EINVAL : 0; +} + +static void +ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *conf, + void *data) +{ + struct cfg80211_chan_def **def = data; + + *def = &conf->def; +} + +static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_channel *channel; + struct wmi_vdev_start_req_arg arg = {}; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + channel = chandef->chan; + + arg.vdev_id = vdev_id; + arg.channel.freq = channel->center_freq; + arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; + + arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width]; + arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); + + arg.channel.min_power = 0; + arg.channel.max_power = channel->max_power * 2; + arg.channel.max_reg_power = channel->max_reg_power * 2; + arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + + arg.pref_tx_streams = ar->num_tx_chains; + arg.pref_rx_streams = ar->num_rx_chains; + + arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); + + reinit_completion(&ar->vdev_setup_done); + reinit_completion(&ar->vdev_delete_done); + + ret = ath11k_wmi_vdev_start(ar, &arg, false); + if (ret) { + ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n", + vdev_id, ret); + return ret; + } + + ret = ath11k_mac_vdev_setup_sync(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n", + vdev_id, ret); + return ret; + } ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); if (ret) { ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); - return ret; + goto vdev_stop; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n", vdev_id); + return 0; + +vdev_stop: + reinit_completion(&ar->vdev_setup_done); + + ret = ath11k_wmi_vdev_stop(ar, vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n", + vdev_id, ret); + return ret; + } + + ret = ath11k_mac_vdev_setup_sync(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n", + vdev_id, ret); + return ret; + } + + return -EIO; } -static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar) { - /* mac80211 requires this op to be present and that's why - * there's an empty function, this can be extended when - * required. 
- */ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + + ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + ret = ath11k_mac_vdev_setup_sync(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i stopped\n", + ar->monitor_vdev_id); + + return 0; +} + +static int ath11k_mac_monitor_vdev_create(struct ath11k *ar) +{ + struct ath11k_pdev *pdev = ar->pdev; + struct vdev_create_params param = {}; + int bit, ret; + u8 tmp_addr[6] = {0}; + u16 nss; + + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) + return 0; + + if (ar->ab->free_vdev_map == 0) { + ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n"); + return -ENOMEM; + } + + bit = __ffs64(ar->ab->free_vdev_map); + + ar->monitor_vdev_id = bit; + + param.if_id = ar->monitor_vdev_id; + param.type = WMI_VDEV_TYPE_MONITOR; + param.subtype = WMI_VDEV_SUBTYPE_NONE; + param.pdev_id = pdev->pdev_id; + + if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { + param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; + param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; + } + if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { + param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; + param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; + } + + ret = ath11k_wmi_vdev_create(ar, tmp_addr, ¶m); + if (ret) { + ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n", + ar->monitor_vdev_id, ret); + ar->monitor_vdev_id = -1; + return ret; + } + + nss = get_num_chains(ar->cfg_tx_chainmask) ? 
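Aside (illustrative sketch, not part of the patch): the monitor vdev id above comes from a 64-bit free-vdev bitmap - __ffs64() finds the lowest free bit, which is then cleared from free_vdev_map and set in allocated_vdev_map, and the delete path undoes both. Standalone model using a portable find-first-set loop:

#include <stdint.h>
#include <stdio.h>

static int alloc_vdev_id(uint64_t *free_map, uint64_t *allocated_map)
{
	int bit;

	if (*free_map == 0)
		return -1;			/* no free vdev ids left */

	for (bit = 0; bit < 64; bit++)		/* stands in for __ffs64() */
		if (*free_map & (1ULL << bit))
			break;

	*free_map &= ~(1ULL << bit);
	*allocated_map |= 1ULL << bit;
	return bit;
}

int main(void)
{
	uint64_t free_map = ~0x7ULL;		/* ids 0-2 already in use */
	uint64_t allocated_map = 0x7ULL;

	printf("monitor vdev id = %d\n", alloc_vdev_id(&free_map, &allocated_map));
	return 0;
}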
: 1; + ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, + WMI_VDEV_PARAM_NSS, nss); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", + ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); + goto err_vdev_del; + } + + ret = ath11k_mac_txpower_recalc(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n", + ar->monitor_vdev_id, ret); + goto err_vdev_del; + } + + ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; + ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); + ar->num_created_vdevs++; + set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d created\n", + ar->monitor_vdev_id); + + return 0; + +err_vdev_del: + ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); + ar->monitor_vdev_id = -1; + return ret; +} + +static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar) +{ + int ret; + unsigned long time_left; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) + return 0; + + reinit_completion(&ar->vdev_delete_done); + + ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n", + ar->monitor_vdev_id, ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->vdev_delete_done, + ATH11K_VDEV_DELETE_TIMEOUT_HZ); + if (time_left == 0) { + ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); + } else { + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d deleted\n", + ar->monitor_vdev_id); + + ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id); + ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id); + ar->num_created_vdevs--; + ar->monitor_vdev_id = -1; + clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); + } + + return ret; +} + +static int ath11k_mac_monitor_start(struct ath11k *ar) +{ + struct cfg80211_chan_def *chandef = NULL; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) + return 0; + + ieee80211_iter_chan_contexts_atomic(ar->hw, + ath11k_mac_get_any_chandef_iter, + &chandef); + if (!chandef) + return 0; + + ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef); + if (ret) { + ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret); + ath11k_mac_monitor_vdev_delete(ar); + return ret; + } + + set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); + + ar->num_started_vdevs++; + ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false); + if (ret) { + ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d", + ret); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor started\n"); return 0; } +static int ath11k_mac_monitor_stop(struct ath11k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) + return 0; + + ret = ath11k_mac_monitor_vdev_stop(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret); + return ret; + } + + clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); + ar->num_started_vdevs--; + + ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true); + if (ret) { + ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d", + ret); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor stopped ret %d\n", ret); + + return 0; +} + +static 
int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath11k *ar = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (conf->flags & IEEE80211_CONF_MONITOR) { + set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); + + if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, + &ar->monitor_flags)) + goto out; + + ret = ath11k_mac_monitor_vdev_create(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to create monitor vdev: %d", + ret); + goto out; + } + + ret = ath11k_mac_monitor_start(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to start monitor: %d", + ret); + goto err_mon_del; + } + } else { + clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); + + if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, + &ar->monitor_flags)) + goto out; + + ret = ath11k_mac_monitor_stop(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to stop monitor: %d", + ret); + goto out; + } + + ret = ath11k_mac_monitor_vdev_delete(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to delete monitor vdev: %d", + ret); + goto out; + } + } + } + +out: + mutex_unlock(&ar->conf_mutex); + return ret; + +err_mon_del: + ath11k_mac_monitor_vdev_delete(ar); + mutex_unlock(&ar->conf_mutex); + return ret; +} + static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; @@ -1035,7 +1405,7 @@ ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) } static bool -ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) +ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[]) { int nss; @@ -1093,6 +1463,14 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; } + /* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20 + * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, we reset + * both flags if guard interval is Default GI + */ + if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI) + arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40); + if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) @@ -1207,6 +1585,34 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set, return tx_mcs_set; } +static u8 ath11k_get_nss_160mhz(struct ath11k *ar, + u8 max_nss) +{ + u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info; + u8 max_sup_nss = 0; + + switch (nss_ratio_info) { + case WMI_NSS_RATIO_1BY2_NSS: + max_sup_nss = max_nss >> 1; + break; + case WMI_NSS_RATIO_3BY4_NSS: + ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n"); + break; + case WMI_NSS_RATIO_1_NSS: + max_sup_nss = max_nss; + break; + case WMI_NSS_RATIO_2_NSS: + ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n"); + break; + default: + ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n", + nss_ratio_info); + break; + } + + return max_sup_nss; +} + static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1216,10 +1622,12 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct ath11k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; - const u16 *vht_mcs_mask; + u16 *vht_mcs_mask; u8 ampdu_factor; u8 max_nss, vht_mcs; - int i; + int i, vht_nss, nss_idx; + bool user_rate_valid = true; + u32 rx_nss, tx_nss, nss_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; @@ 
-1262,6 +1670,24 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, if (sta->bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; + vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); + + if (vht_nss > sta->rx_nss) { + user_rate_valid = false; + for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) { + if (vht_mcs_mask[nss_idx]) { + user_rate_valid = true; + break; + } + } + } + + if (!user_rate_valid) { + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n", + sta->rx_nss, sta->addr); + vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1]; + } + /* Calculate peer NSS capability from VHT capabilities if STA * supports VHT. */ @@ -1294,10 +1720,95 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, /* TODO: Check */ arg->tx_max_mcs_nss = 0xFF; - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", - sta->addr, arg->peer_max_mpdu, arg->peer_flags); + if (arg->peer_phymode == MODE_11AC_VHT160 || + arg->peer_phymode == MODE_11AC_VHT80_80) { + tx_nss = ath11k_get_nss_160mhz(ar, max_nss); + rx_nss = min(arg->peer_nss, tx_nss); + arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; + + if (!rx_nss) { + ath11k_warn(ar->ab, "invalid max_nss\n"); + return; + } + + if (arg->peer_phymode == MODE_11AC_VHT160) + nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); + else + nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); + + arg->peer_bw_rxnss_override |= nss_160; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n", + sta->addr, arg->peer_max_mpdu, arg->peer_flags, + arg->peer_bw_rxnss_override); +} + +static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss) +{ + switch ((mcs_map >> (2 * nss)) & 0x3) { + case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1; + } + return 0; +} + +static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set, + const u16 he_mcs_limit[NL80211_HE_NSS_MAX]) +{ + int idx_limit; + int nss; + u16 mcs_map; + u16 mcs; + + for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { + mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) & + he_mcs_limit[nss]; + + if (mcs_map) + idx_limit = fls(mcs_map) - 1; + else + idx_limit = -1; + + switch (idx_limit) { + case 0 ... 
7: + mcs = IEEE80211_HE_MCS_SUPPORT_0_7; + break; + case 8: + case 9: + mcs = IEEE80211_HE_MCS_SUPPORT_0_9; + break; + case 10: + case 11: + mcs = IEEE80211_HE_MCS_SUPPORT_0_11; + break; + default: + WARN_ON(1); + fallthrough; + case -1: + mcs = IEEE80211_HE_MCS_NOT_SUPPORTED; + break; + } + + tx_mcs_set &= ~(0x3 << (nss * 2)); + tx_mcs_set |= mcs << (nss * 2); + } + + return tx_mcs_set; +} + +static bool +ath11k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX]) +{ + int nss; + + for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) + if (he_mcs_mask[nss]) + return false; - /* TODO: rxnss_override */ + return true; } static void ath11k_peer_assoc_h_he(struct ath11k *ar, @@ -1305,13 +1816,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u8 ampdu_factor; - u16 v; + enum nl80211_band band; + u16 *he_mcs_mask; + u8 max_nss, he_mcs; + u16 he_tx_mcs = 0, v = 0; + int i, he_nss, nss_idx; + bool user_rate_valid = true; + u32 rx_nss, tx_nss, nss_160; + + if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) + return; if (!he_cap->has_he) return; + band = def.chan->band; + he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; + + if (ath11k_peer_assoc_h_he_masked(he_mcs_mask)) + return; + arg->he_flag = true; memcpy_and_pad(&arg->peer_he_cap_macinfo, @@ -1388,25 +1916,48 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) arg->twt_requester = true; + he_nss = ath11k_mac_max_he_nss(he_mcs_mask); + + if (he_nss > sta->rx_nss) { + user_rate_valid = false; + for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) { + if (he_mcs_mask[nss_idx]) { + user_rate_valid = true; + break; + } + } + } + + if (!user_rate_valid) { + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n", + sta->rx_nss, sta->addr); + he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1]; + } + switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80); + v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; arg->peer_he_mcs_count++; + he_tx_mcs = v; } v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); + v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; arg->peer_he_mcs_count++; + if (!he_tx_mcs) + he_tx_mcs = v; fallthrough; default: @@ -1414,11 +1965,102 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); + v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; arg->peer_he_mcs_count++; + if (!he_tx_mcs) + he_tx_mcs = v; break; } + + /* Calculate peer NSS capability from HE capabilities if STA + * supports HE. 
+ */ + for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) { + he_mcs = he_tx_mcs >> (2 * i) & 3; + + /* In case of fixed rates, MCS Range in he_tx_mcs might have + * unsupported range, with he_mcs_mask set, so check either of them + * to find nss. + */ + if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED || + he_mcs_mask[i]) + max_nss = i + 1; + } + arg->peer_nss = min(sta->rx_nss, max_nss); + + if (arg->peer_phymode == MODE_11AX_HE160 || + arg->peer_phymode == MODE_11AX_HE80_80) { + tx_nss = ath11k_get_nss_160mhz(ar, max_nss); + rx_nss = min(arg->peer_nss, tx_nss); + arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; + + if (!rx_nss) { + ath11k_warn(ar->ab, "invalid max_nss\n"); + return; + } + + if (arg->peer_phymode == MODE_11AX_HE160) + nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); + else + nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); + + arg->peer_bw_rxnss_override |= nss_160; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n", + sta->addr, arg->peer_nss, + arg->peer_he_mcs_count, + arg->peer_bw_rxnss_override); +} + +static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 ampdu_factor; + + if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa) + return; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + arg->bw_80 = true; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + arg->bw_160 = true; + + arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa); + arg->peer_mpdu_density = + ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START, + arg->peer_he_caps_6ghz)); + + /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of + * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value + * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE + * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz + * Band Capabilities element in the 6 GHz band. + * + * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and + * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability. 
+ */ + ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK, + he_cap->he_cap_elem.mac_cap_info[3]) + + FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, + arg->peer_he_caps_6ghz); + + arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1; } static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, @@ -1427,11 +2069,16 @@ static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; int smps; - if (!ht_cap->ht_supported) + if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa) return; - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + if (ht_cap->ht_supported) { + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + } else { + smps = le16_get_bits(sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_SM_PS); + } switch (smps) { case WLAN_HT_CAP_SM_PS_STATIC: @@ -1621,6 +2268,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; + const u16 *he_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) @@ -1629,10 +2277,12 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; switch (band) { case NL80211_BAND_2GHZ: - if (sta->he_cap.has_he) { + if (sta->he_cap.has_he && + !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { if (sta->bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) @@ -1660,7 +2310,8 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: /* Check HE first */ - if (sta->he_cap.has_he) { + if (sta->he_cap.has_he && + !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { phymode = ath11k_mac_get_phymode_he(ar, sta); } else if (sta->vht_cap.vht_supported && !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { @@ -1702,11 +2353,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, ath11k_peer_assoc_h_basic(ar, vif, sta, arg); ath11k_peer_assoc_h_crypto(ar, vif, sta, arg); ath11k_peer_assoc_h_rates(ar, vif, sta, arg); + ath11k_peer_assoc_h_phymode(ar, vif, sta, arg); ath11k_peer_assoc_h_ht(ar, vif, sta, arg); ath11k_peer_assoc_h_vht(ar, vif, sta, arg); ath11k_peer_assoc_h_he(ar, vif, sta, arg); + ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg); ath11k_peer_assoc_h_qos(ar, vif, sta, arg); - ath11k_peer_assoc_h_phymode(ar, vif, sta, arg); ath11k_peer_assoc_h_smps(sta, arg); /* TODO: amsdu_disable req? 
*/ @@ -1714,15 +2366,20 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif, const u8 *addr, - const struct ieee80211_sta_ht_cap *ht_cap) + const struct ieee80211_sta_ht_cap *ht_cap, + u16 he_6ghz_capa) { int smps; - if (!ht_cap->ht_supported) + if (!ht_cap->ht_supported && !he_6ghz_capa) return 0; - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + if (ht_cap->ht_supported) { + smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; + smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + } else { + smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa); + } if (smps >= ARRAY_SIZE(ath11k_smps_map)) return -EINVAL; @@ -1775,7 +2432,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, } ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid, - &ap_sta->ht_cap); + &ap_sta->ht_cap, + le16_to_cpu(ap_sta->he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -1956,7 +2614,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar, /* Set and enable SRG/non-SRG OBSS PD Threshold */ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD; - if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) { + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id); if (ret) ath11k_warn(ar->ab, @@ -2383,18 +3041,21 @@ void __ath11k_mac_scan_finish(struct ath11k *ar) break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH11K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH11K_SCAN_ABORTING), + .aborted = ((ar->scan.state == + ATH11K_SCAN_ABORTING) || + (ar->scan.state == + ATH11K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); } - fallthrough; - case ATH11K_SCAN_STARTING: + ar->scan.state = ATH11K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; @@ -2886,6 +3547,20 @@ ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar, return num_rates; } +static int +ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) + num_rates += hweight16(mask->control[band].he_mcs[i]); + + return num_rates; +} + static int ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, @@ -2914,6 +3589,10 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, return -EINVAL; } + /* Avoid updating invalid nss as fixed rate*/ + if (nss > sta->rx_nss) + return -EINVAL; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Setting Fixed VHT Rate for peer %pM. 
Device will not switch to any other selected rates", sta->addr); @@ -2932,6 +3611,57 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, return ret; } +static int +ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif, + struct ieee80211_sta *sta, + const struct cfg80211_bitrate_mask *mask, + enum nl80211_band band) +{ + struct ath11k *ar = arvif->ar; + u8 he_rate, nss; + u32 rate_code; + int ret, i; + + lockdep_assert_held(&ar->conf_mutex); + + nss = 0; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { + if (hweight16(mask->control[band].he_mcs[i]) == 1) { + nss = i + 1; + he_rate = ffs(mask->control[band].he_mcs[i]) - 1; + } + } + + if (!nss) { + ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM", + sta->addr); + return -EINVAL; + } + + /* Avoid updating invalid nss as fixed rate */ + if (nss > sta->rx_nss) + return -EINVAL; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac setting fixed he rate for peer %pM, device will not switch to any other selected rates", + sta->addr); + + rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1, + WMI_RATE_PREAMBLE_HE); + + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_PARAM_FIXED_RATE, + rate_code); + if (ret) + ath11k_warn(ar->ab, + "failed to update sta %pM fixed rate %d: %d\n", + sta->addr, rate_code, ret); + + return ret; +} + static int ath11k_station_assoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2943,7 +3673,7 @@ static int ath11k_station_assoc(struct ath11k *ar, struct cfg80211_chan_def def; enum nl80211_band band; struct cfg80211_bitrate_mask *mask; - u8 num_vht_rates; + u8 num_vht_rates, num_he_rates; lockdep_assert_held(&ar->conf_mutex); @@ -2969,9 +3699,10 @@ static int ath11k_station_assoc(struct ath11k *ar, } num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); + num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); - /* If single VHT rate is configured (by set_bitrate_mask()), - * peer_assoc will disable VHT. This is now enabled by a peer specific + /* If single VHT/HE rate is configured (by set_bitrate_mask()), + * peer_assoc will disable VHT/HE. This is now enabled by a peer specific * fixed param. * Note that all other rates and NSS will be disabled for this peer. */ @@ -2980,6 +3711,11 @@ static int ath11k_station_assoc(struct ath11k *ar, band); if (ret) return ret; + } else if (sta->he_cap.has_he && num_he_rates == 1) { + ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, + band); + if (ret) + return ret; } /* Re-assoc is run only to update supported rates for given station. 
It @@ -2989,7 +3725,7 @@ static int ath11k_station_assoc(struct ath11k *ar, return 0; ret = ath11k_setup_peer_smps(ar, arvif, sta->addr, - &sta->ht_cap); + &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -3050,8 +3786,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; + const u16 *he_mcs_mask; u32 changed, bw, nss, smps; - int err, num_vht_rates; + int err, num_vht_rates, num_he_rates; const struct cfg80211_bitrate_mask *mask; struct peer_assoc_params peer_arg; @@ -3066,6 +3803,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; spin_lock_bh(&ar->data_lock); @@ -3081,8 +3819,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) mutex_lock(&ar->conf_mutex); nss = max_t(u32, 1, nss); - nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask), - ath11k_mac_max_vht_nss(vht_mcs_mask))); + nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask), + ath11k_mac_max_vht_nss(vht_mcs_mask)), + ath11k_mac_max_he_nss(he_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, @@ -3118,6 +3857,8 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) mask = &arvif->bitrate_mask; num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); + num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, + mask); /* Peer_assoc_prepare will reject vht rates in * bitrate_mask if its not available in range format and @@ -3133,11 +3874,25 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) if (sta->vht_cap.vht_supported && num_vht_rates == 1) { ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); + } else if (sta->he_cap.has_he && num_he_rates == 1) { + ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, + band); } else { - /* If the peer is non-VHT or no fixed VHT rate + /* If the peer is non-VHT/HE or no fixed VHT/HE rate * is provided in the new bitrate mask we set the - * other rates using peer_assoc command. + * other rates using peer_assoc command. 
Also clear + * the peer fixed rate settings as it has higher priority + * than peer assoc */ + err = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_PARAM_FIXED_RATE, + WMI_FIXED_RATE_NONE); + if (err) + ath11k_warn(ar->ab, + "failed to disable peer fixed rate for sta %pM: %d\n", + sta->addr, err); + ath11k_peer_assoc_prepare(ar, arvif->vif, sta, + &peer_arg, true); @@ -3155,6 +3910,31 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) mutex_unlock(&ar->conf_mutex); } +static void ath11k_sta_set_4addr_wk(struct work_struct *wk) +{ + struct ath11k *ar; + struct ath11k_vif *arvif; + struct ath11k_sta *arsta; + struct ieee80211_sta *sta; + int ret = 0; + + arsta = container_of(wk, struct ath11k_sta, set_4addr_wk); + sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + arvif = arsta->arvif; + ar = arvif->ar; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "setting USE_4ADDR for peer %pM\n", sta->addr); + + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_USE_4ADDR, 1); + + if (ret) + ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n", + sta->addr, ret); +} + static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif, struct ieee80211_sta *sta) { @@ -3234,11 +4014,13 @@ static int ath11k_mac_station_add(struct ath11k *ar, } if (ieee80211_vif_is_mesh(vif)) { + ath11k_dbg(ab, ATH11K_DBG_MAC, + "setting USE_4ADDR for mesh STA %pM\n", sta->addr); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) { - ath11k_warn(ab, "failed to STA %pM 4addr capability: %d\n", + ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", sta->addr, ret); goto free_tx_stats; } @@ -3291,8 +4073,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, /* cancel must be done outside the mutex to avoid deadlock */ if ((old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST)) + new_state == IEEE80211_STA_NOTEXIST)) { cancel_work_sync(&arsta->update_wk); + cancel_work_sync(&arsta->set_4addr_wk); + } mutex_lock(&ar->conf_mutex); @@ -3301,6 +4085,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); + INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); ret = ath11k_mac_station_add(ar, vif, sta); if (ret) @@ -3395,6 +4180,19 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, return ret; } +static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enabled) +{ + struct ath11k *ar = hw->priv; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + + if (enabled && !arsta->use_4addr_set) { + ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); + arsta->use_4addr_set = true; + } +} + static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3765,11 +4563,6 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); - /* TODO: Enable back VHT160 mode once association issues are fixed */ - /* Disabling VHT160 and VHT80+80 modes */ - vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; - vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; - rxmcs_map = 0; txmcs_map = 0; for (i = 0; i < 8; i++) { @@ -3814,7 +4607,9 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar, rate_cap_rx_chainmask); } - if (cap->supported_bands &
WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) { + if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && + (ar->ab->hw_params.single_pdev_only || + !ar->supports_6ghz)) { band = &ar->mac.sbands[NL80211_BAND_5GHZ]; ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; if (ht_cap_info) @@ -4313,6 +5108,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; + struct ath11k_sta *arsta = NULL; u32 info_flags = info->flags; bool is_prb_rsp; int ret; @@ -4338,7 +5134,10 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, return; } - ret = ath11k_dp_tx(ar, arvif, skb); + if (control->sta) + arsta = (struct ath11k_sta *)control->sta->drv_priv; + + ret = ath11k_dp_tx(ar, arvif, arsta, skb); if (ret) { ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); @@ -4639,7 +5438,8 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET || (vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP)) - vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; + vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED | + IEEE80211_OFFLOAD_DECAP_ENABLED); if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) param_value = ATH11K_HW_TXRX_ETHERNET; @@ -4655,6 +5455,22 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, arvif->vdev_id, ret); vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; } + + param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE; + if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED) + param_value = ATH11K_HW_TXRX_ETHERNET; + else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) + param_value = ATH11K_HW_TXRX_RAW; + else + param_value = ATH11K_HW_TXRX_NATIVE_WIFI; + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n", + arvif->vdev_id, ret); + vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; + } } static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, @@ -4683,8 +5499,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, } if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { - ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", - TARGET_NUM_VDEVS); + ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n", + ar->num_created_vdevs, TARGET_NUM_VDEVS); ret = -EBUSY; goto err; } @@ -4700,10 +5516,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { arvif->bitrate_mask.control[i].legacy = 0xffffffff; + arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI; memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].ht_mcs)); memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + memset(arvif->bitrate_mask.control[i].he_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].he_mcs)); } bit = __ffs64(ab->free_vdev_map); @@ -4724,6 +5543,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; + ar->monitor_vdev_id = bit; break; default: WARN_ON(1); @@ -4825,6 +5645,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err_peer_del; } break; + case 
WMI_VDEV_TYPE_MONITOR: + set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); + break; default: break; } @@ -4845,6 +5668,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, ath11k_dp_vdev_tx_attach(ar, arvif); + if (vif->type != NL80211_IFTYPE_MONITOR && + test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { + ret = ath11k_mac_monitor_vdev_create(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d", + ret); + goto err_peer_del; + } + } + mutex_unlock(&ar->conf_mutex); return 0; @@ -4942,6 +5775,18 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); + ar->monitor_vdev_id = -1; + } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) && + !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { + ret = ath11k_mac_monitor_vdev_delete(ar); + if (ret) + /* continue even if there's an error */ + ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d", + ret); + } + err_vdev_del: spin_lock_bh(&ar->data_lock); list_del(&arvif->list); @@ -4952,7 +5797,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, idr_for_each(&ar->txmgmt_idr, ath11k_mac_vif_txmgmt_idr_remove, vif); - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) { + for (i = 0; i < ab->hw_params.max_tx_ring; i++) { spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock); idr_for_each(&ab->dp.tx_ring[i].txbuf_idr, ath11k_mac_vif_unref, vif); @@ -4961,7 +5806,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, /* Recalc txpower for remaining vdev */ ath11k_mac_txpower_recalc(ar); - clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); /* TODO: recal traffic pause state based on the available vdevs */ @@ -4984,8 +5828,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct ath11k *ar = hw->priv; - bool reset_flag = false; - int ret = 0; mutex_lock(&ar->conf_mutex); @@ -4993,23 +5835,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; - /* For monitor mode */ - reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); - - ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); - if (!ret) { - if (!reset_flag) - set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - else - clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - } else { - ath11k_warn(ar->ab, - "fail to set monitor filter: %d\n", ret); - } - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, - "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n", - changed_flags, *total_flags, reset_flag); - mutex_unlock(&ar->conf_mutex); } @@ -5118,20 +5943,6 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } -static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar) -{ - lockdep_assert_held(&ar->conf_mutex); - - if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) - return -ESHUTDOWN; - - if (!wait_for_completion_timeout(&ar->vdev_setup_done, - ATH11K_VDEV_SETUP_TIMEOUT_HZ)) - return -ETIMEDOUT; - - return ar->last_wmi_vdev_start_status ? 
-EINVAL : 0; -} - static int ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, const struct cfg80211_chan_def *chandef, @@ -5214,7 +6025,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, return ret; } - ar->num_started_vdevs++; + if (!restart) + ar->num_started_vdevs++; + ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); @@ -5342,12 +6155,16 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, struct ath11k_vif *arvif; int ret; int i; + bool monitor_vif = false; lockdep_assert_held(&ar->conf_mutex); for (i = 0; i < n_vifs; i++) { arvif = (void *)vifs[i].vif->drv_priv; + if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) + monitor_vif = true; + ath11k_dbg(ab, ATH11K_DBG_MAC, "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", arvif->vdev_id, @@ -5368,6 +6185,8 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, arvif->vdev_id, ret); continue; } + + ar->num_started_vdevs--; } /* All relevant vdevs are downed and associated channel resources @@ -5405,6 +6224,24 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, continue; } } + + /* Restart the internal monitor vdev on new channel */ + if (!monitor_vif && + test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { + ret = ath11k_mac_monitor_stop(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d", + ret); + return; + } + + ret = ath11k_mac_monitor_start(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d", + ret); + return; + } + } } static void @@ -5484,7 +6321,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { - ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id); + ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr); if (ret) { ath11k_warn(ab, "failed put monitor up: %d\n", ret); return ret; @@ -5544,6 +6381,18 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, } } + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + ret = ath11k_mac_monitor_start(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", + ret); + goto out; + } + + arvif->is_started = true; + goto out; + } + ret = ath11k_mac_vdev_start(arvif, &ctx->def); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", @@ -5551,14 +6400,19 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, ctx->def.chan->center_freq, ret); goto out; } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { - ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id); - if (ret) - goto out; - } arvif->is_started = true; + if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { + ret = ath11k_mac_monitor_start(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", + ret); + goto out; + } + } + /* TODO: Setup ps and cts/rts protection */ ret = 0; @@ -5592,6 +6446,20 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ath11k_peer_find_by_addr(ab, ar->mac_addr)) ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + ret = ath11k_mac_monitor_stop(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", + ret); + mutex_unlock(&ar->conf_mutex); + return; + } + + arvif->is_started = false; + mutex_unlock(&ar->conf_mutex); + return; + } + ret = ath11k_mac_vdev_stop(arvif); if 
(ret) ath11k_warn(ab, "failed to stop vdev %i: %d\n", @@ -5603,6 +6471,16 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ath11k_wmi_vdev_down(ar, arvif->vdev_id); + if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + ar->num_started_vdevs == 1 && + test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { + ret = ath11k_mac_monitor_stop(ar); + if (ret) + /* continue even if there's an error */ + ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", + ret); + } + mutex_unlock(&ar->conf_mutex); } @@ -5720,9 +6598,26 @@ ath11k_mac_has_single_legacy_rate(struct ath11k *ar, if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) return false; + if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask)) + return false; + return num_rates == 1; } +static __le16 +ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap) +{ + if (he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + return he_cap->he_mcs_nss_supp.tx_mcs_80p80; + + if (he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + return he_cap->he_mcs_nss_supp.tx_mcs_160; + + return he_cap->he_mcs_nss_supp.tx_mcs_80; +} + static bool ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, enum nl80211_band band, @@ -5731,8 +6626,10 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + u16 he_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; + u8 he_nss_mask = 0; int i; /* No need to consider legacy here. Basic rates are always present @@ -5759,7 +6656,20 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, return false; } - if (ht_nss_mask != vht_nss_mask) + he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap)); + + for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { + if (mask->control[band].he_mcs[i] == 0) + continue; + + if (mask->control[band].he_mcs[i] == + ath11k_mac_get_max_he_mcs_map(he_mcs_map, i)) + he_nss_mask |= BIT(i); + else + return false; + } + + if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask) return false; if (ht_nss_mask == 0) @@ -5806,42 +6716,125 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar, return 0; } -static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif, - u32 rate, u8 nss, u8 sgi, u8 ldpc) +static int +ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf) { struct ath11k *ar = arvif->ar; - u32 vdev_param; int ret; - lockdep_assert_held(&ar->conf_mutex); + /* 0.8 = 0, 1.6 = 2 and 3.2 = 3. 
*/ + if (he_gi && he_gi != 0xFF) + he_gi += 1; - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", - arvif->vdev_id, rate, nss, sgi); + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_SGI, he_gi); + if (ret) { + ath11k_warn(ar->ab, "failed to set he gi %d: %d\n", + he_gi, ret); + return ret; + } + /* start from 1 */ + if (he_ltf != 0xFF) + he_ltf += 1; - vdev_param = WMI_VDEV_PARAM_FIXED_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - vdev_param, rate); + WMI_VDEV_PARAM_HE_LTF, he_ltf); if (ret) { - ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", - rate, ret); + ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n", + he_ltf, ret); return ret; } - vdev_param = WMI_VDEV_PARAM_NSS; + return 0; +} + +static int +ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf) +{ + struct ath11k *ar = arvif->ar; + int ret; + u32 he_ar_gi_ltf; + + if (he_gi != 0xFF) { + switch (he_gi) { + case NL80211_RATE_INFO_HE_GI_0_8: + he_gi = WMI_AUTORATE_800NS_GI; + break; + case NL80211_RATE_INFO_HE_GI_1_6: + he_gi = WMI_AUTORATE_1600NS_GI; + break; + case NL80211_RATE_INFO_HE_GI_3_2: + he_gi = WMI_AUTORATE_3200NS_GI; + break; + default: + ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi); + return -EINVAL; + } + } + + if (he_ltf != 0xFF) { + switch (he_ltf) { + case NL80211_RATE_INFO_HE_1XLTF: + he_ltf = WMI_HE_AUTORATE_LTF_1X; + break; + case NL80211_RATE_INFO_HE_2XLTF: + he_ltf = WMI_HE_AUTORATE_LTF_2X; + break; + case NL80211_RATE_INFO_HE_4XLTF: + he_ltf = WMI_HE_AUTORATE_LTF_4X; + break; + default: + ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf); + return -EINVAL; + } + } + + he_ar_gi_ltf = he_gi | he_ltf; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - vdev_param, nss); + WMI_VDEV_PARAM_AUTORATE_MISC_CFG, + he_ar_gi_ltf); if (ret) { - ath11k_warn(ar->ab, "failed to set nss param %d: %d\n", - nss, ret); + ath11k_warn(ar->ab, + "failed to set he autorate gi %u ltf %u: %d\n", + he_gi, he_ltf, ret); return ret; } - vdev_param = WMI_VDEV_PARAM_SGI; + return 0; +} + +static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif, + u32 rate, u8 nss, u8 sgi, u8 ldpc, + u8 he_gi, u8 he_ltf, bool he_fixed_rate) +{ + struct ath11k *ar = arvif->ar; + u32 vdev_param; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", + arvif->vdev_id, rate, nss, sgi, ldpc, he_gi, + he_ltf, he_fixed_rate); + + if (!arvif->vif->bss_conf.he_support) { + vdev_param = WMI_VDEV_PARAM_FIXED_RATE; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) { + ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", + rate, ret); + return ret; + } + } + + vdev_param = WMI_VDEV_PARAM_NSS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - vdev_param, sgi); + vdev_param, nss); if (ret) { - ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n", - sgi, ret); + ath11k_warn(ar->ab, "failed to set nss param %d: %d\n", + nss, ret); return ret; } @@ -5854,6 +6847,35 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif, return ret; } + if (arvif->vif->bss_conf.he_support) { + if (he_fixed_rate) { + ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, + he_ltf); + if (ret) { + ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n", + ret); + return ret; + } + } else { + ret 
= ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi, + he_ltf); + if (ret) { + ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n", + ret); + return ret; + } + } + } else { + vdev_param = WMI_VDEV_PARAM_SGI; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, sgi); + if (ret) { + ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n", + sgi, ret); + return ret; + } + } + return 0; } @@ -5882,6 +6904,31 @@ ath11k_mac_vht_mcs_range_present(struct ath11k *ar, return true; } +static bool +ath11k_mac_he_mcs_range_present(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int i; + u16 he_mcs; + + for (i = 0; i < NL80211_HE_NSS_MAX; i++) { + he_mcs = mask->control[band].he_mcs[i]; + + switch (he_mcs) { + case 0: + case BIT(8) - 1: + case BIT(10) - 1: + case BIT(12) - 1: + break; + default: + return false; + } + } + + return true; +} + static void ath11k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { @@ -5913,6 +6960,54 @@ static void ath11k_mac_disable_peer_fixed_rate(void *data, sta->addr, ret); } +static bool +ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + bool he_fixed_rate = false, vht_fixed_rate = false; + struct ath11k_peer *peer, *tmp; + const u16 *vht_mcs_mask, *he_mcs_mask; + u8 vht_nss, he_nss; + bool ret = true; + + vht_mcs_mask = mask->control[band].vht_mcs; + he_mcs_mask = mask->control[band].he_mcs; + + if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1) + vht_fixed_rate = true; + + if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1) + he_fixed_rate = true; + + if (!vht_fixed_rate && !he_fixed_rate) + return true; + + vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); + he_nss = ath11k_mac_max_he_nss(he_mcs_mask); + + rcu_read_lock(); + spin_lock_bh(&ar->ab->base_lock); + list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) { + if (peer->sta) { + if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported || + peer->sta->rx_nss < vht_nss)) { + ret = false; + goto out; + } + if (he_fixed_rate && (!peer->sta->he_cap.has_he || + peer->sta->rx_nss < he_nss)) { + ret = false; + goto out; + } + } + } + +out: + spin_unlock_bh(&ar->ab->base_lock); + rcu_read_unlock(); + return ret; +} + static int ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -5924,6 +7019,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; + const u16 *he_mcs_mask; + u8 he_ltf = 0; + u8 he_gi = 0; u32 rate; u8 nss; u8 sgi; @@ -5931,6 +7029,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, int single_nss; int ret; int num_rates; + bool he_fixed_rate = false; if (ath11k_mac_vif_chan(vif, &def)) return -EPERM; @@ -5938,12 +7037,16 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, band = def.chan->band; ht_mcs_mask = mask->control[band].ht_mcs; vht_mcs_mask = mask->control[band].vht_mcs; + he_mcs_mask = mask->control[band].he_mcs; ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); sgi = mask->control[band].gi; if (sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; + he_gi = mask->control[band].he_gi; + he_ltf = mask->control[band].he_ltf; + /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it * requires passing atleast one of used basic rates along with them. 
* Fixed rate setting across different preambles(legacy, HT, VHT) is @@ -5967,11 +7070,22 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, &single_nss)) { rate = WMI_FIXED_RATE_NONE; nss = single_nss; + mutex_lock(&ar->conf_mutex); + arvif->bitrate_mask = *mask; + ieee80211_iterate_stations_atomic(ar->hw, + ath11k_mac_set_bitrate_mask_iter, + arvif); + mutex_unlock(&ar->conf_mutex); } else { rate = WMI_FIXED_RATE_NONE; + + if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask)) + ath11k_warn(ar->ab, + "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n"); nss = min_t(u32, ar->num_tx_chains, - max(ath11k_mac_max_ht_nss(ht_mcs_mask), - ath11k_mac_max_vht_nss(vht_mcs_mask))); + max(max(ath11k_mac_max_ht_nss(ht_mcs_mask), + ath11k_mac_max_vht_nss(vht_mcs_mask)), + ath11k_mac_max_he_nss(he_mcs_mask))); /* If multiple rates across different preambles are given * we can reconfigure this info with all peers using PEER_ASSOC @@ -6002,16 +7116,28 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, * RATEMASK CMD */ ath11k_warn(ar->ab, - "Setting more than one MCS Value in bitrate mask not supported\n"); + "setting %d mcs values in bitrate mask not supported\n", + num_rates); + return -EINVAL; + } + + num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, + mask); + if (num_rates == 1) + he_fixed_rate = true; + + if (!ath11k_mac_he_mcs_range_present(ar, band, mask) && + num_rates > 1) { + ath11k_warn(ar->ab, + "Setting more than one HE MCS Value in bitrate mask not supported\n"); return -EINVAL; } + mutex_lock(&ar->conf_mutex); ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_disable_peer_fixed_rate, arvif); - mutex_lock(&ar->conf_mutex); - arvif->bitrate_mask = *mask; ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_set_bitrate_mask_iter, @@ -6022,9 +7148,10 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); + ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi, + he_ltf, he_fixed_rate); if (ret) { - ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", + ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n", arvif->vdev_id, ret); } @@ -6109,7 +7236,13 @@ static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + if (!sband) + sband = hw->wiphy->bands[NL80211_BAND_6GHZ]; if (!sband || idx >= sband->n_channels) { ret = -ENOENT; goto exit; @@ -6180,6 +7313,7 @@ static const struct ieee80211_ops ath11k_ops = { .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, .set_key = ath11k_mac_op_set_key, .sta_state = ath11k_mac_op_sta_state, + .sta_set_4addr = ath11k_mac_op_sta_set_4addr, .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, .sta_rc_update = ath11k_mac_op_sta_rc_update, .conf_tx = ath11k_mac_op_conf_tx, @@ -6240,7 +7374,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, u32 supported_bands) { struct ieee80211_supported_band *band; - struct ath11k_hal_reg_capabilities_ext *reg_cap; + struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap; void *channels; u32 phy_id; @@ -6250,6 +7384,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, ATH11K_NUM_CHANS); reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; + temp_reg_cap = reg_cap; if (supported_bands & WMI_HOST_WLAN_2G_CAP) { channels = 
kmemdup(ath11k_2ghz_channels, @@ -6268,11 +7403,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); - reg_cap = &ar->ab->hal_reg_cap[phy_id]; + temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, - reg_cap->low_2ghz_chan, - reg_cap->high_2ghz_chan); + temp_reg_cap->low_2ghz_chan, + temp_reg_cap->high_2ghz_chan); } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { @@ -6292,9 +7427,15 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, band->n_bitrates = ath11k_a_rates_size; band->bitrates = ath11k_a_rates; ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; + + if (ar->ab->hw_params.single_pdev_only) { + phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); + temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; + } + ath11k_mac_update_ch_list(ar, band, - reg_cap->low_5ghz_chan, - reg_cap->high_5ghz_chan); + temp_reg_cap->low_5ghz_chan, + temp_reg_cap->high_5ghz_chan); } if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) { @@ -6317,12 +7458,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); - reg_cap = &ar->ab->hal_reg_cap[phy_id]; + temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, - reg_cap->low_5ghz_chan, - reg_cap->high_5ghz_chan); + temp_reg_cap->low_5ghz_chan, + temp_reg_cap->high_5ghz_chan); } } @@ -6367,7 +7508,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80); + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160); ar->hw->wiphy->iface_combinations = combinations; ar->hw->wiphy->n_iface_combinations = 1; @@ -6505,8 +7648,16 @@ static int __ath11k_mac_register(struct ath11k *ar) ieee80211_hw_set(ar->hw, QUEUE_CONTROL); ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); - ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD); - if (ht_cap & WMI_HT_CAP_ENABLED) { + + if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) { + ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD); + ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD); + } + + if (cap->nss_ratio_enabled) + ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW); + + if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) { ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER); @@ -6521,7 +7672,7 @@ static int __ath11k_mac_register(struct ath11k *ar) * for each band for a dual band capable radio. It will be tricky to * handle it when the ht capability different for each band. 
*/ - if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz) ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; @@ -6590,7 +7741,7 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); /* Apply the regd received during initialization */ - ret = ath11k_regd_update(ar, true); + ret = ath11k_regd_update(ar); if (ret) { ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret); goto err_unregister_hw; @@ -6631,6 +7782,10 @@ int ath11k_mac_register(struct ath11k_base *ab) if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; + /* Initialize channel counters frequency value in hertz */ + ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; @@ -6641,18 +7796,14 @@ int ath11k_mac_register(struct ath11k_base *ab) ar->mac_addr[4] += i; } + idr_init(&ar->txmgmt_idr); + spin_lock_init(&ar->txmgmt_idr_lock); + ret = __ath11k_mac_register(ar); if (ret) goto err_cleanup; - - idr_init(&ar->txmgmt_idr); - spin_lock_init(&ar->txmgmt_idr_lock); } - /* Initialize channel counters frequency value in hertz */ - ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; - ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; - return 0; err_cleanup: @@ -6723,7 +7874,11 @@ int ath11k_mac_allocate(struct ath11k_base *ab) INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work); skb_queue_head_init(&ar->wmi_mgmt_tx_queue); - clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); + + clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); + + ar->monitor_vdev_id = -1; + clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); } return 0; diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 4bc59bdaf2443f..254ca4acc8e837 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -115,6 +115,9 @@ struct ath11k_generic_iter { #define WMI_MAX_SPATIAL_STREAM 3 #define ATH11K_CHAN_WIDTH_NUM 8 +#define ATH11K_BW_NSS_MAP_ENABLE BIT(31) +#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0) +#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3) #define ATH11K_OBSS_PD_MAX_THRESHOLD -82 #define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62 diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 5abb38cc3b55f3..3d353e7c9d5c24 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -430,6 +430,8 @@ static void ath11k_pci_force_wake(struct ath11k_base *ab) static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) { + mdelay(100); + if (power_on) { ath11k_pci_enable_ltssm(ab); ath11k_pci_clear_all_intrs(ab); @@ -439,9 +441,9 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) } ath11k_mhi_clear_vector(ab); + ath11k_pci_clear_dbg_registers(ab); ath11k_pci_soc_global_reset(ab); ath11k_mhi_set_mhictrl_reset(ab); - ath11k_pci_clear_dbg_registers(ab); } int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) @@ -853,7 +855,32 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) } } -static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) +static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable) +{ + struct pci_dev *dev = ab_pci->pdev; + u16 control; + + 
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + + if (enable) + control |= PCI_MSI_FLAGS_ENABLE; + else + control &= ~PCI_MSI_FLAGS_ENABLE; + + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); +} + +static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci) +{ + ath11k_pci_msi_config(ab_pci, true); +} + +static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci) +{ + ath11k_pci_msi_config(ab_pci, false); +} + +static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; const struct ath11k_msi_config *msi_config = ab_pci->msi_config; @@ -874,6 +901,7 @@ static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) else return num_vectors; } + ath11k_pci_msi_disable(ab_pci); msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); if (!msi_desc) { @@ -896,7 +924,7 @@ static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) return ret; } -static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci) +static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci) { pci_free_irq_vectors(ab_pci->pdev); } @@ -1017,6 +1045,8 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) */ ath11k_pci_aspm_disable(ab_pci); + ath11k_pci_msi_enable(ab_pci); + ret = ath11k_mhi_start(ab_pci); if (ret) { ath11k_err(ab, "failed to start mhi: %d\n", ret); @@ -1037,6 +1067,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) ath11k_pci_aspm_restore(ab_pci); ath11k_pci_force_wake(ab_pci->ab); + + ath11k_pci_msi_disable(ab_pci); + ath11k_mhi_stop(ab_pci); clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); ath11k_pci_sw_reset(ab_pci->ab, false); @@ -1261,7 +1294,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev, goto err_pci_free_region; } - ret = ath11k_pci_enable_msi(ab_pci); + ret = ath11k_pci_alloc_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to enable msi: %d\n", ret); goto err_pci_free_region; @@ -1315,7 +1348,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ath11k_mhi_unregister(ab_pci); err_pci_disable_msi: - ath11k_pci_disable_msi(ab_pci); + ath11k_pci_free_msi(ab_pci); err_pci_free_region: ath11k_pci_free_region(ab_pci); @@ -1346,7 +1379,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) ath11k_mhi_unregister(ab_pci); ath11k_pci_free_irq(ab); - ath11k_pci_disable_msi(ab_pci); + ath11k_pci_free_msi(ab_pci); ath11k_pci_free_region(ab_pci); ath11k_hal_srng_deinit(ab); diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index f49abefa9618e7..85471f8b3563c0 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -251,6 +251,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, struct ieee80211_sta *sta, struct peer_create_params *param) { struct ath11k_peer *peer; + struct ath11k_sta *arsta; int ret; lockdep_assert_held(&ar->conf_mutex); @@ -319,6 +320,16 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, peer->sec_type = HAL_ENCRYPT_TYPE_OPEN; peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; + if (sta) { + arsta = (struct ath11k_sta *)sta->drv_priv; + arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) | + FIELD_PREP(HTT_TCL_META_DATA_PEER_ID, + peer->peer_id); + + /* set HTT extension valid bit to 0 by default */ + arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT; + } + ar->num_peers++; spin_unlock_bh(&ar->ab->base_lock); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index b5e34d670715e7..fa73118de6dbd4 100644 --- 
a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -950,6 +950,78 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, num_macs), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + voltage_mv_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + voltage_mv), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + time_freq_hz_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + time_freq_hz), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + otp_version_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + otp_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + eeprom_read_timeout_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + eeprom_read_timeout), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -1770,7 +1842,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) chunk->vaddr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!chunk->vaddr) { if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, @@ -1846,8 +1918,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); - ret = qmi_txn_init(&ab->qmi.handle, &txn, - qmi_wlanfw_cap_resp_msg_v01_ei, &resp); + ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei, + &resp); if (ret < 0) goto out; @@ -1900,6 +1972,12 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id, sizeof(ab->qmi.target.fw_build_id)); + if (resp.eeprom_read_timeout_valid) { + ab->qmi.target.eeprom_caldata = + resp.eeprom_read_timeout; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n"); + } + ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", ab->qmi.target.chip_id, ab->qmi.target.chip_family, ab->qmi.target.board_id, ab->qmi.target.soc_id); @@ -1917,98 +1995,73 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) return ret; } -static int -ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type, - struct qmi_wlanfw_bdf_download_req_msg_v01 *req, - void __iomem *bdf_addr) -{ - const struct firmware *fw_entry; - struct ath11k_board_data bd; - u32 fw_size; - int ret; - - switch (type) { - 
case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN: - memset(&bd, 0, sizeof(bd)); - - ret = ath11k_core_fetch_bdf(ab, &bd); - if (ret) { - ath11k_warn(ab, "failed to load board file: %d\n", ret); - return ret; - } - - fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len); - memcpy_toio(bdf_addr, bd.data, fw_size); - ath11k_core_free_bdf(ab, &bd); - break; - case ATH11K_QMI_FILE_TYPE_CALDATA: - fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE); - if (IS_ERR(fw_entry)) { - ret = PTR_ERR(fw_entry); - ath11k_warn(ab, "failed to load %s: %d\n", - ATH11K_DEFAULT_CAL_FILE, ret); - return ret; - } - - fw_size = min_t(u32, ab->hw_params.fw.board_size, - fw_entry->size); - - memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET, - fw_entry->data, fw_size); - - release_firmware(fw_entry); - break; - default: - return -EINVAL; - } - - req->total_size = fw_size; - return 0; -} - -static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) +static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, + const u8 *data, u32 len, u8 type) { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; struct qmi_txn txn = {}; + const u8 *temp = data; void __iomem *bdf_addr = NULL; - int type, ret; + int ret; + u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; + memset(&resp, 0, sizeof(resp)); - bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE); - if (!bdf_addr) { - ath11k_warn(ab, "failed ioremap for board file\n"); - ret = -EIO; - goto out; + if (ab->bus_params.fixed_bdf_addr) { + bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size); + if (!bdf_addr) { + ath11k_warn(ab, "qmi ioremap error for bdf_addr\n"); + ret = -EIO; + goto err_free_req; + } } - for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) { + while (remaining) { req->valid = 1; req->file_id_valid = 1; req->file_id = ab->qmi.target.board_id; req->total_size_valid = 1; + req->total_size = remaining; req->seg_id_valid = 1; - req->seg_id = type; - req->data_valid = 0; - req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; - req->bdf_type = 0; - req->bdf_type_valid = 0; + req->data_valid = 1; + req->bdf_type = type; + req->bdf_type_valid = 1; req->end_valid = 1; - req->end = 1; + req->end = 0; - ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr); - if (ret < 0) - goto out_qmi_bdf; + if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) { + req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01; + } else { + req->data_len = remaining; + req->end = 1; + } + + if (ab->bus_params.fixed_bdf_addr || + type == ATH11K_QMI_FILE_TYPE_EEPROM) { + req->data_valid = 0; + req->end = 1; + req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; + } else { + memcpy(req->data, temp, req->data_len); + } + + if (ab->bus_params.fixed_bdf_addr) { + if (type == ATH11K_QMI_FILE_TYPE_CALDATA) + bdf_addr += ab->hw_params.fw.cal_offset; + + memcpy_toio(bdf_addr, temp, len); + } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_bdf_download_resp_msg_v01_ei, &resp); if (ret < 0) - goto out_qmi_bdf; + goto err_iounmap; ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n", type); @@ -2019,54 +2072,62 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab) qmi_wlanfw_bdf_download_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - goto out_qmi_bdf; + goto err_iounmap; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); - if (ret < 0) - goto out_qmi_bdf; + if (ret < 0) { + ath11k_warn(ab, 
"failed to wait board file download request: %d\n", + ret); + goto err_iounmap; + } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "board file download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; - goto out_qmi_bdf; + goto err_iounmap; + } + + if (ab->bus_params.fixed_bdf_addr || + type == ATH11K_QMI_FILE_TYPE_EEPROM) { + remaining = 0; + } else { + remaining -= req->data_len; + temp += req->data_len; + req->seg_id++; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n", + remaining); } } -out_qmi_bdf: - iounmap(bdf_addr); -out: +err_iounmap: + if (ab->bus_params.fixed_bdf_addr) + iounmap(bdf_addr); + +err_free_req: kfree(req); + return ret; } static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) { - struct qmi_wlanfw_bdf_download_req_msg_v01 *req; - struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; + struct device *dev = ab->dev; + char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE]; + const struct firmware *fw_entry; struct ath11k_board_data bd; - unsigned int remaining; - struct qmi_txn txn = {}; - int ret; - const u8 *temp; - int bdf_type; - - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) - return -ENOMEM; - memset(&resp, 0, sizeof(resp)); + u32 fw_size, file_type; + int ret = 0, bdf_type; + const u8 *tmp; memset(&bd, 0, sizeof(bd)); ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) { - ath11k_warn(ab, "failed to fetch board file: %d\n", ret); + ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret); goto out; } - temp = bd.data; - remaining = bd.len; - if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) bdf_type = ATH11K_QMI_BDF_TYPE_ELF; else @@ -2074,67 +2135,60 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type); - while (remaining) { - req->valid = 1; - req->file_id_valid = 1; - req->file_id = ab->qmi.target.board_id; - req->total_size_valid = 1; - req->total_size = bd.len; - req->seg_id_valid = 1; - req->data_valid = 1; - req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; - req->bdf_type = bdf_type; - req->bdf_type_valid = 1; - req->end_valid = 1; - req->end = 0; + fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len); - if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) { - req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01; - } else { - req->data_len = remaining; - req->end = 1; - } + ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to load bdf file\n"); + goto out; + } - memcpy(req->data, temp, req->data_len); + /* QCA6390 does not support cal data, skip it */ + if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF) + goto out; - ret = qmi_txn_init(&ab->qmi.handle, &txn, - qmi_wlanfw_bdf_download_resp_msg_v01_ei, - &resp); - if (ret < 0) - goto out_qmi_bdf; + if (ab->qmi.target.eeprom_caldata) { + file_type = ATH11K_QMI_FILE_TYPE_EEPROM; + tmp = filename; + fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; + } else { + file_type = ATH11K_QMI_FILE_TYPE_CALDATA; - ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n", - remaining); + /* cal--.bin */ + snprintf(filename, sizeof(filename), "cal-%s-%s.bin", + ath11k_bus_str(ab->hif.bus), dev_name(dev)); + fw_entry = ath11k_core_firmware_request(ab, filename); + if (!IS_ERR(fw_entry)) + goto success; - ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, - QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, - QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, - qmi_wlanfw_bdf_download_req_msg_v01_ei, req); - if (ret < 0) { - 
qmi_txn_cancel(&txn); - goto out_qmi_bdf; + fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE); + if (IS_ERR(fw_entry)) { + ret = PTR_ERR(fw_entry); + ath11k_warn(ab, + "qmi failed to load CAL data file:%s\n", + filename); + goto out; } +success: + fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size); + tmp = fw_entry->data; + } - ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); - if (ret < 0) - goto out_qmi_bdf; - - if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { - ath11k_warn(ab, "bdf download request failed: %d %d\n", - resp.resp.result, resp.resp.error); - ret = resp.resp.result; - goto out_qmi_bdf; - } - remaining -= req->data_len; - temp += req->data_len; - req->seg_id++; + ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to load caldata\n"); + goto out_qmi_cal; } -out_qmi_bdf: - ath11k_core_free_bdf(ab, &bd); + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi caldata type: %u\n", file_type); +out_qmi_cal: + if (!ab->qmi.target.eeprom_caldata) + release_firmware(fw_entry); out: - kfree(req); + ath11k_core_free_bdf(ab, &bd); + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi BDF download sequence completed\n"); + return ret; } @@ -2519,10 +2573,7 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) return ret; } - if (ab->bus_params.fixed_bdf_addr) - ret = ath11k_qmi_load_bdf_fixed_addr(ab); - else - ret = ath11k_qmi_load_bdf_qmi(ab); + ret = ath11k_qmi_load_bdf_qmi(ab); if (ret < 0) { ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; @@ -2707,8 +2758,10 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) list_del(&event->list); spin_unlock(&qmi->event_lock); - if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) + if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) { + kfree(event); return; + } switch (event->type) { case ATH11K_QMI_EVENT_SERVER_ARRIVE: diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 3d593033070321..3bb0f9ef799682 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -10,11 +10,9 @@ #include #define ATH11K_HOST_VERSION_STRING "WIN" -#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000 +#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000 #define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64 #define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000 -#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024) -#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024) #define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128 #define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45 #define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01 @@ -44,6 +42,7 @@ struct ath11k_base; enum ath11k_qmi_file_type { ATH11K_QMI_FILE_TYPE_BDF_GOLDEN, ATH11K_QMI_FILE_TYPE_CALDATA, + ATH11K_QMI_FILE_TYPE_EEPROM, ATH11K_QMI_MAX_FILE_TYPE, }; @@ -104,6 +103,7 @@ struct target_info { u32 board_id; u32 soc_id; u32 fw_version; + u32 eeprom_caldata; char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1]; char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1]; char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH]; @@ -135,7 +135,7 @@ struct ath11k_qmi { wait_queue_head_t cold_boot_waitq; }; -#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189 +#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261 #define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034 #define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7 #define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 @@ -285,7 +285,7 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 { }; #define 
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0 -#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207 +#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235 #define QMI_WLANFW_CAP_REQ_V01 0x0024 #define QMI_WLANFW_CAP_RESP_V01 0x0024 @@ -366,6 +366,14 @@ struct qmi_wlanfw_cap_resp_msg_v01 { char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1]; u8 num_macs_valid; u8 num_macs; + u8 voltage_mv_valid; + u32 voltage_mv; + u8 time_freq_hz_valid; + u32 time_freq_hz; + u8 otp_version_valid; + u32 otp_version; + u8 eeprom_read_timeout_valid; + u32 eeprom_read_timeout; }; struct qmi_wlanfw_cap_req_msg_v01 { diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index e1a1df169034bb..a66b5bdd21679e 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -97,7 +97,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) struct channel_param *ch; enum nl80211_band band; int num_channels = 0; - int params_len; int i, ret; bands = hw->wiphy->bands; @@ -117,10 +116,8 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) if (WARN_ON(!num_channels)) return -EINVAL; - params_len = sizeof(struct scan_chan_list_params) + - num_channels * sizeof(struct channel_param); - params = kzalloc(params_len, GFP_KERNEL); - + params = kzalloc(struct_size(params, ch_param, num_channels), + GFP_KERNEL); if (!params) return -ENOMEM; @@ -198,7 +195,7 @@ static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig, sizeof(struct ieee80211_reg_rule)); } -int ath11k_regd_update(struct ath11k *ar, bool init) +int ath11k_regd_update(struct ath11k *ar) { struct ieee80211_regdomain *regd, *regd_copy = NULL; int ret, regd_len, pdev_id; @@ -209,7 +206,10 @@ int ath11k_regd_update(struct ath11k *ar, bool init) spin_lock_bh(&ab->base_lock); - if (init) { + /* Prefer the latest regd update over default if it's available */ + if (ab->new_regd[pdev_id]) { + regd = ab->new_regd[pdev_id]; + } else { /* Apply the regd received during init through * WMI_REG_CHAN_LIST_CC event. In case of failure to * receive the regd, initialize with a default world @@ -222,8 +222,6 @@ int ath11k_regd_update(struct ath11k *ar, bool init) "failed to receive default regd during init\n"); regd = (struct ieee80211_regdomain *)&ath11k_world_regd; } - } else { - regd = ab->new_regd[pdev_id]; } if (!regd) { @@ -683,7 +681,7 @@ void ath11k_regd_update_work(struct work_struct *work) regd_update_work); int ret; - ret = ath11k_regd_update(ar, false); + ret = ath11k_regd_update(ar); if (ret) { /* Firmware has already moved to the new regd. 
We need * to maintain channel consistency across FW, Host driver diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 65d56d44796f62..5fb9dc03a74e82 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -31,6 +31,6 @@ void ath11k_regd_update_work(struct work_struct *work); struct ieee80211_regdomain * ath11k_reg_build_regd(struct ath11k_base *ab, struct cur_regulatory_info *reg_info, bool intersect); -int ath11k_regd_update(struct ath11k *ar, bool init); +int ath11k_regd_update(struct ath11k *ar); int ath11k_reg_update_chan_list(struct ath11k *ar); #endif diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index 1afe677596594f..ac4da99b5577c9 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -11,22 +11,20 @@ #define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1 #define ATH11K_SPECTRAL_DWORD_SIZE 4 -/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */ -#define ATH11K_SPECTRAL_BIN_SIZE 4 -#define ATH11K_SPECTRAL_ATH11K_MIN_BINS 64 -#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32 -#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256 +#define ATH11K_SPECTRAL_MIN_BINS 32 +#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1) +#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1) #define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095 /* Max channel computed by sum of 2g and 5g band channels */ #define ATH11K_SPECTRAL_TOTAL_CHANNEL 41 #define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70 -#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE (sizeof(struct fft_sample_ath11k) + \ - ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS) +#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \ + ATH11K_SPECTRAL_MAX_IB_BINS(x)) #define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \ ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL) -#define ATH11K_SPECTRAL_SUB_BUFF_SIZE ATH11K_SPECTRAL_PER_SAMPLE_SIZE +#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) #define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE #define ATH11K_SPECTRAL_20MHZ 20 @@ -444,8 +442,8 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file, if (kstrtoul(buf, 0, &val)) return -EINVAL; - if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS || - val > SPECTRAL_ATH11K_MAX_NUM_BINS) + if (val < ATH11K_SPECTRAL_MIN_BINS || + val > ar->ab->hw_params.spectral.max_fft_bins) return -EINVAL; if (!is_power_of_2(val)) @@ -581,12 +579,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar, struct spectral_tlv *tlv; int tlv_len, bin_len, num_bins; u16 length, freq; - u8 chan_width_mhz; + u8 chan_width_mhz, bin_sz; int ret; lockdep_assert_held(&ar->spectral.lock); - if (!ab->hw_params.spectral_fft_sz) { + if (!ab->hw_params.spectral.fft_sz) { ath11k_warn(ab, "invalid bin size type for hw rev %d\n", ab->hw_rev); return -EINVAL; @@ -596,7 +594,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar, tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); /* convert Dword into bytes */ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; - bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv)); + bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len; if (data_len < (bin_len + sizeof(*fft_report))) { ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n", @@ -604,12 +602,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar, return -EINVAL; } - num_bins = bin_len / 
ATH11K_SPECTRAL_BIN_SIZE; + bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz; + num_bins = bin_len / bin_sz; /* Only In-band bins are useful to user for visualize */ num_bins >>= 1; - if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS || - num_bins > ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS || + if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS || + num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) || !is_power_of_2(num_bins)) { ath11k_warn(ab, "Invalid num of bins %d\n", num_bins); return -EINVAL; @@ -654,7 +653,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar, fft_sample->freq2 = __cpu_to_be16(freq); ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins, - ab->hw_params.spectral_fft_sz); + ab->hw_params.spectral.fft_sz); fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index, search.peak_mag, @@ -690,7 +689,7 @@ static int ath11k_spectral_process_data(struct ath11k *ar, goto unlock; } - sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS; + sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab); fft_sample = kmalloc(sample_sz, GFP_ATOMIC); if (!fft_sample) { ret = -ENOBUFS; @@ -738,7 +737,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar, * is 4 DWORD size (16 bytes). * Need to remove this workaround once HW bug fixed */ - tlv_len = sizeof(*summary) - sizeof(*tlv); + tlv_len = sizeof(*summary) - sizeof(*tlv) + + ab->hw_params.spectral.summary_pad_sz; if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) { ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n", @@ -901,7 +901,7 @@ static inline int ath11k_spectral_debug_register(struct ath11k *ar) ar->spectral.rfs_scan = relay_open("spectral_scan", ar->debug.debugfs_pdev, - ATH11K_SPECTRAL_SUB_BUFF_SIZE, + ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab), ATH11K_SPECTRAL_NUM_SUB_BUF, &rfs_scan_cb, NULL); if (!ar->spectral.rfs_scan) { @@ -962,7 +962,7 @@ int ath11k_spectral_init(struct ath11k_base *ab) ab->wmi_ab.svc_map)) return 0; - if (!ab->hw_params.spectral_fft_sz) + if (!ab->hw_params.spectral.fft_sz) return 0; for (i = 0; i < ab->num_radios; i++) { diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h index d2d2a3cb082693..25d18e9d5b0b47 100644 --- a/drivers/net/wireless/ath/ath11k/trace.h +++ b/drivers/net/wireless/ath/ath11k/trace.h @@ -79,14 +79,15 @@ TRACE_EVENT(ath11k_htt_ppdu_stats, ); TRACE_EVENT(ath11k_htt_rxdesc, - TP_PROTO(struct ath11k *ar, const void *data, size_t len), + TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len), - TP_ARGS(ar, data, len), + TP_ARGS(ar, data, log_type, len), TP_STRUCT__entry( __string(device, dev_name(ar->ab->dev)) __string(driver, dev_driver_string(ar->ab->dev)) __field(u16, len) + __field(u16, log_type) __dynamic_array(u8, rxdesc, len) ), @@ -94,14 +95,16 @@ TRACE_EVENT(ath11k_htt_rxdesc, __assign_str(device, dev_name(ar->ab->dev)); __assign_str(driver, dev_driver_string(ar->ab->dev)); __entry->len = len; + __entry->log_type = log_type; memcpy(__get_dynamic_array(rxdesc), data, len); ), TP_printk( - "%s %s rxdesc len %d", + "%s %s rxdesc len %d type %d", __get_str(driver), __get_str(device), - __entry->len + __entry->len, + __entry->log_type ) ); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 6c253eae9d0692..5ae2ef4680d6cb 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -360,6 +360,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct 
ath11k_pdev_wmi *wmi_handle, pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g; + pdev_cap->nss_ratio_enabled = + WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); + pdev_cap->nss_ratio_info = + WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); } else { return -EINVAL; } @@ -403,18 +407,18 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); - } - cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; - cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; - cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; - cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; - cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; - cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; - memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, - sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); - memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, - sizeof(struct ath11k_ppe_threshold)); + cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; + cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; + cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; + cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; + cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; + cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; + memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, + sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); + memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, + sizeof(struct ath11k_ppe_threshold)); + } return 0; } @@ -783,14 +787,26 @@ int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, struct wmi_vdev_start_req_arg *arg) { + u32 center_freq1 = arg->channel.band_center_freq1; + memset(chan, 0, sizeof(*chan)); chan->mhz = arg->channel.freq; chan->band_center_freq1 = arg->channel.band_center_freq1; - if (arg->channel.mode == MODE_11AC_VHT80_80) + + if (arg->channel.mode == MODE_11AX_HE160) { + if (arg->channel.freq > arg->channel.band_center_freq1) + chan->band_center_freq1 = center_freq1 + 40; + else + chan->band_center_freq1 = center_freq1 - 40; + + chan->band_center_freq2 = arg->channel.band_center_freq1; + + } else if (arg->channel.mode == MODE_11AC_VHT80_80) { chan->band_center_freq2 = arg->channel.band_center_freq2; - else + } else { chan->band_center_freq2 = 0; + } chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode); if (arg->channel.passive) @@ -868,6 +884,8 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, } cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED; + if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) + cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED; ptr = skb->data + sizeof(*cmd); chan = ptr; @@ -1339,6 +1357,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->req_type = type; + cmd->pdev_id = ar->pdev->pdev_id; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI bss chan info req type %d\n", type); @@ -1903,8 +1922,8 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE); - he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; - he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; + 
he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i]; ptr += sizeof(*he_mcs); } @@ -2285,7 +2304,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, u16 num_send_chans, num_sends = 0, max_chan_limit = 0; u32 *reg1, *reg2; - tchan_info = &chan_list->ch_param[0]; + tchan_info = chan_list->ch_param; while (chan_list->nallchans) { len = sizeof(*cmd) + TLV_HDR_SIZE; max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / @@ -2352,6 +2371,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE; if (tchan_info->psc_channel) chan_info->info |= WMI_CHAN_INFO_PSC; + if (tchan_info->dfs_set) + chan_info->info |= WMI_CHAN_INFO_DFS; chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, tchan_info->phy_mode); @@ -3495,7 +3516,7 @@ ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size; wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters; wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id; - wmi_cfg->flag1 = tg_cfg->atf_config; + wmi_cfg->flag1 = tg_cfg->flag1; wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support; wmi_cfg->sched_params = tg_cfg->sched_params; wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; @@ -4046,8 +4067,8 @@ static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc, len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities)); if (!svc_rdy_ext->n_mac_phy_caps) { - svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len, - GFP_ATOMIC); + svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id, + len, GFP_ATOMIC); if (!svc_rdy_ext->mac_phy_caps) return -ENOMEM; } @@ -4447,8 +4468,8 @@ static struct cur_reg_rule struct cur_reg_rule *reg_rule_ptr; u32 count; - reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), - GFP_ATOMIC); + reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), + GFP_ATOMIC); if (!reg_rule_ptr) return NULL; @@ -5234,9 +5255,11 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, dst->hw_queued = src->hw_queued; dst->hw_reaped = src->hw_reaped; dst->underrun = src->underrun; + dst->hw_paused = src->hw_paused; dst->tx_abort = src->tx_abort; dst->mpdus_requeued = src->mpdus_requeued; dst->tx_ko = src->tx_ko; + dst->tx_xretry = src->tx_xretry; dst->data_rc = src->data_rc; dst->self_triggers = src->self_triggers; dst->sw_retry_failure = src->sw_retry_failure; @@ -5247,6 +5270,16 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure; dst->phy_underrun = src->phy_underrun; dst->txop_ovf = src->txop_ovf; + dst->seq_posted = src->seq_posted; + dst->seq_failed_queueing = src->seq_failed_queueing; + dst->seq_completed = src->seq_completed; + dst->seq_restarted = src->seq_restarted; + dst->mu_seq_posted = src->mu_seq_posted; + dst->mpdus_sw_flush = src->mpdus_sw_flush; + dst->mpdus_hw_filter = src->mpdus_hw_filter; + dst->mpdus_truncated = src->mpdus_truncated; + dst->mpdus_ack_failed = src->mpdus_ack_failed; + dst->mpdus_expired = src->mpdus_expired; } static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, @@ -5266,6 +5299,7 @@ static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, dst->phy_errs = src->phy_errs; dst->phy_err_drop = src->phy_err_drop; dst->mpdu_errs = src->mpdu_errs; + dst->rx_ovfl_errs = src->rx_ovfl_errs; } static void @@ -5502,12 +5536,16 @@ 
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, "PPDUs reaped", pdev->hw_reaped); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Num underruns", pdev->underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Num HW Paused", pdev->hw_paused); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PPDUs cleaned", pdev->tx_abort); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs requeued", pdev->mpdus_requeued); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Excessive retries", pdev->tx_ko); + "PPDU OK", pdev->tx_ko); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Excessive retries", pdev->tx_xretry); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", @@ -5531,6 +5569,26 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, "PHY underrun", pdev->phy_underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "MPDU is more than txop limit", pdev->txop_ovf); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num sequences posted", pdev->seq_posted); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num seq failed queueing ", pdev->seq_failed_queueing); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num sequences completed ", pdev->seq_completed); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num sequences restarted ", pdev->seq_restarted); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MU sequences posted ", pdev->mu_seq_posted); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MPDUS truncated ", pdev->mpdus_truncated); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Num of MPDUS expired ", pdev->mpdus_expired); *length = len; } @@ -5575,6 +5633,8 @@ ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, "PHY errors drops", pdev->phy_err_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Overflow errors", pdev->rx_ovfl_errs); *length = len; } @@ -5792,6 +5852,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk pdev_idx = reg_info->phy_id; + /* Avoid default reg rule updates sent during FW recovery if + * it is already available + */ + spin_lock(&ab->base_lock); + if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) && + ab->default_regd[pdev_idx]) { + spin_unlock(&ab->base_lock); + goto mem_free; + } + spin_unlock(&ab->base_lock); + if (pdev_idx >= ab->num_radios) { /* Process the event for phy0 only if single_pdev_only * is true. If pdev_idx is valid but not 0, discard the @@ -5829,10 +5900,10 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk } spin_lock(&ab->base_lock); - if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) { - /* Once mac is registered, ar is valid and all CC events from - * fw is considered to be received due to user requests - * currently. 
+ if (ab->default_regd[pdev_idx]) { + /* The initial rules from FW after WMI Init is to build + * the default regd. From then on, any rules updated for + * the pdev could be due to user reg changes. * Free previously built regd before assigning the newly * generated regd to ar. NULL pointer handling will be * taken care by kfree itself. @@ -5842,13 +5913,9 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk ab->new_regd[pdev_idx] = regd; ieee80211_queue_work(ar->hw, &ar->regd_update_work); } else { - /* Multiple events for the same *ar is not expected. But we - * can still clear any previously stored default_regd if we - * are receiving this event for the same radio by mistake. - * NULL pointer handling will be taken care by kfree itself. + /* This regd would be applied during mac registration and is + * held constant throughout for regd intersection purpose */ - kfree(ab->default_regd[pdev_idx]); - /* This regd would be applied during mac registration */ ab->default_regd[pdev_idx] = regd; } ab->dfs_region = reg_info->dfs_region; @@ -6119,8 +6186,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) { + if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ && + rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; + status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) { @@ -6141,8 +6210,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) sband = &ar->mac.sbands[status->band]; - status->freq = ieee80211_channel_to_frequency(rx_ev.channel, - status->band); + if (status->band != NL80211_BAND_6GHZ) + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); @@ -6220,8 +6291,9 @@ static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *s rcu_read_unlock(); } -static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab, - u32 vdev_id) +static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab, + u32 vdev_id, + enum ath11k_scan_state state) { int i; struct ath11k_pdev *pdev; @@ -6233,7 +6305,7 @@ static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab, ar = pdev->ar; spin_lock_bh(&ar->data_lock); - if (ar->scan.state == ATH11K_SCAN_ABORTING && + if (ar->scan.state == state && ar->scan.vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; @@ -6263,10 +6335,15 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) * aborting scan's vdev id matches this event info. 
*/ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED && - scan_ev.reason == WMI_SCAN_REASON_CANCELLED) - ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id); - else + scan_ev.reason == WMI_SCAN_REASON_CANCELLED) { + ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, + ATH11K_SCAN_ABORTING); + if (!ar) + ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, + ATH11K_SCAN_RUNNING); + } else { ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id); + } if (!ar) { ath11k_warn(ab, "Received scan event for unknown vdev"); @@ -6301,6 +6378,8 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) ath11k_wmi_event_scan_start_failed(ar); break; case WMI_SCAN_EVENT_DEQUEUED: + __ath11k_mac_scan_finish(ar); + break; case WMI_SCAN_EVENT_PREEMPTED: case WMI_SCAN_EVENT_RESTARTED: case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: @@ -7065,6 +7144,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_TWT_ENABLE_EVENTID: case WMI_TWT_DISABLE_EVENTID: case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: + case WMI_PEER_CREATE_CONF_EVENTID: ath11k_dbg(ab, ATH11K_DBG_WMI, "ignoring unsupported event 0x%x\n", id); break; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index d35c47e0b19d42..0584e68e75939a 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -119,6 +119,22 @@ enum { WMI_HOST_WLAN_2G_5G_CAP = 0x3, }; +/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command. + * Used only for HE auto rate mode. + */ +enum { + /* HE LTF related configuration */ + WMI_HE_AUTORATE_LTF_1X = BIT(0), + WMI_HE_AUTORATE_LTF_2X = BIT(1), + WMI_HE_AUTORATE_LTF_4X = BIT(2), + + /* HE GI related configuration */ + WMI_AUTORATE_400NS_GI = BIT(8), + WMI_AUTORATE_800NS_GI = BIT(9), + WMI_AUTORATE_1600NS_GI = BIT(10), + WMI_AUTORATE_3200NS_GI = BIT(11), +}; + /* * wmi command groups. 
*/ @@ -647,6 +663,9 @@ enum wmi_tlv_event_id { WMI_PEER_RESERVED9_EVENTID, WMI_PEER_RESERVED10_EVENTID, WMI_PEER_OPER_MODE_CHANGE_EVENTID, + WMI_PEER_TX_PN_RESPONSE_EVENTID, + WMI_PEER_CFR_CAPTURE_EVENTID, + WMI_PEER_CREATE_CONF_EVENTID, WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT), WMI_HOST_SWBA_EVENTID, WMI_TBTTOFFSET_UPDATE_EVENTID, @@ -1044,7 +1063,9 @@ enum wmi_tlv_vdev_param { WMI_VDEV_PARAM_HE_RANGE_EXT, WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE, WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME, + WMI_VDEV_PARAM_HE_LTF = 0x74, WMI_VDEV_PARAM_BA_MODE = 0x7e, + WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80, WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87, WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99, WMI_VDEV_PARAM_PROTOTYPE = 0x8000, @@ -2128,6 +2149,24 @@ enum wmi_direct_buffer_module { WMI_DIRECT_BUF_MAX }; +/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext + * event + * WMI_NSS_RATIO_1BY2_NSS -Max nss of 160MHz is equals to half of the max nss + * of 80MHz + * WMI_NSS_RATIO_3BY4_NSS - Max nss of 160MHz is equals to 3/4 of the max nss + * of 80MHz + * WMI_NSS_RATIO_1_NSS - Max nss of 160MHz is equals to the max nss of 80MHz + * WMI_NSS_RATIO_2_NSS - Max nss of 160MHz is equals to two times the max + * nss of 80MHz + */ + +enum wmi_nss_ratio { + WMI_NSS_RATIO_1BY2_NSS = 0x0, + WMI_NSS_RATIO_3BY4_NSS = 0x1, + WMI_NSS_RATIO_1_NSS = 0x2, + WMI_NSS_RATIO_2_NSS = 0x3, +}; + struct wmi_host_pdev_band_to_mac { u32 pdev_id; u32 start_freq; @@ -2244,6 +2283,8 @@ struct wmi_init_cmd { u32 num_host_mem_chunks; } __packed; +#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5) + struct wmi_resource_config { u32 tlv_header; u32 num_vdevs; @@ -2370,6 +2411,12 @@ struct wmi_hw_mode_capabilities { } __packed; #define WMI_MAX_HECAP_PHY_SIZE (3) +#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0) +#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \ + FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val) +#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1) +#define WMI_NSS_RATIO_INFO_GET(_val) \ + FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val) struct wmi_mac_phy_capabilities { u32 hw_mode_id; @@ -2403,6 +2450,12 @@ struct wmi_mac_phy_capabilities { u32 he_cap_info_2g_ext; u32 he_cap_info_5g_ext; u32 he_cap_info_internal; + u32 wireless_modes; + u32 low_2ghz_chan_freq; + u32 high_2ghz_chan_freq; + u32 low_5ghz_chan_freq; + u32 high_5ghz_chan_freq; + u32 nss_ratio; } __packed; struct wmi_hal_reg_capabilities_ext { @@ -2527,6 +2580,7 @@ struct wmi_vdev_down_cmd { #define WMI_VDEV_START_HIDDEN_SSID BIT(0) #define WMI_VDEV_START_PMF_ENABLED BIT(1) #define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3) +#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4) struct wmi_ssid { u32 ssid_len; @@ -2960,6 +3014,7 @@ struct wmi_pdev_bss_chan_info_req_cmd { u32 tlv_header; /* ref wmi_bss_chan_info_req_type */ u32 req_type; + u32 pdev_id; } __packed; struct wmi_ap_ps_peer_cmd { @@ -3608,7 +3663,7 @@ struct wmi_stop_scan_cmd { struct scan_chan_list_params { u32 pdev_id; u16 nallchans; - struct channel_param ch_param[1]; + struct channel_param ch_param[]; }; struct wmi_scan_chan_list_cmd { @@ -3917,7 +3972,11 @@ struct wmi_vht_rate_set { struct wmi_he_rate_set { u32 tlv_header; + + /* MCS at which the peer can receive */ u32 rx_mcs_set; + + /* MCS at which the peer can transmit */ u32 tx_mcs_set; } __packed; @@ -4056,7 +4115,6 @@ struct wmi_vdev_stopped_event { } __packed; struct wmi_pdev_bss_chan_info_event { - u32 pdev_id; u32 freq; /* Units in MHz */ u32 noise_floor; /* units are dBm */ /* rx clear - how often the channel was unused */ 
@@ -4074,6 +4132,7 @@ struct wmi_pdev_bss_chan_info_event { /*rx_cycle cnt for my bss in 64bits format */ u32 rx_bss_cycle_count_low; u32 rx_bss_cycle_count_high; + u32 pdev_id; } __packed; #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 @@ -4168,6 +4227,9 @@ struct wmi_pdev_stats_tx { /* Num underruns */ s32 underrun; + /* Num hw paused */ + u32 hw_paused; + /* Num PPDUs cleaned up in TX abort */ s32 tx_abort; @@ -4177,6 +4239,8 @@ struct wmi_pdev_stats_tx { /* excessive retries */ u32 tx_ko; + u32 tx_xretry; + /* data hw rate code */ u32 data_rc; @@ -4206,6 +4270,40 @@ struct wmi_pdev_stats_tx { /* MPDU is more than txop limit */ u32 txop_ovf; + + /* Num sequences posted */ + u32 seq_posted; + + /* Num sequences failed in queueing */ + u32 seq_failed_queueing; + + /* Num sequences completed */ + u32 seq_completed; + + /* Num sequences restarted */ + u32 seq_restarted; + + /* Num of MU sequences posted */ + u32 mu_seq_posted; + + /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT + * (Reset,channel change) + */ + s32 mpdus_sw_flush; + + /* Num MPDUs filtered by HW, all filter condition (TTL expired) */ + s32 mpdus_hw_filter; + + /* Num MPDUs truncated by PDG (TXOP, TBTT, + * PPDU_duration based on rate, dyn_bw) + */ + s32 mpdus_truncated; + + /* Num MPDUs that was tried but didn't receive ACK or BA */ + s32 mpdus_ack_failed; + + /* Num MPDUs that was dropped du to expiry. */ + s32 mpdus_expired; } __packed; struct wmi_pdev_stats_rx { @@ -4240,6 +4338,9 @@ struct wmi_pdev_stats_rx { /* Number of mpdu errors - FCS, MIC, ENC etc. */ s32 mpdu_errs; + + /* Num overflow errors */ + s32 rx_ovfl_errs; } __packed; struct wmi_pdev_stats { @@ -5014,7 +5115,7 @@ struct target_resource_config { u32 vo_minfree; u32 rx_batchmode; u32 tt_support; - u32 atf_config; + u32 flag1; u32 iphdr_pad_config; u32 qwrap_config:16, alloc_frag_desc_for_data_pkt:16; diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c index 8113baddd8fc62..37bf641064734d 100644 --- a/drivers/net/wireless/ath/ath5k/sysfs.c +++ b/drivers/net/wireless/ath/ath5k/sysfs.c @@ -14,7 +14,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \ { \ struct ieee80211_hw *hw = dev_get_drvdata(dev); \ struct ath5k_hw *ah = hw->priv; \ - return snprintf(buf, PAGE_SIZE, "%d\n", get); \ + return sysfs_emit(buf, "%d\n", get); \ } \ \ static ssize_t ath5k_attr_store_##name(struct device *dev, \ @@ -41,7 +41,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \ { \ struct ieee80211_hw *hw = dev_get_drvdata(dev); \ struct ath5k_hw *ah = hw->priv; \ - return snprintf(buf, PAGE_SIZE, "%d\n", get); \ + return sysfs_emit(buf, "%d\n", get); \ } \ static DEVICE_ATTR(name, 0444, ath5k_attr_show_##name, NULL) @@ -64,7 +64,7 @@ static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL); + return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL); } static DEVICE_ATTR(noise_immunity_level_max, 0444, ath5k_attr_show_noise_immunity_level_max, NULL); @@ -73,7 +73,7 @@ static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL); + return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL); } static DEVICE_ATTR(firstep_level_max, 0444, ath5k_attr_show_firstep_level_max, NULL); diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c 
b/drivers/net/wireless/ath/ath6kl/cfg80211.c index fefdc6753acd26..bd1183830e9112 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3781,6 +3781,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, { struct net_device *ndev; struct ath6kl_vif *vif; + u8 addr[ETH_ALEN]; ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup); if (!ndev) @@ -3803,14 +3804,14 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, vif->htcap[NL80211_BAND_2GHZ].ht_enable = true; vif->htcap[NL80211_BAND_5GHZ].ht_enable = true; - memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); + ether_addr_copy(addr, ar->mac_addr); if (fw_vif_idx != 0) { - ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) | - 0x2; + addr[0] = (addr[0] ^ (1 << fw_vif_idx)) | 0x2; if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, ar->fw_capabilities)) - ndev->dev_addr[4] ^= 0x80; + addr[4] ^= 0x80; } + eth_hw_addr_set(ndev, addr); init_netdev(ndev); diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 5372e948e761d2..aba70f35e574b9 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -340,6 +340,11 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb) le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } + + /* Ignore broken descriptors. */ + if (usb_endpoint_maxp(endpoint) == 0) + continue; + urbcount = 0; pipe_num = @@ -907,7 +912,7 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, - size, 2 * HZ); + size, 2000); if (ret < 0) { ath6kl_warn("Failed to read usb control message: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index 56d1a7764b9f87..708c8969b503ab 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -19,9 +19,14 @@ #include #include #include +#include +#include struct owl_ctx { + struct pci_dev *pdev; struct completion eeprom_load; + struct work_struct work; + struct nvmem_cell *cell; }; #define EEPROM_FILENAME_LEN 100 @@ -42,6 +47,12 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, u32 bar0; bool swap_needed = false; + /* also note that we are doing *u16 operations on the file */ + if (cal_len > 4096 || cal_len < 0x200 || (cal_len & 1) == 1) { + dev_err(&pdev->dev, "eeprom has an invalid size.\n"); + return -EINVAL; + } + if (*cal_data != AR5416_EEPROM_MAGIC) { if (*cal_data != swab16(AR5416_EEPROM_MAGIC)) { dev_err(&pdev->dev, "invalid calibration data\n"); @@ -99,38 +110,31 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, return 0; } -static void owl_fw_cb(const struct firmware *fw, void *context) +static void owl_rescan(struct pci_dev *pdev) { - struct pci_dev *pdev = (struct pci_dev *)context; - struct owl_ctx *ctx = (struct owl_ctx *)pci_get_drvdata(pdev); - struct pci_bus *bus; - - complete(&ctx->eeprom_load); - - if (!fw) { - dev_err(&pdev->dev, "no eeprom data received.\n"); - goto release; - } - - /* also note that we are doing *u16 operations on the file */ - if (fw->size > 4096 || fw->size < 0x200 || (fw->size & 1) == 1) { - dev_err(&pdev->dev, "eeprom file has an invalid size.\n"); - goto release; - } - - if (ath9k_pci_fixup(pdev, (const u16 *)fw->data, fw->size)) - 
goto release; + struct pci_bus *bus = pdev->bus; pci_lock_rescan_remove(); - bus = pdev->bus; pci_stop_and_remove_bus_device(pdev); /* the device should come back with the proper * ProductId. But we have to initiate a rescan. */ pci_rescan_bus(bus); pci_unlock_rescan_remove(); +} + +static void owl_fw_cb(const struct firmware *fw, void *context) +{ + struct owl_ctx *ctx = (struct owl_ctx *)context; + + complete(&ctx->eeprom_load); -release: + if (fw) { + ath9k_pci_fixup(ctx->pdev, (const u16 *)fw->data, fw->size); + owl_rescan(ctx->pdev); + } else { + dev_err(&ctx->pdev->dev, "no eeprom data received.\n"); + } release_firmware(fw); } @@ -152,6 +156,43 @@ static const char *owl_get_eeprom_name(struct pci_dev *pdev) return eeprom_name; } +static void owl_nvmem_work(struct work_struct *work) +{ + struct owl_ctx *ctx = container_of(work, struct owl_ctx, work); + void *buf; + size_t len; + + complete(&ctx->eeprom_load); + + buf = nvmem_cell_read(ctx->cell, &len); + if (!IS_ERR(buf)) { + ath9k_pci_fixup(ctx->pdev, buf, len); + kfree(buf); + owl_rescan(ctx->pdev); + } else { + dev_err(&ctx->pdev->dev, "no nvmem data received.\n"); + } +} + +static int owl_nvmem_probe(struct owl_ctx *ctx) +{ + int err; + + ctx->cell = devm_nvmem_cell_get(&ctx->pdev->dev, "calibration"); + if (IS_ERR(ctx->cell)) { + err = PTR_ERR(ctx->cell); + if (err == -ENOENT || err == -EOPNOTSUPP) + return 1; /* not present, try firmware_request */ + + return err; + } + + INIT_WORK(&ctx->work, owl_nvmem_work); + schedule_work(&ctx->work); + + return 0; +} + static int owl_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -164,21 +205,27 @@ static int owl_probe(struct pci_dev *pdev, pcim_pin_device(pdev); - eeprom_name = owl_get_eeprom_name(pdev); - if (!eeprom_name) { - dev_err(&pdev->dev, "no eeprom filename found.\n"); - return -ENODEV; - } - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; init_completion(&ctx->eeprom_load); + ctx->pdev = pdev; pci_set_drvdata(pdev, ctx); + + err = owl_nvmem_probe(ctx); + if (err <= 0) + return err; + + eeprom_name = owl_get_eeprom_name(pdev); + if (!eeprom_name) { + dev_err(&pdev->dev, "no eeprom filename found.\n"); + return -ENODEV; + } + err = request_firmware_nowait(THIS_MODULE, true, eeprom_name, - &pdev->dev, GFP_KERNEL, pdev, owl_fw_cb); + &pdev->dev, GFP_KERNEL, ctx, owl_fw_cb); if (err) dev_err(&pdev->dev, "failed to request caldata (%d).\n", err); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 4c81b1d7f41711..fb7a2952d0ce8b 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -749,9 +749,9 @@ static int read_file_misc(struct seq_file *file, void *data) static int read_file_reset(struct seq_file *file, void *data) { - struct ieee80211_hw *hw = dev_get_drvdata(file->private); - struct ath_softc *sc = hw->priv; + struct ath_softc *sc = file->private; static const char * const reset_cause[__RESET_TYPE_MAX] = { + [RESET_TYPE_USER] = "User reset", [RESET_TYPE_BB_HANG] = "Baseband Hang", [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog", [RESET_TYPE_FATAL_INT] = "Fatal HW Error", @@ -779,6 +779,55 @@ static int read_file_reset(struct seq_file *file, void *data) return 0; } +static int open_file_reset(struct inode *inode, struct file *f) +{ + return single_open(f, read_file_reset, inode->i_private); +} + +static ssize_t write_file_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = 
file_inode(file)->i_private; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val != 1) + return -EINVAL; + + /* avoid rearming hw_reset_work on shutdown */ + mutex_lock(&sc->mutex); + if (test_bit(ATH_OP_INVALID, &common->op_flags)) { + mutex_unlock(&sc->mutex); + return -EBUSY; + } + + ath9k_queue_reset(sc, RESET_TYPE_USER); + mutex_unlock(&sc->mutex); + + return count; +} + +static const struct file_operations fops_reset = { + .read = seq_read, + .write = write_file_reset, + .open = open_file_reset, + .owner = THIS_MODULE, + .llseek = seq_lseek, + .release = single_release, +}; + void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, struct ath_txq *txq, unsigned int flags) @@ -1393,8 +1442,8 @@ int ath9k_init_debug(struct ath_hw *ah) read_file_queues); debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy, read_file_misc); - debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy, - read_file_reset); + debugfs_create_file("reset", 0600, sc->debug.debugfs_phy, + sc, &fops_reset); ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 33826aa136874d..389459c04d14a5 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -39,6 +39,7 @@ struct fft_sample_tlv; #endif enum ath_reset_type { + RESET_TYPE_USER, RESET_TYPE_BB_HANG, RESET_TYPE_BB_WATCHDOG, RESET_TYPE_FATAL_INT, diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index c22d457dbc5418..e6b3cd49ea18bd 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -135,13 +135,23 @@ static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob, offset, data); } +static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset, + u16 *data) +{ + return ath9k_hw_nvram_read_array(ah->nvmem_blob, + ah->nvmem_blob_len / sizeof(u16), + offset, data); +} + bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_platform_data *pdata = ah->dev->platform_data; bool ret; - if (ah->eeprom_blob) + if (ah->nvmem_blob) + ret = ath9k_hw_nvram_read_nvmem(ah, off, data); + else if (ah->eeprom_blob) ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data); else if (pdata && !pdata->use_eeprom) ret = ath9k_hw_nvram_read_pdata(pdata, off, data); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index b7b65b1c90e8fd..096a206f49ed5e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -977,6 +977,8 @@ struct ath_hw { bool disable_5ghz; const struct firmware *eeprom_blob; + u16 *nvmem_blob; /* devres managed */ + size_t nvmem_blob_len; struct ath_dynack dynack; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index e9a36dd7144f1f..4f00400c7ffb83 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include 
#include @@ -568,6 +569,57 @@ static void ath9k_eeprom_release(struct ath_softc *sc) release_firmware(sc->sc_ah->eeprom_blob); } +static int ath9k_nvmem_request_eeprom(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct nvmem_cell *cell; + void *buf; + size_t len; + int err; + + cell = devm_nvmem_cell_get(sc->dev, "calibration"); + if (IS_ERR(cell)) { + err = PTR_ERR(cell); + + /* nvmem cell might not be defined, or the nvmem + * subsystem isn't included. In this case, follow + * the established "just return 0;" convention of + * ath9k_init_platform to say: + * "All good. Nothing to see here. Please go on." + */ + if (err == -ENOENT || err == -EOPNOTSUPP) + return 0; + + return err; + } + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* run basic sanity checks on the returned nvram cell length. + * That length has to be a multiple of a "u16" (i.e.: & 1). + * Furthermore, it has to be more than "let's say" 512 bytes + * but less than the maximum of AR9300_EEPROM_SIZE (16kb). + */ + if ((len & 1) == 1 || len < 512 || len >= AR9300_EEPROM_SIZE) { + kfree(buf); + return -EINVAL; + } + + /* devres manages the calibration values release on shutdown */ + ah->nvmem_blob = (u16 *)devm_kmemdup(sc->dev, buf, len, GFP_KERNEL); + kfree(buf); + if (!ah->nvmem_blob) + return -ENOMEM; + + ah->nvmem_blob_len = len; + ah->ah_flags &= ~AH_USE_EEPROM; + ah->ah_flags |= AH_NO_EEP_SWAP; + + return 0; +} + static int ath9k_init_platform(struct ath_softc *sc) { struct ath9k_platform_data *pdata = sc->dev->platform_data; @@ -704,6 +756,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) return ret; + ret = ath9k_nvmem_request_eeprom(sc); + if (ret) + return ret; + if (ath9k_led_active_high != -1) ah->config.led_active_high = ath9k_led_active_high == 1; @@ -1038,6 +1094,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, ARRAY_SIZE(ath9k_tpt_blink)); #endif + wiphy_read_of_freq_limits(hw->wiphy); + /* Register with mac80211 */ error = ieee80211_register_hw(hw); if (error) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 139831539da376..98090e40e1cf48 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -533,8 +533,10 @@ irqreturn_t ath_isr(int irq, void *dev) ath9k_debug_sync_cause(sc, sync_cause); status &= ah->imask; /* discard unasked-for bits */ - if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) + if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) { + ath9k_hw_kill_interrupts(sc->sc_ah); return IRQ_HANDLED; + } /* * If there are no status bits set, then this interrupt was not diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 80390495ea250f..75cb53a3ec15e7 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -183,10 +183,12 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd, if (cd == NULL) return; list_del(&cd->head); - for (i = 0; i < dpd->num_radar_types; i++) { - struct pri_detector *de = cd->detectors[i]; - if (de != NULL) - de->exit(de); + if (cd->detectors) { + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_detector *de = cd->detectors[i]; + if (de != NULL) + de->exit(de); + } } kfree(cd->detectors); kfree(cd); diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h index 9c2e5458e42574..e14f374f97d425 100644 --- 
a/drivers/net/wireless/ath/spectral_common.h +++ b/drivers/net/wireless/ath/spectral_common.h @@ -24,7 +24,6 @@ * could be acquired so far. */ #define SPECTRAL_ATH10K_MAX_NUM_BINS 256 -#define SPECTRAL_ATH11K_MAX_NUM_BINS 512 /* FFT sample format given to userspace via debugfs. * diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 389b5e7129a6db..6af306ae41ad9b 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -120,7 +120,7 @@ static ssize_t write_file_dump(struct file *file, if (begin == NULL) break; - if (kstrtou32(begin, 0, &arg[i]) != 0) + if (kstrtos32(begin, 0, &arg[i]) != 0) break; } diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 8e1dbfda653867..aff04ef6626638 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -403,8 +403,21 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) dma_unmap_single(wcn->dev, ctl->desc->src_addr_l, ctl->skb->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(ctl->skb); - if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) { - /* Keep frame until TX status comes */ + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + if (info->flags & IEEE80211_TX_CTL_NO_ACK) { + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb); + } else { + /* Wait for the TX ack indication or timeout... */ + spin_lock(&wcn->dxe_lock); + if (WARN_ON(wcn->tx_ack_skb)) + ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb); + wcn->tx_ack_skb = ctl->skb; /* Tracking ref */ + mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10); + spin_unlock(&wcn->dxe_lock); + } + /* do not free, ownership transferred to mac80211 status cb */ + } else { ieee80211_free_txskb(wcn->hw, ctl->skb); } @@ -426,7 +439,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) { struct wcn36xx *wcn = (struct wcn36xx *)dev; int int_src, int_reason; - bool transmitted = false; wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src); @@ -466,7 +478,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | WCN36XX_CH_STAT_INT_ED_MASK)) { reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); - transmitted = true; } } @@ -479,7 +490,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_DXE_0_INT_CLR, WCN36XX_INT_MASK_CHAN_TX_L); - if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) { wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ERR_CLR, @@ -507,25 +517,8 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | WCN36XX_CH_STAT_INT_ED_MASK)) { reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); - transmitted = true; - } - } - - spin_lock(&wcn->dxe_lock); - if (wcn->tx_ack_skb && transmitted) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb); - - /* TX complete, no need to wait for 802.11 ack indication */ - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS && - info->flags & IEEE80211_TX_CTL_NO_ACK) { - info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - del_timer(&wcn->tx_ack_timer); - ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb); - wcn->tx_ack_skb = NULL; - ieee80211_wake_queues(wcn->hw); } } - spin_unlock(&wcn->dxe_lock); return IRQ_HANDLED; } @@ -613,6 +606,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, dxe = ctl->desc; while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) { + 
/* do not read until we own DMA descriptor */ + dma_rmb(); + + /* read/modify DMA descriptor */ skb = ctl->skb; dma_addr = dxe->dst_addr_l; ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC); @@ -623,9 +620,15 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE, DMA_FROM_DEVICE); wcn36xx_rx_skb(wcn, skb); - } /* else keep old skb not submitted and use it for rx DMA */ + } + /* else keep old skb not submitted and reuse it for rx DMA + * (dropping the packet that it contained) + */ + /* flush descriptor changes before re-marking as valid */ + dma_wmb(); dxe->ctrl = ctrl; + ctl = ctl->next; dxe = ctl->desc; } diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 455143c4164ee9..9bea2b01f9aab8 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -359,6 +359,8 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206, WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207, + WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ = 208, + WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP = 209, WCN36XX_HAL_SCAN_OFFLOAD_IND = 210, WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, @@ -1353,6 +1355,36 @@ struct wcn36xx_hal_stop_scan_offload_rsp_msg { u32 status; } __packed; +#define WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK 0x000000ff +#define WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK 0x0000ff00 +#define WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK 0x00ff0000 +#define WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK 0xff000000 +#define WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK 0x000000ff +#define WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE BIT(7) +#define WCN36XX_HAL_CHAN_INFO_FLAG_DFS BIT(10) +#define WCN36XX_HAL_CHAN_INFO_FLAG_HT BIT(11) +#define WCN36XX_HAL_CHAN_INFO_FLAG_VHT BIT(12) +#define WCN36XX_HAL_CHAN_INFO_PHY_11A 0 +#define WCN36XX_HAL_CHAN_INFO_PHY_11BG 1 +#define WCN36XX_HAL_DEFAULT_ANT_GAIN 6 +#define WCN36XX_HAL_DEFAULT_MIN_POWER 6 + +struct wcn36xx_hal_channel_param { + u32 mhz; + u32 band_center_freq1; + u32 band_center_freq2; + u32 channel_info; + u32 reg_info_1; + u32 reg_info_2; +} __packed; + +struct wcn36xx_hal_update_channel_list_req_msg { + struct wcn36xx_hal_msg_header header; + + u8 num_channel; + struct wcn36xx_hal_channel_param channels[80]; +} __packed; + enum wcn36xx_hal_rate_index { HW_RATE_INDEX_1MBPS = 0x82, HW_RATE_INDEX_2MBPS = 0x84, @@ -3384,11 +3416,11 @@ struct tl_hal_flush_ac_rsp_msg { struct wcn36xx_hal_enter_imps_req_msg { struct wcn36xx_hal_msg_header header; -}; +} __packed; -struct wcn36xx_hal_exit_imps_req { +struct wcn36xx_hal_exit_imps_req_msg { struct wcn36xx_hal_msg_header header; -}; +} __packed; struct wcn36xx_hal_enter_bmps_req_msg { struct wcn36xx_hal_msg_header header; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index ec913ec991f3ff..b04533bbc3a451 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -85,7 +85,9 @@ static struct ieee80211_channel wcn_5ghz_channels[] = { CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), + CHAN5G(5680, 136, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), + CHAN5G(5720, 144, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), CHAN5G(5765, 153, 
PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), @@ -135,7 +137,9 @@ static struct ieee80211_supported_band wcn_band_2ghz = { .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40 | - IEEE80211_HT_CAP_LSIG_TXOP_PROT, + IEEE80211_HT_CAP_LSIG_TXOP_PROT | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_SUP_WIDTH_20_40, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, @@ -432,6 +436,13 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_PS) wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS); + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + if (hw->conf.flags & IEEE80211_CONF_IDLE) + wcn36xx_smd_enter_imps(wcn); + else + wcn36xx_smd_exit_imps(wcn); + } + mutex_unlock(&wcn->conf_mutex); return 0; @@ -569,12 +580,14 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) { sta_priv->is_data_encrypted = true; /* Reconfigure bss with encrypt_type */ - if (NL80211_IFTYPE_STATION == vif->type) + if (NL80211_IFTYPE_STATION == vif->type) { wcn36xx_smd_config_bss(wcn, vif, sta, sta->addr, true); + wcn36xx_smd_config_sta(wcn, vif, sta); + } wcn36xx_smd_set_stakey(wcn, vif_priv->encrypt_type, @@ -604,15 +617,6 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } } } - /* FIXME: Only enable bmps support when encryption is enabled. - * For any reasons, when connected to open/no-security BSS, - * the wcn36xx controller in bmps mode does not forward - * 'wake-up' beacons despite AP sends DTIM with station AID. - * It could be due to a firmware issue or to the way driver - * configure the station. - */ - if (vif->type == NL80211_IFTYPE_STATION) - vif_priv->allow_bmps = true; break; case DISABLE_KEY: if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) { @@ -650,19 +654,19 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct wcn36xx *wcn = hw->priv; - int i; if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) { /* fallback to mac80211 software scan */ return 1; } - /* For unknown reason, the hardware offloaded scan only works with - * 2.4Ghz channels, fallback to software scan in other cases. + /* Firmware scan offload is limited to 48 channels, fallback to + * software driven scanning otherwise. */ - for (i = 0; i < hw_req->req.n_channels; i++) { - if (hw_req->req.channels[i]->band != NL80211_BAND_2GHZ) - return 1; + if (hw_req->req.n_channels > 48) { + wcn36xx_warn("Offload scan aborted, n_channels=%u", + hw_req->req.n_channels); + return 1; } mutex_lock(&wcn->scan_lock); @@ -676,6 +680,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, mutex_unlock(&wcn->scan_lock); + wcn36xx_smd_update_channel_list(wcn, &hw_req->req); return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req); } @@ -913,7 +918,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, vif->addr, bss_conf->aid); vif_priv->sta_assoc = false; - vif_priv->allow_bmps = false; wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, @@ -1123,6 +1127,13 @@ static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow) goto out; ret = wcn36xx_smd_wlan_host_suspend_ind(wcn); } + + /* Disable IRQ, we don't want to handle any packet before mac80211 is + * resumed and ready to receive packets. 
+ */ + disable_irq(wcn->tx_irq); + disable_irq(wcn->rx_irq); + out: mutex_unlock(&wcn->conf_mutex); return ret; @@ -1145,6 +1156,10 @@ static int wcn36xx_resume(struct ieee80211_hw *hw) wcn36xx_smd_ipv6_ns_offload(wcn, vif, false); wcn36xx_smd_arp_offload(wcn, vif, false); } + + enable_irq(wcn->tx_irq); + enable_irq(wcn->rx_irq); + mutex_unlock(&wcn->conf_mutex); return 0; @@ -1338,7 +1353,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL); ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR); wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -1490,6 +1504,7 @@ static int wcn36xx_probe(struct platform_device *pdev) mutex_init(&wcn->conf_mutex); mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); + __skb_queue_head_init(&wcn->amsdu); wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL); if (!wcn->hal_buf) { @@ -1567,6 +1582,8 @@ static int wcn36xx_remove(struct platform_device *pdev) iounmap(wcn->dxe_base); iounmap(wcn->ccu_base); + __skb_queue_purge(&wcn->amsdu); + mutex_destroy(&wcn->hal_mutex); ieee80211_free_hw(hw); diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 2d0780fefd477e..2c660458b3830c 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -18,19 +18,19 @@ #include "wcn36xx.h" +#define WCN36XX_BMPS_FAIL_THREHOLD 3 + int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, struct ieee80211_vif *vif) { int ret = 0; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - - if (!vif_priv->allow_bmps) - return -ENOTSUPP; - + /* TODO: Make sure the TX chain clean */ ret = wcn36xx_smd_enter_bmps(wcn, vif); if (!ret) { wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n"); vif_priv->pw_state = WCN36XX_BMPS; + vif_priv->bmps_fail_ct = 0; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; } else { /* @@ -39,6 +39,11 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, * received just after auth complete */ wcn36xx_err("Can not enter BMPS!\n"); + + if (vif_priv->bmps_fail_ct++ == WCN36XX_BMPS_FAIL_THREHOLD) { + ieee80211_connection_loss(vif); + vif_priv->bmps_fail_ct = 0; + } } return ret; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 57fa857b290b79..ed45e2cf039be3 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -266,7 +267,8 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta, sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor; sta_params->max_ampdu_density = sta->ht_cap.ampdu_density; - sta_params->max_amsdu_size = is_cap_supported(caps, + /* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */ + sta_params->max_amsdu_size = !is_cap_supported(caps, IEEE80211_HT_CAP_MAX_AMSDU); sta_params->sgi_20Mhz = is_cap_supported(caps, IEEE80211_HT_CAP_SGI_20); @@ -927,6 +929,86 @@ int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn) return ret; } +int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req) +{ + struct wcn36xx_hal_update_channel_list_req_msg *msg_body; + int ret, i; + + msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL); + if (!msg_body) + return -ENOMEM; + + INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ); + + 
msg_body->num_channel = min_t(u8, req->n_channels, sizeof(msg_body->channels)); + for (i = 0; i < msg_body->num_channel; i++) { + struct wcn36xx_hal_channel_param *param = &msg_body->channels[i]; + u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER; + u32 ant_gain = WCN36XX_HAL_DEFAULT_ANT_GAIN; + + param->mhz = req->channels[i]->center_freq; + param->band_center_freq1 = req->channels[i]->center_freq; + param->band_center_freq2 = 0; + + if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR) + param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE; + + if (req->channels[i]->flags & IEEE80211_CHAN_RADAR) + param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_DFS; + + if (req->channels[i]->band == NL80211_BAND_5GHZ) { + param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_HT; + param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_VHT; + param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11A; + } else { + param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11BG; + } + + if (min_power > req->channels[i]->max_power) + min_power = req->channels[i]->max_power; + + if (req->channels[i]->max_antenna_gain) + ant_gain = req->channels[i]->max_antenna_gain; + + u32p_replace_bits(¶m->reg_info_1, min_power, + WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK); + u32p_replace_bits(¶m->reg_info_1, req->channels[i]->max_power, + WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK); + u32p_replace_bits(¶m->reg_info_1, req->channels[i]->max_reg_power, + WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK); + u32p_replace_bits(¶m->reg_info_1, 0, + WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK); + u32p_replace_bits(¶m->reg_info_2, ant_gain, + WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK); + + wcn36xx_dbg(WCN36XX_DBG_HAL, + "%s: freq=%u, channel_info=%08x, reg_info1=%08x, reg_info2=%08x\n", + __func__, param->mhz, param->channel_info, param->reg_info_1, + param->reg_info_2); + } + + mutex_lock(&wcn->hal_mutex); + + PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body)); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len); + if (ret) { + wcn36xx_err("Sending hal_update_channel_list failed\n"); + goto out; + } + + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_update_channel_list response failed err=%d\n", ret); + goto out; + } + +out: + kfree(msg_body); + mutex_unlock(&wcn->hal_mutex); + return ret; +} + static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) { struct wcn36xx_hal_switch_channel_rsp_msg *rsp; @@ -2184,6 +2266,59 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) return ret; } +int wcn36xx_smd_enter_imps(struct wcn36xx *wcn) +{ + struct wcn36xx_hal_enter_imps_req_msg msg_body; + int ret; + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_IMPS_REQ); + + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + if (ret) { + wcn36xx_err("Sending hal_enter_imps failed\n"); + goto out; + } + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_enter_imps response failed err=%d\n", ret); + goto out; + } + + wcn36xx_dbg(WCN36XX_DBG_HAL, "Entered idle mode\n"); +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + +int wcn36xx_smd_exit_imps(struct wcn36xx *wcn) +{ + struct wcn36xx_hal_exit_imps_req_msg msg_body; + int ret; + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_IMPS_REQ); + + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + if (ret) { + wcn36xx_err("Sending hal_exit_imps failed\n"); + goto out; 
+ } + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_exit_imps response failed err=%d\n", ret); + goto out; + } + wcn36xx_dbg(WCN36XX_DBG_HAL, "Exited idle mode\n"); +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim) { struct wcn36xx_hal_set_power_params_req_msg msg_body; @@ -2341,8 +2476,11 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn) INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ); set_feat_caps(msg_body.feat_caps, STA_POWERSAVE); - if (wcn->rf_id == RF_IRIS_WCN3680) + if (wcn->rf_id == RF_IRIS_WCN3680) { set_feat_caps(msg_body.feat_caps, DOT11AC); + set_feat_caps(msg_body.feat_caps, WLAN_CH144); + set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION); + } PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2623,30 +2761,52 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn, size_t len) { struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf; - struct wcn36xx_vif *tmp; + struct wcn36xx_vif *vif_priv; + struct ieee80211_vif *vif; + struct ieee80211_bss_conf *bss_conf; struct ieee80211_sta *sta; + bool found = false; if (len != sizeof(*rsp)) { wcn36xx_warn("Corrupted delete sta indication\n"); return -EIO; } - wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n", - rsp->addr2, rsp->sta_id); + wcn36xx_dbg(WCN36XX_DBG_HAL, + "delete station indication %pM index %d reason %d\n", + rsp->addr2, rsp->sta_id, rsp->reason_code); - list_for_each_entry(tmp, &wcn->vif_list, list) { + list_for_each_entry(vif_priv, &wcn->vif_list, list) { rcu_read_lock(); - sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2); - if (sta) - ieee80211_report_low_ack(sta, 0); + vif = wcn36xx_priv_to_vif(vif_priv); + + if (vif->type == NL80211_IFTYPE_STATION) { + /* We could call ieee80211_find_sta too, but checking + * bss_conf is clearer. 
+ */ + bss_conf = &vif->bss_conf; + if (vif_priv->sta_assoc && + !memcmp(bss_conf->bssid, rsp->addr2, ETH_ALEN)) { + found = true; + wcn36xx_dbg(WCN36XX_DBG_HAL, + "connection loss bss_index %d\n", + vif_priv->bss_index); + ieee80211_connection_loss(vif); + } + } else { + sta = ieee80211_find_sta(vif, rsp->addr2); + if (sta) { + found = true; + ieee80211_report_low_ack(sta, 0); + } + } + rcu_read_unlock(); - if (sta) + if (found) return 0; } - wcn36xx_warn("STA with addr %pM and index %d not found\n", - rsp->addr2, - rsp->sta_id); + wcn36xx_warn("BSS or STA with addr %pM not found\n", rsp->addr2); return -ENOENT; } @@ -3060,6 +3220,9 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_GTK_OFFLOAD_RSP: case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP: case WCN36XX_HAL_HOST_RESUME_RSP: + case WCN36XX_HAL_ENTER_IMPS_RSP: + case WCN36XX_HAL_EXIT_IMPS_RSP: + case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP: memcpy(wcn->hal_buf, buf, len); wcn->hal_rsp_len = len; complete(&wcn->hal_rsp_compl); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index d8bded03945d43..88e045dad8f3a1 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -70,6 +70,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t cha int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct cfg80211_scan_request *req); int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn); +int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req); int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr); int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index); @@ -163,4 +164,7 @@ int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn); int wcn36xx_smd_host_resume(struct wcn36xx *wcn); +int wcn36xx_smd_enter_imps(struct wcn36xx *wcn); +int wcn36xx_smd_exit_imps(struct wcn36xx *wcn); + #endif /* _SMD_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index cab196bb38cd4c..75951ccbc840ee 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -31,6 +31,13 @@ struct wcn36xx_rate { enum rate_info_bw bw; }; +/* Buffer descriptor rx_ch field is limited to 5-bit (4+1), a mapping is used + * for 11A Channels. 
+ */ +static const u8 ab_rx_ch_map[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, + 108, 112, 116, 120, 124, 128, 132, 136, 140, + 149, 153, 157, 161, 165, 144 }; + static const struct wcn36xx_rate wcn36xx_rate_table[] = { /* 11b rates */ { 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 }, @@ -224,6 +231,41 @@ static const struct wcn36xx_rate wcn36xx_rate_table[] = { { 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 }, }; +static struct sk_buff *wcn36xx_unchain_msdu(struct sk_buff_head *amsdu) +{ + struct sk_buff *skb, *first; + int total_len = 0; + int space; + + first = __skb_dequeue(amsdu); + + skb_queue_walk(amsdu, skb) + total_len += skb->len; + + space = total_len - skb_tailroom(first); + if (space > 0 && pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0) { + __skb_queue_head(amsdu, first); + return NULL; + } + + /* Walk list again, copying contents into msdu_head */ + while ((skb = __skb_dequeue(amsdu))) { + skb_copy_from_linear_data(skb, skb_put(first, skb->len), + skb->len); + dev_kfree_skb_irq(skb); + } + + return first; +} + +static void __skb_queue_purge_irq(struct sk_buff_head *list) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(list)) != NULL) + dev_kfree_skb_irq(skb); +} + int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) { struct ieee80211_rx_status status; @@ -245,6 +287,26 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) "BD <<< ", (char *)bd, sizeof(struct wcn36xx_rx_bd)); + if (bd->pdu.mpdu_data_off <= bd->pdu.mpdu_header_off || + bd->pdu.mpdu_len < bd->pdu.mpdu_header_len) + goto drop; + + if (bd->asf && !bd->esf) { /* chained A-MSDU chunks */ + /* Sanity check */ + if (bd->pdu.mpdu_data_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE) + goto drop; + + skb_put(skb, bd->pdu.mpdu_data_off + bd->pdu.mpdu_len); + skb_pull(skb, bd->pdu.mpdu_data_off); + + /* Only set status for first chained BD (with mac header) */ + goto done; + } + + if (bd->pdu.mpdu_header_off < sizeof(*bd) || + bd->pdu.mpdu_header_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE) + goto drop; + skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len); skb_pull(skb, bd->pdu.mpdu_header_off); @@ -291,6 +353,22 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) ieee80211_is_probe_resp(hdr->frame_control)) status.boottime_ns = ktime_get_boottime_ns(); + if (bd->scan_learn) { + /* If packet originates from hardware scanning, extract the + * band/channel from bd descriptor. + */ + u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; + + if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { + status.band = NL80211_BAND_5GHZ; + status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], + status.band); + } else { + status.band = NL80211_BAND_2GHZ; + status.freq = ieee80211_channel_to_frequency(hwch, status.band); + } + } + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { @@ -305,9 +383,37 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) (char *)skb->data, skb->len); } +done: + /* Chained AMSDU ? slow path */ + if (unlikely(bd->asf && !(bd->lsf && bd->esf))) { + if (bd->esf && !skb_queue_empty(&wcn->amsdu)) { + wcn36xx_err("Discarding non complete chain"); + __skb_queue_purge_irq(&wcn->amsdu); + } + + __skb_queue_tail(&wcn->amsdu, skb); + + if (!bd->lsf) + return 0; /* Not the last AMSDU, wait for more */ + + skb = wcn36xx_unchain_msdu(&wcn->amsdu); + if (!skb) + goto drop; + } + ieee80211_rx_irqsafe(wcn->hw, skb); return 0; + +drop: /* drop everything */ + wcn36xx_err("Drop frame! 
skb:%p len:%u hoff:%u doff:%u asf=%u esf=%u lsf=%u\n", + skb, bd->pdu.mpdu_len, bd->pdu.mpdu_header_off, + bd->pdu.mpdu_data_off, bd->asf, bd->esf, bd->lsf); + + dev_kfree_skb_irq(skb); + __skb_queue_purge_irq(&wcn->amsdu); + + return -EINVAL; } static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd, @@ -321,8 +427,6 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd, bd->pdu.mpdu_header_off; bd->pdu.mpdu_len = len; bd->pdu.tid = tid; - /* Use seq number generated by mac80211 */ - bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST; } static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn, @@ -419,6 +523,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd, tid = ieee80211_get_tid(hdr); /* TID->QID is one-to-one mapping */ bd->queue_id = tid; + bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS; + } else { + bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS; } if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT || @@ -429,6 +536,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd, if (ieee80211_is_any_nullfunc(hdr->frame_control)) { /* Don't use a regular queue for null packet (no ampdu) */ bd->queue_id = WCN36XX_TX_U_WQ_ID; + bd->bd_rate = WCN36XX_BD_RATE_CTRL; + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST; } if (bcast) { @@ -488,6 +598,8 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd, bd->queue_id = WCN36XX_TX_U_WQ_ID; *vif_priv = __vif_priv; + bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS; + wcn36xx_set_tx_pdu(bd, ieee80211_is_data_qos(hdr->frame_control) ? sizeof(struct ieee80211_qos_hdr) : @@ -502,10 +614,11 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct wcn36xx_vif *vif_priv = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - unsigned long flags; bool is_low = ieee80211_is_data(hdr->frame_control); bool bcast = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); + bool ack_ind = (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) && + !(info->flags & IEEE80211_TX_CTL_NO_ACK); struct wcn36xx_tx_bd bd; int ret; @@ -521,30 +634,16 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, bd.dpu_rf = WCN36XX_BMU_WQ_TX; - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + if (unlikely(ack_ind)) { wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n"); - spin_lock_irqsave(&wcn->dxe_lock, flags); - if (wcn->tx_ack_skb) { - spin_unlock_irqrestore(&wcn->dxe_lock, flags); - wcn36xx_warn("tx_ack_skb already set\n"); - return -EINVAL; - } - - wcn->tx_ack_skb = skb; - spin_unlock_irqrestore(&wcn->dxe_lock, flags); - /* Only one at a time is supported by fw. Stop the TX queues * until the ack status gets back. */ ieee80211_stop_queues(wcn->hw); - /* TX watchdog if no TX irq or ack indication received */ - mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10); - /* Request ack indication from the firmware */ - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - bd.tx_comp = 1; + bd.tx_comp = 1; } /* Data frames served first*/ @@ -558,14 +657,8 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, bd.tx_bd_sign = 0xbdbdbdbd; ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); - if (ret && (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) { - /* If the skb has not been transmitted, - * don't keep a reference to it. 
- */ - spin_lock_irqsave(&wcn->dxe_lock, flags); - wcn->tx_ack_skb = NULL; - spin_unlock_irqrestore(&wcn->dxe_lock, flags); - + if (unlikely(ret && ack_ind)) { + /* If the skb has not been transmitted, resume TX queue */ ieee80211_wake_queues(wcn->hw); } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h index 032216e82b2bee..b54311ffde9c53 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.h +++ b/drivers/net/wireless/ath/wcn36xx/txrx.h @@ -110,7 +110,8 @@ struct wcn36xx_rx_bd { /* 0x44 */ u32 exp_seq_num:12; u32 cur_seq_num:12; - u32 fr_type_subtype:8; + u32 rf_band:2; + u32 fr_type_subtype:6; /* 0x48 */ u32 msdu_size:16; diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index add6e527e83306..1c8d918137da24 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -128,7 +128,6 @@ struct wcn36xx_vif { enum wcn36xx_hal_bss_type bss_type; /* Power management */ - bool allow_bmps; enum wcn36xx_power_state pw_state; u8 bss_index; @@ -151,6 +150,8 @@ struct wcn36xx_vif { } rekey_data; struct list_head sta_list; + + int bmps_fail_ct; }; /** @@ -269,6 +270,9 @@ struct wcn36xx { struct sk_buff *tx_ack_skb; struct timer_list tx_ack_timer; + /* For A-MSDU re-aggregation */ + struct sk_buff_head amsdu; + /* RF module */ unsigned rf_id; @@ -276,7 +280,6 @@ struct wcn36xx { /* Debug file system entry */ struct wcn36xx_dfs_entry dfs; #endif /* CONFIG_WCN36XX_DEBUGFS */ - }; static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn, diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 1ff2679963f06d..764d1d14132b0e 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -723,11 +723,13 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, ndev = vif_to_ndev(vif); ether_addr_copy(ndev->perm_addr, ndev_main->perm_addr); if (is_valid_ether_addr(params->macaddr)) { - ether_addr_copy(ndev->dev_addr, params->macaddr); + eth_hw_addr_set(ndev, params->macaddr); } else { - ether_addr_copy(ndev->dev_addr, ndev_main->perm_addr); - ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << vif->mid)) | - 0x2; /* locally administered */ + u8 addr[ETH_ALEN]; + + ether_addr_copy(addr, ndev_main->perm_addr); + addr[0] = (addr[0] ^ (1 << vif->mid)) | 0x2; /* locally administered */ + eth_hw_addr_set(ndev, addr); } wdev = vif_to_wdev(vif); ether_addr_copy(wdev->address, ndev->dev_addr); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 3ba5b2550a8c19..7da87c9f363fdc 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1358,7 +1358,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil) ether_addr_copy(ndev->perm_addr, mac); ether_addr_copy(wiphy->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) - ether_addr_copy(ndev->dev_addr, mac); + eth_hw_addr_set(ndev, mac); if (rf_status) {/* bad RF cable? 
*/ wil_err(wil, "RF communication error 0x%04x", @@ -1431,7 +1431,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil) ether_addr_copy(ndev->perm_addr, mac); ether_addr_copy(wiphy->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) - ether_addr_copy(ndev->dev_addr, mac); + eth_hw_addr_set(ndev, mac); return 0; } @@ -1609,7 +1609,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) struct net_device *ndev = wil->main_ndev; ether_addr_copy(ndev->perm_addr, mac); - ether_addr_copy(ndev->dev_addr, ndev->perm_addr); + eth_hw_addr_set(ndev, ndev->perm_addr); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 30392eb1cbbd56..11946ecd0b9994 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1341,7 +1341,7 @@ struct wil6210_priv *wil_cfg80211_init(struct device *dev); void wil_cfg80211_deinit(struct wil6210_priv *wil); void wil_p2p_wdev_free(struct wil6210_priv *wil); -int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); +int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr); int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan, u8 edmg_chan, u8 hidden_ssid, u8 is_go); int wmi_pcp_stop(struct wil6210_vif *vif); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 2dc8406736f488..dd8abbb2884973 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -2097,7 +2097,7 @@ int wmi_echo(struct wil6210_priv *wil) WIL_WMI_CALL_GENERAL_TO_MS); } -int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) +int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr) { struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); struct wmi_set_mac_address_cmd cmd; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index febce4e8b3dd37..35c2e798d98bd1 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -600,7 +600,7 @@ static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, u16 data); static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, - u8 *data, int data_len); + const u8 *data, int data_len); static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index, u8 *data, int data_len); static void atmel_scan(struct atmel_private *priv, int specific_ssid); @@ -1296,7 +1296,7 @@ static int atmel_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; - memcpy (dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return atmel_open(dev); } @@ -3669,6 +3669,7 @@ static int probe_atmel_card(struct net_device *dev) { int rc = 0; struct atmel_private *priv = netdev_priv(dev); + u8 addr[ETH_ALEN] = {}; /* reset pccard */ if (priv->bus_type == BUS_TYPE_PCCARD) @@ -3693,7 +3694,9 @@ static int probe_atmel_card(struct net_device *dev) if (i == 0) { printk(KERN_ALERT "%s: MAC failed to boot MAC address reader.\n", dev->name); } else { - atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6); + + atmel_copy_to_host(dev, addr, atmel_read16(dev, MR2), 6); + eth_hw_addr_set(dev, addr); /* got address, now squash it again until the network interface is opened */ if (priv->bus_type == BUS_TYPE_PCCARD) @@ -3705,7 +3708,8 @@ static int probe_atmel_card(struct net_device *dev) /* Mac address easy in 
this case. */ priv->card_type = CARD_TYPE_PARALLEL_FLASH; atmel_write16(dev, BSR, 1); - atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6); + atmel_copy_to_host(dev, addr, 0xc000, 6); + eth_hw_addr_set(dev, addr); atmel_write16(dev, BSR, 0x200); rc = 1; } else { @@ -3713,7 +3717,8 @@ static int probe_atmel_card(struct net_device *dev) for the Mac Address */ priv->card_type = CARD_TYPE_SPI_FLASH; if (atmel_wakeup_firmware(priv) == 0) { - atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); + atmel_get_mib(priv, Mac_Address_Mib_Type, 0, addr, 6); + eth_hw_addr_set(dev, addr); /* got address, now squash it again until the network interface is opened */ @@ -3730,7 +3735,7 @@ static int probe_atmel_card(struct net_device *dev) 0x00, 0x04, 0x25, 0x00, 0x00, 0x00 }; printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name); - memcpy(dev->dev_addr, default_mac, ETH_ALEN); + eth_hw_addr_set(dev, default_mac); } } @@ -4103,7 +4108,7 @@ static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index, } static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index, - u8 *data, int data_len) + const u8 *data, int data_len) { struct get_set_mib m; m.type = type; diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c index d5a1a5c582366e..ac72ca39e409b2 100644 --- a/drivers/net/wireless/broadcom/b43/phy_g.c +++ b/drivers/net/wireless/broadcom/b43/phy_g.c @@ -2297,7 +2297,7 @@ static u8 b43_gphy_aci_scan(struct b43_wldev *dev) b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF); b43_set_all_gains(dev, 3, 8, 1); - start = (channel - 5 > 0) ? channel - 5 : 1; + start = (channel > 5) ? channel - 5 : 1; end = (channel + 5 < 14) ? channel + 5 : 13; for (i = start; i <= end; i++) { diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c index 06891b4f837b92..fdf78c10a05c2c 100644 --- a/drivers/net/wireless/broadcom/b43legacy/radio.c +++ b/drivers/net/wireless/broadcom/b43legacy/radio.c @@ -283,7 +283,7 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) & 0x7FFF); b43legacy_set_all_gains(dev, 3, 8, 1); - start = (channel - 5 > 0) ? channel - 5 : 1; + start = (channel > 5) ? channel - 5 : 1; end = (channel + 5 < 14) ? 
channel + 5 : 13; for (i = start; i <= end; i++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 9db12ffd2ff801..fb727778312c38 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1783,8 +1783,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) val = WPA_AUTH_PSK; break; default: - bphy_err(drvr, "invalid cipher group (%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); return -EINVAL; } } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { @@ -1816,8 +1816,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) profile->is_ft = true; break; default: - bphy_err(drvr, "invalid cipher group (%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); return -EINVAL; } } else if (val & WPA3_AUTH_SAE_PSK) { @@ -1838,8 +1838,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) } break; default: - bphy_err(drvr, "invalid cipher group (%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); return -EINVAL; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index db5f8535fdb57a..fed9cd5f29a26d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -244,7 +244,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) } else { brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data); memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN); - memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN); + eth_hw_addr_set(ifp->ndev, ifp->mac_addr); } return err; } @@ -655,7 +655,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool locked) ndev->ethtool_ops = &brcmf_ethtool_ops; /* set the mac address & netns */ - memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, ifp->mac_addr); dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config))); INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list); @@ -830,7 +830,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) ndev->netdev_ops = &brcmf_netdev_ops_p2p; /* set the mac address */ - memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, ifp->mac_addr); if (register_netdev(ndev) != 0) { bphy_err(drvr, "couldn't register the p2p net device\n"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c index 6d5188b78f2de0..0af452dca7664e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -75,6 +75,16 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&acepc_t8_data, }, + { + /* Cyberbook T116 rugged tablet */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "20170531"), + }, + /* The factory image nvram file is identical to the ACEPC T8 one */ + .driver_data = (void *)&acepc_t8_data, + }, { /* Match for the GPDwin which unfortunately uses somewhat * generic dmi strings, which is why we test for 4 strings. 
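Note on the MAC-address hunks in this part of the series (wil6210, atmel, airo, ipw2x00, brcmfmac): they all make the same conversion, replacing direct writes into ndev->dev_addr (memcpy()/ether_addr_copy()) with the eth_hw_addr_set()/dev_addr_set() helpers, since the address buffer is managed by the networking core and is meant to be treated as const by drivers. The sketch below is illustrative only and is not taken from any of the patches above; my_read_mac_from_otp() is a hypothetical device-specific helper standing in for whatever hardware read each driver actually does.

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* Hypothetical device-specific read of the factory MAC (placeholder). */
	static int my_read_mac_from_otp(u8 *addr);

	static int example_assign_hw_mac(struct net_device *ndev)
	{
		u8 addr[ETH_ALEN];
		int err;

		err = my_read_mac_from_otp(addr);
		if (err)
			return err;

		if (!is_valid_ether_addr(addr))
			eth_random_addr(addr);	/* fall back to a random locally administered address */

		/*
		 * Preferred over memcpy(ndev->dev_addr, addr, ETH_ALEN): the
		 * core owns dev_addr, so drivers go through the helper instead
		 * of writing the buffer directly.
		 */
		eth_hw_addr_set(ndev, addr);
		return 0;
	}

dev_addr_set() is the length-generic variant (it copies dev->addr_len bytes rather than assuming ETH_ALEN), which is presumably why the airo .ndo_set_mac_address path above uses it instead of eth_hw_addr_set().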
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 2f7bc3a70c6541..513c7e6421b23a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -29,7 +29,7 @@ static int brcmf_of_get_country_codes(struct device *dev, return (count == -EINVAL) ? 0 : count; } - cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL); + cc = devm_kzalloc(dev, struct_size(cc, table, count), GFP_KERNEL); if (!cc) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 9ac0d8c73d5a2a..4735063e4c0355 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2125,7 +2125,7 @@ static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif) struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); struct net_device *pri_ndev = cfg_to_ndev(cfg); struct brcmf_if *ifp = netdev_priv(pri_ndev); - u8 *addr = vif->wdev.netdev->dev_addr; + const u8 *addr = vif->wdev.netdev->dev_addr; return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN); } @@ -2135,7 +2135,7 @@ static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif) struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); struct net_device *pri_ndev = cfg_to_ndev(cfg); struct brcmf_if *ifp = netdev_priv(pri_ndev); - u8 *addr = vif->wdev.netdev->dev_addr; + const u8 *addr = vif->wdev.netdev->dev_addr; return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN); } diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 65dd8cff1b011e..45594f003ef7b4 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1109,7 +1109,7 @@ struct airo_info; static int get_dec_u16(char *buffer, int *start, int limit); static void OUT4500(struct airo_info *, u16 reg, u16 value); static unsigned short IN4500(struct airo_info *, u16 reg); -static u16 setup_card(struct airo_info*, u8 *mac, int lock); +static u16 setup_card(struct airo_info*, struct net_device *dev, int lock); static int enable_MAC(struct airo_info *ai, int lock); static void disable_MAC(struct airo_info *ai, int lock); static void enable_interrupts(struct airo_info*); @@ -2337,9 +2337,9 @@ static int airo_set_mac_address(struct net_device *dev, void *p) disable_MAC(ai, 1); writeConfigRid (ai, 1); enable_MAC(ai, 1); - memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len); + dev_addr_set(ai->dev, addr->sa_data); if (ai->wifidev) - memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len); + dev_addr_set(ai->wifidev, addr->sa_data); return 0; } @@ -2854,7 +2854,7 @@ static struct net_device *_init_airo_card(unsigned short irq, int port, } if (probe) { - if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) { + if (setup_card(ai, dev, 1) != SUCCESS) { airo_print_err(dev->name, "MAC could not be enabled"); rc = -EIO; goto err_out_map; @@ -2972,7 +2972,7 @@ int reset_airo_card(struct net_device *dev) if (reset_card (dev, 1)) return -1; - if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) { + if (setup_card(ai, dev, 1) != SUCCESS) { airo_print_err(dev->name, "MAC could not be enabled"); return -1; } @@ -3817,7 +3817,8 @@ static inline void set_auth_type(struct airo_info *local, int auth_type) local->last_auth = auth_type; } -static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock) 
+static int noinline_for_stack airo_readconfig(struct airo_info *ai, + struct net_device *dev, int lock) { int i, status; /* large variables, so don't inline this function, @@ -3861,9 +3862,7 @@ static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int } /* Save off the MAC */ - for (i = 0; i < ETH_ALEN; i++) { - mac[i] = ai->config.macAddr[i]; - } + eth_hw_addr_set(dev, ai->config.macAddr); /* Check to see if there are any insmod configured rates to add */ @@ -3879,7 +3878,7 @@ static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int } -static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) +static u16 setup_card(struct airo_info *ai, struct net_device *dev, int lock) { Cmd cmd; Resp rsp; @@ -3925,7 +3924,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if (lock) up(&ai->sem); if (ai->config.len == 0) { - status = airo_readconfig(ai, mac, lock); + status = airo_readconfig(ai, dev, lock); if (status != SUCCESS) return ERROR; } @@ -5654,7 +5653,7 @@ static int __maybe_unused airo_pci_resume(struct device *dev_d) if (prev_state != PCI_D1) { reset_card(dev, 0); mpi_init_descriptors(ai); - setup_card(ai, dev->dev_addr, 0); + setup_card(ai, dev, 0); clear_bit(FLAG_RADIO_OFF, &ai->flags); clear_bit(FLAG_PENDING_XMIT, &ai->flags); } else { @@ -7534,7 +7533,7 @@ static int airo_config_commit(struct net_device *dev, readSsidRid(local, &SSID_rid); if (test_bit(FLAG_MPI,&local->flags)) - setup_card(local, dev->dev_addr, 1); + setup_card(local, dev, 1); else reset_airo_card(dev); disable_MAC(local, 1); @@ -8208,7 +8207,7 @@ static int flashrestart(struct airo_info *ai, struct net_device *dev) if (status != SUCCESS) return status; } - status = setup_card(ai, dev->dev_addr, 1); + status = setup_card(ai, dev, 1); if (!test_bit(FLAG_MPI,&ai->flags)) for (i = 0; i < MAX_FIDS; i++) { diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 47eb89b773cf7f..2ace2b27ecad28 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -4685,7 +4685,7 @@ static int ipw2100_read_mac_address(struct ipw2100_priv *priv) return -EIO; } - memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(priv->net_dev, addr); IPW_DEBUG_INFO("card MAC is %pM\n", priv->net_dev->dev_addr); return 0; @@ -4712,7 +4712,7 @@ static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode) if (priv->config & CFG_CUSTOM_MAC) { memcpy(cmd.host_command_parameters, priv->mac_addr, ETH_ALEN); - memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(priv->net_dev, priv->mac_addr); } else memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr, ETH_ALEN); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ada6ce32c1f19e..23037bfc9e4cbf 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -199,7 +199,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv, struct clx2_tx_queue *txq, int qindex); static int ipw_queue_reset(struct ipw_priv *priv); -static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, +static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf, int len, int sync); static void ipw_tx_queue_free(struct ipw_priv *); @@ -2264,7 +2264,7 @@ static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) } static int ipw_send_cmd_pdu(struct ipw_priv 
*priv, u8 command, u8 len, - void *data) + const void *data) { struct host_cmd cmd = { .cmd = command, @@ -3777,7 +3777,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv, dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count, &q->q.dma_addr, GFP_KERNEL); if (!q->bd) { - IPW_ERROR("pci_alloc_consistent(%zd) failed\n", + IPW_ERROR("dma_alloc_coherent(%zd) failed\n", sizeof(q->bd[0]) * count); kfree(q->txb); q->txb = NULL; @@ -5033,7 +5033,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv, return used; } -static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, +static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf, int len, int sync) { struct clx2_tx_queue *txq = &priv->txq_cmd; @@ -11185,7 +11185,7 @@ static int ipw_up(struct ipw_priv *priv) ipw_init_ordinals(priv); if (!(priv->config & CFG_CUSTOM_MAC)) eeprom_parse_mac(priv, priv->mac_addr); - memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(priv->net_dev, priv->mac_addr); ipw_set_geo(priv); @@ -11542,7 +11542,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv) priv->prom_priv->priv = priv; strcpy(priv->prom_net_dev->name, "rtap%d"); - memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(priv->prom_net_dev, priv->mac_addr); priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index 98fe62737888c5..55cac934f4ee92 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -1945,7 +1945,7 @@ struct host_cmd { u8 cmd; u8 len; u16 reserved; - u32 *param; + const u32 *param; } __packed; /* XXX */ struct cmdlog_host_cmd { diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 45abb25b65a9f2..bd4e7d75295836 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3819,7 +3819,6 @@ il3945_pci_remove(struct pci_dev *pdev) il3945_unset_hw_params(il); /*netif_stop_queue(dev); */ - flush_workqueue(il->workqueue); /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes * il->workqueue... so we can't take down the workqueue diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 0223532fd56a08..d93900e62e3d33 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6731,7 +6731,6 @@ il4965_pci_remove(struct pci_dev *pdev) il_eeprom_free(il); /*netif_stop_queue(dev); */ - flush_workqueue(il->workqueue); /* ieee80211_unregister_hw calls il_mac_stop, which flushes * il->workqueue... 
so we can't take down the workqueue diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index d86918d162aa05..0d4656efe908b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -15,7 +15,7 @@ iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += queue/tx.o -iwlwifi-objs += fw/img.o fw/notif-wait.o +iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index 44c4fe97539014..116defb15afbdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2020 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index df6ac00340b262..ab2038a3fbe21b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2020 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index d8231cc821aede..1572097bccf17e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 66 +#define IWL_22000_UCODE_API_MAX 67 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -53,6 +53,9 @@ #define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-" #define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" #define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" +#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-" +#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-" + #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" @@ -106,6 +109,10 @@ IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode" +#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \ + IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -355,7 +362,7 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { .base_params = &iwl_ax210_base_params, .umac_prph_offset = 0x300000, .integrated = true, - /* TODO: the following values need to be checked */ + .low_latency_xtal = true, .xtal_latency = 12000, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; @@ -469,6 +476,14 @@ const char iwl_ax210_killer_1675w_name[] = 
"Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; const char iwl_ax210_killer_1675x_name[] = "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; +const char iwl_ax211_killer_1675s_name[] = + "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax211_killer_1675i_name[] = + "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax411_killer_1690s_name[] = + "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; +const char iwl_ax411_killer_1690i_name[] = + "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -850,6 +865,20 @@ const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { .num_rbds = IWL_NUM_RBDS_AX210_HE, }; +const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = { + .fw_name_pre = IWL_BZ_A_FM_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = { + .fw_name_pre = IWL_GL_A_FM_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -876,3 +905,5 @@ MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 6cdd7d983bdae6..e2e23d2bc1fe5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -3,11 +3,6 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2020 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 541a3ec8577708..20929e59c2f4d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 - 2020 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 1276df1c7a55bb..abb8696ba294ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2021 Intel Corporation */ #ifndef __iwl_agn_h__ #define __iwl_agn_h__ @@ -398,8 +398,10 @@ do { \ if (!iwl_is_rfkill((m))) \ IWL_ERR(m, fmt, ##args); \ else \ - __iwl_err((m)->dev, true, \ - !iwl_have_debug_level(IWL_DL_RADIO), \ + __iwl_err((m)->dev, \ + iwl_have_debug_level(IWL_DL_RADIO) ? \ + IWL_ERR_MODE_RFKILL : \ + IWL_ERR_MODE_TRACE_ONLY, \ fmt, ##args); \ } while (0) #else @@ -408,7 +410,8 @@ do { \ if (!iwl_is_rfkill((m))) \ IWL_ERR(m, fmt, ##args); \ else \ - __iwl_err((m)->dev, true, true, fmt, ##args); \ + __iwl_err((m)->dev, IWL_ERR_MODE_TRACE_ONLY, \ + fmt, ##args); \ } while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 91104920183876..b246dbd371b350 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -3,10 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2018 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 4bd792c06ff6e6..bbd57409120125 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ /* * Please use this file (dev.h) for driver implementation definitions. diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index c3e25885d1943a..39e40901fa4647 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2019 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index e8a4d604b91060..71f67a019cf60f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -3,11 +3,6 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
* Copyright (C) 2019 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h index 6fe20180dc87ce..5038fc378a1f78 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #ifndef __iwl_leds_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 3b937a7dd40323..40d790b36d85b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 75e7665773c52c..754876cd27ce89 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -6,11 +6,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index cc7b69fd14d37b..fbd57a2b2bd506 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -6,11 +6,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -52,7 +47,6 @@ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); /* Please keep this array *SORTED* by hex value. @@ -1525,7 +1519,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) kfree(priv->nvm_data); /*netif_stop_queue(dev); */ - flush_workqueue(priv->workqueue); /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes * priv->workqueue... 
so we can't take down the workqueue diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c index 93ef023905c961..6d16a7105656ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -6,10 +6,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h index 3f8db1fc4b5902..f38201ce1e9914 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h @@ -5,10 +5,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #ifndef __iwl_power_setting_h__ #define __iwl_power_setting_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 548540dd0c0f75..b7c8b209bfea49 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3,11 +3,6 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2019 - 2020 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index 68a840d739e836..0b47f1993c5db3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #ifndef __iwl_agn_rs_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 3cd7b423c588df..db0c41bbeb0e19 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -7,11 +7,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portionhelp of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 12a3d464ae640e..70338bc7bb5402 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -3,11 +3,6 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Deutschland GmbH - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index c4ecf6ed218602..2d38227dfdd20b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -3,10 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2018 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index ddc14059b07d15..8f7a0f36c276b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -5,11 +5,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index 2684a924ba5749..43e8d04d5a8b22 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -6,10 +6,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 3b0ff458a158b7..7ace052fc78a49 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -5,10 +5,6 @@ * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #ifndef __iwl_tt_setting_h__ #define __iwl_tt_setting_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 847b8e07f81c51..60a7b61d59aa33 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2019 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index 24194c79121817..4b27a53d0bb4aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -3,11 +3,6 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 1efac0b2a94d73..bf431fa4fe81fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -184,9 +184,11 @@ int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32); -union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, - union acpi_object *data, - int data_size, int *tbl_rev) +union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, + union acpi_object *data, + int min_data_size, + int max_data_size, + int *tbl_rev) { int i; union acpi_object *wifi_pkg; @@ -196,7 +198,7 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, * describes the domain, and one more entry, otherwise there's * no point in reading it. */ - if (WARN_ON_ONCE(data_size < 2)) + if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size)) return ERR_PTR(-EINVAL); /* @@ -222,7 +224,8 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, /* skip entries that are not a package with the right size */ if (wifi_pkg->type != ACPI_TYPE_PACKAGE || - wifi_pkg->package.count != data_size) + wifi_pkg->package.count < min_data_size || + wifi_pkg->package.count > max_data_size) continue; domain = &wifi_pkg->package.elements[0]; @@ -236,7 +239,7 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, found: return wifi_pkg; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg); +IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, @@ -707,49 +710,103 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; int i, j, k, ret, tbl_rev; - int idx = 1; /* start from one to skip the domain */ - u8 num_bands; + u8 num_bands, num_profiles; + static const struct { + u8 revisions; + u8 bands; + u8 profiles; + u8 min_profiles; + } rev_data[] = { + { + .revisions = BIT(3), + .bands = ACPI_GEO_NUM_BANDS_REV2, + .profiles = ACPI_NUM_GEO_PROFILES_REV3, + .min_profiles = 3, + }, + { + .revisions = BIT(2), + .bands = ACPI_GEO_NUM_BANDS_REV2, + .profiles = ACPI_NUM_GEO_PROFILES, + }, + { + .revisions = BIT(0) | BIT(1), + .bands = ACPI_GEO_NUM_BANDS_REV0, + .profiles = ACPI_NUM_GEO_PROFILES, + }, + }; + int idx; + /* start from one to skip the domain */ + int entry_idx = 1; + + BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3); + BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES); data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - /* start by trying to read revision 2 */ - wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WGDS_WIFI_DATA_SIZE_REV2, - &tbl_rev); - if 
(!IS_ERR(wifi_pkg)) { - if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); - goto out_free; - } - - num_bands = ACPI_GEO_NUM_BANDS_REV2; - - goto read_table; - } - - /* then try revision 0 (which is the same as 1) */ - wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WGDS_WIFI_DATA_SIZE_REV0, - &tbl_rev); - if (!IS_ERR(wifi_pkg)) { - if (tbl_rev != 0 && tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + /* read the highest revision we understand first */ + for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) { + /* min_profiles != 0 requires num_profiles header */ + u32 hdr_size = 1 + !!rev_data[idx].min_profiles; + u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE * + rev_data[idx].bands; + u32 max_size = hdr_size + profile_size * rev_data[idx].profiles; + u32 min_size; + + if (!rev_data[idx].min_profiles) + min_size = max_size; + else + min_size = hdr_size + + profile_size * rev_data[idx].min_profiles; + + wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data, + min_size, max_size, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (!(BIT(tbl_rev) & rev_data[idx].revisions)) + continue; + + num_bands = rev_data[idx].bands; + num_profiles = rev_data[idx].profiles; + + if (rev_data[idx].min_profiles) { + /* read header that says # of profiles */ + union acpi_object *entry; + + entry = &wifi_pkg->package.elements[entry_idx]; + entry_idx++; + if (entry->type != ACPI_TYPE_INTEGER || + entry->integer.value > num_profiles) { + ret = -EINVAL; + goto out_free; + } + num_profiles = entry->integer.value; + + /* + * this also validates >= min_profiles since we + * otherwise wouldn't have gotten the data when + * looking up in ACPI + */ + if (wifi_pkg->package.count != + min_size + profile_size * num_profiles) { + ret = -EINVAL; + goto out_free; + } + } + goto read_table; } - - num_bands = ACPI_GEO_NUM_BANDS_REV0; - - goto read_table; } - ret = PTR_ERR(wifi_pkg); + if (idx < ARRAY_SIZE(rev_data)) + ret = PTR_ERR(wifi_pkg); + else + ret = -ENOENT; goto out_free; read_table: fwrt->geo_rev = tbl_rev; - for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { + for (i = 0; i < num_profiles; i++) { for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { union acpi_object *entry; @@ -762,7 +819,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) fwrt->geo_profiles[i].bands[j].max = fwrt->geo_profiles[i].bands[1].max; } else { - entry = &wifi_pkg->package.elements[idx++]; + entry = &wifi_pkg->package.elements[entry_idx]; + entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; @@ -779,7 +837,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) fwrt->geo_profiles[i].bands[j].chains[k] = fwrt->geo_profiles[i].bands[1].chains[k]; } else { - entry = &wifi_pkg->package.elements[idx++]; + entry = &wifi_pkg->package.elements[entry_idx]; + entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; @@ -803,10 +862,10 @@ IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) { /* - * The GEO_TX_POWER_LIMIT command is not supported on earlier - * firmware versions. Unfortunately, we don't have a TLV API - * flag to rely on, so rely on the major version which is in - * the first byte of ucode_ver. This was implemented + * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on + * earlier firmware versions. Unfortunately, we don't have a + * TLV API flag to rely on, so rely on the major version which + * is in the first byte of ucode_ver. 
This was implemented * initially on version 38 and then backported to 17. It was * also backported to 29, but only for 7265D devices. The * intention was to have it in 36 as well, but not all 8000 @@ -822,14 +881,15 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) IWL_EXPORT_SYMBOL(iwl_sar_geo_support); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset *table, u32 n_bands) + struct iwl_per_chain_offset *table, + u32 n_bands, u32 n_profiles) { int i, j; if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; - for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { + for (i = 0; i < n_profiles; i++) { for (j = 0; j < n_bands; j++) { struct iwl_per_chain_offset *chain = &table[i * n_bands + j]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 16ed0995b51e7f..4aaa8a6b071ba9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -29,6 +29,7 @@ #define ACPI_SAR_PROFILE_NUM 4 #define ACPI_NUM_GEO_PROFILES 3 +#define ACPI_NUM_GEO_PROFILES_REV3 8 #define ACPI_GEO_PER_CHAIN_SIZE 3 #define ACPI_SAR_NUM_CHAINS_REV0 2 @@ -59,13 +60,6 @@ #define ACPI_GEO_NUM_BANDS_REV2 3 #define ACPI_GEO_NUM_CHAINS 2 -#define ACPI_WGDS_WIFI_DATA_SIZE_REV0 (ACPI_NUM_GEO_PROFILES * \ - ACPI_GEO_NUM_BANDS_REV0 * \ - ACPI_GEO_PER_CHAIN_SIZE + 1) -#define ACPI_WGDS_WIFI_DATA_SIZE_REV2 (ACPI_NUM_GEO_PROFILES * \ - ACPI_GEO_NUM_BANDS_REV2 * \ - ACPI_GEO_PER_CHAIN_SIZE + 1) - #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 @@ -115,8 +109,10 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, + DSM_FUNC_ENABLE_6E = 3, DSM_FUNC_11AX_ENABLEMENT = 6, - DSM_FUNC_ENABLE_UNII4_CHAN = 7 + DSM_FUNC_ENABLE_UNII4_CHAN = 7, + DSM_FUNC_ACTIVATE_CHANNEL = 8 }; enum iwl_dsm_values_srd { @@ -158,10 +154,11 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value); -union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, - union acpi_object *data, - int data_size, int *tbl_rev); - +union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, + union acpi_object *data, + int min_data_size, + int max_data_size, + int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * @@ -198,7 +195,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset *table, u32 n_bands); + struct iwl_per_chain_offset *table, + u32 n_bands, u32 n_profiles); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, int *block_list_size); @@ -230,10 +228,11 @@ static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, return -ENOENT; } -static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, - union acpi_object *data, - int data_size, - int *tbl_rev) +static inline union acpi_object * +iwl_acpi_get_wifi_pkg_range(struct device *dev, + union acpi_object *data, + int min_data_size, int max_data_size, + int *tbl_rev) { return ERR_PTR(-ENOENT); } @@ -293,4 +292,14 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt } #endif /* CONFIG_ACPI */ + +static inline union acpi_object * +iwl_acpi_get_wifi_pkg(struct device *dev, + union acpi_object *data, + int data_size, int 
*tbl_rev) +{ + return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size, + tbl_rev); +} + #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 3ec82cae398169..1503119ea910aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -624,7 +624,7 @@ struct iwl_wowlan_status_v6 { } __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ /** - * struct iwl_wowlan_status - WoWLAN status + * struct iwl_wowlan_status_v7 - WoWLAN status * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter @@ -693,49 +693,6 @@ struct iwl_wowlan_status_v9 { u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */ -/** - * struct iwl_wowlan_status - WoWLAN status - * @gtk: GTK data - * @igtk: IGTK data - * @bigtk: BIGTK data - * @replay_ctr: GTK rekey replay counter - * @pattern_number: number of the matched pattern - * @non_qos_seq_ctr: non-QoS sequence counter to use next - * @qos_seq_ctr: QoS sequence counters to use next - * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason - * @num_of_gtk_rekeys: number of GTK rekeys - * @tid_tear_down: bitmap of TIDs torn down - * @reserved: reserved - * @received_beacons: number of received beacons - * @wake_packet_length: wakeup packet length - * @wake_packet_bufsize: wakeup packet buffer size - * @tid_tear_down: bit mask of tids whose BA sessions were closed - * in suspend state - * @wake_packet: wakeup packet - */ -struct iwl_wowlan_status { - struct iwl_wowlan_gtk_status gtk[1]; - struct iwl_wowlan_igtk_status igtk[1]; - struct iwl_wowlan_igtk_status bigtk[WOWLAN_IGTK_KEYS_NUM]; - __le64 replay_ctr; - __le16 pattern_number; - __le16 non_qos_seq_ctr; - __le16 qos_seq_ctr[8]; - __le32 wakeup_reasons; - __le32 num_of_gtk_rekeys; - u8 tid_tear_down; - u8 reserved[3]; - __le32 received_beacons; - __le32 wake_packet_length; - __le32 wake_packet_bufsize; - u8 wake_packet[]; /* can be truncated from _length to _bufsize */ -} __packed; /* WOWLAN_STATUSES_API_S_VER_11 */ - -static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk) -{ - return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK; -} - /* TODO: NetDetect API */ #endif /* __iwl_fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index d8b5870d6e9af5..3988f5fea33ab0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -7,6 +7,7 @@ #include +#define IWL_FW_INI_HW_SMEM_REGION_ID 15 #define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 #define IWL_FW_INI_MAX_CFG_NAME 64 @@ -242,6 +243,62 @@ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_hcmd hcmd; } __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */ +/** +* struct iwl_fw_ini_conf_tlv - preset configuration TLV +* +* @address: the base address +* @value: value to set at address + +*/ +struct iwl_fw_ini_addr_val { + __le32 address; + __le32 value; +} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */ + +/** + * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory. + * + * @hdr: debug header + * @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point + * @set_type: write access type preset token for time point. 
+ * one of &enum iwl_fw_ini_config_set_type + * @addr_offset: the offset to add to any item in address[0] field + * @addr_val: address value pair + */ +struct iwl_fw_ini_conf_set_tlv { + struct iwl_fw_ini_header hdr; + __le32 time_point; + __le32 set_type; + __le32 addr_offset; + struct iwl_fw_ini_addr_val addr_val[0]; +} __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */ + +/** + * enum iwl_fw_ini_config_set_type + * + * @IWL_FW_INI_CONFIG_SET_TYPE_INVALID: invalid config set + * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: for PERIPHERY MAC configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY: for PERIPHERY PHY configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX: for PERIPHERY AUX configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: for DEVICE MEMORY configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration + * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration set types supported +*/ + +enum iwl_fw_ini_config_set_type { + IWL_FW_INI_CONFIG_SET_TYPE_INVALID = 0, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX, + IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY, + IWL_FW_INI_CONFIG_SET_TYPE_CSR, + IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR, + IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM, + IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM, +} __packed; + /** * enum iwl_fw_ini_allocation_id * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 8adccd5da095ff..029ae64bf2b21a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -361,6 +361,41 @@ struct iwl_buf_alloc_cmd { struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */ +#define DRAM_INFO_FIRST_MAGIC_WORD 0x76543210 +#define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF + +/** + * struct iwl_dram_info - DRAM fragments allocation struct + * + * Driver will fill in the first 1K(+) of the pointed DRAM fragment + * + * @first_word: magic word value + * @second_word: magic word value + * @dram_frags: DRAM fragmentation details +*/ +struct iwl_dram_info { + __le32 first_word; + __le32 second_word; + struct iwl_buf_alloc_cmd dram_frags[IWL_FW_INI_ALLOCATION_NUM - 1]; +} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */ + +/** + * struct iwl_dbgc1_info - DBGC1 address and size + * + * Driver will fill the dbgc1 address and size at address based on config TLV.
+ * + * @first_word: all 0 set as identifier + * @dbgc1_add_lsb: LSB bits of DBGC1 physical address + * @dbgc1_add_msb: MSB bits of DBGC1 physical address + * @dbgc1_size: DBGC1 size +*/ +struct iwl_dbgc1_info { + __le32 first_word; + __le32 dbgc1_add_lsb; + __le32 dbgc1_add_msb; + __le32 dbgc1_size; +} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */ + /** * struct iwl_dbg_host_event_cfg_cmd * @enabled_severities: enabled severities diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 6bbb8b8c91cd60..12af94e166ed0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -206,7 +206,7 @@ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8), IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), - IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, + IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_AB_MSK, IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27), @@ -629,6 +629,7 @@ enum iwl_location_bw { IWL_LOCATION_BW_20MHZ, IWL_LOCATION_BW_40MHZ, IWL_LOCATION_BW_80MHZ, + IWL_LOCATION_BW_160MHZ, }; #define TK_11AZ_LEN 32 @@ -1500,7 +1501,9 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 { u8 reserved[3]; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; -} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6 */ +} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6, + LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */ + /** * enum iwl_tof_response_status - tof response status @@ -1581,7 +1584,8 @@ struct iwl_tof_range_rsp_ntfy_v8 { u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS]; -} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8 */ +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8, + LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */ #define IWL_MVM_TOF_MCSI_BUF_SIZE (245) /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 6610d1234f74df..d088c820b1a914 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -39,9 +39,9 @@ enum iwl_mac_conf_subcmd_ids { PROBE_RESPONSE_DATA_NOTIF = 0xFC, /** - * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif + * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif */ - CHANNEL_SWITCH_NOA_NOTIF = 0xFF, + CHANNEL_SWITCH_START_NOTIF = 0xFF, }; #define IWL_P2P_NOA_DESC_COUNT (2) @@ -102,11 +102,11 @@ struct iwl_missed_vap_notif { } __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */ /** - * struct iwl_channel_switch_noa_notif - Channel switch NOA notification + * struct iwl_channel_switch_start_notif - Channel switch start notification * * @id_and_color: ID and color of the MAC */ -struct iwl_channel_switch_noa_notif { +struct iwl_channel_switch_start_notif { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h 
b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 7be7715b431dfd..11f0bd283e499f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -138,6 +138,8 @@ struct iwl_mac_data_ibss { * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w) * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT + * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used + * during 802.1X negotiation (and allowed during 4-way-HS) */ enum iwl_mac_data_policy { TWT_SUPPORTED = BIT(0), @@ -145,6 +147,7 @@ enum iwl_mac_data_policy { FLEXIBLE_TWT_SUPPORTED = BIT(2), PROTECTED_TWT_SUPPORTED = BIT(3), BROADCAST_TWT_SUPPORTED = BIT(4), + COEX_HIGH_PRIORITY_ENABLE = BIT(5), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index cf48c6fa8f6550..3551a3f1c1aab9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -471,6 +471,29 @@ struct iwl_lari_config_change_cmd_v4 { __le32 oem_unii4_allow_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */ +/** + * struct iwl_lari_config_change_cmd_v5 - change LARI configuration + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * @chan_state_active_bitmap: Bitmap for overriding channel state to active. + * Each bit represents a country or region to activate, according to the BIOS + * definitions. 
+ */ +struct iwl_lari_config_change_cmd_v5 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; +} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */ + /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index d07a632f1af7c4..c04f2521fcb3be 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,9 +29,9 @@ enum iwl_phy_ops_subcmd_ids { TEMP_REPORTING_THRESHOLDS_CMD = 0x04, /** - * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd + * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd */ - GEO_TX_POWER_LIMIT = 0x05, + PER_CHAIN_LIMIT_OFFSET_CMD = 0x05, /** * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 86445385f072c4..4d671c878bb7a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -378,9 +378,10 @@ struct iwl_dev_tx_power_cmd { }; }; -#define IWL_NUM_GEO_PROFILES 3 -#define IWL_NUM_BANDS_PER_CHAIN_V1 2 -#define IWL_NUM_BANDS_PER_CHAIN_V2 3 +#define IWL_NUM_GEO_PROFILES 3 +#define IWL_NUM_GEO_PROFILES_V3 8 +#define IWL_NUM_BANDS_PER_CHAIN_V1 2 +#define IWL_NUM_BANDS_PER_CHAIN_V2 3 /** * enum iwl_geo_per_chain_offset_operation - type of operation @@ -390,10 +391,10 @@ struct iwl_dev_tx_power_cmd { enum iwl_geo_per_chain_offset_operation { IWL_PER_CHAIN_OFFSET_SET_TABLES, IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, -}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */ +}; /* PER_CHAIN_OFFSET_OPERATION_E */ /** - * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. + * struct iwl_per_chain_offset - embedded struct for PER_CHAIN_LIMIT_OFFSET_CMD. * @max_tx_power: maximum allowed tx power. * @chain_a: tx power offset for chain a. * @chain_b: tx power offset for chain b. @@ -405,17 +406,17 @@ struct iwl_per_chain_offset { } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ /** - * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd. + * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. */ struct iwl_geo_tx_power_profiles_cmd_v1 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; -} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */ +} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */ /** - * struct iwl_geo_tx_power_profile_cmd_v2 - struct for GEO_TX_POWER_LIMIT cmd. + * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. 
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: BIOS table revision. @@ -424,10 +425,10 @@ struct iwl_geo_tx_power_profiles_cmd_v2 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; __le32 table_revision; -} __packed; /* GEO_TX_POWER_LIMIT_VER_2 */ +} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */ /** - * struct iwl_geo_tx_power_profile_cmd_v3 - struct for GEO_TX_POWER_LIMIT cmd. + * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: BIOS table revision. @@ -436,21 +437,47 @@ struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; -} __packed; /* GEO_TX_POWER_LIMIT_VER_3 */ +} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */ + +/** + * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. + * @table_revision: BIOS table revision. + */ +struct iwl_geo_tx_power_profiles_cmd_v4 { + __le32 ops; + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1]; + __le32 table_revision; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */ + +/** + * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. + * @table_revision: BIOS table revision. + */ +struct iwl_geo_tx_power_profiles_cmd_v5 { + __le32 ops; + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2]; + __le32 table_revision; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */ union iwl_geo_tx_power_profiles_cmd { struct iwl_geo_tx_power_profiles_cmd_v1 v1; struct iwl_geo_tx_power_profiles_cmd_v2 v2; struct iwl_geo_tx_power_profiles_cmd_v3 v3; + struct iwl_geo_tx_power_profiles_cmd_v4 v4; + struct iwl_geo_tx_power_profiles_cmd_v5 v5; }; /** - * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd + * struct iwl_geo_tx_power_profiles_resp - response to PER_CHAIN_LIMIT_OFFSET_CMD cmd * @profile_idx: current geo profile in use */ struct iwl_geo_tx_power_profiles_resp { __le32 profile_idx; -} __packed; /* GEO_TX_POWER_LIMIT_RESP */ +} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */ /** * union iwl_ppag_table_cmd - union for all versions of PPAG command diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index fc2fa49e982521..a09081d7ed45ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -184,6 +184,14 @@ struct iwl_tlc_update_notif { __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + /* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; @@ -226,6 +234,8 @@ enum { IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, }; #define 
IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) @@ -248,7 +258,7 @@ enum { }; /* - * rate_n_flags bit fields + * rate_n_flags bit fields version 1 * * The 32-bit value has different layouts in the low 8 bites depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats @@ -266,15 +276,15 @@ enum { /* Bit 8: (1) HT format, (0) legacy or VHT format */ #define RATE_MCS_HT_POS 8 -#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) +#define RATE_MCS_HT_MSK_V1 BIT(RATE_MCS_HT_POS) /* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ -#define RATE_MCS_CCK_POS 9 -#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) +#define RATE_MCS_CCK_POS_V1 9 +#define RATE_MCS_CCK_MSK_V1 BIT(RATE_MCS_CCK_POS_V1) /* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ -#define RATE_MCS_VHT_POS 26 -#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) +#define RATE_MCS_VHT_POS_V1 26 +#define RATE_MCS_VHT_MSK_V1 BIT(RATE_MCS_VHT_POS_V1) /* @@ -300,15 +310,16 @@ enum { * streams and 16-23 have three streams. We could also support MCS 32 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) */ -#define RATE_HT_MCS_RATE_CODE_MSK 0x7 -#define RATE_HT_MCS_NSS_POS 3 -#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) +#define RATE_HT_MCS_RATE_CODE_MSK_V1 0x7 +#define RATE_HT_MCS_NSS_POS_V1 3 +#define RATE_HT_MCS_NSS_MSK_V1 (3 << RATE_HT_MCS_NSS_POS_V1) +#define RATE_HT_MCS_MIMO2_MSK BIT(RATE_HT_MCS_NSS_POS_V1) /* Bit 10: (1) Use Green Field preamble */ #define RATE_HT_MCS_GF_POS 10 #define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) -#define RATE_HT_MCS_INDEX_MSK 0x3f +#define RATE_HT_MCS_INDEX_MSK_V1 0x3f /* * Very High-throughput (VHT) rate format for bits 7:0 @@ -324,6 +335,7 @@ enum { #define RATE_VHT_MCS_RATE_CODE_MSK 0xf #define RATE_VHT_MCS_NSS_POS 4 #define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) +#define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS) /* * Legacy OFDM rate format for bits 7:0 @@ -347,37 +359,30 @@ enum { * 110) 11 Mbps * (bit 7 is 0) */ -#define RATE_LEGACY_RATE_MSK 0xff +#define RATE_LEGACY_RATE_MSK_V1 0xff /* Bit 10 - OFDM HE */ -#define RATE_MCS_HE_POS 10 -#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS) +#define RATE_MCS_HE_POS_V1 10 +#define RATE_MCS_HE_MSK_V1 BIT(RATE_MCS_HE_POS_V1) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT */ #define RATE_MCS_CHAN_WIDTH_POS 11 -#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << RATE_MCS_CHAN_WIDTH_POS) /* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ -#define RATE_MCS_SGI_POS 13 -#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) +#define RATE_MCS_SGI_POS_V1 13 +#define RATE_MCS_SGI_MSK_V1 BIT(RATE_MCS_SGI_POS_V1) /* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ #define RATE_MCS_ANT_POS 14 #define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ RATE_MCS_ANT_B_MSK) -#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ - RATE_MCS_ANT_C_MSK) -#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK +#define RATE_MCS_ANT_MSK 
RATE_MCS_ANT_AB_MSK /* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 @@ -411,27 +416,27 @@ enum { * 3 (does not occur) */ #define RATE_MCS_HE_GI_LTF_POS 20 -#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) +#define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS) /* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ -#define RATE_MCS_HE_TYPE_POS 22 -#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) -#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) -#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) -#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) -#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_POS_V1 22 +#define RATE_MCS_HE_TYPE_SU_V1 (0 << RATE_MCS_HE_TYPE_POS_V1) +#define RATE_MCS_HE_TYPE_EXT_SU_V1 BIT(RATE_MCS_HE_TYPE_POS_V1) +#define RATE_MCS_HE_TYPE_MU_V1 (2 << RATE_MCS_HE_TYPE_POS_V1) +#define RATE_MCS_HE_TYPE_TRIG_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) +#define RATE_MCS_HE_TYPE_MSK_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ -#define RATE_MCS_DUP_POS 24 -#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) +#define RATE_MCS_DUP_POS_V1 24 +#define RATE_MCS_DUP_MSK_V1 (3 << RATE_MCS_DUP_POS_V1) /* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ -#define RATE_MCS_LDPC_POS 27 -#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) +#define RATE_MCS_LDPC_POS_V1 27 +#define RATE_MCS_LDPC_MSK_V1 BIT(RATE_MCS_LDPC_POS_V1) /* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ -#define RATE_MCS_HE_106T_POS 28 -#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) +#define RATE_MCS_HE_106T_POS_V1 28 +#define RATE_MCS_HE_106T_MSK_V1 BIT(RATE_MCS_HE_106T_POS_V1) /* Bit 30-31: (1) RTS, (2) CTS */ #define RATE_MCS_RTS_REQUIRED_POS (30) @@ -440,6 +445,152 @@ enum { #define RATE_MCS_CTS_REQUIRED_POS (31) #define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS) +/* rate_n_flags bit field version 2 + * + * The 32-bit value has different layouts in the low 8 bits depending on the + * format. There are three formats, HT, VHT and legacy (11abg, with subformats + * for CCK and OFDM). 
+ * + */ + +/* Bits 10-8: rate format + * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT) + * (3) Very High-throughput (VHT) (4) High-efficiency (HE) + * (5) Extremely High-throughput (EHT) + */ +#define RATE_MCS_MOD_TYPE_POS 8 +#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS) + +/* + * Legacy CCK rate format for bits 0:3: + * + * (0) 0xa - 1 Mbps + * (1) 0x14 - 2 Mbps + * (2) 0x37 - 5.5 Mbps + * (3) 0x6e - 11 Mbps + * + * Legacy OFDM rate format for bits 3:0: + * + * (0) 6 Mbps + * (1) 9 Mbps + * (2) 12 Mbps + * (3) 18 Mbps + * (4) 24 Mbps + * (5) 36 Mbps + * (6) 48 Mbps + * (7) 54 Mbps + * + */ +#define RATE_LEGACY_RATE_MSK 0x7 + +/* + * HT, VHT, HE, EHT rate format for bits 3:0 + * 3-0: MCS + * + */ +#define RATE_HT_MCS_CODE_MSK 0x7 +#define RATE_MCS_NSS_POS 4 +#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS) +#define RATE_MCS_CODE_MSK 0xf +#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \ + ((r) & RATE_HT_MCS_CODE_MSK)) + +/* Bits 7-5: reserved */ + +/* + * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz + */ +#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS) + +/* Bit 15-14: Antenna selection: + * Bit 14: Ant A active + * Bit 15: Ant B active + * + * All relevant definitions are same as in v1 + */ + +/* Bit 16: (1) LDPC enabled, (0) LDPC disabled */ +#define RATE_MCS_LDPC_POS 16 +#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) + +/* Bit 17: (0) SS, (1) SS*2 (same as v1) */ + +/* Bit 18: OFDM-HE dual carrier mode (same as v1) */ + +/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */ + +/* + * Bit 22-20: HE LTF type and guard interval + * CCK: + * 0 long preamble + * 1 short preamble + * HT/VHT: + * 0 0.8us + * 1 0.4us + * HE (ext) SU: + * 0 1xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 4xLTF+3.2us + * 4 4xLTF+0.8us + * HE MU: + * 0 4xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 4xLTF+3.2us + * HE TRIG: + * 0 1xLTF+1.6us + * 1 2xLTF+1.6us + * 2 4xLTF+3.2us + * */ +#define RATE_MCS_HE_GI_LTF_MSK (0x7 << RATE_MCS_HE_GI_LTF_POS) +#define RATE_MCS_SGI_POS RATE_MCS_HE_GI_LTF_POS +#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) +#define RATE_MCS_HE_SU_4_LTF 3 +#define RATE_MCS_HE_SU_4_LTF_08_GI 4 + +/* Bit 24-23: HE type.
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ +#define RATE_MCS_HE_TYPE_POS 23 +#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) + +/* Bit 25: duplicate channel enabled + * + * if this bit is set, duplicate is according to BW (bits 11-13): + * + * CCK: 2x 20MHz + * OFDM Legacy: N x 20Mhz, (N = BW \ 2 , either 2, 4, 8, 16) + * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160) + * */ +#define RATE_MCS_DUP_POS 25 +#define RATE_MCS_DUP_MSK (1 << RATE_MCS_DUP_POS) + +/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ +#define RATE_MCS_HE_106T_POS 26 +#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) + +/* Bit 27: EHT extra LTF: + * instead of 1 LTF for SISO use 2 LTFs, + * instead of 2 LTFs for NSTS=2 use 4 LTFs*/ +#define RATE_MCS_EHT_EXTRA_LTF_POS 27 +#define RATE_MCS_EHT_EXTRA_LTF_MSK (1 << RATE_MCS_EHT_EXTRA_LTF_POS) + +/* Bit 31-28: reserved */ + /* Link Quality definitions */ /* # entries in rate scale table to support Tx retries */ @@ -557,4 +708,13 @@ struct iwl_lq_cmd { __le32 ss_params; }; /* LINK_QUALITY_CMD_API_S_VER_1 */ +u8 iwl_fw_rate_idx_to_plcp(int idx); +u32 iwl_new_rate_from_v1(u32 rate_v1); +u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags); +const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); +const char *iwl_rs_pretty_ant(u8 ant); +const char *iwl_rs_pretty_bw(int bw); +int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); +bool iwl_he_is_sgi(u32 rate_n_flags); + #endif /* __iwl_fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 3f13b572915a81..1989b270862b03 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -13,7 +13,6 @@ #define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 #define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff #define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 -#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 #define IWL_RX_INFO_ENERGY_ANT_A_POS 0 #define IWL_RX_INFO_ENERGY_ANT_B_POS 8 #define IWL_RX_INFO_ENERGY_ANT_C_POS 16 @@ -126,14 +125,12 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid - * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked * in the driver. * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or - * alg = CCM only. Checks replay attack for 11w frames. Relevant only if - * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. + * alg = CCM only. Checks replay attack for 11w frames. 
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM @@ -145,9 +142,6 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted - * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP) - * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done - * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask @@ -158,7 +152,6 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), - RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), RX_MPDU_RES_STATUS_ICV_OK = BIT(5), RX_MPDU_RES_STATUS_MIC_OK = BIT(6), RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), @@ -172,9 +165,6 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), - RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), - RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), - RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, @@ -236,7 +226,6 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), - IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), @@ -251,12 +240,8 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), - IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), - IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), - IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), - IWL_RX_MPDU_STATUS_KEY = 0x3f0000, IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22), IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000, @@ -460,7 +445,7 @@ struct iwl_rx_mpdu_desc_v1 { __le32 phy_data1; }; }; -} __packed; +} __packed; /* RX_MPDU_RES_START_API_S_VER_4 */ /** * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor @@ -560,7 +545,8 @@ struct iwl_rx_mpdu_desc_v3 { * @reserved: reserved */ __le32 reserved[2]; -} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ +} __packed; /* RX_MPDU_RES_START_API_S_VER_3, + RX_MPDU_RES_START_API_S_VER_5 */ /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor @@ -625,7 +611,9 @@ struct iwl_rx_mpdu_desc { struct iwl_rx_mpdu_desc_v1 v1; struct iwl_rx_mpdu_desc_v3 v3; }; -} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ +} __packed; /* RX_MPDU_RES_START_API_S_VER_3, + RX_MPDU_RES_START_API_S_VER_4, + RX_MPDU_RES_START_API_S_VER_5 */ #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) @@ -679,7 +667,8 @@ struct iwl_rx_no_data { __le32 rate; __le32 phy_info[2]; __le32 rx_vec[2]; -} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ +} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, + TX_NO_DATA_NTFY_API_S_VER_2 */ struct iwl_frame_release { u8 baid; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h 
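The IWL_RX_DESC_SIZE_V1 definition above derives the size of the short (v1) RX MPDU descriptor from offsetofend(), i.e. the offset of the first byte past the v1 union member, so a single struct definition can describe both descriptor layouts. A standalone illustration with a made-up struct (only the offsetofend() mechanics match the driver; the field sizes are hypothetical):

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's offsetofend() helper */
#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)NULL)->member))

struct demo_desc {
	unsigned int common[4];		/* stands in for the shared header fields */
	union {
		unsigned char v1[8];	/* older, shorter tail */
		unsigned char v3[24];	/* newer, longer tail */
	};
};

int main(void)
{
	/* with 4-byte ints: 16 + 8 = 24 vs. 16 + 24 = 40 */
	printf("v1 size: %zu\n", offsetofend(struct demo_desc, v1));
	printf("v3 size: %zu\n", offsetofend(struct demo_desc, v3));
	return 0;
}

Roughly speaking, the driver parses only that shorter prefix on devices that still use the v1 layout and the full structure on newer ones.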
index f1a3e14880e7a2..5edbe27c092273 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -28,6 +28,8 @@ * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported) * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported) * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported) + * @STA_FLG_MAX_AGG_SIZE_2M: maximal size for A-MPDU (2M supported) + * @STA_FLG_MAX_AGG_SIZE_4M: maximal size for A-MPDU (4M supported) * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is * initialised by driver and can be updated by fw upon reception of diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 5fddfd39194142..4a74c0ea0f3153 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -81,6 +81,10 @@ enum iwl_tx_cmd_flags { IWL_TX_FLAGS_CMD_RATE = BIT(0), IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), IWL_TX_FLAGS_HIGH_PRI = BIT(2), + /* Use these flags only from + * TX_FLAGS_BITS_API_S_VER_4 and above */ + IWL_TX_FLAGS_RTS = BIT(3), + IWL_TX_FLAGS_CTS = BIT(4), }; /* TX_FLAGS_BITS_API_S_VER_3 */ /** @@ -269,7 +273,8 @@ struct iwl_tx_cmd_gen2 { struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; struct ieee80211_hdr hdr[]; -} __packed; /* TX_CMD_API_S_VER_7 */ +} __packed; /* TX_CMD_API_S_VER_7, + TX_CMD_API_S_VER_9 */ /** * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices @@ -292,7 +297,8 @@ struct iwl_tx_cmd_gen3 { __le32 rate_n_flags; __le64 ttl; struct ieee80211_hdr hdr[]; -} __packed; /* TX_CMD_API_S_VER_8 */ +} __packed; /* TX_CMD_API_S_VER_8, + TX_CMD_API_S_VER_10 */ /* * TX response related data @@ -593,7 +599,8 @@ struct iwl_mvm_tx_resp { __le16 tx_queue; __le16 reserved2; struct agg_tx_status status; -} __packed; /* TX_RSP_API_S_VER_6 */ +} __packed; /* TX_RSP_API_S_VER_6, + TX_RSP_API_S_VER_7 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA @@ -719,7 +726,8 @@ struct iwl_mvm_compressed_ba_notif { DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid); DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd); }; -} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ +} __packed; /* COMPRESSED_BA_RES_API_S_VER_4, + COMPRESSED_BA_RES_API_S_VER_5 */ /** * struct iwl_mac_beacon_cmd_v6 - beacon template command @@ -759,12 +767,20 @@ struct iwl_mac_beacon_cmd_v7 { struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ +/* Bit flags for BEACON_TEMPLATE_CMD_API until version 10 */ +enum iwl_mac_beacon_flags_v1 { + IWL_MAC_BEACON_CCK_V1 = BIT(8), + IWL_MAC_BEACON_ANT_A_V1 = BIT(9), + IWL_MAC_BEACON_ANT_B_V1 = BIT(10), + IWL_MAC_BEACON_FILS_V1 = BIT(12), +}; + +/* Bit flags for BEACON_TEMPLATE_CMD_API version 11 and above */ enum iwl_mac_beacon_flags { - IWL_MAC_BEACON_CCK = BIT(8), - IWL_MAC_BEACON_ANT_A = BIT(9), - IWL_MAC_BEACON_ANT_B = BIT(10), - IWL_MAC_BEACON_ANT_C = BIT(11), - IWL_MAC_BEACON_FILS = BIT(12), + IWL_MAC_BEACON_CCK = BIT(5), + IWL_MAC_BEACON_ANT_A = BIT(6), + IWL_MAC_BEACON_ANT_B = BIT(7), + IWL_MAC_BEACON_FILS = BIT(8), }; /** @@ -792,7 +808,9 @@ struct 
iwl_mac_beacon_cmd { __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */ +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10, + BEACON_TEMPLATE_CMD_API_S_VER_11, + BEACON_TEMPLATE_CMD_API_S_VER_12 */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 6dcafd0a3d4b1a..a39013c401c923 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -159,11 +159,15 @@ static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (i = 0; i < fifo_len; i++) + for (i = 0; i < fifo_len / sizeof(u32); i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); + + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) + fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, + fifo_data, fifo_len); + *dump_data = iwl_fw_error_next_data(*dump_data); } @@ -659,6 +663,10 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); *dump_data = iwl_fw_error_next_data(*dump_data); + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) + fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs, + dump_mem->data, len); + IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type); } @@ -752,6 +760,12 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); + + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) + fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, + fwrt->fw_paging_db[i].fw_offs, + paging->data, + PAGING_BLOCK_SIZE); } } @@ -980,6 +994,11 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, dump_data->data + data_size, data_size); + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) + fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr, + dump_data->data + data_size, + data_size); + dump_data = iwl_fw_error_next_data(dump_data); } @@ -1146,6 +1165,13 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); + if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) == + IWL_FW_INI_HW_SMEM_REGION_ID && + fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) + fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, + range->data, + le32_to_cpu(reg->dev_addr.size)); + return sizeof(*range) + le32_to_cpu(range->range_data_size); } @@ -1338,6 +1364,10 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, for (i = 0; i < iter->fifo_size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) + fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, + reg_dump, iter->fifo_size); + out: iwl_trans_release_nic_access(fwrt->trans); @@ -2077,7 +2107,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, */ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); if (hw_type == IWL_AX210_HW_TYPE) { - u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); + u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2); u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); u32 masked_bits = is_jacket | (is_cdb << 
1); @@ -2360,7 +2390,9 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (dump_data->monitor_only) dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); - fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); + fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask, + fwrt->sanitize_ops, + fwrt->sanitize_ctx); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump.fwrt_len = file_len; @@ -2788,6 +2820,12 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); + + if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) + fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, + cfg->d3_debug_data_base_addr, + fwrt->dump.d3_debug_data, + cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index a1842205e86a15..016b3a4c5f5136 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -214,7 +214,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu /* reset the device */ iwl_trans_sw_reset(trans); - err = iwl_finish_nic_init(trans, trans->trans_cfg); + err = iwl_finish_nic_init(trans); if (err) return; } @@ -328,6 +328,13 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt) for (i = 0; i < ARRAY_SIZE(table.sw_status); i++) IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n", table.sw_status[i], i); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH); + + IWL_ERR(fwrt, "Function Scratch status:\n"); + IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch); + } } static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 521ca2bb0e928a..9036b32ec765d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -342,10 +342,6 @@ struct iwl_fw_ini_dump_cfg_name { #define IWL_AX210_HW_TYPE 0x42 /* How many bits to roll when adding to the HW type of AX210 HW */ #define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12 -/* This prph is used to tell apart HW_TYPE == 0x42 NICs */ -#define WFPM_OTP_CFG1_ADDR 0xd03098 -#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) -#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) /* struct iwl_fw_ini_dump_info - ini dump information * @version: dump version diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 6c8e9f3a6af2e0..3d572f5024bbcb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -100,6 +100,9 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_PNVM_SKU = 64, IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65, + IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, + IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, + IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, @@ -107,6 +110,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3, IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4, + IWL_UCODE_TLV_TYPE_CONF_SET = IWL_UCODE_TLV_DEBUG_BASE + 5, IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS, /* TLVs 
0x1000-0x2000 are for internal driver usage */ @@ -416,6 +420,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59, IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60, + IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61, + IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, @@ -449,7 +455,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, - IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102, + IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ @@ -956,6 +962,10 @@ struct iwl_fw_tcm_error_addr { __le32 addr; }; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */ +struct iwl_fw_dump_exclude { + __le32 addr, size; +}; + static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, size_t fixed_size, size_t var_size) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c index c2a4e60518bc71..24a96667369130 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c @@ -1,59 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ + */ #include "img.h" diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 153a3529e77ae3..993bda17fa309f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -124,11 +124,13 @@ struct fw_img { * @fw_paging_phys: page phy pointer * @fw_paging_block: pointer to the allocated block * @fw_paging_size: page size + * @fw_offs: offset in the device */ struct iwl_fw_paging { dma_addr_t fw_paging_phys; struct page *fw_paging_block; u32 fw_paging_size; + u32 fw_offs; }; /** @@ -174,6 +176,10 @@ struct iwl_fw_dbg { u32 dump_mask; }; +struct iwl_dump_exclude { + u32 addr, size; +}; + /** * struct iwl_fw - variables associated with the firmware * @@ -194,6 +200,10 @@ struct iwl_fw_dbg { * @cipher_scheme: optional external cipher scheme. * @human_readable: human readable version * we get the ALIVE from the uCode + * @phy_integration_ver: PHY integration version string + * @phy_integration_ver_len: length of @phy_integration_ver + * @dump_excl: image dump exclusion areas for RT image + * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image */ struct iwl_fw { u32 ucode_ver; @@ -225,6 +235,8 @@ struct iwl_fw { u8 *phy_integration_ver; u32 phy_integration_ver_len; + + struct iwl_dump_exclude dump_excl[2], dump_excl_wowlan[2]; }; static inline const char *get_fw_dbg_mode_string(int mode) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 2ecec00db9da7f..566957ac45393c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2020 Intel Corporation + * Copyright (C) 2019-2021 Intel Corporation */ #include "iwl-drv.h" #include "runtime.h" @@ -16,6 +16,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx, struct dentry *dbgfs_dir) { int i; @@ -26,6 +28,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, fwrt->dev = trans->dev; fwrt->dump.conf = FW_DBG_INVALID; fwrt->ops = ops; + fwrt->sanitize_ops = sanitize_ops; + fwrt->sanitize_ctx = sanitize_ctx; fwrt->ops_ctx = ops_ctx; for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { fwrt->dump.wks[i].idx = i; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 
4a8fe9641a321d..58ca3849d1f3f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -152,6 +152,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, image->sec[sec_idx].len); + fwrt->fw_paging_db[0].fw_offs = image->sec[sec_idx].offset; dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, @@ -197,6 +198,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, len); + block->fw_offs = image->sec[sec_idx].offset + offset; dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index dde22bdc87039e..7d4aa398729a18 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -162,7 +162,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, goto out; } - IWL_INFO(trans, "loaded PNVM version 0x%0x\n", sha1); + IWL_INFO(trans, "loaded PNVM version %08x\n", sha1); ret = iwl_trans_set_pnvm(trans, pnvm_data, size); out: @@ -284,16 +284,19 @@ int iwl_pnvm_load(struct iwl_trans *trans, /* First attempt to get the PNVM from BIOS */ package = iwl_uefi_get_pnvm(trans, &len); if (!IS_ERR_OR_NULL(package)) { - data = kmemdup(package->data, len, GFP_KERNEL); + if (len >= sizeof(*package)) { + /* we need only the data */ + len -= sizeof(*package); + data = kmemdup(package->data, len, GFP_KERNEL); + } else { + data = NULL; + } /* free package regardless of whether kmemdup succeeded */ kfree(package); - if (data) { - /* we need only the data size */ - len -= sizeof(*package); + if (data) goto parse; - } } /* If it's not available, try from the filesystem */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c new file mode 100644 index 00000000000000..a21c3befd93b5b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include +#include "fw/api/rs.h" +#include "iwl-drv.h" +#include "iwl-config.h" + +#define IWL_DECLARE_RATE_INFO(r) \ + [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP + +/* + * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP + * */ +static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1), + IWL_DECLARE_RATE_INFO(2), + IWL_DECLARE_RATE_INFO(5), + IWL_DECLARE_RATE_INFO(11), + IWL_DECLARE_RATE_INFO(6), + IWL_DECLARE_RATE_INFO(9), + IWL_DECLARE_RATE_INFO(12), + IWL_DECLARE_RATE_INFO(18), + IWL_DECLARE_RATE_INFO(24), + IWL_DECLARE_RATE_INFO(36), + IWL_DECLARE_RATE_INFO(48), + IWL_DECLARE_RATE_INFO(54), +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 
3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +static const char * const ant_name[] = { + [ANT_NONE] = "None", + [ANT_A] = "A", + [ANT_B] = "B", + [ANT_AB] = "AB", +}; + +static const char * const pretty_bw[] = { + "20Mhz", + "40Mhz", + "80Mhz", + "160 Mhz", + "320Mhz", +}; + +u8 iwl_fw_rate_idx_to_plcp(int idx) +{ + return fw_rate_idx_to_plcp[idx]; +} +IWL_EXPORT_SYMBOL(iwl_fw_rate_idx_to_plcp); + +const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx) +{ + return &rate_mcs[idx]; +} +IWL_EXPORT_SYMBOL(iwl_rate_mcs); + +const char *iwl_rs_pretty_ant(u8 ant) +{ + if (ant >= ARRAY_SIZE(ant_name)) + return "UNKNOWN"; + + return ant_name[ant]; +} +IWL_EXPORT_SYMBOL(iwl_rs_pretty_ant); + +const char *iwl_rs_pretty_bw(int bw) +{ + if (bw >= ARRAY_SIZE(pretty_bw)) + return "unknown bw"; + + return pretty_bw[bw]; +} +IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); + +u32 iwl_new_rate_from_v1(u32 rate_v1) +{ + u32 rate_v2 = 0; + u32 dup = 0; + + if (rate_v1 == 0) + return rate_v1; + /* convert rate */ + if (rate_v1 & RATE_MCS_HT_MSK_V1) { + u32 nss = 0; + + rate_v2 |= RATE_MCS_HT_MSK; + rate_v2 |= + rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; + nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >> + RATE_HT_MCS_NSS_POS_V1; + rate_v2 |= nss << RATE_MCS_NSS_POS; + } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || + rate_v1 & RATE_MCS_HE_MSK_V1) { + rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; + + rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK; + + if (rate_v1 & RATE_MCS_HE_MSK_V1) { + u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; + u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> + RATE_MCS_HE_106T_POS_V1; + u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> + RATE_MCS_HE_GI_LTF_POS; + + if ((he_type_bits == RATE_MCS_HE_TYPE_SU || + he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && + he_gi_ltf == RATE_MCS_HE_SU_4_LTF) + /* the new rate have an additional bit to + * represent the value 4 rather then using SGI + * bit for this purpose - as it was done in the old + * rate */ + he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> + RATE_MCS_SGI_POS_V1; + + rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; + rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS; + rate_v2 |= he_106t << RATE_MCS_HE_106T_POS; + rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; + rate_v2 |= RATE_MCS_HE_MSK; + } else { + rate_v2 |= RATE_MCS_VHT_MSK; + } + /* if legacy format */ + } else { + u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); + + WARN_ON(legacy_rate < 0); + rate_v2 |= legacy_rate; + if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) + rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; + } + + /* convert flags */ + if (rate_v1 & RATE_MCS_LDPC_MSK_V1) + rate_v2 |= RATE_MCS_LDPC_MSK; + rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | + (rate_v1 & RATE_MCS_ANT_AB_MSK) | + (rate_v1 & RATE_MCS_STBC_MSK) | + (rate_v1 & RATE_MCS_BF_MSK); + + dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; + if (dup) { + rate_v2 |= RATE_MCS_DUP_MSK; + rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS; + } + + if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && + (rate_v1 & RATE_MCS_SGI_MSK_V1)) + rate_v2 |= RATE_MCS_SGI_MSK; + + return rate_v2; +} +IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); + +u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? 
IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return -1; +} + +int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) +{ + char *type; + u8 mcs = 0, nss = 0; + u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; + u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >> + RATE_MCS_CHAN_WIDTH_POS; + u32 format = rate & RATE_MCS_MOD_TYPE_MSK; + bool sgi; + + if (format == RATE_MCS_CCK_MSK || + format == RATE_MCS_LEGACY_OFDM_MSK) { + int legacy_rate = rate & RATE_LEGACY_RATE_MSK; + int index = format == RATE_MCS_CCK_MSK ? + legacy_rate : + legacy_rate + IWL_FIRST_OFDM_RATE; + + return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", + iwl_rs_pretty_ant(ant), + index == IWL_RATE_INVALID ? "BAD" : + iwl_rate_mcs(index)->mbps); + } + + if (format == RATE_MCS_VHT_MSK) + type = "VHT"; + else if (format == RATE_MCS_HT_MSK) + type = "HT"; + else if (format == RATE_MCS_HE_MSK) + type = "HE"; + else + type = "Unknown"; /* shouldn't happen */ + + mcs = format == RATE_MCS_HT_MSK ? + RATE_HT_MCS_INDEX(rate) : + rate & RATE_MCS_CODE_MSK; + nss = ((rate & RATE_MCS_NSS_MSK) + >> RATE_MCS_NSS_POS) + 1; + sgi = format == RATE_MCS_HE_MSK ? + iwl_he_is_sgi(rate) : + rate & RATE_MCS_SGI_MSK; + + return scnprintf(buf, bufsz, + "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s", + rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss, + (sgi) ? "SGI " : "NGI ", + (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", + (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", + (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "", + (rate & RATE_MCS_BF_MSK) ? "BF " : ""); +} +IWL_EXPORT_SYMBOL(rs_pretty_print_rate); + +bool iwl_he_is_sgi(u32 rate_n_flags) +{ + u32 type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u32 ltf_gi = rate_n_flags & RATE_MCS_HE_GI_LTF_MSK; + + if (type == RATE_MCS_HE_TYPE_SU || + type == RATE_MCS_HE_TYPE_EXT_SU) + return ltf_gi == RATE_MCS_HE_SU_4_LTF_08_GI; + return false; +} +IWL_EXPORT_SYMBOL(iwl_he_is_sgi); + diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 35af85a5430bee..69799f1ed2c4d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -105,6 +105,9 @@ struct iwl_fw_runtime { const struct iwl_fw_runtime_ops *ops; void *ops_ctx; + const struct iwl_dump_sanitize_ops *sanitize_ops; + void *sanitize_ctx; + /* Paging */ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; @@ -151,7 +154,7 @@ struct iwl_fw_runtime { struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; u8 sar_chain_a_profile; u8 sar_chain_b_profile; - struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; + struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3]; u32 geo_rev; union iwl_ppag_table_cmd ppag_table; u32 ppag_ver; @@ -161,6 +164,8 @@ struct iwl_fw_runtime { void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx, struct dentry *dbgfs_dir); static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 45d0b36d79b5ae..d552c656ac9fe1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ 
-2,7 +2,8 @@ /* * Copyright(c) 2021 Intel Corporation */ - +#ifndef __iwl_fw_uefi__ +#define __iwl_fw_uefi__ #define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm" #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" @@ -40,3 +41,5 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) return ERR_PTR(-EOPNOTSUPP); } #endif /* CONFIG_EFI */ + +#endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 7eb534df533102..665167a223f61e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -95,7 +95,6 @@ enum iwl_nvm_type { #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) -#define MAX_ANT_NUM 3 static inline u8 num_of_ant(u8 mask) @@ -420,6 +419,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_MAC_TYPE_BZ 0x46 +#define IWL_CFG_MAC_TYPE_GL 0x47 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -511,6 +511,10 @@ extern const char iwl_ax210_killer_1675w_name[]; extern const char iwl_ax210_killer_1675x_name[]; extern const char iwl9560_killer_1550i_160_name[]; extern const char iwl9560_killer_1550s_160_name[]; +extern const char iwl_ax211_killer_1675s_name[]; +extern const char iwl_ax211_killer_1675i_name[]; +extern const char iwl_ax411_killer_1690s_name[]; +extern const char iwl_ax411_killer_1690i_name[]; extern const char iwl_ax211_name[]; extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; @@ -628,6 +632,8 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; +extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0; +extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index e1fec23ac07f63..5adf485db38e20 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -109,12 +109,12 @@ struct iwl_prph_scratch_pnvm_cfg { * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address * @hwm_size: hwm size in DWs - * @reserved: reserved + * @debug_token_config: debug preset */ struct iwl_prph_scratch_hwm_cfg { __le64 hwm_base_addr; __le32 hwm_size; - __le32 reserved; + __le32 debug_token_config; } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index cf796403c45c0e..ff79a2ecb24224 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -34,6 +34,7 @@ #define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ #define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ #define CSR_GP_CNTRL (CSR_BASE+0x024) +#define CSR_FUNC_SCRATCH (CSR_BASE+0x02c) /* Scratch register - used for FW dbg */ /* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! 
*/ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) @@ -135,6 +136,12 @@ #define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) #define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) +/* + * Scratch register initial configuration - this is set on init, and read + * during a error FW error. + */ +#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) + /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) @@ -598,6 +605,7 @@ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(1), MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), + MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 125479b5c0d61e..7ab98b419cc1f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -16,6 +16,7 @@ * @IWL_DBG_TLV_TYPE_HCMD: host command TLV * @IWL_DBG_TLV_TYPE_REGION: region TLV * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV + * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs */ enum iwl_dbg_tlv_type { @@ -25,6 +26,7 @@ enum iwl_dbg_tlv_type { IWL_DBG_TLV_TYPE_HCMD, IWL_DBG_TLV_TYPE_REGION, IWL_DBG_TLV_TYPE_TRIGGER, + IWL_DBG_TLV_TYPE_CONF_SET, IWL_DBG_TLV_TYPE_NUM, }; @@ -59,6 +61,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,}, [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,}, + [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,}, }; static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv, @@ -260,6 +263,30 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, return ret; } +static int iwl_dbg_tlv_config_set(struct iwl_trans *trans, + const struct iwl_ucode_tlv *tlv) +{ + struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data; + u32 tp = le32_to_cpu(conf_set->time_point); + u32 type = le32_to_cpu(conf_set->set_type); + + if (tp <= IWL_FW_INI_TIME_POINT_INVALID || + tp >= IWL_FW_INI_TIME_POINT_NUM) { + IWL_DEBUG_FW(trans, + "WRT: Invalid time point %u for config set TLV\n", tp); + return -EINVAL; + } + + if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID || + type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) { + IWL_DEBUG_FW(trans, + "WRT: Invalid config set type %u for config set TLV\n", type); + return -EINVAL; + } + + return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list); +} + static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) = { [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info, @@ -267,6 +294,7 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd, [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region, [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger, + [IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set, }; void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, @@ -399,6 +427,13 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans) list_del(&tlv_node->list); kfree(tlv_node); } + + list_for_each_entry_safe(tlv_node, tlv_node_tmp, + &tp->config_list, list) { + list_del(&tlv_node->list); + kfree(tlv_node); + } + } for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++) @@ -466,6 +501,7 @@ 
void iwl_dbg_tlv_init(struct iwl_trans *trans) INIT_LIST_HEAD(&tp->trig_list); INIT_LIST_HEAD(&tp->hcmd_list); INIT_LIST_HEAD(&tp->active_trig_list); + INIT_LIST_HEAD(&tp->config_list); } } @@ -649,6 +685,10 @@ static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt) { int ret, i; + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) + return; + for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) { ret = iwl_dbg_tlv_apply_buffer(fwrt, i); if (ret) @@ -658,6 +698,87 @@ static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt) } } +static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_allocation_id alloc_id, + struct iwl_dram_info *dram_info) +{ + struct iwl_fw_mon *fw_mon; + u32 remain_frags, num_frags; + int j, fw_mon_idx = 0; + struct iwl_buf_alloc_cmd *data; + + if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != + IWL_FW_INI_LOCATION_DRAM_PATH) { + IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id); + return -1; + } + + fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; + + /* the first fragment of DBGC1 is given to the FW via register + * or context info + */ + if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1) + fw_mon_idx++; + + remain_frags = fw_mon->num_frags - fw_mon_idx; + if (!remain_frags) + return -1; + + num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); + data = &dram_info->dram_frags[alloc_id - 1]; + data->alloc_id = cpu_to_le32(alloc_id); + data->num_frags = cpu_to_le32(num_frags); + data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH); + + IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n", + cpu_to_le32(alloc_id), cpu_to_le32(num_frags)); + + for (j = 0; j < num_frags; j++) { + struct iwl_buf_alloc_frag *frag = &data->frags[j]; + struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++]; + + frag->addr = cpu_to_le64(fw_mon_frag->physical); + frag->size = cpu_to_le32(fw_mon_frag->size); + IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n"); + IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n", + j, cpu_to_le64(fw_mon_frag->physical), + cpu_to_le32(fw_mon_frag->size)); + } + return 0; +} + +static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt) +{ + int ret, i, dram_alloc = 0; + struct iwl_dram_info dram_info; + struct iwl_dram_data *frags = + &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0]; + + if (!fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) + return; + + dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); + dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); + + for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1; + i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) { + ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info); + if (!ret) + dram_alloc++; + else + IWL_WARN(fwrt, + "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n", + i, ret); + } + if (dram_alloc) { + memcpy(frags->block, &dram_info, sizeof(dram_info)); + IWL_DEBUG_FW(fwrt, "block data after %016x\n", + *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block)); + } +} + static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt, struct list_head *hcmd_list) { @@ -677,6 +798,97 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt, } } +static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt, + struct list_head *config_list) +{ + struct iwl_dbg_tlv_node *node; + + list_for_each_entry(node, config_list, list) { + struct iwl_fw_ini_conf_set_tlv 
*config_list = (void *)node->tlv.data; + u32 count, address, value; + u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8; + u32 type = le32_to_cpu(config_list->set_type); + u32 offset = le32_to_cpu(config_list->addr_offset); + + switch (type) { + case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: { + if (!iwl_trans_grab_nic_access(fwrt->trans)) { + IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n"); + IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n"); + continue; + } + IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len); + for (count = 0; count < len; count++) { + address = le32_to_cpu(config_list->addr_val[count].address); + value = le32_to_cpu(config_list->addr_val[count].value); + iwl_trans_write_prph(fwrt->trans, address + offset, value); + } + iwl_trans_release_nic_access(fwrt->trans); + break; + } + case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: { + for (count = 0; count < len; count++) { + address = le32_to_cpu(config_list->addr_val[count].address); + value = le32_to_cpu(config_list->addr_val[count].value); + iwl_trans_write_mem32(fwrt->trans, address + offset, value); + IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n", + count, address, value); + } + break; + } + case IWL_FW_INI_CONFIG_SET_TYPE_CSR: { + for (count = 0; count < len; count++) { + address = le32_to_cpu(config_list->addr_val[count].address); + value = le32_to_cpu(config_list->addr_val[count].value); + iwl_write32(fwrt->trans, address + offset, value); + IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n", + count, address, value); + } + break; + } + case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: { + struct iwl_dbgc1_info dram_info = {}; + struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0]; + __le64 dram_base_addr = cpu_to_le64(frags->physical); + __le32 dram_size = cpu_to_le32(frags->size); + u64 dram_addr = le64_to_cpu(dram_base_addr); + u32 ret; + + IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n", + dram_base_addr, dram_size); + IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n", + le32_to_cpu(config_list->addr_offset)); + for (count = 0; count < len; count++) { + address = le32_to_cpu(config_list->addr_val[count].address); + dram_info.dbgc1_add_lsb = + cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400); + dram_info.dbgc1_add_msb = + cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32); + dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400); + ret = iwl_trans_write_mem(fwrt->trans, + address + offset, &dram_info, 4); + if (ret) { + IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n"); + break; + } + } + break; + } + case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: { + u32 debug_token_config = + le32_to_cpu(config_list->addr_val[0].value); + + IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n", + debug_token_config); + fwrt->trans->dbg.ucode_preset = debug_token_config; + break; + } + default: + break; + } + } +} + static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) { struct iwl_dbg_tlv_timer_node *timer_node = @@ -996,8 +1208,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) &fwrt->trans->dbg.fw_mon_cfg[i]; u32 dest = le32_to_cpu(fw_mon_cfg->buf_location); - if (dest == IWL_FW_INI_LOCATION_INVALID) + if (dest == IWL_FW_INI_LOCATION_INVALID) { + failed_alloc |= BIT(i); continue; + } if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) *ini_dest = dest; @@ -1024,8 +1238,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) 
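Tying together the configuration-set handling added above: each IWL_FW_INI_CONFIG_SET TLV carries a set type, a base address offset and a list of 8-byte address/value pairs (hence the division by 8 when computing len), and iwl_dbg_tlv_apply_config() replays those writes through the accessor matching the set type (iwl_trans_write_prph(), iwl_trans_write_mem32() or iwl_write32()). A simplified standalone sketch of that pattern, using hypothetical structures rather than the real iwl_fw_ini_conf_set_tlv layout:

#include <stdio.h>

/* hypothetical, host-endian view of one config-set payload */
struct addr_val {
	unsigned int address;
	unsigned int value;
};

struct conf_set {
	unsigned int addr_offset;	/* added to every address below */
	struct addr_val pairs[4];
	unsigned int num_pairs;
};

/* stand-in for the PRPH/CSR/memory write accessors used in the driver */
static void write_reg(unsigned int addr, unsigned int val)
{
	printf("write 0x%08x <- 0x%08x\n", addr, val);
}

static void apply_conf_set(const struct conf_set *cs)
{
	unsigned int i;

	for (i = 0; i < cs->num_pairs; i++)
		write_reg(cs->pairs[i].address + cs->addr_offset,
			  cs->pairs[i].value);
}

int main(void)
{
	struct conf_set cs = {
		.addr_offset = 0x1000,
		.pairs = { { 0x10, 0xdeadbeef }, { 0x14, 0x1 } },
		.num_pairs = 2,
	};

	apply_conf_set(&cs);
	return 0;
}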
&fwrt->trans->dbg.active_regions[i]; u32 reg_type; - if (!*active_reg) + if (!*active_reg) { + fwrt->trans->dbg.unsupported_region_msk |= BIT(i); continue; + } reg = (void *)(*active_reg)->data; reg_type = le32_to_cpu(reg->type); @@ -1051,7 +1267,7 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, union iwl_dbg_tlv_tp_data *tp_data, bool sync) { - struct list_head *hcmd_list, *trig_list; + struct list_head *hcmd_list, *trig_list, *conf_list; if (!iwl_trans_dbg_ini_valid(fwrt->trans) || tp_id == IWL_FW_INI_TIME_POINT_INVALID || @@ -1060,15 +1276,19 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list; trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list; + conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list; switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: iwl_dbg_tlv_init_cfg(fwrt); + iwl_dbg_tlv_apply_config(fwrt, conf_list); + iwl_dbg_tlv_update_drams(fwrt); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); + iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_PERIODIC: @@ -1079,11 +1299,13 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); + iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); + iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index c12b1fd3f47922..79287708bd6e63 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -33,11 +33,13 @@ union iwl_dbg_tlv_tp_data { * @trig_list: list of triggers * @active_trig_list: list of active triggers * @hcmd_list: list of host commands + * @config_list: list of configuration */ struct iwl_dbg_tlv_time_point_data { struct list_head trig_list; struct list_head active_trig_list; struct list_head hcmd_list; + struct list_head config_list; }; struct iwl_trans; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c index f6ca2fc37c407c..ae4c2a3d63d5b4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2011 Intel Corporation + * Copyright (C) 2005-2011, 2021 Intel Corporation */ #include #include @@ -31,21 +31,31 @@ IWL_EXPORT_SYMBOL(__iwl_info); __iwl_fn(crit) IWL_EXPORT_SYMBOL(__iwl_crit); -void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, - const char *fmt, ...) +void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; - va_list args; + va_list args, args2; va_start(args, fmt); - vaf.va = &args; - if (!trace_only) { - if (rfkill_prefix) + switch (mode) { + case IWL_ERR_MODE_RATELIMIT: + if (net_ratelimit()) + break; + fallthrough; + case IWL_ERR_MODE_REGULAR: + case IWL_ERR_MODE_RFKILL: + va_copy(args2, args); + vaf.va = &args2; + if (mode == IWL_ERR_MODE_RFKILL) dev_err(dev, "(RFKILL) %pV", &vaf); else dev_err(dev, "%pV", &vaf); + va_end(args2); + break; + default: + break; } trace_iwlwifi_err(&vaf); va_end(args); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index 528eba441926d7..1b9f16a31b546e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -2,14 +2,9 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2020 Intel Corporation + * Copyright(c) 2018 - 2021 Intel Corporation * * Portions of this file are derived from the ipw3945 project. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #ifndef __iwl_debug_h__ @@ -27,9 +22,16 @@ static inline bool iwl_have_debug_level(u32 level) #endif } +enum iwl_err_mode { + IWL_ERR_MODE_REGULAR, + IWL_ERR_MODE_RFKILL, + IWL_ERR_MODE_TRACE_ONLY, + IWL_ERR_MODE_RATELIMIT, +}; + struct device; -void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, - const char *fmt, ...) __printf(4, 5); +void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...) + __printf(3, 4); void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3); void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3); void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3); @@ -38,13 +40,17 @@ void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3); #define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n') /* No matter what is m (priv, bus, trans), this will work */ -#define IWL_ERR_DEV(d, f, a...) \ +#define __IWL_ERR_DEV(d, mode, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ - __iwl_err((d), false, false, f, ## a); \ + __iwl_err((d), mode, f, ## a); \ } while (0) +#define IWL_ERR_DEV(d, f, a...) \ + __IWL_ERR_DEV(d, IWL_ERR_MODE_REGULAR, f, ## a) #define IWL_ERR(m, f, a...) \ IWL_ERR_DEV((m)->dev, f, ## a) +#define IWL_ERR_LIMIT(m, f, a...) \ + __IWL_ERR_DEV((m)->dev, IWL_ERR_MODE_RATELIMIT, f, ## a) #define IWL_WARN(m, f, a...) \ do { \ CHECK_FOR_NEWLINE(f); \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index 1bc6ecc3214049..347fd95c4e3a5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -4,11 +4,6 @@ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h index a57019241a7808..0af9d8362c5b8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h @@ -3,11 +3,6 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016-2017 Intel Deutschland GmbH - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 72ca882daed5e0..46ed723f138a66 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -5,11 +5,6 @@ * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h index d0467da5af0346..7dd70011fd1ef6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h index 2228faefffbca3..3ec0205ac9f903 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h @@ -2,11 +2,6 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c index b5037db0c38172..999b7c652289d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c @@ -3,11 +3,6 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
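Looking back at the __iwl_err() rework in iwl-debug.c above: the message arguments may now be consumed twice, once for dev_err() and once for the tracepoint, which is why the function takes a va_copy() of the va_list before the first consumer. A standalone sketch of that pattern, with plain stdio standing in for dev_err() and the trace hook:

#include <stdarg.h>
#include <stdio.h>

static void log_twice(const char *fmt, ...)
{
	va_list args, args2;

	va_start(args, fmt);
	va_copy(args2, args);	/* a va_list may only be traversed once */

	vfprintf(stderr, fmt, args2);	/* first consumer (like dev_err()) */
	va_end(args2);

	vprintf(fmt, args);		/* second consumer (like the tracepoint) */
	va_end(args);
}

int main(void)
{
	log_twice("error %d on %s\n", 42, "wlan0");
	return 0;
}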
* Copyright (C) 2018 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index fc8bc212ee8473..1455b578358bf8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -4,11 +4,6 @@ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(C) 2016 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #ifndef __IWLWIFI_DEVICE_TRACE diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 77124b8b235ee6..36196e07b1a045 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -31,7 +31,6 @@ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -550,6 +549,43 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, return 0; } +static void iwl_drv_set_dump_exclude(struct iwl_drv *drv, + enum iwl_ucode_tlv_type tlv_type, + const void *tlv_data, u32 tlv_len) +{ + const struct iwl_fw_dump_exclude *fw = tlv_data; + struct iwl_dump_exclude *excl; + + if (tlv_len < sizeof(*fw)) + return; + + if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) { + excl = &drv->fw.dump_excl[0]; + + /* second time we find this, it's for WoWLAN */ + if (excl->addr) + excl = &drv->fw.dump_excl_wowlan[0]; + } else if (fw_has_capa(&drv->fw.ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) { + /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */ + excl = &drv->fw.dump_excl[0]; + } else { + /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */ + excl = &drv->fw.dump_excl_wowlan[0]; + } + + if (excl->addr) + excl++; + + if (excl->addr) { + IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n"); + return; + } + + excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL; + excl->size = le32_to_cpu(fw->size); +} + static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, @@ -1133,6 +1169,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_REGIONS: case IWL_UCODE_TLV_TYPE_TRIGGERS: + case IWL_UCODE_TLV_TYPE_CONF_SET: if (iwlwifi_mod_params.enable_ini) iwl_dbg_tlv_alloc(drv->trans, tlv, false); break; @@ -1166,6 +1203,11 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -ENOMEM; drv->fw.phy_integration_ver_len = tlv_len; break; + case IWL_UCODE_TLV_SEC_TABLE_ADDR: + case IWL_UCODE_TLV_D3_KEK_KCK_ADDR: + iwl_drv_set_dump_exclude(drv, tlv_type, + tlv_data, tlv_len); + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index b6442df0c6439e..2e2d60a586925d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR 
BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2020 Intel Corporation + * Copyright (C) 2005-2014, 2020-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __iwl_drv_h__ @@ -9,7 +9,6 @@ /* for all modules */ #define DRV_NAME "iwlwifi" -#define DRV_AUTHOR "Intel Corporation " /* radio config bits (actual values from NVM definition) */ #define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index dbab2f10d7505c..b9e86bf972e567 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2019 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation */ #include #include @@ -139,7 +139,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans) { int ret; - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 2517c4ae07ab34..46917b4216b30f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -398,9 +398,50 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf) return 0; } -int iwl_finish_nic_init(struct iwl_trans *trans, - const struct iwl_cfg_trans_params *cfg_trans) +#define IWL_HOST_MON_BLOCK_PEMON 0x00 +#define IWL_HOST_MON_BLOCK_HIPM 0x22 + +#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00 +#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01 +#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06 + +static void iwl_dump_host_monitor_block(struct iwl_trans *trans, + u32 block, u32 vec, u32 iter) +{ + int i; + + IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec); + iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec); + for (i = 0; i < iter; i++) + IWL_ERR(trans, " value [iter %d]: 0x%08x\n", + i, iwl_read32(trans, CSR_MONITOR_STATUS_REG)); +} + +static void iwl_dump_host_monitor(struct iwl_trans *trans) { + switch (trans->trans_cfg->device_family) { + case IWL_DEVICE_FAMILY_22000: + case IWL_DEVICE_FAMILY_AX210: + IWL_ERR(trans, "CSR_RESET = 0x%x\n", + iwl_read32(trans, CSR_RESET)); + iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, + IWL_HOST_MON_BLOCK_PEMON_VEC0, 15); + iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, + IWL_HOST_MON_BLOCK_PEMON_VEC1, 15); + iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, + IWL_HOST_MON_BLOCK_PEMON_WFPM, 15); + iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM, + IWL_HOST_MON_BLOCK_PEMON_VEC0, 1); + break; + default: + /* not supported yet */ + return; + } +} + +int iwl_finish_nic_init(struct iwl_trans *trans) +{ + const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg; u32 poll_ready; int err; @@ -433,9 +474,12 @@ int iwl_finish_nic_init(struct iwl_trans *trans, * and accesses to uCode SRAM. 
*/ err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000); - if (err < 0) + if (err < 0) { IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); + iwl_dump_host_monitor(trans); + } + if (cfg_trans->bisr_workaround) { /* ensure BISR shift has finished */ udelay(200); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 3c21c0e081f8e9..37b3bd62897e2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_io_h__ #define __iwl_io_h__ @@ -52,8 +52,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); void iwl_force_nmi(struct iwl_trans *trans); -int iwl_finish_nic_init(struct iwl_trans *trans, - const struct iwl_cfg_trans_params *cfg_trans); +int iwl_finish_nic_init(struct iwl_trans *trans); /* Error handling */ int iwl_dump_fh(struct iwl_trans *trans, char **buf); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 475f951d4b1eff..f470f9aea50f57 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -534,6 +534,17 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } +static const u8 iwl_vendor_caps[] = { + 0xdd, /* vendor element */ + 0x06, /* length */ + 0x00, 0x17, 0x35, /* Intel OUI */ + 0x08, /* type (Intel Capabilities) */ + /* followed by 16 bits of capabilities */ +#define IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE BIT(0) + IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE, + 0x00 +}; + static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), @@ -781,6 +792,12 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT)) iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BCAST_TWT; + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + !is_ap) { + iftype_data->vendor_elems.data = iwl_vendor_caps; + iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps); + } } static void iwl_init_he_hw_capab(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index d0a7d58336a9ee..a84ab02cf9d7e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -347,6 +347,12 @@ #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 +#define WFPM_CTRL_REG_GEN2 0xd03030 +#define WFPM_OTP_CFG1_ADDR 0x00a03098 +#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098 +#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) +#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) + #define WFPM_GP2 0xA030B4 /* DBGI SRAM Register details */ @@ -399,10 +405,40 @@ enum { LMPM_PAGE_PASS_NOTIF_POS = BIT(20), }; +/* + * CRF ID register + * + * type: bits 0-11 + * reserved: bits 12-18 + * slave_exist: bit 19 + * dash: bits 20-23 + * step: bits 24-26 + * flavor: bits 27-31 + */ +#define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0) +#define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19) +#define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20) +#define REG_CRF_ID_STEP(val) 
(((val) & 0x07000000) >> 24) +#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27) + #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) +#define SD_REG_VER 0xa29600 +#define SD_REG_VER_GEN2 0x00a2b800 + +#define REG_CRF_ID_TYPE_JF_1 0x201 +#define REG_CRF_ID_TYPE_JF_2 0x202 +#define REG_CRF_ID_TYPE_HR_CDB 0x503 +#define REG_CRF_ID_TYPE_HR_NONE_CDB 0x504 +#define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501 +#define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532 +#define REG_CRF_ID_TYPE_GF 0x410 +#define REG_CRF_ID_TYPE_GF_TC 0xF08 +#define REG_CRF_ID_TYPE_MR 0x810 +#define REG_CRF_ID_TYPE_FM 0x910 + #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) #define PREG_WFPM_ACCESS BIT(12) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 8f0ff540f4398e..4ebb1871bd1fb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -362,6 +362,20 @@ struct iwl_hcmd_arr { #define HCMD_ARR(x) \ { .arr = x, .size = ARRAY_SIZE(x) } +/** + * struct iwl_dump_sanitize_ops - dump sanitization operations + * @frob_txf: Scrub the TX FIFO data + * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header + * but that might be short or long (&struct iwl_cmd_header or + * &struct iwl_cmd_header_wide) + * @frob_mem: Scrub memory data + */ +struct iwl_dump_sanitize_ops { + void (*frob_txf)(void *ctx, void *buf, size_t buflen); + void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen); + void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen); +}; + /** * struct iwl_trans_config - transport configuration * @@ -586,7 +600,9 @@ struct iwl_trans_ops { u32 value); struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - u32 dump_mask); + u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx); void (*debugfs_cleanup)(struct iwl_trans *trans); void (*sync_nmi)(struct iwl_trans *trans); int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len); @@ -723,8 +739,8 @@ struct iwl_self_init_dram { * @debug_info_tlv_list: list of debug info TLVs * @time_point: array of debug time points * @periodic_trig_list: periodic triggers list - * @domains_bitmap: bitmap of active domains other than - * &IWL_FW_INI_DOMAIN_ALWAYS_ON + * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON + * @ucode_preset: preset based on ucode */ struct iwl_trans_debug { u8 n_dest_reg; @@ -758,6 +774,7 @@ struct iwl_trans_debug { struct list_head periodic_trig_list; u32 domains_bitmap; + u32 ucode_preset; }; struct iwl_dma_ptr { @@ -1086,11 +1103,14 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans, } static inline struct iwl_trans_dump_data * -iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) +iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx) { if (!trans->ops->dump_data) return NULL; - return trans->ops->dump_data(trans, dump_mask); + return trans->ops->dump_data(trans, dump_mask, + sanitize_ops, sanitize_ctx); } static inline struct iwl_device_tx_cmd * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 9f706fffb59220..a19f646a324f4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1379,12 +1379,49 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct 
cfg80211_wowlan *wowlan) /* converted data from the different status responses */ struct iwl_wowlan_status_data { - u16 pattern_number; - u16 qos_seq_ctr[8]; + u64 replay_ctr; + u32 num_of_gtk_rekeys; + u32 received_beacons; u32 wakeup_reasons; u32 wake_packet_length; u32 wake_packet_bufsize; - const u8 *wake_packet; + u16 pattern_number; + u16 non_qos_seq_ctr; + u16 qos_seq_ctr[8]; + u8 tid_tear_down; + + struct { + /* + * We store both the TKIP and AES representations + * coming from the firmware because we decode the + * data from there before we iterate the keys and + * know which one we need. + */ + struct { + struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; + } tkip, aes; + /* including RX MIC key for TKIP */ + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + } gtk; + + struct { + /* Same as above */ + struct { + struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; + u64 tx_pn; + } tkip, aes; + } ptk; + + struct { + u64 ipn; + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + } igtk; + + u8 wake_packet[]; }; static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, @@ -1540,77 +1577,90 @@ static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, seq->tkip.iv16 = le16_to_cpu(sc->iv16); } -static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key, + struct ieee80211_key_seq *seq) { int tid; - BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + ieee80211_set_key_rx_seq(key, tid, &seq[tid]); +} - if (sta && iwl_mvm_has_new_rx_api(mvm)) { - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_key_pn *ptk_pn; +static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm, + struct iwl_wowlan_status_data *status, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_key_pn *ptk_pn; + int tid; - mvmsta = iwl_mvm_sta_from_mac80211(sta); + iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq); - rcu_read_lock(); - ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); - if (WARN_ON(!ptk_pn)) { - rcu_read_unlock(); - return; - } + if (!iwl_mvm_has_new_rx_api(mvm)) + return; - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { - struct ieee80211_key_seq seq = {}; - int i; - iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); - for (i = 1; i < mvm->trans->num_rx_queues; i++) - memcpy(ptk_pn->q[i].pn[tid], - seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); - } + rcu_read_lock(); + ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); + if (WARN_ON(!ptk_pn)) { rcu_read_unlock(); - } else { - for (tid = 0; tid < IWL_NUM_RSC; tid++) { - struct ieee80211_key_seq seq = {}; + return; + } - iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); - } + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + int i; + + for (i = 1; i < mvm->trans->num_rx_queues; i++) + memcpy(ptk_pn->q[i].pn[tid], + status->ptk.aes.seq[tid].ccmp.pn, + IEEE80211_CCMP_PN_LEN); } + rcu_read_unlock(); } -static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, - struct ieee80211_key_conf *key) +static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status, + union iwl_all_tsc_rsc *sc) { - int tid; + int i; + + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT); + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC); - BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); + /* GTK RX counters */ + for 
(i = 0; i < IWL_MAX_TID_COUNT; i++) { + iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i], + &status->gtk.tkip.seq[i]); + iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i], + &status->gtk.aes.seq[i]); + } - for (tid = 0; tid < IWL_NUM_RSC; tid++) { - struct ieee80211_key_seq seq = {}; + /* PTK TX counter */ + status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) | + ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16); + status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn); - iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); - ieee80211_set_key_rx_seq(key, tid, &seq); + /* PTK RX counters */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i], + &status->ptk.tkip.seq[i]); + iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i], + &status->ptk.aes.seq[i]); } } static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, struct ieee80211_key_conf *key, - struct iwl_wowlan_status *status) + struct iwl_wowlan_status_data *status) { - union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc; - switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: - iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); + iwl_mvm_set_key_rx_seq_tids(key, status->gtk.aes.seq); break; case WLAN_CIPHER_SUITE_TKIP: - iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); + iwl_mvm_set_key_rx_seq_tids(key, status->gtk.tkip.seq); break; default: WARN_ON(1); @@ -1619,7 +1669,7 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; - struct iwl_wowlan_status *status; + struct iwl_wowlan_status_data *status; void *last_gtk; u32 cipher; bool find_phase, unhandled_cipher; @@ -1633,6 +1683,7 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; + struct iwl_wowlan_status_data *status = data->status; if (data->unhandled_cipher) return; @@ -1661,10 +1712,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, * note that this assumes no TDLS sessions are active */ if (sta) { - struct ieee80211_key_seq seq = {}; - union iwl_all_tsc_rsc *sc = - &data->status->gtk[0].rsc.all_tsc_rsc; - if (data->find_phase) return; @@ -1672,16 +1719,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: - iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, - sta, key); - atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); + atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn); + iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key); break; case WLAN_CIPHER_SUITE_TKIP: - iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); - iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); - atomic64_set(&key->tx_pn, - (u64)seq.tkip.iv16 | - ((u64)seq.tkip.iv32 << 16)); + atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn); + iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq); break; } @@ -1703,7 +1746,7 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_wowlan_status *status) + struct iwl_wowlan_status_data *status) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_d3_gtk_iter_data gtkdata = { @@ -1717,7 +1760,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (!status || !vif->bss_conf.bssid) return false; - if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) + if 
(status->wakeup_reasons & disconnection_reasons) return false; /* find last GTK that we used initially, if any */ @@ -1741,7 +1784,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, iwl_mvm_d3_update_keys, >kdata); IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", - le32_to_cpu(status->num_of_gtk_rekeys)); + status->num_of_gtk_rekeys); if (status->num_of_gtk_rekeys) { struct ieee80211_key_conf *key; struct { @@ -1750,36 +1793,32 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, } conf = { .conf.cipher = gtkdata.cipher, .conf.keyidx = - iwlmvm_wowlan_gtk_idx(&status->gtk[0]), + status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK, }; __be64 replay_ctr; IWL_DEBUG_WOWLAN(mvm, "Received from FW GTK cipher %d, key index %d\n", conf.conf.cipher, conf.conf.keyidx); + + BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); + BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key)); + + memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key)); + switch (gtkdata.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: - BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); conf.conf.keylen = WLAN_KEY_LEN_CCMP; - memcpy(conf.conf.key, status->gtk[0].key, - WLAN_KEY_LEN_CCMP); break; case WLAN_CIPHER_SUITE_GCMP_256: - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; - memcpy(conf.conf.key, status->gtk[0].key, - WLAN_KEY_LEN_GCMP_256); break; case WLAN_CIPHER_SUITE_TKIP: - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); conf.conf.keylen = WLAN_KEY_LEN_TKIP; - memcpy(conf.conf.key, status->gtk[0].key, 16); - /* leave TX MIC key zeroed, we don't use it anyway */ - memcpy(conf.conf.key + - NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, - status->gtk[0].tkip_mic_key, 8); break; } @@ -1788,8 +1827,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, return false; iwl_mvm_set_key_rx_seq(mvm, key, status); - replay_ctr = - cpu_to_be64(le64_to_cpu(status->replay_ctr)); + replay_ctr = cpu_to_be64(status->replay_ctr); ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); @@ -1800,7 +1838,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, WOWLAN_GET_STATUSES, 0) < 10) { mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ - mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; + mvmvif->seqno = status->non_qos_seq_ctr + 0x10; } return true; @@ -1808,13 +1846,13 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, /* Occasionally, templates would be nice. This is one of those times ... 
*/ #define iwl_mvm_parse_wowlan_status_common(_ver) \ -static struct iwl_wowlan_status * \ +static struct iwl_wowlan_status_data * \ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ - void *_data, int len) \ + struct iwl_wowlan_status_ ##_ver *data,\ + int len) \ { \ - struct iwl_wowlan_status *status; \ - struct iwl_wowlan_status_ ##_ver *data = _data; \ - int data_size; \ + struct iwl_wowlan_status_data *status; \ + int data_size, i; \ \ if (len < sizeof(*data)) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ @@ -1832,18 +1870,22 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ return ERR_PTR(-ENOMEM); \ \ /* copy all the common fields */ \ - status->replay_ctr = data->replay_ctr; \ - status->pattern_number = data->pattern_number; \ - status->non_qos_seq_ctr = data->non_qos_seq_ctr; \ - memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \ - sizeof(status->qos_seq_ctr)); \ - status->wakeup_reasons = data->wakeup_reasons; \ - status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \ - status->received_beacons = data->received_beacons; \ - status->wake_packet_length = data->wake_packet_length; \ - status->wake_packet_bufsize = data->wake_packet_bufsize; \ + status->replay_ctr = le64_to_cpu(data->replay_ctr); \ + status->pattern_number = le16_to_cpu(data->pattern_number); \ + status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \ + for (i = 0; i < 8; i++) \ + status->qos_seq_ctr[i] = \ + le16_to_cpu(data->qos_seq_ctr[i]); \ + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \ + status->num_of_gtk_rekeys = \ + le32_to_cpu(data->num_of_gtk_rekeys); \ + status->received_beacons = le32_to_cpu(data->received_beacons); \ + status->wake_packet_length = \ + le32_to_cpu(data->wake_packet_length); \ + status->wake_packet_bufsize = \ + le32_to_cpu(data->wake_packet_bufsize); \ memcpy(status->wake_packet, data->wake_packet, \ - le32_to_cpu(status->wake_packet_bufsize)); \ + status->wake_packet_bufsize); \ \ return status; \ } @@ -1852,10 +1894,49 @@ iwl_mvm_parse_wowlan_status_common(v6) iwl_mvm_parse_wowlan_status_common(v7) iwl_mvm_parse_wowlan_status_common(v9) -static struct iwl_wowlan_status * +static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status *data) +{ + BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(data->tkip_mic_key) > + sizeof(status->gtk.key)); + + status->gtk.len = data->key_len; + status->gtk.flags = data->key_flags; + + memcpy(status->gtk.key, data->key, sizeof(data->key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data->tkip_mic_key, sizeof(data->tkip_mic_key)); +} + +static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_igtk_status *data) +{ + const u8 *ipn = data->ipn; + + BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); + + status->igtk.len = data->key_len; + status->igtk.flags = data->key_flags; + + memcpy(status->igtk.key, data->key, sizeof(data->key)); + + status->igtk.ipn = ((u64)ipn[5] << 0) | + ((u64)ipn[4] << 8) | + ((u64)ipn[3] << 16) | + ((u64)ipn[2] << 24) | + ((u64)ipn[1] << 32) | + ((u64)ipn[0] << 40); +} + +static struct iwl_wowlan_status_data * iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) { - struct iwl_wowlan_status *status; + struct iwl_wowlan_status_data *status; struct 
iwl_wowlan_get_status_cmd get_status_cmd = { .sta_id = cpu_to_le32(sta_id), }; @@ -1895,59 +1976,57 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; - status = iwl_mvm_parse_wowlan_status_common_v6(mvm, - cmd.resp_pkt->data, - len); + status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); if (IS_ERR(status)) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > - sizeof(status->gtk[0].key)); - BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > - sizeof(status->gtk[0].tkip_mic_key)); + sizeof(status->gtk.key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(v6->gtk.tkip_mic_key) > + sizeof(status->gtk.key)); /* copy GTK info to the right place */ - memcpy(status->gtk[0].key, v6->gtk.decrypt_key, + memcpy(status->gtk.key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key)); - memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, + memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key)); - memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, - sizeof(status->gtk[0].rsc)); + + iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc); /* hardcode the key length to 16 since v6 only supports 16 */ - status->gtk[0].key_len = 16; + status->gtk.len = 16; /* * The key index only uses 2 bits (values 0 to 3) and * we always set bit 7 which means this is the * currently used key. */ - status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); + status->gtk.flags = v6->gtk.key_index | BIT(7); } else if (notif_ver == 7) { struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; - status = iwl_mvm_parse_wowlan_status_common_v7(mvm, - cmd.resp_pkt->data, - len); + status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); if (IS_ERR(status)) goto out_free_resp; - status->gtk[0] = v7->gtk[0]; - status->igtk[0] = v7->igtk[0]; + iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); + iwl_mvm_convert_gtk(status, &v7->gtk[0]); + iwl_mvm_convert_igtk(status, &v7->igtk[0]); } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; /* these three command versions have same layout and size, the * difference is only in a few not used (reserved) fields. 
*/ - status = iwl_mvm_parse_wowlan_status_common_v9(mvm, - cmd.resp_pkt->data, - len); + status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); if (IS_ERR(status)) goto out_free_resp; - status->gtk[0] = v9->gtk[0]; - status->igtk[0] = v9->igtk[0]; + iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); + iwl_mvm_convert_gtk(status, &v9->gtk[0]); + iwl_mvm_convert_igtk(status, &v9->igtk[0]); status->tid_tear_down = v9->tid_tear_down; } else { @@ -1962,7 +2041,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) return status; } -static struct iwl_wowlan_status * +static struct iwl_wowlan_status_data * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) { u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, @@ -1987,29 +2066,17 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_wowlan_status_data status; - struct iwl_wowlan_status *fw_status; + struct iwl_wowlan_status_data *status; int i; bool keep; struct iwl_mvm_sta *mvm_ap_sta; - fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); - if (IS_ERR_OR_NULL(fw_status)) + status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); + if (IS_ERR(status)) goto out_unlock; IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", - le32_to_cpu(fw_status->wakeup_reasons)); - - status.pattern_number = le16_to_cpu(fw_status->pattern_number); - for (i = 0; i < 8; i++) - status.qos_seq_ctr[i] = - le16_to_cpu(fw_status->qos_seq_ctr[i]); - status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); - status.wake_packet_length = - le32_to_cpu(fw_status->wake_packet_length); - status.wake_packet_bufsize = - le32_to_cpu(fw_status->wake_packet_bufsize); - status.wake_packet = fw_status->wake_packet; + status->wakeup_reasons); /* still at hard-coded place 0 for D3 image */ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); @@ -2017,7 +2084,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, goto out_free; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = status.qos_seq_ctr[i]; + u16 seq = status->qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ seq += 0x10; mvm_ap_sta->tid_data[i].seq_number = seq; @@ -2033,15 +2100,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, /* now we have all the data we need, unlock to avoid mac80211 issues */ mutex_unlock(&mvm->mutex); - iwl_mvm_report_wakeup_reasons(mvm, vif, &status); + iwl_mvm_report_wakeup_reasons(mvm, vif, status); - keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); + keep = iwl_mvm_setup_connection_keep(mvm, vif, status); - kfree(fw_status); + kfree(status); return keep; out_free: - kfree(fw_status); + kfree(status); out_unlock: mutex_unlock(&mvm->mutex); return false; @@ -2165,16 +2232,16 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + struct iwl_wowlan_status_data *status; struct iwl_mvm_nd_query_results query; - struct iwl_wowlan_status *fw_status; unsigned long matched_profiles; u32 reasons = 0; int i, n_matches, ret; - fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); - if (!IS_ERR_OR_NULL(fw_status)) { - reasons = le32_to_cpu(fw_status->wakeup_reasons); - kfree(fw_status); + status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); + if (!IS_ERR(status)) { + reasons = status->wakeup_reasons; + kfree(status); } if (reasons & 
IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) @@ -2336,7 +2403,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); ret = 1; - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; goto err; } @@ -2385,6 +2451,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) } } + /* after the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; /* @@ -2455,6 +2522,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); + /* regardless of what happened, we're now out of D3 */ + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + return 1; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 5dc39fbb74d67c..ff66001d507ef6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -395,10 +395,9 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, "A-MPDU size limit %d\n", lq_sta->pers.dbg_agg_frame_count_lim); desc += scnprintf(buff + desc, bufsz - desc, - "valid_tx_ant %s%s%s\n", + "valid_tx_ant %s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : ""); desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags); @@ -986,8 +985,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); - pos += rs_pretty_print_rate(pos, endpos - pos, - stats->last_rates[idx]); + pos += rs_pretty_print_rate_v1(pos, endpos - pos, + stats->last_rates[idx]); if (pos < endpos - 1) *pos++ = '\n'; } @@ -1060,8 +1059,6 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); - if (mvm->scan_rx_ant & ANT_C) - pos += scnprintf(buf + pos, bufsz - pos, "C"); pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); @@ -1196,7 +1193,6 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate; - u16 flags; int i; len /= 2; @@ -1243,12 +1239,9 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); - flags = iwl_mvm_mac80211_idx_to_hwrate(rate); - if (rate == IWL_FIRST_CCK_RATE) - flags |= IWL_MAC_BEACON_CCK; - - beacon_cmd.flags = cpu_to_le16(flags); + beacon_cmd.flags = + cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 03e5bf5cb9094c..949fb790f8fb71 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -324,6 +324,7 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm 
*mvm, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; + u8 cmd_ver; *channel = ieee80211_frequency_to_channel(freq); @@ -344,6 +345,17 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm, *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; break; + case NL80211_CHAN_WIDTH_160: + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, + TOF_RANGE_REQ_CMD, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver >= 13) { + *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; + *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; + break; + } + fallthrough; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); @@ -1142,6 +1154,7 @@ static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) { switch (ver) { + case 9: case 8: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); case 7: @@ -1205,7 +1218,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) int peer_idx; if (new_api) { - if (notif_ver == 8) { + if (notif_ver >= 8) { fw_ap = &fw_resp_v8->ap[i]; iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); } else if (notif_ver == 7) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index eba5433c2626df..bda6da7d988e9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -46,8 +46,8 @@ static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef, } static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, - u8 *format_bw, - u8 *ctrl_ch_position) + u8 *format_bw, u8 *ctrl_ch_position, + u8 cmd_ver) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: @@ -68,6 +68,14 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; + case NL80211_CHAN_WIDTH_160: + if (cmd_ver >= 9) { + *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; + *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; + *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); + break; + } + fallthrough; default: return -ENOTSUPP; } @@ -140,7 +148,8 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, - &cmd.ctrl_ch_position); + &cmd.ctrl_ch_position, + cmd_ver); else err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw, &cmd.ctrl_ch_position); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 74404c96063bca..863fec150e5368 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -295,6 +295,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, if (ret) { struct iwl_trans *trans = mvm->trans; + /* SecBoot info */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { IWL_ERR(mvm, @@ -302,6 +303,17 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); + } else if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_8000) { + IWL_ERR(mvm, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_prph(trans, SB_CPU_1_STATUS), + iwl_read_prph(trans, SB_CPU_2_STATUS)); + } + + /* LMAC/UMAC PC info */ 
+ if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_9000) { IWL_ERR(mvm, "UMAC PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_UMAC_CURRENT_PC)); @@ -312,12 +324,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, IWL_ERR(mvm, "LMAC2 PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_LMAC2_CURRENT_PC)); - } else if (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_8000) { - IWL_ERR(mvm, - "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", - iwl_read_prph(trans, SB_CPU_1_STATUS), - iwl_read_prph(trans, SB_CPU_2_STATUS)); } if (ret == -ETIMEDOUT) @@ -763,14 +769,18 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) int ret; struct iwl_host_cmd cmd; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - GEO_TX_POWER_LIMIT, + PER_CHAIN_LIMIT_OFFSET_CMD, IWL_FW_CMD_VER_UNKNOWN); /* the ops field is at the same spot for all versions, so set in v1 */ geo_tx_cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); - if (cmd_ver == 3) + if (cmd_ver == 5) + len = sizeof(geo_tx_cmd.v5); + else if (cmd_ver == 4) + len = sizeof(geo_tx_cmd.v4); + else if (cmd_ver == 3) len = sizeof(geo_tx_cmd.v3); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) @@ -782,7 +792,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) return -EOPNOTSUPP; cmd = (struct iwl_host_cmd){ - .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), + .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), .len = { len, }, .flags = CMD_WANT_SKB, .data = { &geo_tx_cmd }, @@ -797,7 +807,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) resp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(resp->profile_idx); - if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) + if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3)) ret = -EIO; iwl_free_resp(&cmd); @@ -809,36 +819,58 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) union iwl_geo_tx_power_profiles_cmd cmd; u16 len; u32 n_bands; + u32 n_profiles; int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - GEO_TX_POWER_LIMIT, + PER_CHAIN_LIMIT_OFFSET_CMD, IWL_FW_CMD_VER_UNKNOWN); BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) != - offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops)); + offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) || + offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) || + offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops)); + /* the ops field is at the same spot for all versions, so set in v1 */ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); - if (cmd_ver == 3) { + if (cmd_ver == 5) { + len = sizeof(cmd.v5); + n_bands = ARRAY_SIZE(cmd.v5.table[0]); + n_profiles = ACPI_NUM_GEO_PROFILES_REV3; + } else if (cmd_ver == 4) { + len = sizeof(cmd.v4); + n_bands = ARRAY_SIZE(cmd.v4.table[0]); + n_profiles = ACPI_NUM_GEO_PROFILES_REV3; + } else if (cmd_ver == 3) { len = sizeof(cmd.v3); n_bands = ARRAY_SIZE(cmd.v3.table[0]); + n_profiles = ACPI_NUM_GEO_PROFILES; } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { len = sizeof(cmd.v2); n_bands = ARRAY_SIZE(cmd.v2.table[0]); + n_profiles = ACPI_NUM_GEO_PROFILES; } else { len = sizeof(cmd.v1); n_bands = ARRAY_SIZE(cmd.v1.table[0]); + n_profiles = ACPI_NUM_GEO_PROFILES; } BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) != 
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) != - offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table)); + offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) || + offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) || + offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); /* the table is at the same position for all versions, so set use v1 */ - ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands); + ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], + n_bands, n_profiles); /* * It is a valid scenario to not support SAR, or miss wgds table, @@ -851,14 +883,19 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) * Set the revision on versions that contain it. * This must be done after calling iwl_sar_geo_init(). */ - if (cmd_ver == 3) + if (cmd_ver == 5) + cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + else if (cmd_ver == 4) + cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + else if (cmd_ver == 3) cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); return iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), + WIDE_ID(PHY_OPS_GROUP, + PER_CHAIN_LIMIT_OFFSET_CMD), 0, len, &cmd); } @@ -1108,7 +1145,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { u8 value; - int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, DSM_RFI_FUNC_ENABLE, + int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE, &iwl_rfi_guid, &value); if (ret < 0) { @@ -1133,30 +1170,45 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; - struct iwl_lari_config_change_cmd_v4 cmd = {}; + struct iwl_lari_config_change_cmd_v5 cmd = {}; cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); - ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT, + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT, &iwl_guid, &value); if (!ret) cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); - /* apply more config masks here */ - ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_UNII4_CHAN, &iwl_guid, &value); if (!ret) cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ACTIVATE_CHANNEL, + &iwl_guid, &value); + if (!ret) + cmd.chan_state_active_bitmap = cpu_to_le32(value); + + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ENABLE_6E, + &iwl_guid, &value); + if (!ret) + cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); + if (cmd.config_bitmap || + cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || - cmd.oem_unii4_allow_bitmap) { + cmd.oem_unii4_allow_bitmap || + cmd.chan_state_active_bitmap) { size_t cmd_size; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE, 1); - if (cmd_ver == 4) + if (cmd_ver == 5) + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); + else if (cmd_ver == 4) cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); else if (cmd_ver == 3) cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); @@ -1170,9 +1222,13 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) le32_to_cpu(cmd.config_bitmap), 
le32_to_cpu(cmd.oem_11ax_allow_bitmap)); IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n", + "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", le32_to_cpu(cmd.oem_unii4_allow_bitmap), + le32_to_cpu(cmd.chan_state_active_bitmap), cmd_ver); + IWL_DEBUG_RADIO(mvm, + "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n", + le32_to_cpu(cmd.oem_uhb_allow_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index fd352b2624a6f2..fd7d4abfb454b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -604,6 +604,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, ctxt_sta->is_assoc = cpu_to_le32(1); + if (!mvmvif->authorized && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) + ctxt_sta->data_policy |= + cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); + /* * allow multicast data frames only as long as the station is * authorized, i.e., GTK keys are already installed (if needed) @@ -812,6 +818,21 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, return rate; } +u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) +{ + u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); + bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, + LONG_GROUP, + BEACON_TEMPLATE_CMD, + 0) > 10; + + if (rate_idx <= IWL_FIRST_CCK_RATE) + flags |= is_new_rate ? IWL_MAC_BEACON_CCK + : IWL_MAC_BEACON_CCK_V1; + + return flags; +} + static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, @@ -844,9 +865,10 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); - tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + tx->rate_n_flags |= + cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate)); if (rate == IWL_FIRST_CCK_RATE) - tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); + tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1); } @@ -929,11 +951,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, u16 flags; struct ieee80211_chanctx_conf *ctx; int channel; - - flags = iwl_mvm_mac80211_idx_to_hwrate(rate); - - if (rate == IWL_FIRST_CCK_RATE) - flags |= IWL_MAC_BEACON_CCK; + flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate); /* Enable FILS on PSC channels only */ rcu_read_lock(); @@ -942,7 +960,11 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && !IWL_MVM_DISABLE_AP_FILS) { - flags |= IWL_MAC_BEACON_FILS; + flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + BEACON_TEMPLATE_CMD, + 0) > 10 ? 
+ IWL_MAC_BEACON_FILS : + IWL_MAC_BEACON_FILS_V1; beacon_cmd.short_ssid = cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, vif->bss_conf.ssid_len)); @@ -1535,11 +1557,11 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, ieee80211_beacon_set_cntdwn(vif, notif->csa_counter); } -void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) +void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data; + struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; struct ieee80211_vif *csa_vif, *vif; struct iwl_mvm_vif *mvmvif; u32 id_n_color, csa_id, mac_id; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3a4585222d6d40..9fb9c7dad314f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -145,7 +145,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80), + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160), .preambles = BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | @@ -2022,7 +2023,8 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, } sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; - own_he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + own_he_cap = ieee80211_get_he_iftype_cap(sband, + ieee80211_vif_type_p2p(vif)); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { @@ -2234,6 +2236,34 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } +static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration_override) +{ + u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; + u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; + + if (duration_override > duration) + duration = duration_override; + + /* Try really hard to protect the session and hear a beacon + * The new session protection command allows us to protect the + * session for a much longer time since the firmware will internally + * create two events: a 300TU one with a very high priority that + * won't be fragmented which should be enough for 99% of the cases, + * and another one (which we configure here to be 900TU long) which + * will have a slightly lower priority, but more importantly, can be + * fragmented so that it'll allow other activities to run. 
+ */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) + iwl_mvm_schedule_session_protection(mvm, vif, 900, + min_duration, false); + else + iwl_mvm_protect_session(mvm, vif, duration, + min_duration, 500, false); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -2317,6 +2347,20 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); + } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status) && + !vif->bss_conf.dtim_period) { + /* + * If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0); } iwl_mvm_sf_update(mvm, vif, false); @@ -3192,38 +3236,52 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); - if (sta->tdls) + if (sta->tdls) { iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_ENABLE_LINK); + } else { + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - /* enable beacon filtering */ - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + mvmvif->authorized = 1; - /* - * Now that the station is authorized, i.e., keys were already - * installed, need to indicate to the FW that - * multicast data frames can be forwarded to the driver - */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + /* + * Now that the station is authorized, i.e., keys were already + * installed, need to indicate to the FW that + * multicast data frames can be forwarded to the driver + */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { - /* Multicast data frames are no longer allowed */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (!sta->tdls) { + /* Multicast data frames are no longer allowed */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + + /* + * Set this after the above iwl_mvm_mac_ctxt_changed() + * to avoid sending high prio again for a little time. 
+ */ + mvmvif->authorized = 0; - /* disable beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - WARN_ON(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status)); + /* disable beacon filtering */ + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + WARN_ON(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)); + } ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + /* remove session protection if still running */ + iwl_mvm_stop_session_protection(mvm, vif); } ret = 0; } else if (old_state == IEEE80211_STA_AUTH && @@ -3316,29 +3374,24 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; - u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; - if (info->duration > duration) - duration = info->duration; + mutex_lock(&mvm->mutex); + iwl_mvm_protect_assoc(mvm, vif, info->duration); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* for successful cases (auth/assoc), don't cancel session protection */ + if (info->success) + return; mutex_lock(&mvm->mutex); - /* Try really hard to protect the session and hear a beacon - * The new session protection command allows us to protect the - * session for a much longer time since the firmware will internally - * create two events: a 300TU one with a very high priority that - * won't be fragmented which should be enough for 99% of the cases, - * and another one (which we configure here to be 900TU long) which - * will have a slightly lower priority, but more importantly, can be - * fragmented so that it'll allow other activities to run. 
- */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) - iwl_mvm_schedule_session_protection(mvm, vif, 900, - min_duration, false); - else - iwl_mvm_protect_session(mvm, vif, duration, - min_duration, 500, false); + iwl_mvm_stop_session_protection(mvm, vif); mutex_unlock(&mvm->mutex); } @@ -4704,6 +4757,9 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; + IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", + mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); + if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { /* Second time, give up on this AP*/ @@ -4720,8 +4776,6 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, if (mvmvif->csa_failed) goto out_unlock; - IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d mode = %d\n", - mvmvif->id, chsw->count, chsw->block_tx); WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), @@ -4873,6 +4927,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) { + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: rinfo->bw = RATE_INFO_BW_20; @@ -4888,30 +4944,65 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) break; } - if (rate_n_flags & RATE_MCS_HT_MSK) { - rinfo->flags |= RATE_INFO_FLAGS_MCS; - rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK); - rinfo->nss = u32_get_bits(rate_n_flags, - RATE_HT_MCS_NSS_MSK) + 1; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; - rinfo->mcs = u32_get_bits(rate_n_flags, - RATE_VHT_MCS_RATE_CODE_MSK); - rinfo->nss = u32_get_bits(rate_n_flags, - RATE_VHT_MCS_NSS_MSK) + 1; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (rate_n_flags & RATE_MCS_HE_MSK) { + if (format == RATE_MCS_CCK_MSK || + format == RATE_MCS_LEGACY_OFDM_MSK) { + int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); + + /* add the offset needed to get to the legacy ofdm indices */ + if (format == RATE_MCS_LEGACY_OFDM_MSK) + rate += IWL_FIRST_OFDM_RATE; + + switch (rate) { + case IWL_RATE_1M_INDEX: + rinfo->legacy = 10; + break; + case IWL_RATE_2M_INDEX: + rinfo->legacy = 20; + break; + case IWL_RATE_5M_INDEX: + rinfo->legacy = 55; + break; + case IWL_RATE_11M_INDEX: + rinfo->legacy = 110; + break; + case IWL_RATE_6M_INDEX: + rinfo->legacy = 60; + break; + case IWL_RATE_9M_INDEX: + rinfo->legacy = 90; + break; + case IWL_RATE_12M_INDEX: + rinfo->legacy = 120; + break; + case IWL_RATE_18M_INDEX: + rinfo->legacy = 180; + break; + case IWL_RATE_24M_INDEX: + rinfo->legacy = 240; + break; + case IWL_RATE_36M_INDEX: + rinfo->legacy = 360; + break; + case IWL_RATE_48M_INDEX: + rinfo->legacy = 480; + break; + case IWL_RATE_54M_INDEX: + rinfo->legacy = 540; + } + return; + } + + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_MCS_NSS_MSK) + 1; + rinfo->mcs = format == RATE_MCS_HT_MSK ? 
+ RATE_HT_MCS_INDEX(rate_n_flags) : + u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); + + if (format == RATE_MCS_HE_MSK) { u32 gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; - rinfo->mcs = u32_get_bits(rate_n_flags, - RATE_VHT_MCS_RATE_CODE_MSK); - rinfo->nss = u32_get_bits(rate_n_flags, - RATE_VHT_MCS_NSS_MSK) + 1; if (rate_n_flags & RATE_MCS_HE_106T_MSK) { rinfo->bw = RATE_INFO_BW_HE_RU; @@ -4925,10 +5016,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; - else if (rate_n_flags & RATE_MCS_SGI_MSK) - rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; - else + else if (gi_ltf == 3) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; break; case RATE_MCS_HE_TYPE_MU: if (gi_ltf == 0 || gi_ltf == 1) @@ -4948,46 +5039,19 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; - } else { - switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) { - case IWL_RATE_1M_PLCP: - rinfo->legacy = 10; - break; - case IWL_RATE_2M_PLCP: - rinfo->legacy = 20; - break; - case IWL_RATE_5M_PLCP: - rinfo->legacy = 55; - break; - case IWL_RATE_11M_PLCP: - rinfo->legacy = 110; - break; - case IWL_RATE_6M_PLCP: - rinfo->legacy = 60; - break; - case IWL_RATE_9M_PLCP: - rinfo->legacy = 90; - break; - case IWL_RATE_12M_PLCP: - rinfo->legacy = 120; - break; - case IWL_RATE_18M_PLCP: - rinfo->legacy = 180; - break; - case IWL_RATE_24M_PLCP: - rinfo->legacy = 240; - break; - case IWL_RATE_36M_PLCP: - rinfo->legacy = 360; - break; - case IWL_RATE_48M_PLCP: - rinfo->legacy = 480; - break; - case IWL_RATE_54M_PLCP: - rinfo->legacy = 540; - break; - } + return; + } + + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + + if (format == RATE_MCS_HT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + + } else if (format == RATE_MCS_VHT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; } + } static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, @@ -5332,6 +5396,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, + .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, .sched_scan_start = iwl_mvm_mac_sched_scan_start, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f877d86b038e32..2b1dcd60e00f65 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -297,6 +297,7 @@ struct iwl_probe_resp_data { * see enum &iwl_mvm_low_latency_cause for causes. * @low_latency_actual: boolean, indicates low latency is set, * as a result from low_latency bit flags and takes force into account. + * @authorized: indicates the AP station was set to authorized * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. 
Used by the following @@ -330,6 +331,7 @@ struct iwl_mvm_vif { bool monitor_active; u8 low_latency: 6; u8 low_latency_actual: 1; + u8 authorized:1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -1443,12 +1445,17 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm); /* Utils */ +int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, + enum nl80211_band band); int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); -u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); +void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, + enum nl80211_band band, + struct ieee80211_tx_rate *r); +u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) @@ -1629,6 +1636,8 @@ int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, void *data, int len); u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, struct ieee80211_vif *vif); +u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, + u8 rate_idx); void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size); @@ -1649,8 +1658,8 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -1732,7 +1741,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); -int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); +int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index da705fcaf0fcc7..6d18a1fd649b90 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -583,8 +583,9 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, return; wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); - if (wgds_tbl_idx < 0) - IWL_DEBUG_INFO(mvm, "SAR WGDS is disabled (%d)\n", + if (wgds_tbl_idx < 1) + IWL_DEBUG_INFO(mvm, + "SAR WGDS is disabled or error received (%d)\n", wgds_tbl_idx); else IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 77ea2d0a30916a..232ad531d612a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -29,7 +29,6 @@ #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_AUTHOR); MODULE_LICENSE("GPL"); static const struct iwl_op_mode_ops iwl_mvm_ops; @@ 
-384,9 +383,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_probe_resp_data_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_probe_resp_data_notif), - RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF, - iwl_mvm_channel_switch_noa_notif, - RX_HANDLER_SYNC, struct iwl_channel_switch_noa_notif), + RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, + iwl_mvm_channel_switch_start_notif, + RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), @@ -512,7 +511,7 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), HCMD_NAME(SESSION_PROTECTION_NOTIF), - HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; /* Please keep this array *SORTED* by hex value. @@ -522,7 +521,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), - HCMD_NAME(GEO_TX_POWER_LIMIT), + HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -726,6 +725,183 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) return 0; } +struct iwl_mvm_frob_txf_data { + u8 *buf; + size_t buflen; +}; + +static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_mvm_frob_txf_data *txf = data; + u8 keylen, match, matchend; + u8 *keydata; + size_t i; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + keydata = key->key; + keylen = key->keylen; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + /* + * WEP has short keys which might show up in the payload, + * and then you can deduce the key, so in this case just + * remove all FIFO data. + * For TKIP, we don't know the phase 2 keys here, so same. 
+ */ + memset(txf->buf, 0xBB, txf->buflen); + return; + default: + return; + } + + /* scan for key material and clear it out */ + match = 0; + for (i = 0; i < txf->buflen; i++) { + if (txf->buf[i] != keydata[match]) { + match = 0; + continue; + } + match++; + if (match == keylen) { + memset(txf->buf + i - keylen, 0xAA, keylen); + match = 0; + } + } + + /* we're dealing with a FIFO, so check wrapped around data */ + matchend = match; + for (i = 0; match && i < keylen - match; i++) { + if (txf->buf[i] != keydata[match]) + break; + match++; + if (match == keylen) { + memset(txf->buf, 0xAA, i + 1); + memset(txf->buf + txf->buflen - matchend, 0xAA, + matchend); + break; + } + } +} + +static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen) +{ + struct iwl_mvm_frob_txf_data txf = { + .buf = buf, + .buflen = buflen, + }; + struct iwl_mvm *mvm = ctx; + + /* embedded key material exists only on old API */ + if (iwl_mvm_has_new_tx_api(mvm)) + return; + + rcu_read_lock(); + ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); + rcu_read_unlock(); +} + +static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len) +{ + /* we only use wide headers for commands */ + struct iwl_cmd_header_wide *hdr = hcmd; + unsigned int frob_start = sizeof(*hdr), frob_end = 0; + + if (len < sizeof(hdr)) + return; + + /* all the commands we care about are in LONG_GROUP */ + if (hdr->group_id != LONG_GROUP) + return; + + switch (hdr->cmd) { + case WEP_KEY: + case WOWLAN_TKIP_PARAM: + case WOWLAN_KEK_KCK_MATERIAL: + case ADD_STA_KEY: + /* + * blank out everything here, easier than dealing + * with the various versions of the command + */ + frob_end = INT_MAX; + break; + case MGMT_MCAST_KEY: + frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); + BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) != + offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); + + frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); + BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) < + offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); + break; + } + + if (frob_start >= frob_end) + return; + + if (frob_end > len) + frob_end = len; + + memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); +} + +static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen) +{ + const struct iwl_dump_exclude *excl; + struct iwl_mvm *mvm = ctx; + int i; + + switch (mvm->fwrt.cur_fw_img) { + case IWL_UCODE_INIT: + default: + /* not relevant */ + return; + case IWL_UCODE_REGULAR: + case IWL_UCODE_REGULAR_USNIFFER: + excl = mvm->fw->dump_excl; + break; + case IWL_UCODE_WOWLAN: + excl = mvm->fw->dump_excl_wowlan; + break; + } + + BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != + sizeof(mvm->fw->dump_excl_wowlan)); + + for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { + u32 start, end; + + if (!excl[i].addr || !excl[i].size) + continue; + + start = excl[i].addr; + end = start + excl[i].size; + + if (end <= mem_addr || start >= mem_addr + buflen) + continue; + + if (start < mem_addr) + start = mem_addr; + + if (end > mem_addr + buflen) + end = mem_addr + buflen; + + memset((u8 *)mem + start - mem_addr, 0xAA, end - start); + } +} + +static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { + .frob_txf = iwl_mvm_frob_txf, + .frob_hcmd = iwl_mvm_frob_hcmd, + .frob_mem = iwl_mvm_frob_mem, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -775,7 
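/*
 * Standalone sketch of the two-pass key scrub performed by the TXF
 * "frobbing" code above: pass one erases fully contained occurrences of the
 * key, pass two handles an occurrence that wraps from the end of the FIFO
 * snapshot back to offset 0. The buffer contents and key are illustrative
 * only; the driver operates on the firmware TX FIFO dump instead.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void scrub_key(unsigned char *buf, size_t buflen,
		      const unsigned char *key, size_t keylen)
{
	size_t i, match = 0, matchend;

	/* pass 1: erase every fully contained occurrence of the key */
	for (i = 0; i < buflen; i++) {
		if (buf[i] != key[match]) {
			match = 0;
			continue;
		}
		if (++match == keylen) {
			memset(buf + i + 1 - keylen, 0xAA, keylen);
			match = 0;
		}
	}

	/* pass 2: a partial match at the tail may continue at offset 0 */
	matchend = match;
	for (i = 0; match && i < keylen - matchend; i++) {
		if (buf[i] != key[match])
			break;
		if (++match == keylen) {
			memset(buf, 0xAA, i + 1);			/* head piece */
			memset(buf + buflen - matchend, 0xAA, matchend);/* tail piece */
			break;
		}
	}
}

int main(void)
{
	/* "KEY!" appears once in-line and once wrapped: "Y!" at the start, "KE" at the end */
	unsigned char fifo[] = "Y!...data...KEY!...data...KE";
	const unsigned char key[] = "KEY!";
	size_t i, scrubbed = 0;

	scrub_key(fifo, sizeof(fifo) - 1, key, 4);
	for (i = 0; i < sizeof(fifo) - 1; i++)
		scrubbed += (fifo[i] == 0xAA);
	printf("scrubbed %zu key bytes\n", scrubbed);	/* expect 8 */
	return 0;
}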
+951,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->hw = hw; iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, - dbgfs_dir); + &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); iwl_mvm_get_acpi_tables(mvm); @@ -868,8 +1044,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->cmd_ver.range_resp = iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 5); - /* we only support up to version 8 */ - if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8)) + /* we only support up to version 9 */ + if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) goto out_free; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index f2b090be38980e..b2ea2fca5376f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -128,6 +128,19 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac; bool tid_found = false; + if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) || + cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); + } else { + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); + } + #ifdef CONFIG_IWLWIFI_DEBUGFS /* set advanced pm flag with no uapsd ACs to enable ps-poll */ if (mvmvif->dbgfs_pm.use_ps_poll) { @@ -182,19 +195,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; - if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) || - cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { - cmd->rx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); - cmd->tx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); - } else { - cmd->rx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); - cmd->tx_data_timeout_uapsd = - cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); - } - if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->heavy_tx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 2d58cb969918f1..958702403a45db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -32,10 +32,6 @@ static u8 rs_fw_set_active_chains(u8 chains) fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; - if (chains & ANT_C) - WARN(false, - "tlc offload doesn't support antenna C. chains: 0x%x\n", - chains); return fw_chains; } @@ -314,7 +310,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; + + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) < 3) { + rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate), + le32_to_cpu(notif->rate)); + IWL_DEBUG_RATE(mvm, + "Got rate in old format. Rate: %s. 
Converting.\n", + pretty_rate); + lq_sta->last_rate_n_flags = + iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); + } else { lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); + } rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index b97708cb869d56..f4d02f9fe16d4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -4,11 +4,6 @@ * Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #include #include @@ -335,15 +330,15 @@ static const struct rs_tx_column rs_tx_columns[] = { static inline u8 rs_extract_rate(u32 rate_n_flags) { /* also works for HT because bits 7:6 are zero there */ - return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK); + return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK_V1); } static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) { int idx = 0; - if (rate_n_flags & RATE_MCS_HT_MSK) { - idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK; + if (rate_n_flags & RATE_MCS_HT_MSK_V1) { + idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1; idx += IWL_RATE_MCS_0_INDEX; /* skip 9M not supported in HT*/ @@ -351,8 +346,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx += 1; if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) return idx; - } else if (rate_n_flags & RATE_MCS_VHT_MSK || - rate_n_flags & RATE_MCS_HE_MSK) { + } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1 || + rate_n_flags & RATE_MCS_HE_MSK_V1) { idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; idx += IWL_RATE_MCS_0_INDEX; @@ -361,8 +356,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx++; if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) return idx; - if ((rate_n_flags & RATE_MCS_HE_MSK) && - (idx <= IWL_LAST_HE_RATE)) + if ((rate_n_flags & RATE_MCS_HE_MSK_V1) && + idx <= IWL_LAST_HE_RATE) return idx; } else { /* legacy rate format, search for match in table */ @@ -459,44 +454,8 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640}, }; -/* mbps, mcs */ -static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { - { "1", "BPSK DSSS"}, - { "2", "QPSK DSSS"}, - {"5.5", "BPSK CCK"}, - { "11", "QPSK CCK"}, - { "6", "BPSK 1/2"}, - { "9", "BPSK 1/2"}, - { "12", "QPSK 1/2"}, - { "18", "QPSK 3/4"}, - { "24", "16QAM 1/2"}, - { "36", "16QAM 3/4"}, - { "48", "64QAM 2/3"}, - { "54", "64QAM 3/4"}, - { "60", "64QAM 5/6"}, -}; - #define MCS_INDEX_PER_STREAM (8) -static const char *rs_pretty_ant(u8 ant) -{ - static const char * const ant_name[] = { - [ANT_NONE] = "None", - [ANT_A] = "A", - [ANT_B] = "B", - [ANT_AB] = "AB", - [ANT_C] = "C", - [ANT_AC] = "AC", - [ANT_BC] = "BC", - [ANT_ABC] = "ABC", - }; - - if (ant > ANT_ABC) - return "UNKNOWN"; - - return ant_name[ant]; -} - static const char *rs_pretty_lq_type(enum iwl_table_type type) { static const char * const lq_types[] = { @@ -558,7 +517,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate) rate_str = "BAD_RATE"; sprintf(buf, "(%s|%s|%s)", 
rs_pretty_lq_type(rate->type), - rs_pretty_ant(rate->ant), rate_str); + iwl_rs_pretty_ant(rate->ant), rate_str); return buf; } @@ -654,8 +613,7 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, static inline int get_num_of_ant_from_rate(u32 rate_n_flags) { return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + - !!(rate_n_flags & RATE_MCS_ANT_C_MSK); + !!(rate_n_flags & RATE_MCS_ANT_B_MSK); } /* @@ -820,12 +778,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, int index = rate->index; ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) & - RATE_MCS_ANT_ABC_MSK); + RATE_MCS_ANT_AB_MSK); if (is_legacy(rate)) { ucode_rate |= iwl_rates[index].plcp; if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) - ucode_rate |= RATE_MCS_CCK_MSK; + ucode_rate |= RATE_MCS_CCK_MSK_V1; return ucode_rate; } @@ -840,7 +798,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, IWL_ERR(mvm, "Invalid HT rate index %d\n", index); index = IWL_LAST_HT_RATE; } - ucode_rate |= RATE_MCS_HT_MSK; + ucode_rate |= RATE_MCS_HT_MSK_V1; if (is_ht_siso(rate)) ucode_rate |= iwl_rates[index].plcp_ht_siso; @@ -853,7 +811,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, IWL_ERR(mvm, "Invalid VHT rate index %d\n", index); index = IWL_LAST_VHT_RATE; } - ucode_rate |= RATE_MCS_VHT_MSK; + ucode_rate |= RATE_MCS_VHT_MSK_V1; if (is_vht_siso(rate)) ucode_rate |= iwl_rates[index].plcp_vht_siso; else if (is_vht_mimo2(rate)) @@ -873,9 +831,9 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, ucode_rate |= rate->bw; if (rate->sgi) - ucode_rate |= RATE_MCS_SGI_MSK; + ucode_rate |= RATE_MCS_SGI_MSK_V1; if (rate->ldpc) - ucode_rate |= RATE_MCS_LDPC_MSK; + ucode_rate |= RATE_MCS_LDPC_MSK_V1; return ucode_rate; } @@ -885,7 +843,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, enum nl80211_band band, struct rs_rate *rate) { - u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; + u32 ant_msk = ucode_rate & RATE_MCS_ANT_AB_MSK; u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate); u8 nss; @@ -898,9 +856,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, rate->ant = (ant_msk >> RATE_MCS_ANT_POS); /* Legacy */ - if (!(ucode_rate & RATE_MCS_HT_MSK) && - !(ucode_rate & RATE_MCS_VHT_MSK) && - !(ucode_rate & RATE_MCS_HE_MSK)) { + if (!(ucode_rate & RATE_MCS_HT_MSK_V1) && + !(ucode_rate & RATE_MCS_VHT_MSK_V1) && + !(ucode_rate & RATE_MCS_HE_MSK_V1)) { if (num_of_ant == 1) { if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; @@ -912,20 +870,20 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } /* HT, VHT or HE */ - if (ucode_rate & RATE_MCS_SGI_MSK) + if (ucode_rate & RATE_MCS_SGI_MSK_V1) rate->sgi = true; - if (ucode_rate & RATE_MCS_LDPC_MSK) + if (ucode_rate & RATE_MCS_LDPC_MSK_V1) rate->ldpc = true; if (ucode_rate & RATE_MCS_STBC_MSK) rate->stbc = true; if (ucode_rate & RATE_MCS_BF_MSK) rate->bfer = true; - rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; + rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK_V1; - if (ucode_rate & RATE_MCS_HT_MSK) { - nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >> - RATE_HT_MCS_NSS_POS) + 1; + if (ucode_rate & RATE_MCS_HT_MSK_V1) { + nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK_V1) >> + RATE_HT_MCS_NSS_POS_V1) + 1; if (nss == 1) { rate->type = LQ_HT_SISO; @@ -938,7 +896,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } else { WARN_ON_ONCE(1); } - } else if (ucode_rate & RATE_MCS_VHT_MSK) { + } else if (ucode_rate & RATE_MCS_VHT_MSK_V1) { nss = ((ucode_rate & 
RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; @@ -953,7 +911,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } else { WARN_ON_ONCE(1); } - } else if (ucode_rate & RATE_MCS_HE_MSK) { + } else if (ucode_rate & RATE_MCS_HE_MSK_V1) { nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; @@ -981,9 +939,6 @@ static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate) { u8 new_ant_type; - if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C)) - return 0; - if (!rs_is_valid_ant(valid_ant, rate->ant)) return 0; @@ -2552,7 +2507,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, } IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n", - rs_pretty_ant(best_ant), best_rssi); + iwl_rs_pretty_ant(best_ant), best_rssi); if (best_ant != ANT_A && best_ant != ANT_B) rate->ant = first_antenna(valid_tx_ant); @@ -2652,7 +2607,6 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, lq_sta->pers.chains = rx_status->chains; lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; - lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; lq_sta->pers.last_rssi = S8_MIN; for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { @@ -2738,8 +2692,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, return; lq_sta = mvm_sta; - iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, - info->band, &info->control.rates[0]); + iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, + info->band, &info->control.rates[0]); info->control.rates[0].count = 1; /* Report the optimal rate based on rssi and STA caps if we haven't @@ -2749,8 +2703,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, optimal_rate = rs_get_optimal_rate(mvm, lq_sta); last_ucode_rate = ucode_rate_from_rs_rate(mvm, optimal_rate); - iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, - &txrc->reported_rate); + iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, + &txrc->reported_rate); } } @@ -2909,7 +2863,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) mvm->drv_rx_stats.success_frames++; - switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { + switch (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) { case RATE_MCS_CHAN_WIDTH_20: mvm->drv_rx_stats.bw_20_frames++; break; @@ -2926,10 +2880,10 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) WARN_ONCE(1, "bad BW. 
rate 0x%x", rate); } - if (rate & RATE_MCS_HT_MSK) { + if (rate & RATE_MCS_HT_MSK_V1) { mvm->drv_rx_stats.ht_frames++; - nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; - } else if (rate & RATE_MCS_VHT_MSK) { + nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1; + } else if (rate & RATE_MCS_VHT_MSK_V1) { mvm->drv_rx_stats.vht_frames++; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; @@ -2942,7 +2896,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) else if (nss == 2) mvm->drv_rx_stats.mimo2_frames++; - if (rate & RATE_MCS_SGI_MSK) + if (rate & RATE_MCS_SGI_MSK_V1) mvm->drv_rx_stats.sgi_frames++; else mvm->drv_rx_stats.ngi_frames++; @@ -3323,7 +3277,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, int i; int num_rates = ARRAY_SIZE(lq_cmd->rs_table); __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); - u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; + u8 ant = (ucode_rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; @@ -3688,35 +3642,37 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta) IWL_DEBUG_RATE(mvm, "leave\n"); } -int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) +int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate) { - char *type, *bw; + char *type; u8 mcs = 0, nss = 0; - u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; + u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; + u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) >> + RATE_MCS_CHAN_WIDTH_POS; - if (!(rate & RATE_MCS_HT_MSK) && - !(rate & RATE_MCS_VHT_MSK) && - !(rate & RATE_MCS_HE_MSK)) { + if (!(rate & RATE_MCS_HT_MSK_V1) && + !(rate & RATE_MCS_VHT_MSK_V1) && + !(rate & RATE_MCS_HE_MSK_V1)) { int index = iwl_hwrate_to_plcp_idx(rate); return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", - rs_pretty_ant(ant), + iwl_rs_pretty_ant(ant), index == IWL_RATE_INVALID ? "BAD" : - iwl_rate_mcs[index].mbps); + iwl_rate_mcs(index)->mbps); } - if (rate & RATE_MCS_VHT_MSK) { + if (rate & RATE_MCS_VHT_MSK_V1) { type = "VHT"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; - } else if (rate & RATE_MCS_HT_MSK) { + } else if (rate & RATE_MCS_HT_MSK_V1) { type = "HT"; - mcs = rate & RATE_HT_MCS_INDEX_MSK; - nss = ((rate & RATE_HT_MCS_NSS_MSK) - >> RATE_HT_MCS_NSS_POS) + 1; - } else if (rate & RATE_MCS_HE_MSK) { + mcs = rate & RATE_HT_MCS_INDEX_MSK_V1; + nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) + >> RATE_HT_MCS_NSS_POS_V1) + 1; + } else if (rate & RATE_MCS_HE_MSK_V1) { type = "HE"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) @@ -3725,29 +3681,12 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) type = "Unknown"; /* shouldn't happen */ } - switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - bw = "20Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_40: - bw = "40Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_80: - bw = "80Mhz"; - break; - case RATE_MCS_CHAN_WIDTH_160: - bw = "160Mhz"; - break; - default: - bw = "BAD BW"; - } - return scnprintf(buf, bufsz, "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s", - rate, type, rs_pretty_ant(ant), bw, mcs, nss, - (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", + rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss, + (rate & RATE_MCS_SGI_MSK_V1) ? "SGI " : "NGI ", (rate & RATE_MCS_STBC_MSK) ? 
"STBC " : "", - (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", + (rate & RATE_MCS_LDPC_MSK_V1) ? "LDPC " : "", (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "", (rate & RATE_MCS_BF_MSK) ? "BF " : ""); } @@ -3830,10 +3769,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, lq_sta->active_legacy_rate); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); - desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n", + desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", - (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : ""); desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n", (is_legacy(rate)) ? "legacy" : is_vht(rate) ? "VHT" : "HT"); @@ -3891,7 +3829,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, desc += scnprintf(buff + desc, bufsz - desc, " rate[%d] 0x%X ", i, r); - desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r); + desc += rs_pretty_print_rate_v1(buff + desc, bufsz - desc, r); if (desc < bufsz - 1) buff[desc++] = '\n'; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 32104c9f8f5eed..b7bc8c1b2ddae1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -5,11 +5,6 @@ * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH * Copyright(c) 2018 - 2019 Intel Corporation - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * *****************************************************************************/ #ifndef __rs_h__ @@ -36,11 +31,6 @@ struct iwl_rs_rate_info { #define IWL_RATE_60M_PLCP 3 -enum { - IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, - IWL_RATE_INVALID = IWL_RATE_COUNT, -}; - #define LINK_QUAL_MAX_RETRY_NUM 16 enum { @@ -211,13 +201,6 @@ struct rs_rate { #define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80) #define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160) -#define IWL_MAX_MCS_DISPLAY_SIZE 12 - -struct iwl_rate_mcs_info { - char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; - char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; -}; - /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 8ef5399ad9be6d..d22f40a5354d83 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -103,7 +103,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info, struct ieee80211_rx_status *rx_status) { - int energy_a, energy_b, energy_c, max_energy; + int energy_a, energy_b, max_energy; u32 val; val = @@ -114,14 +114,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS; energy_b = energy_b ? 
-energy_b : S8_MIN; - energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> - IWL_RX_INFO_ENERGY_ANT_C_POS; - energy_c = energy_c ? -energy_c : S8_MIN; max_energy = max(energy_a, energy_b); - max_energy = max(max_energy, energy_c); - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", - energy_a, energy_b, energy_c, max_energy); + IWL_DEBUG_STATS(mvm, "energy In A %d B %d , and max %d\n", + energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & @@ -129,7 +125,6 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, >> RX_RES_PHY_FLAGS_ANTENNA_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = energy_c; } /* @@ -235,7 +230,7 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, mdata->rx.airtime += le16_to_cpu(phy_info->frame_time); } - if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) + if (!(rate_n_flags & (RATE_MCS_HT_MSK_V1 | RATE_MCS_VHT_MSK_V1))) return; mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); @@ -249,10 +244,10 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, mvmsta->sta_id != mvmvif->ap_sta_id) return; - if (rate_n_flags & RATE_MCS_HT_MSK) { - thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK]; - thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> - RATE_HT_MCS_NSS_POS); + if (rate_n_flags & RATE_MCS_HT_MSK_V1) { + thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1]; + thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK_V1) >> + RATE_HT_MCS_NSS_POS_V1); } else { if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= ARRAY_SIZE(thresh_tpt))) @@ -262,7 +257,7 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, RATE_VHT_MCS_NSS_POS); } - thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> + thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >> RATE_MCS_CHAN_WIDTH_POS); mdata->uapsd_nonagg_detect.rx_bytes += len; @@ -455,7 +450,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, } /* Set up the HT phy flags */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: @@ -468,20 +463,20 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->bw = RATE_INFO_BW_160; break; } - if (!(rate_n_flags & RATE_MCS_CCK_MSK) && - rate_n_flags & RATE_MCS_SGI_MSK) + if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) && + rate_n_flags & RATE_MCS_SGI_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; - if (rate_n_flags & RATE_MCS_LDPC_MSK) + if (rate_n_flags & RATE_MCS_LDPC_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (rate_n_flags & RATE_MCS_HT_MSK) { + if (rate_n_flags & RATE_MCS_HT_MSK_V1) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c12f303cf652ce..e0601f802628cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -240,8 +240,7 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta, - bool csi) + struct ieee80211_sta *sta) { if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); @@ -269,7 +268,6 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = S8_MIN; } static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, @@ -620,7 +618,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta, false); + sta); reorder_buf->num_stored--; } } @@ -1198,7 +1196,7 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, } if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) && - (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { + (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); @@ -1235,7 +1233,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, * the TSF/timers are not be transmitted in HE-MU. */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1290,13 +1288,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG) + else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } @@ -1508,9 +1506,9 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + ((rate_n_flags & RATE_MCS_NSS_MSK) >> + RATE_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_HE; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) @@ -1562,14 +1560,15 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, } break; case 3: - if ((he_type == RATE_MCS_HE_TYPE_SU || - he_type == RATE_MCS_HE_TYPE_EXT_SU) && - rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; - else - rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; + case 4: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + break; + default: + ltf = 
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; } he->data5 |= le16_encode_bits(ltf, @@ -1653,7 +1652,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_rx_phy_data phy_data = { .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; - bool csi = false; + u32 format; + bool is_sgi; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -1691,6 +1691,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.d2 = desc->v1.phy_data2; phy_data.d3 = desc->v1.phy_data3; } + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + REPLY_RX_MPDU_CMD, 0) < 4) { + rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n", + rate_n_flags); + } + format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; len = le16_to_cpu(desc->mpdu_len); @@ -1744,7 +1751,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, break; } - if (rate_n_flags & RATE_MCS_HE_MSK) + if (format == RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); @@ -1761,7 +1768,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ - if (rate_n_flags & RATE_MCS_CCK_MSK && + if (format == RATE_MCS_CCK_MSK && phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; @@ -1937,33 +1944,34 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } } - if (!(rate_n_flags & RATE_MCS_CCK_MSK) && - rate_n_flags & RATE_MCS_SGI_MSK) + is_sgi = format == RATE_MCS_HE_MSK ? + iwl_he_is_sgi(rate_n_flags) : + rate_n_flags & RATE_MCS_SGI_MSK; + + if (!(format == RATE_MCS_CCK_MSK) && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (rate_n_flags & RATE_MCS_HT_MSK) { + if (format == RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; + RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + RATE_MCS_STBC_POS; + rx_status->nss = + ((rate_n_flags & RATE_MCS_NSS_MSK) >> + RATE_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) { - int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); + } else if (!(format == RATE_MCS_HE_MSK)) { + int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", @@ -1994,7 +2002,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, 
queue, - sta, csi); + sta); out: rcu_read_unlock(); } @@ -2013,12 +2021,24 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 channel, energy_a, energy_b; + u32 format; struct iwl_mvm_rx_phy_data phy_data = { .info_type = le32_get_bits(desc->phy_info[1], IWL_RX_PHY_DATA1_INFO_TYPE_MASK), .d0 = desc->phy_info[0], .d1 = desc->phy_info[1], }; + bool is_sgi; + + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + RX_NO_DATA_NOTIF, 0) < 2) { + IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n", + rate_n_flags); + rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n", + rate_n_flags); + } + format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc))) return; @@ -2075,7 +2095,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, break; } - if (rate_n_flags & RATE_MCS_HE_MSK) + if (format == RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); @@ -2091,23 +2111,24 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); - if (!(rate_n_flags & RATE_MCS_CCK_MSK) && - rate_n_flags & RATE_MCS_SGI_MSK) + is_sgi = format == RATE_MCS_HE_MSK ? + iwl_he_is_sgi(rate_n_flags) : + rate_n_flags & RATE_MCS_SGI_MSK; + + if (!(format == RATE_MCS_CCK_MSK) && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (rate_n_flags & RATE_MCS_HT_MSK) { + if (format == RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) @@ -2120,12 +2141,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; - } else if (rate_n_flags & RATE_MCS_HE_MSK) { + } else if (format == RATE_MCS_HE_MSK) { rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; } else { - int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index d78e436fa8b531..a138b5c4cce84b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -163,7 +163,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; if (band == NL80211_BAND_2GHZ && !no_cck) - return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | + return 
cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 | tx_ant); else return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); @@ -1995,8 +1995,16 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, { u16 flags = 0; + /* + * If no direct SSIDs are provided perform a passive scan. Otherwise, + * if there is a single SSID which is not the broadcast SSID, assume + * that the scan is intended for roaming purposes and thus enable Rx on + * all chains to improve chances of hearing the beacons/probe responses. + */ if (params->n_ssids == 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; + else if (params->n_ssids == 1 && params->ssids[0].ssid_len) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 0a13c2bda2eed2..bdd4ee43254835 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -268,6 +268,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, int rate_idx = -1; u8 rate_plcp; u32 rate_flags = 0; + bool is_cck; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); /* info->control is only relevant for non HW rate control */ @@ -299,11 +300,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); + is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; + /* Set CCK or OFDM flag */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) { + if (!is_cck) + rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; + else + rate_flags |= RATE_MCS_CCK_MSK; + } else if (is_cck) { + rate_flags |= RATE_MCS_CCK_MSK_V1; + } return (u32)rate_plcp | rate_flags; } @@ -1284,31 +1292,72 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status) } #endif /* CONFIG_IWLWIFI_DEBUG */ -void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum nl80211_band band, - struct ieee80211_tx_rate *r) +static int iwl_mvm_get_hwrate_chan_width(u32 chan_width) { - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + switch (chan_width) { case RATE_MCS_CHAN_WIDTH_20: - break; + return 0; case RATE_MCS_CHAN_WIDTH_40: - r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - break; + return IEEE80211_TX_RC_40_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_80: - r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; - break; + return IEEE80211_TX_RC_80_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_160: - r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; - break; + return IEEE80211_TX_RC_160_MHZ_WIDTH; + default: + return 0; } +} + +void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, + enum nl80211_band band, + struct ieee80211_tx_rate *r) +{ + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + u32 rate = format == RATE_MCS_HT_MSK ? 
+ RATE_HT_MCS_INDEX(rate_n_flags) : + rate_n_flags & RATE_MCS_CODE_MSK; + + r->flags |= + iwl_mvm_get_hwrate_chan_width(rate_n_flags & + RATE_MCS_CHAN_WIDTH_MSK); + if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; - if (rate_n_flags & RATE_MCS_HT_MSK) { + if (format == RATE_MCS_HT_MSK) { r->flags |= IEEE80211_TX_RC_MCS; - r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + r->idx = rate; + } else if (format == RATE_MCS_VHT_MSK) { + ieee80211_rate_set_vht(r, rate, + ((rate_n_flags & RATE_MCS_NSS_MSK) >> + RATE_MCS_NSS_POS) + 1); + r->flags |= IEEE80211_TX_RC_VHT_MCS; + } else if (format == RATE_MCS_HE_MSK) { + /* mac80211 cannot do this without ieee80211_tx_status_ext() + * but it only matters for radiotap */ + r->idx = 0; + } else { + r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + band); + } +} + +void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, + enum nl80211_band band, + struct ieee80211_tx_rate *r) +{ + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + + r->flags |= + iwl_mvm_get_hwrate_chan_width(rate_n_flags & + RATE_MCS_CHAN_WIDTH_MSK_V1); + + if (rate_n_flags & RATE_MCS_SGI_MSK_V1) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + if (rate_n_flags & RATE_MCS_HT_MSK_V1) { + r->flags |= IEEE80211_TX_RC_MCS; + r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; + } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> @@ -1323,14 +1372,20 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, /* * translate ucode response to mac80211 tx status control values */ -static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, +static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, + u32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; + if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, + TX_CMD, 0) > 6) + rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + info->status.antenna = - ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); - iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); + ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); + iwl_mvm_hwrate_to_tx_rate(rate_n_flags, + info->band, r); } static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, @@ -1450,7 +1505,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* the FW should have stopped the queue and not * return this status */ - WARN_ON(1); + IWL_ERR_LIMIT(mvm, + "FW reported TX filtered, status=0x%x, FC=0x%x\n", + status, le16_to_cpu(hdr->frame_control)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: @@ -1472,8 +1529,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); info->status.rates[0].count = tx_resp->failure_frame + 1; - iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), + + iwl_mvm_hwrate_to_tx_status(mvm->fw, + le32_to_cpu(tx_resp->initial_rate), info); + + /* Don't assign the converted initial_rate, because driver + * TLC uses this and doesn't support the new FW rate + */ info->status.status_driver_data[1] = (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); @@ -1835,7 +1898,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &tx_info->status, sizeof(tx_info->status)); - iwl_mvm_hwrate_to_tx_status(rate, info); + 
iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); } } @@ -1856,7 +1919,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, goto out; tx_info->band = chanctx_conf->def.chan->band; - iwl_mvm_hwrate_to_tx_status(rate, tx_info); + iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); if (!iwl_mvm_has_tlc_offload(mvm)) { IWL_DEBUG_TX_REPLY(mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 4a3d2971a98b74..caf1dcf488886e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -135,31 +135,25 @@ int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, return iwl_mvm_send_cmd_status(mvm, &cmd, status); } -#define IWL_DECLARE_RATE_INFO(r) \ - [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP +int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, + enum nl80211_band band) +{ + int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; + bool is_LB = band == NL80211_BAND_2GHZ; -/* - * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP - */ -static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1), - IWL_DECLARE_RATE_INFO(2), - IWL_DECLARE_RATE_INFO(5), - IWL_DECLARE_RATE_INFO(11), - IWL_DECLARE_RATE_INFO(6), - IWL_DECLARE_RATE_INFO(9), - IWL_DECLARE_RATE_INFO(12), - IWL_DECLARE_RATE_INFO(18), - IWL_DECLARE_RATE_INFO(24), - IWL_DECLARE_RATE_INFO(36), - IWL_DECLARE_RATE_INFO(48), - IWL_DECLARE_RATE_INFO(54), -}; + if (format == RATE_MCS_LEGACY_OFDM_MSK) + return is_LB ? rate + IWL_FIRST_OFDM_RATE : + rate; + + /* CCK is not allowed in HB */ + return is_LB ? rate : -1; +} int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; int idx; int band_offset = 0; @@ -167,16 +161,24 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, if (band != NL80211_BAND_2GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) - if (fw_rate_idx_to_plcp[idx] == rate) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) return idx - band_offset; return -1; } -u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) +u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx) { - /* Get PLCP rate for tx_cmd->rate_n_flags */ - return fw_rate_idx_to_plcp[rate_idx]; + if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP, + TX_CMD, 0) > 8) + /* In the new rate legacy rates are indexed: + * 0 - 3 for CCK and 0 - 7 for OFDM. + */ + return (rate_idx >= IWL_FIRST_OFDM_RATE ? + rate_idx - IWL_FIRST_OFDM_RATE : + rate_idx); + + return iwl_fw_rate_idx_to_plcp(rate_idx); } u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac) @@ -217,6 +219,7 @@ u8 first_antenna(u8 mask) return BIT(ffs(mask) - 1); } +#define MAX_ANT_NUM 2 /* * Toggles between TX antennas to send the probe request on. 
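/*
 * Sketch of the legacy-rate index mapping used by the new rate_n_flags
 * format handled above: the firmware numbers CCK rates 0-3 and OFDM rates
 * 0-7 in separate spaces, while the driver/mac80211 2.4 GHz table is a
 * single 0-11 space with the four CCK rates first. The constant and helper
 * names below are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

#define FIRST_OFDM_IDX	4	/* driver index of 6 Mbps, after the 4 CCK rates */

/* driver/mac80211 index -> firmware legacy index (new format) */
static int drv_to_fw_legacy(int drv_idx)
{
	return drv_idx >= FIRST_OFDM_IDX ? drv_idx - FIRST_OFDM_IDX : drv_idx;
}

/* firmware legacy index -> driver/mac80211 index for the given band */
static int fw_to_drv_legacy(int fw_idx, bool is_ofdm, bool band_2ghz)
{
	if (is_ofdm)
		return band_2ghz ? fw_idx + FIRST_OFDM_IDX : fw_idx;
	/* CCK exists only on 2.4 GHz; reject it elsewhere */
	return band_2ghz ? fw_idx : -1;
}

int main(void)
{
	/* 24 Mbps: driver index 8 on 2.4 GHz, firmware OFDM index 4 */
	int fw = drv_to_fw_legacy(8);

	printf("fw index %d -> 2.4 GHz driver index %d, 5 GHz driver index %d\n",
	       fw, fw_to_drv_legacy(fw, true, true),
	       fw_to_drv_legacy(fw, true, false));
	return 0;
}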
* Receives the bitmask of valid TX antennas and the *index* used @@ -405,6 +408,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) + return false; + if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return false; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 239a722cd79d85..85a6da70ca787b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -57,6 +57,10 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical); dbg_cfg->hwm_size = cpu_to_le32(frag->size); + dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset); + IWL_DEBUG_FW(trans, + "WRT: Applying DRAM destination (debug_token_config=%u)\n", + dbg_cfg->debug_token_config); IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n", alloc_id, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e3996ff99bad54..c574f041f09692 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -499,6 +499,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* Ma devices */ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, + {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, @@ -518,7 +519,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_NO_CDB, _cfg, _name) + IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -532,15 +533,25 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), + IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690i_name), /* AX200 */ + IWL_DEV_INFO(0x2723, IWL_CFG_ANY, 
iwl_ax200_cfg_cc, iwl_ax200_name), IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name), IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), - IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), /* Qu with Hr */ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), @@ -639,6 +650,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL), + /* So with JF */ + IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), + IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), + /* SnJ with HR */ IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL), @@ -648,6 +665,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), + IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, @@ -702,17 +725,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9462_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9560_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9560_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, @@ -931,9 +943,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ @@ -945,7 +957,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), /* QuZ */ @@ -1051,11 +1063,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 
iwl_cfg_so_a0_hr_a0, iwl_ax203_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, @@ -1112,6 +1119,50 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_bz_a0_mr_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_bz_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl_a0_fm_a0, iwl_bz_name), + +/* SoF with JF2 */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), + +/* SoF with JF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1191,16 +1242,158 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { #endif /* CONFIG_IWLMVM */ }; +/* + * In case that there is no OTP on the NIC, get the rf id and cdb info + * from the prph registers. 
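The helper introduced below recovers the RF identification when the OTP is blank: it reads the CRF ID from the SD_REG_VER register, translates it to an RF type, and folds in the CDB capability read from the WFPM OTP config word. A rough sketch of how the resulting hw_rf_id value is composed, with the field positions taken from the code below (illustrative only, not the driver's API):

#include <stdint.h>
#include <stdbool.h>

/* Mirrors how get_crf_id() builds hw_rf_id: RF type in bits 12+, CDB in bit 28. */
static uint32_t compose_hw_rf_id(uint32_t rf_type, bool has_cdb)
{
	uint32_t rf_id = rf_type << 12;   /* RF type field, e.g. a JF/HR/GF/MR/FM code */

	if (has_cdb)
		rf_id |= 1u << 28;        /* CDB capability flag */

	return rf_id;
}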
+ */ +static int get_crf_id(struct iwl_trans *iwl_trans) +{ + int ret = 0; + u32 wfpm_ctrl_addr; + u32 wfpm_otp_cfg_addr; + u32 sd_reg_ver_addr; + u32 cdb = 0; + u32 val; + + if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + wfpm_ctrl_addr = WFPM_CTRL_REG_GEN2; + wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2; + sd_reg_ver_addr = SD_REG_VER_GEN2; + /* Qu/Pu families have other addresses */ + } else { + wfpm_ctrl_addr = WFPM_CTRL_REG; + wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR; + sd_reg_ver_addr = SD_REG_VER; + } + + if (!iwl_trans_grab_nic_access(iwl_trans)) { + IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); + ret = -EIO; + goto out; + } + + /* Enable access to peripheral registers */ + val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr); + val |= ENABLE_WFPM; + iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val); + + /* Read crf info */ + val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + + /* Read cdb info (also contains the jacket info if needed in the future */ + cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr); + + /* Map between crf id to rf id */ + switch (REG_CRF_ID_TYPE(val)) { + case REG_CRF_ID_TYPE_JF_1: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12); + break; + case REG_CRF_ID_TYPE_JF_2: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); + break; + case REG_CRF_ID_TYPE_HR_NONE_CDB: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); + break; + case REG_CRF_ID_TYPE_HR_CDB: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); + break; + case REG_CRF_ID_TYPE_GF: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); + break; + case REG_CRF_ID_TYPE_MR: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); + break; + case REG_CRF_ID_TYPE_FM: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); + break; + default: + ret = -EIO; + IWL_ERR(iwl_trans, + "Can find a correct rfid for crf id 0x%x\n", + REG_CRF_ID_TYPE(val)); + goto out_release; + + } + + /* Set CDB capabilities */ + if (cdb & BIT(4)) { + iwl_trans->hw_rf_id += BIT(28); + IWL_INFO(iwl_trans, "Adding cdb to rf id\n"); + } + + IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n", + iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val)); + +out_release: + iwl_trans_release_nic_access(iwl_trans); + +out: + return ret; +} + /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 +static const struct iwl_dev_info * +iwl_pci_find_dev_info(u16 device, u16 subsystem_device, + u16 mac_type, u8 mac_step, + u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores) +{ + int i; + + for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) { + const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; + + if (dev_info->device != (u16)IWL_CFG_ANY && + dev_info->device != device) + continue; + + if (dev_info->subdevice != (u16)IWL_CFG_ANY && + dev_info->subdevice != subsystem_device) + continue; + + if (dev_info->mac_type != (u16)IWL_CFG_ANY && + dev_info->mac_type != mac_type) + continue; + + if (dev_info->mac_step != (u8)IWL_CFG_ANY && + dev_info->mac_step != mac_step) + continue; + + if (dev_info->rf_type != (u16)IWL_CFG_ANY && + dev_info->rf_type != rf_type) + continue; + + if (dev_info->cdb != (u8)IWL_CFG_ANY && + dev_info->cdb != cdb) + continue; + + if (dev_info->rf_id != (u8)IWL_CFG_ANY && + dev_info->rf_id != rf_id) + continue; + + if (dev_info->no_160 != (u8)IWL_CFG_ANY && + dev_info->no_160 != no_160) + continue; + + if (dev_info->cores != (u8)IWL_CFG_ANY && + dev_info->cores != cores) + continue; + + return dev_info; + } + + return NULL; +} + static int 
iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg_trans_params *trans; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; + const struct iwl_dev_info *dev_info; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; - int i, ret; + int ret; const struct iwl_cfg *cfg; trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); @@ -1222,37 +1415,48 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + /* + * Let's try to grab NIC access early here. Sometimes, NICs may + * fail to initialize, and if that happens it's better if we see + * issues early on (and can reprobe, per the logic inside), than + * first trying to load the firmware etc. and potentially only + * detecting any problems when the first interface is brought up. + */ + ret = iwl_finish_nic_init(iwl_trans); + if (ret) + goto out_free_trans; + if (iwl_trans_grab_nic_access(iwl_trans)) { + /* all good */ + iwl_trans_release_nic_access(iwl_trans); + } else { + ret = -EIO; + goto out_free_trans; + } + iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); - for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) { - const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; - if ((dev_info->device == (u16)IWL_CFG_ANY || - dev_info->device == pdev->device) && - (dev_info->subdevice == (u16)IWL_CFG_ANY || - dev_info->subdevice == pdev->subsystem_device) && - (dev_info->mac_type == (u16)IWL_CFG_ANY || - dev_info->mac_type == - CSR_HW_REV_TYPE(iwl_trans->hw_rev)) && - (dev_info->mac_step == (u8)IWL_CFG_ANY || - dev_info->mac_step == - CSR_HW_REV_STEP(iwl_trans->hw_rev)) && - (dev_info->rf_type == (u16)IWL_CFG_ANY || - dev_info->rf_type == - CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id)) && - (dev_info->cdb == IWL_CFG_NO_CDB || - CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id)) && - (dev_info->rf_id == (u8)IWL_CFG_ANY || - dev_info->rf_id == - IWL_SUBDEVICE_RF_ID(pdev->subsystem_device)) && - (dev_info->no_160 == (u8)IWL_CFG_ANY || - dev_info->no_160 == - IWL_SUBDEVICE_NO_160(pdev->subsystem_device)) && - (dev_info->cores == (u8)IWL_CFG_ANY || - dev_info->cores == - IWL_SUBDEVICE_CORES(pdev->subsystem_device))) { - iwl_trans->cfg = dev_info->cfg; - iwl_trans->name = dev_info->name; - } + /* + * The RF_ID is set to zero in blank OTP so read version to + * extract the RF_ID. + * This is relevant only for family 9000 and up. 
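The open-coded table walk removed above assigned cfg and name on every hit without breaking out of the loop, so the last matching entry of iwl_dev_info_table effectively won. The iwl_pci_find_dev_info() helper added earlier preserves that ordering by scanning the table from the end and returning the first hit. A self-contained sketch of the same last-match-wins property, using hypothetical types rather than the driver's structures:

#include <stddef.h>

struct entry { int match; const void *cfg; };

static const struct entry *find_last_match(const struct entry *tbl, size_t n, int key)
{
	size_t i;

	for (i = n; i-- > 0; )          /* walk from the end of the table */
		if (tbl[i].match == key)
			return &tbl[i]; /* most-recently-added match wins */

	return NULL;
}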
+ */ + if (iwl_trans->trans_cfg->rf_id && + iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) + goto out_free_trans; + + dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, + CSR_HW_REV_TYPE(iwl_trans->hw_rev), + CSR_HW_REV_STEP(iwl_trans->hw_rev), + CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), + CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), + IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), + IWL_SUBDEVICE_NO_160(pdev->subsystem_device), + IWL_SUBDEVICE_CORES(pdev->subsystem_device)); + + if (dev_info) { + iwl_trans->cfg = dev_info->cfg; + iwl_trans->name = dev_info->name; } #if IS_ENABLED(CONFIG_IWLMVM) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 8e45eb38304b24..14602d6d6699f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2149,6 +2149,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; u32 inta_fh, inta_hw; bool polling = false; + bool sw_err; if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; @@ -2221,9 +2222,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) wake_up(&trans_pcie->ucode_write_waitq); } + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; + else + sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; + /* Error detected by uCode */ - if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || - (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { + if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n", inta_fh); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index bf0c32a74ca473..645cb4dd4e5a34 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -47,7 +47,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (ret) return ret; @@ -131,21 +131,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (trans_pcie->is_down) return; - if (trans->state >= IWL_TRANS_FW_STARTED) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); - iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, - CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, - 5000); - msleep(100); - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_SW_RESET); - } else if (trans_pcie->fw_reset_handshake) { + if (trans->state >= IWL_TRANS_FW_STARTED) + if (trans_pcie->fw_reset_handshake) iwl_trans_pcie_fw_reset_handshake(trans); - } - } trans_pcie->is_down = true; @@ -175,18 +163,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) else iwl_pcie_ctxt_info_free(trans); - /* Make sure (redundant) we've released our request to stay awake */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); - else - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { - iwl_set_bit(trans, 
CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_SW_RESET); - } /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); @@ -466,13 +442,15 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, iwl_pcie_set_ltr(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); - else + } else { iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + } /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f252680f18e889..1efb53f78a62f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -129,7 +129,12 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + else + iwl_set_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_SW_RESET); usleep_range(5000, 6000); } @@ -306,7 +311,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) if (trans->trans_cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (ret) return ret; @@ -378,7 +383,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, @@ -458,6 +463,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 100); + msleep(100); } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); @@ -1056,7 +1062,7 @@ struct iwl_causes_list { u8 addr; }; -static struct iwl_causes_list causes_list[] = { +static const struct iwl_causes_list causes_list_common[] = { {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, @@ -1067,30 +1073,50 @@ static struct iwl_causes_list causes_list[] = { {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, - {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; +static const struct iwl_causes_list causes_list_pre_bz[] = { + {MSIX_HW_INT_CAUSES_REG_SW_ERR, 
CSR_MSIX_HW_INT_MASK_AD, 0x29}, +}; + +static const struct iwl_causes_list causes_list_bz[] = { + {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29}, +}; + +static void iwl_pcie_map_list(struct iwl_trans *trans, + const struct iwl_causes_list *causes, + int arr_size, int val) +{ + int i; + + for (i = 0; i < arr_size; i++) { + iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); + iwl_clear_bit(trans, causes[i].mask_reg, + causes[i].cause_num); + } +} + static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; - int i, arr_size = ARRAY_SIZE(causes_list); - struct iwl_causes_list *causes = causes_list; - /* * Access all non RX causes and map them to the default irq. * In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. */ - for (i = 0; i < arr_size; i++) { - iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); - iwl_clear_bit(trans, causes[i].mask_reg, - causes[i].cause_num); - } + iwl_pcie_map_list(trans, causes_list_common, + ARRAY_SIZE(causes_list_common), val); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_pcie_map_list(trans, causes_list_bz, + ARRAY_SIZE(causes_list_bz), val); + else + iwl_pcie_map_list(trans, causes_list_pre_bz, + ARRAY_SIZE(causes_list_pre_bz), val); } static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) @@ -1208,8 +1234,12 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) } /* Make sure (redundant) we've released our request to stay awake */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); @@ -1501,7 +1531,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (ret) return ret; @@ -1734,7 +1764,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) { int ret; - ret = iwl_finish_nic_init(trans, trans->trans_cfg); + ret = iwl_finish_nic_init(trans); if (ret < 0) return ret; @@ -2140,9 +2170,12 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) if (trans_pcie->cmd_hold_nic_awake) goto out; - - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the @@ -3203,9 +3236,11 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) return 0; } -static struct iwl_trans_dump_data -*iwl_trans_pcie_dump_data(struct iwl_trans *trans, - u32 dump_mask) +static struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, + u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx) { struct 
iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; @@ -3305,6 +3340,10 @@ static struct iwl_trans_dump_data txcmd->caplen = cpu_to_le32(caplen); memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); + if (sanitize_ops && sanitize_ops->frob_hcmd) + sanitize_ops->frob_hcmd(sanitize_ctx, + txcmd->data, + caplen); txcmd = (void *)((u8 *)txcmd->data + caplen); } @@ -3365,7 +3404,10 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) if (trans_pcie->msix_enabled) { inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; - sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; + else + sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; } else { inta_addr = CSR_INT; sw_err_bit = CSR_INT_BIT_SW_ERR; diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 9a19046217df68..e459e7192ae969 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -1403,14 +1403,17 @@ static int prism2_hw_init2(struct net_device *dev, int initial) hfa384x_events_only_cmd(dev); if (initial) { + u8 addr[ETH_ALEN] = {}; struct list_head *ptr; + prism2_check_sta_fw_version(local); if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR, - dev->dev_addr, 6, 1) < 0) { + addr, ETH_ALEN, 1) < 0) { printk("%s: could not get own MAC address\n", dev->name); } + eth_hw_addr_set(dev, addr); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); eth_hw_addr_inherit(iface->dev, dev); diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 54f67b682b6a0b..787f685e70b492 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -713,9 +713,9 @@ static int prism2_set_mac_address(struct net_device *dev, void *p) read_lock_bh(&local->iface_lock); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); - memcpy(iface->dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(iface->dev, addr->sa_data); } - memcpy(local->dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(local->dev, addr->sa_data); read_unlock_bh(&local->iface_lock); return 0; diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 0e73a10cc06c44..7df88d20ff3dc7 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -2265,7 +2265,7 @@ int orinoco_if_add(struct orinoco_private *priv, netif_carrier_off(dev); - memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); + eth_hw_addr_set(dev, wiphy->perm_addr); dev->base_addr = base_addr; dev->irq = irq; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0adae76eb8df1d..23219f3747f816 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2802,7 +2802,6 @@ static void hwsim_mcast_new_radio(int id, struct genl_info *info, static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = { { - /* TODO: should we support other types, e.g., P2P?*/ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .he_cap = { @@ -2850,7 +2849,6 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = { }, #ifdef CONFIG_MAC80211_MESH { - /* TODO: 
should we support other types, e.g., IBSS?*/ .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), .he_cap = { .has_he = true, @@ -2988,6 +2986,122 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = { #endif }; +static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = { + { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .he_6ghz_capa = { + .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | + IEEE80211_HE_6GHZ_CAP_SM_PS | + IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | + IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | + IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), + }, + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, + }, +#ifdef CONFIG_MAC80211_MESH + { + /* TODO: should we support other types, e.g., IBSS?*/ + .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), + .he_6ghz_capa = { + .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | + IEEE80211_HE_6GHZ_CAP_SM_PS | + IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | + IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | + IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), + }, + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + 
IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = 0, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, + }, +#endif +}; + static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband) { u16 n_iftype_data; @@ -3000,6 +3114,10 @@ static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband) n_iftype_data = ARRAY_SIZE(he_capa_5ghz); sband->iftype_data = (struct ieee80211_sband_iftype_data *)he_capa_5ghz; + } else if (sband->band == NL80211_BAND_6GHZ) { + n_iftype_data = ARRAY_SIZE(he_capa_6ghz); + sband->iftype_data = + (struct ieee80211_sband_iftype_data *)he_capa_6ghz; } else { return; } @@ -3290,6 +3408,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sband->vht_cap.vht_mcs.tx_mcs_map = sband->vht_cap.vht_mcs.rx_mcs_map; break; + case NL80211_BAND_6GHZ: + sband->channels = data->channels_6ghz; + sband->n_channels = ARRAY_SIZE(hwsim_channels_6ghz); + sband->bitrates = data->rates + 4; + sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; + break; case NL80211_BAND_S1GHZ: memcpy(&sband->s1g_cap, &hwsim_s1g_cap, sizeof(sband->s1g_cap)); @@ -3300,19 +3424,21 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, continue; } - sband->ht_cap.ht_supported = true; - sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_DSSSCCK40; - sband->ht_cap.ampdu_factor = 0x3; - sband->ht_cap.ampdu_density = 0x6; - memset(&sband->ht_cap.mcs, 0, - sizeof(sband->ht_cap.mcs)); - sband->ht_cap.mcs.rx_mask[0] = 0xff; - sband->ht_cap.mcs.rx_mask[1] = 0xff; - sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (band != NL80211_BAND_6GHZ){ + sband->ht_cap.ht_supported = true; + sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40; + sband->ht_cap.ampdu_factor = 0x3; + sband->ht_cap.ampdu_density = 0x6; + memset(&sband->ht_cap.mcs, 0, + sizeof(sband->ht_cap.mcs)); + sband->ht_cap.mcs.rx_mask[0] = 0xff; + sband->ht_cap.mcs.rx_mask[1] = 0xff; + sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + } mac80211_hwsim_he_capab(sband); @@ -3527,13 +3653,16 @@ static const struct net_device_ops hwsim_netdev_ops = { static void hwsim_mon_setup(struct net_device *dev) { + u8 addr[ETH_ALEN]; + dev->netdev_ops = &hwsim_netdev_ops; dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211_RADIOTAP; - eth_zero_addr(dev->dev_addr); - dev->dev_addr[0] = 0x12; + eth_zero_addr(addr); + addr[0] = 0x12; + eth_hw_addr_set(dev, addr); } static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index a4d9dd73b25886..104d2b6dc9af6e 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -150,10 +150,9 @@ int lbs_update_hw_spec(struct lbs_private *priv) memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); if (!priv->copied_hwaddr) { - memcpy(priv->dev->dev_addr, 
priv->current_addr, ETH_ALEN); + eth_hw_addr_set(priv->dev, priv->current_addr); if (priv->mesh_dev) - memcpy(priv->mesh_dev->dev_addr, - priv->current_addr, ETH_ALEN); + eth_hw_addr_set(priv->mesh_dev, priv->current_addr); priv->copied_hwaddr = 1; } diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index 20436a289d5cd9..5d6dc1dd050d4a 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -292,6 +292,7 @@ static int if_usb_probe(struct usb_interface *intf, if_usb_reset_device(cardp); dealloc: if_usb_free(cardp); + kfree(cardp); error: return r; @@ -316,6 +317,7 @@ static void if_usb_disconnect(struct usb_interface *intf) /* Unlink and free urb */ if_usb_free(cardp); + kfree(cardp); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 64fc5e41086482..5c9f295536ead9 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -302,9 +302,9 @@ int lbs_set_mac_address(struct net_device *dev, void *addr) dev = priv->dev; memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN); - memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, phwaddr->sa_data); if (priv->mesh_dev) - memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN); + eth_hw_addr_set(priv->mesh_dev, phwaddr->sa_data); return ret; } diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c index 6cbba84989b8ad..a58c1e141f2ca4 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.c +++ b/drivers/net/wireless/marvell/libertas/mesh.c @@ -169,7 +169,7 @@ static ssize_t anycast_mask_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0])); + return sysfs_emit(buf, "0x%X\n", le32_to_cpu(mesh_access.data[0])); } /** @@ -222,7 +222,7 @@ static ssize_t prb_rsp_limit_show(struct device *dev, return ret; retry_limit = le32_to_cpu(mesh_access.data[1]); - return snprintf(buf, 10, "%d\n", retry_limit); + return sysfs_emit(buf, "%d\n", retry_limit); } /** @@ -270,7 +270,7 @@ static ssize_t lbs_mesh_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; - return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev); + return sysfs_emit(buf, "0x%X\n", !!priv->mesh_dev); } /** @@ -369,7 +369,7 @@ static ssize_t bootflag_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag)); + return sysfs_emit(buf, "%d\n", le32_to_cpu(defs.bootflag)); } /** @@ -419,7 +419,7 @@ static ssize_t boottime_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 12, "%d\n", defs.boottime); + return sysfs_emit(buf, "%d\n", defs.boottime); } /** @@ -479,7 +479,7 @@ static ssize_t channel_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel)); + return sysfs_emit(buf, "%d\n", le16_to_cpu(defs.channel)); } /** @@ -605,7 +605,7 @@ static ssize_t protocol_id_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id); + return sysfs_emit(buf, "%d\n", defs.meshie.val.active_protocol_id); } /** @@ -667,7 +667,7 @@ static ssize_t metric_id_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 5, "%d\n", 
defs.meshie.val.active_metric_id); + return sysfs_emit(buf, "%d\n", defs.meshie.val.active_metric_id); } /** @@ -729,7 +729,7 @@ static ssize_t capability_show(struct device *dev, if (ret) return ret; - return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability); + return sysfs_emit(buf, "%d\n", defs.meshie.val.mesh_capability); } /** diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index fe0a69e804d8c4..75b5319d033f33 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -230,6 +230,7 @@ static int if_usb_probe(struct usb_interface *intf, dealloc: if_usb_free(cardp); + kfree(cardp); error: lbtf_deb_leave(LBTF_DEB_MAIN); return -ENOMEM; @@ -254,6 +255,7 @@ static void if_usb_disconnect(struct usb_interface *intf) /* Unlink and free urb */ if_usb_free(cardp); + kfree(cardp); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 6696bce5617862..9ff2058bcd7e47 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -125,7 +125,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv, tx_ba_tbl->ra); } else { /* * In case of failure, recreate the deleted stream in case - * we initiated the ADDBA + * we initiated the DELBA */ if (!INITIATOR_BIT(del_ba_param_set)) return 0; @@ -657,14 +657,15 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac, uint16_t del_ba_param_set; memset(&delba, 0, sizeof(delba)); - delba.del_ba_param_set = cpu_to_le16(tid << DELBA_TID_POS); - del_ba_param_set = le16_to_cpu(delba.del_ba_param_set); + del_ba_param_set = tid << DELBA_TID_POS; + if (initiator) del_ba_param_set |= IEEE80211_DELBA_PARAM_INITIATOR_MASK; else del_ba_param_set &= ~IEEE80211_DELBA_PARAM_INITIATOR_MASK; + delba.del_ba_param_set = cpu_to_le16(del_ba_param_set); memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN); /* We don't wait for the response of this command */ diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 0961f4a5e415c9..6f23ec34e2e2f6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -519,8 +519,14 @@ mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, encrypt_key.is_igtk_def_key = true; eth_broadcast_addr(encrypt_key.mac_addr); - return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL, - HostCmd_ACT_GEN_SET, true, &encrypt_key, true); + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL, + HostCmd_ACT_GEN_SET, true, &encrypt_key, true)) { + mwifiex_dbg(priv->adapter, ERROR, + "Sending KEY_MATERIAL command failed\n"); + return -1; + } + + return 0; } /* @@ -908,16 +914,20 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: - priv->bss_role = MWIFIEX_BSS_ROLE_STA; + priv->bss_role = MWIFIEX_BSS_ROLE_STA; + priv->bss_type = MWIFIEX_BSS_TYPE_STA; break; case NL80211_IFTYPE_P2P_CLIENT: - priv->bss_role = MWIFIEX_BSS_ROLE_STA; + priv->bss_role = MWIFIEX_BSS_ROLE_STA; + priv->bss_type = MWIFIEX_BSS_TYPE_P2P; break; case NL80211_IFTYPE_P2P_GO: - priv->bss_role = MWIFIEX_BSS_ROLE_UAP; + priv->bss_role = MWIFIEX_BSS_ROLE_UAP; + priv->bss_type = MWIFIEX_BSS_TYPE_P2P; break; case NL80211_IFTYPE_AP: priv->bss_role = 
MWIFIEX_BSS_ROLE_UAP; + priv->bss_type = MWIFIEX_BSS_TYPE_UAP; break; default: mwifiex_dbg(adapter, ERROR, @@ -939,6 +949,117 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, return 0; } +static bool +is_vif_type_change_allowed(struct mwifiex_adapter *adapter, + enum nl80211_iftype old_iftype, + enum nl80211_iftype new_iftype) +{ + switch (old_iftype) { + case NL80211_IFTYPE_ADHOC: + switch (new_iftype) { + case NL80211_IFTYPE_STATION: + return true; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + return adapter->curr_iface_comb.p2p_intf != + adapter->iface_limit.p2p_intf; + case NL80211_IFTYPE_AP: + return adapter->curr_iface_comb.uap_intf != + adapter->iface_limit.uap_intf; + default: + return false; + } + + case NL80211_IFTYPE_STATION: + switch (new_iftype) { + case NL80211_IFTYPE_ADHOC: + return true; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + return adapter->curr_iface_comb.p2p_intf != + adapter->iface_limit.p2p_intf; + case NL80211_IFTYPE_AP: + return adapter->curr_iface_comb.uap_intf != + adapter->iface_limit.uap_intf; + default: + return false; + } + + case NL80211_IFTYPE_AP: + switch (new_iftype) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + return adapter->curr_iface_comb.sta_intf != + adapter->iface_limit.sta_intf; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + return adapter->curr_iface_comb.p2p_intf != + adapter->iface_limit.p2p_intf; + default: + return false; + } + + case NL80211_IFTYPE_P2P_CLIENT: + switch (new_iftype) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + return true; + case NL80211_IFTYPE_P2P_GO: + return true; + case NL80211_IFTYPE_AP: + return adapter->curr_iface_comb.uap_intf != + adapter->iface_limit.uap_intf; + default: + return false; + } + + case NL80211_IFTYPE_P2P_GO: + switch (new_iftype) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + return true; + case NL80211_IFTYPE_P2P_CLIENT: + return true; + case NL80211_IFTYPE_AP: + return adapter->curr_iface_comb.uap_intf != + adapter->iface_limit.uap_intf; + default: + return false; + } + + default: + break; + } + + return false; +} + +static void +update_vif_type_counter(struct mwifiex_adapter *adapter, + enum nl80211_iftype iftype, + int change) +{ + switch (iftype) { + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + adapter->curr_iface_comb.sta_intf += change; + break; + case NL80211_IFTYPE_AP: + adapter->curr_iface_comb.uap_intf += change; + break; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + adapter->curr_iface_comb.p2p_intf += change; + break; + default: + mwifiex_dbg(adapter, ERROR, + "%s: Unsupported iftype passed: %d\n", + __func__, iftype); + break; + } +} + static int mwifiex_change_vif_to_p2p(struct net_device *dev, enum nl80211_iftype curr_iftype, @@ -955,13 +1076,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, adapter = priv->adapter; - if (adapter->curr_iface_comb.p2p_intf == - adapter->iface_limit.p2p_intf) { - mwifiex_dbg(adapter, ERROR, - "cannot create multiple P2P ifaces\n"); - return -1; - } - mwifiex_dbg(adapter, INFO, "%s: changing role to p2p\n", dev->name); @@ -970,6 +1084,10 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, if (mwifiex_init_new_priv_params(priv, dev, type)) return -1; + update_vif_type_counter(adapter, curr_iftype, -1); + update_vif_type_counter(adapter, type, +1); + dev->ieee80211_ptr->iftype = type; + switch (type) { case NL80211_IFTYPE_P2P_CLIENT: if 
(mwifiex_cfg80211_init_p2p_client(priv)) @@ -993,21 +1111,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, if (mwifiex_sta_init_cmd(priv, false, false)) return -1; - switch (curr_iftype) { - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_ADHOC: - adapter->curr_iface_comb.sta_intf--; - break; - case NL80211_IFTYPE_AP: - adapter->curr_iface_comb.uap_intf--; - break; - default: - break; - } - - adapter->curr_iface_comb.p2p_intf++; - dev->ieee80211_ptr->iftype = type; - return 0; } @@ -1027,15 +1130,6 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, adapter = priv->adapter; - if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT && - curr_iftype != NL80211_IFTYPE_P2P_GO) && - (adapter->curr_iface_comb.sta_intf == - adapter->iface_limit.sta_intf)) { - mwifiex_dbg(adapter, ERROR, - "cannot create multiple station/adhoc ifaces\n"); - return -1; - } - if (type == NL80211_IFTYPE_STATION) mwifiex_dbg(adapter, INFO, "%s: changing role to station\n", dev->name); @@ -1047,26 +1141,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, return -1; if (mwifiex_init_new_priv_params(priv, dev, type)) return -1; + + update_vif_type_counter(adapter, curr_iftype, -1); + update_vif_type_counter(adapter, type, +1); + dev->ieee80211_ptr->iftype = type; + if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL, true)) return -1; if (mwifiex_sta_init_cmd(priv, false, false)) return -1; - switch (curr_iftype) { - case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: - adapter->curr_iface_comb.p2p_intf--; - break; - case NL80211_IFTYPE_AP: - adapter->curr_iface_comb.uap_intf--; - break; - default: - break; - } - - adapter->curr_iface_comb.sta_intf++; - dev->ieee80211_ptr->iftype = type; return 0; } @@ -1086,13 +1171,6 @@ mwifiex_change_vif_to_ap(struct net_device *dev, adapter = priv->adapter; - if (adapter->curr_iface_comb.uap_intf == - adapter->iface_limit.uap_intf) { - mwifiex_dbg(adapter, ERROR, - "cannot create multiple AP ifaces\n"); - return -1; - } - mwifiex_dbg(adapter, INFO, "%s: changing role to AP\n", dev->name); @@ -1100,27 +1178,17 @@ mwifiex_change_vif_to_ap(struct net_device *dev, return -1; if (mwifiex_init_new_priv_params(priv, dev, type)) return -1; + + update_vif_type_counter(adapter, curr_iftype, -1); + update_vif_type_counter(adapter, type, +1); + dev->ieee80211_ptr->iftype = type; + if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL, true)) return -1; if (mwifiex_sta_init_cmd(priv, false, false)) return -1; - switch (curr_iftype) { - case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: - adapter->curr_iface_comb.p2p_intf--; - break; - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_ADHOC: - adapter->curr_iface_comb.sta_intf--; - break; - default: - break; - } - - adapter->curr_iface_comb.uap_intf++; - dev->ieee80211_ptr->iftype = type; return 0; } /* @@ -1141,6 +1209,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return -EBUSY; } + if (type == NL80211_IFTYPE_UNSPECIFIED) { + mwifiex_dbg(priv->adapter, INFO, + "%s: no new type specified, keeping old type %d\n", + dev->name, curr_iftype); + return 0; + } + + if (curr_iftype == type) { + mwifiex_dbg(priv->adapter, INFO, + "%s: interface already is of type %d\n", + dev->name, curr_iftype); + return 0; + } + + if (!is_vif_type_change_allowed(priv->adapter, curr_iftype, type)) { + mwifiex_dbg(priv->adapter, ERROR, + "%s: change from type %d to %d is not allowed\n", + dev->name, curr_iftype, type); + return -EOPNOTSUPP; + } + switch 
(curr_iftype) { case NL80211_IFTYPE_ADHOC: switch (type) { @@ -1160,19 +1249,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_AP: return mwifiex_change_vif_to_ap(dev, curr_iftype, type, params); - case NL80211_IFTYPE_UNSPECIFIED: - mwifiex_dbg(priv->adapter, INFO, - "%s: kept type as IBSS\n", dev->name); - fallthrough; - case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ - return 0; default: - mwifiex_dbg(priv->adapter, ERROR, - "%s: changing to %d not supported\n", - dev->name, type); - return -EOPNOTSUPP; + goto errnotsupp; } - break; + case NL80211_IFTYPE_STATION: switch (type) { case NL80211_IFTYPE_ADHOC: @@ -1191,22 +1271,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_AP: return mwifiex_change_vif_to_ap(dev, curr_iftype, type, params); - case NL80211_IFTYPE_UNSPECIFIED: - mwifiex_dbg(priv->adapter, INFO, - "%s: kept type as STA\n", dev->name); - fallthrough; - case NL80211_IFTYPE_STATION: /* This shouldn't happen */ - return 0; default: - mwifiex_dbg(priv->adapter, ERROR, - "%s: changing to %d not supported\n", - dev->name, type); - return -EOPNOTSUPP; + goto errnotsupp; } - break; + case NL80211_IFTYPE_AP: switch (type) { case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, type, params); break; @@ -1214,69 +1286,60 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_GO: return mwifiex_change_vif_to_p2p(dev, curr_iftype, type, params); - case NL80211_IFTYPE_UNSPECIFIED: - mwifiex_dbg(priv->adapter, INFO, - "%s: kept type as AP\n", dev->name); - fallthrough; - case NL80211_IFTYPE_AP: /* This shouldn't happen */ - return 0; default: - mwifiex_dbg(priv->adapter, ERROR, - "%s: changing to %d not supported\n", - dev->name, type); - return -EOPNOTSUPP; + goto errnotsupp; } - break; + case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: + if (mwifiex_cfg80211_deinit_p2p(priv)) + return -EFAULT; + switch (type) { - case NL80211_IFTYPE_STATION: - if (mwifiex_cfg80211_deinit_p2p(priv)) - return -EFAULT; - priv->adapter->curr_iface_comb.p2p_intf--; - priv->adapter->curr_iface_comb.sta_intf++; - dev->ieee80211_ptr->iftype = type; - if (mwifiex_deinit_priv_params(priv)) - return -1; - if (mwifiex_init_new_priv_params(priv, dev, type)) - return -1; - if (mwifiex_sta_init_cmd(priv, false, false)) - return -1; - break; case NL80211_IFTYPE_ADHOC: - if (mwifiex_cfg80211_deinit_p2p(priv)) - return -EFAULT; + case NL80211_IFTYPE_STATION: return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, type, params); - break; + case NL80211_IFTYPE_P2P_GO: + return mwifiex_change_vif_to_p2p(dev, curr_iftype, + type, params); case NL80211_IFTYPE_AP: - if (mwifiex_cfg80211_deinit_p2p(priv)) - return -EFAULT; return mwifiex_change_vif_to_ap(dev, curr_iftype, type, params); - case NL80211_IFTYPE_UNSPECIFIED: - mwifiex_dbg(priv->adapter, INFO, - "%s: kept type as P2P\n", dev->name); - fallthrough; + default: + goto errnotsupp; + } + + case NL80211_IFTYPE_P2P_GO: + if (mwifiex_cfg80211_deinit_p2p(priv)) + return -EFAULT; + + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, + type, params); case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: - return 0; + return mwifiex_change_vif_to_p2p(dev, curr_iftype, + type, params); + case NL80211_IFTYPE_AP: + return mwifiex_change_vif_to_ap(dev, curr_iftype, type, + params); default: - 
mwifiex_dbg(priv->adapter, ERROR, - "%s: changing to %d not supported\n", - dev->name, type); - return -EOPNOTSUPP; + goto errnotsupp; } - break; + default: - mwifiex_dbg(priv->adapter, ERROR, - "%s: unknown iftype: %d\n", - dev->name, dev->ieee80211_ptr->iftype); - return -EOPNOTSUPP; + goto errnotsupp; } return 0; + +errnotsupp: + mwifiex_dbg(priv->adapter, ERROR, + "unsupported interface type transition: %d to %d\n", + curr_iftype, type); + return -EOPNOTSUPP; } static void @@ -2997,7 +3060,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->bss_type = MWIFIEX_BSS_TYPE_P2P; priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; - priv->bss_priority = MWIFIEX_BSS_ROLE_STA; + priv->bss_priority = 0; priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_started = 0; @@ -3108,23 +3171,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, mwifiex_dev_debugfs_init(priv); #endif - switch (type) { - case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_ADHOC: - adapter->curr_iface_comb.sta_intf++; - break; - case NL80211_IFTYPE_AP: - adapter->curr_iface_comb.uap_intf++; - break; - case NL80211_IFTYPE_P2P_CLIENT: - adapter->curr_iface_comb.p2p_intf++; - break; - default: - /* This should be dead code; checked above */ - mwifiex_dbg(adapter, ERROR, "type not supported\n"); - return ERR_PTR(-EINVAL); - } + update_vif_type_counter(adapter, type, +1); return &priv->wdev; @@ -3177,37 +3224,18 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) cfg80211_unregister_netdevice(wdev->netdev); if (priv->dfs_cac_workqueue) { - flush_workqueue(priv->dfs_cac_workqueue); destroy_workqueue(priv->dfs_cac_workqueue); priv->dfs_cac_workqueue = NULL; } if (priv->dfs_chan_sw_workqueue) { - flush_workqueue(priv->dfs_chan_sw_workqueue); destroy_workqueue(priv->dfs_chan_sw_workqueue); priv->dfs_chan_sw_workqueue = NULL; } /* Clear the priv in adapter */ priv->netdev = NULL; - switch (priv->bss_mode) { - case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_ADHOC: - adapter->curr_iface_comb.sta_intf--; - break; - case NL80211_IFTYPE_AP: - adapter->curr_iface_comb.uap_intf--; - break; - case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: - adapter->curr_iface_comb.p2p_intf--; - break; - default: - mwifiex_dbg(adapter, ERROR, - "del_virtual_intf: type not supported\n"); - break; - } + update_vif_type_counter(adapter, priv->bss_mode, -1); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; @@ -3470,7 +3498,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, } if (!wowlan) { - mwifiex_dbg(adapter, ERROR, + mwifiex_dbg(adapter, INFO, "None of the WOWLAN triggers enabled\n"); ret = 0; goto done; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 171a2574260051..d6a61f850c6fe9 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -608,6 +608,11 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, return -1; } + if (priv->adapter->hs_activated_manually && + cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { + mwifiex_cancel_hs(priv, MWIFIEX_ASYNC_CMD); + priv->adapter->hs_activated_manually = false; + } /* Get a new command node */ cmd_node = mwifiex_get_cmd_node(adapter); @@ -714,6 +719,15 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, } } + /* Same with exit host sleep cmd, luckily that can't happen at the same time as EXIT_PS */ + if (command 
== HostCmd_CMD_802_11_HS_CFG_ENH) { + struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = + &host_cmd->params.opt_hs_cfg; + + if (le16_to_cpu(hs_cfg->action) == HS_ACTIVATE) + add_tail = false; + } + spin_lock_bh(&adapter->cmd_pending_q_lock); if (add_tail) list_add_tail(&cmd_node->list, &adapter->cmd_pending_q); @@ -1216,6 +1230,13 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) __func__); adapter->if_ops.wakeup(adapter); + + if (adapter->hs_activated_manually) { + mwifiex_cancel_hs(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_ASYNC_CMD); + adapter->hs_activated_manually = false; + } + adapter->hs_activated = false; clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 17399d4aa12902..19b996c6a2605a 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -401,6 +401,12 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) !adapter->scan_processing) && !adapter->data_sent && !skb_queue_empty(&adapter->tx_data_q)) { + if (adapter->hs_activated_manually) { + mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_ASYNC_CMD); + adapter->hs_activated_manually = false; + } + mwifiex_process_tx_queue(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, @@ -418,6 +424,12 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) !mwifiex_bypass_txlist_empty(adapter) && !mwifiex_is_tdls_chan_switching (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { + if (adapter->hs_activated_manually) { + mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_ASYNC_CMD); + adapter->hs_activated_manually = false; + } + mwifiex_process_bypass_tx(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, @@ -434,6 +446,12 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) && !mwifiex_is_tdls_chan_switching (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { + if (adapter->hs_activated_manually) { + mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_ASYNC_CMD); + adapter->hs_activated_manually = false; + } + mwifiex_wmm_process_tx(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, @@ -498,13 +516,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter) static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) { if (adapter->workqueue) { - flush_workqueue(adapter->workqueue); destroy_workqueue(adapter->workqueue); adapter->workqueue = NULL; } if (adapter->rx_workqueue) { - flush_workqueue(adapter->rx_workqueue); destroy_workqueue(adapter->rx_workqueue); adapter->rx_workqueue = NULL; } @@ -987,7 +1003,7 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, return ret; } - ether_addr_copy(dev->dev_addr, priv->curr_addr); + eth_hw_addr_set(dev, priv->curr_addr); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5923c5c14c8df0..90012cbcfd1597 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -986,6 +986,7 @@ struct mwifiex_adapter { struct timer_list wakeup_timer; struct mwifiex_hs_config_param hs_cfg; u8 hs_activated; + u8 hs_activated_manually; u16 hs_activate_wait_q_woken; wait_queue_head_t 
hs_activate_wait_q; u8 event_body[MAX_EVENT_SIZE]; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index c6ccce426b496d..c3f5583ea70df1 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -17,6 +17,7 @@ * this warranty disclaimer. */ +#include <linux/iopoll.h> #include #include "decl.h" @@ -647,11 +648,15 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, "max count reached while accessing sleep cookie\n"); } +#define N_WAKEUP_TRIES_SHORT_INTERVAL 15 +#define N_WAKEUP_TRIES_LONG_INTERVAL 35 + /* This function wakes up the card by reading fw_status register. */ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int retval; mwifiex_dbg(adapter, EVENT, "event: Wakeup device...\n"); @@ -659,11 +664,24 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); - /* Accessing fw_status register will wakeup device */ - if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { - mwifiex_dbg(adapter, ERROR, - "Writing fw_status register failed\n"); - return -1; + /* The 88W8897 PCIe+USB firmware (latest version 15.68.19.p21) sometimes + * appears to ignore or miss our wakeup request, so we continue trying + * until we receive an interrupt from the card. + */ + if (read_poll_timeout(mwifiex_write_reg, retval, + READ_ONCE(adapter->int_status) != 0, + 500, 500 * N_WAKEUP_TRIES_SHORT_INTERVAL, + false, + adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { + if (read_poll_timeout(mwifiex_write_reg, retval, + READ_ONCE(adapter->int_status) != 0, + 10000, 10000 * N_WAKEUP_TRIES_LONG_INTERVAL, + false, + adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { + mwifiex_dbg(adapter, ERROR, + "Firmware didn't wake up\n"); + return -EIO; + } } if (reg->sleep_cookie) { @@ -1490,6 +1508,14 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, ret = -1; goto done_unmap; } + + /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card + * seems to crash randomly after setting the TX ring write pointer when + * ASPM powersaving is enabled. A workaround seems to be keeping the bus + * busy by reading a random register afterwards. 
+ */ + mwifiex_read_reg(adapter, PCI_VENDOR_ID, &rx_val); + if ((mwifiex_pcie_txbd_not_full(card)) && tx_param->next_pkt_len) { /* have more packets and TxBD still can hold more */ diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 48ea00da1fc9e1..1e2798dce18f6c 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -396,6 +396,10 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, if (hs_activate) { hs_cfg->action = cpu_to_le16(HS_ACTIVATE); hs_cfg->params.hs_activate.resp_ctrl = cpu_to_le16(RESP_NEEDED); + + adapter->hs_activated_manually = true; + mwifiex_dbg(priv->adapter, CMD, + "cmd: Activating host sleep manually\n"); } else { hs_cfg->action = cpu_to_le16(HS_CONFIGURE); hs_cfg->params.hs_config.conditions = hscfg_param->conditions; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index 9121447e270184..2e25d72dcac563 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -197,8 +197,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_dbg(adapter, EVENT, "AP EVENT: event id: %#x\n", eventcause); priv->port_open = false; - memcpy(priv->netdev->dev_addr, adapter->event_body + 2, - ETH_ALEN); + eth_hw_addr_set(priv->netdev, adapter->event_body + 2); if (priv->hist_data) mwifiex_hist_data_reset(priv); mwifiex_check_uap_capabilities(priv, adapter->event_skb); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 426e39d4ccf0f0..9736aa0ab7fd43 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -505,6 +505,22 @@ static int mwifiex_usb_probe(struct usb_interface *intf, } } + switch (card->usb_boot_state) { + case USB8XXX_FW_DNLD: + /* Reject broken descriptors. */ + if (!card->rx_cmd_ep || !card->tx_cmd_ep) + return -ENODEV; + if (card->bulk_out_maxpktsize == 0) + return -ENODEV; + break; + case USB8XXX_FW_READY: + /* Assume the driver can handle missing endpoints for now. 
*/ + break; + default: + WARN_ON(1); + return -ENODEV; + } + usb_set_intfdata(intf, card); ret = mwifiex_add_card(card, &card->fw_done, &usb_ops, diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 3bf6571f41490c..529e325498cdb0 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -5800,8 +5800,8 @@ static void mwl8k_fw_state_machine(const struct firmware *fw, void *context) fail: priv->fw_state = FW_STATE_ERROR; complete(&priv->firmware_loading_complete); - device_release_driver(&priv->pdev->dev); mwl8k_release_firmware(priv); + device_release_driver(&priv->pdev->dev); } #define MAX_RESTART_ATTEMPTS 1 diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 94efe3c2905313..79ab850a45a28e 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -14,7 +14,7 @@ mt76-$(CONFIG_PCI) += pci.o mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o mt76-usb-y := usb.o usb_trace.o -mt76-sdio-y := sdio.o +mt76-sdio-y := sdio.o sdio_txrx.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index fa48cc3a7a8f72..b8bcf22a07fd88 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -56,14 +56,14 @@ int mt76_queues_read(struct seq_file *s, void *data) struct mt76_dev *dev = dev_get_drvdata(s->private); int i; + seq_puts(s, " queue | hw-queued | head | tail |\n"); for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) { struct mt76_queue *q = dev->phy.q_tx[i]; if (!q) continue; - seq_printf(s, - "%d: queued=%d head=%d tail=%d\n", + seq_printf(s, " %9d | %9d | %9d | %9d |\n", i, q->queued, q->head, q->tail); } @@ -76,12 +76,13 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data) struct mt76_dev *dev = dev_get_drvdata(s->private); int i, queued; + seq_puts(s, " queue | hw-queued | head | tail |\n"); mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued; - seq_printf(s, "%d: queued=%d head=%d tail=%d\n", - i, queued, q->head, q->tail); + seq_printf(s, " %9d | %9d | %9d | %9d |\n", + i, q->queued, q->head, q->tail); } return 0; @@ -116,18 +117,21 @@ static int mt76_read_rate_txpower(struct seq_file *s, void *data) return 0; } -struct dentry *mt76_register_debugfs(struct mt76_dev *dev) +struct dentry * +mt76_register_debugfs_fops(struct mt76_phy *phy, + const struct file_operations *ops) { + const struct file_operations *fops = ops ? 
ops : &fops_regval; + struct mt76_dev *dev = phy->dev; struct dentry *dir; - dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir); + dir = debugfs_create_dir("mt76", phy->hw->wiphy->debugfsdir); if (!dir) return NULL; debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin); debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); - debugfs_create_file_unsafe("regval", 0600, dir, dev, - &fops_regval); + debugfs_create_file_unsafe("regval", 0600, dir, dev, fops); debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev, &fops_napi_threaded); debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom); @@ -140,4 +144,4 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev) return dir; } -EXPORT_SYMBOL_GPL(mt76_register_debugfs); +EXPORT_SYMBOL_GPL(mt76_register_debugfs_fops); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 3b47e85e95e7c9..2d58aa31db934d 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -15,6 +15,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) struct device_node *np = dev->dev->of_node; struct mtd_info *mtd; const __be32 *list; + const void *data; const char *part; phandle phandle; int size; @@ -24,6 +25,16 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) if (!np) return -ENOENT; + data = of_get_property(np, "mediatek,eeprom-data", &size); + if (data) { + if (size > len) + return -EINVAL; + + memcpy(eep, data, size); + + return 0; + } + list = of_get_property(np, "mediatek,mtd-eeprom", &size); if (!list) return -ENOENT; @@ -54,8 +65,11 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) offset = be32_to_cpup(list); ret = mtd_read(mtd, offset, len, &retlen, eep); put_mtd_device(mtd); - if (ret) + if (ret) { + dev_err(dev->dev, "reading EEPROM from mtd %s failed: %i\n", + part, ret); goto out_put_node; + } if (retlen < len) { ret = -EINVAL; @@ -285,6 +299,9 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy, case NL80211_BAND_5GHZ: band = '5'; break; + case NL80211_BAND_6GHZ: + band = '6'; + break; default: return target_power; } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d03aedc3286bb6..62807dc311c19d 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -20,6 +20,13 @@ .max_power = 30, \ } +#define CHAN6G(_idx, _freq) { \ + .band = NL80211_BAND_6GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + static const struct ieee80211_channel mt76_channels_2ghz[] = { CHAN2G(1, 2412), CHAN2G(2, 2417), @@ -70,6 +77,72 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = { CHAN5G(173, 5865), }; +static const struct ieee80211_channel mt76_channels_6ghz[] = { + /* UNII-5 */ + CHAN6G(1, 5955), + CHAN6G(5, 5975), + CHAN6G(9, 5995), + CHAN6G(13, 6015), + CHAN6G(17, 6035), + CHAN6G(21, 6055), + CHAN6G(25, 6075), + CHAN6G(29, 6095), + CHAN6G(33, 6115), + CHAN6G(37, 6135), + CHAN6G(41, 6155), + CHAN6G(45, 6175), + CHAN6G(49, 6195), + CHAN6G(53, 6215), + CHAN6G(57, 6235), + CHAN6G(61, 6255), + CHAN6G(65, 6275), + CHAN6G(69, 6295), + CHAN6G(73, 6315), + CHAN6G(77, 6335), + CHAN6G(81, 6355), + CHAN6G(85, 6375), + CHAN6G(89, 6395), + CHAN6G(93, 6415), + /* UNII-6 */ + CHAN6G(97, 6435), + CHAN6G(101, 6455), + CHAN6G(105, 6475), + CHAN6G(109, 6495), + CHAN6G(113, 6515), + CHAN6G(117, 6535), + /* UNII-7 */ 
+ CHAN6G(121, 6555), + CHAN6G(125, 6575), + CHAN6G(129, 6595), + CHAN6G(133, 6615), + CHAN6G(137, 6635), + CHAN6G(141, 6655), + CHAN6G(145, 6675), + CHAN6G(149, 6695), + CHAN6G(153, 6715), + CHAN6G(157, 6735), + CHAN6G(161, 6755), + CHAN6G(165, 6775), + CHAN6G(169, 6795), + CHAN6G(173, 6815), + CHAN6G(177, 6835), + CHAN6G(181, 6855), + CHAN6G(185, 6875), + /* UNII-8 */ + CHAN6G(189, 6895), + CHAN6G(193, 6915), + CHAN6G(197, 6935), + CHAN6G(201, 6955), + CHAN6G(205, 6975), + CHAN6G(209, 6995), + CHAN6G(213, 7015), + CHAN6G(217, 7035), + CHAN6G(221, 7055), + CHAN6G(225, 7075), + CHAN6G(229, 7095), + CHAN6G(233, 7115), +}; + static const struct ieee80211_tpt_blink mt76_tpt_blink[] = { { .throughput = 0 * 1024, .blink_time = 334 }, { .throughput = 1 * 1024, .blink_time = 260 }, @@ -99,6 +172,21 @@ struct ieee80211_rate mt76_rates[] = { }; EXPORT_SYMBOL_GPL(mt76_rates); +static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = { + { .start_freq = 2402, .end_freq = 2494, }, + { .start_freq = 5150, .end_freq = 5350, }, + { .start_freq = 5350, .end_freq = 5470, }, + { .start_freq = 5470, .end_freq = 5725, }, + { .start_freq = 5725, .end_freq = 5950, }, +}; + +const struct cfg80211_sar_capa mt76_sar_capa = { + .type = NL80211_SAR_TYPE_POWER, + .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges), + .freq_ranges = &mt76_sar_freq_ranges[0], +}; +EXPORT_SYMBOL_GPL(mt76_sar_capa); + static int mt76_led_init(struct mt76_dev *dev) { struct device_node *np = dev->dev->of_node; @@ -179,13 +267,16 @@ void mt76_set_stream_caps(struct mt76_phy *phy, bool vht) mt76_init_stream_cap(phy, &phy->sband_2g.sband, false); if (phy->cap.has_5ghz) mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht); + if (phy->cap.has_6ghz) + mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht); } EXPORT_SYMBOL_GPL(mt76_set_stream_caps); static int mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband, const struct ieee80211_channel *chan, int n_chan, - struct ieee80211_rate *rates, int n_rates, bool vht) + struct ieee80211_rate *rates, int n_rates, + bool ht, bool vht) { struct ieee80211_supported_band *sband = &msband->sband; struct ieee80211_sta_vht_cap *vht_cap; @@ -209,6 +300,9 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband, sband->bitrates = rates; sband->n_bitrates = n_rates; + if (!ht) + return 0; + ht_cap = &sband->ht_cap; ht_cap->ht_supported = true; ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | @@ -245,7 +339,7 @@ mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates, return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz), rates, - n_rates, false); + n_rates, true, false); } static int @@ -256,7 +350,18 @@ mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates, return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz), rates, - n_rates, vht); + n_rates, true, vht); +} + +static int +mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates, + int n_rates) +{ + phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband; + + return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz, + ARRAY_SIZE(mt76_channels_6ghz), rates, + n_rates, false, false); } static void @@ -322,12 +427,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - - if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { - 
ieee80211_hw_set(hw, TX_AMSDU); - ieee80211_hw_set(hw, TX_FRAG_LIST); - } - + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, TX_FRAG_LIST); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); @@ -385,9 +486,16 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht, return ret; } + if (phy->cap.has_6ghz) { + ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4); + if (ret) + return ret; + } + wiphy_read_of_freq_limits(phy->hw->wiphy); mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ); mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ); + mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ); ret = ieee80211_register_hw(phy->hw); if (ret) @@ -403,7 +511,7 @@ void mt76_unregister_phy(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; - mt76_tx_status_check(dev, NULL, true); + mt76_tx_status_check(dev, true); ieee80211_unregister_hw(phy->hw); dev->phy2 = NULL; } @@ -435,9 +543,9 @@ mt76_alloc_device(struct device *pdev, unsigned int size, spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->cc_lock); + spin_lock_init(&dev->status_lock); mutex_init(&dev->mutex); init_waitqueue_head(&dev->tx_wait); - skb_queue_head_init(&dev->status_list); skb_queue_head_init(&dev->mcu.res_q); init_waitqueue_head(&dev->mcu.wait); @@ -458,6 +566,8 @@ mt76_alloc_device(struct device *pdev, unsigned int size, spin_lock_init(&dev->token_lock); idr_init(&dev->token); + INIT_LIST_HEAD(&dev->wcid_list); + INIT_LIST_HEAD(&dev->txwi_cache); for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) @@ -495,9 +605,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, return ret; } + if (phy->cap.has_6ghz) { + ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4); + if (ret) + return ret; + } + wiphy_read_of_freq_limits(hw->wiphy); mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ); mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ); + mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ); if (IS_ENABLED(CONFIG_MT76_LEDS)) { ret = mt76_led_init(dev); @@ -522,7 +639,7 @@ void mt76_unregister_device(struct mt76_dev *dev) if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(dev); - mt76_tx_status_check(dev, NULL, true); + mt76_tx_status_check(dev, true); ieee80211_unregister_hw(hw); } EXPORT_SYMBOL_GPL(mt76_unregister_device); @@ -642,6 +759,8 @@ mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c) if (c->band == NL80211_BAND_2GHZ) msband = &phy->sband_2g; + else if (c->band == NL80211_BAND_6GHZ) + msband = &phy->sband_6g; else msband = &phy->sband_5g; @@ -717,10 +836,16 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx, if (idx == 0 && dev->drv->update_survey) mt76_update_survey(phy); - sband = &phy->sband_2g; - if (idx >= sband->sband.n_channels) { - idx -= sband->sband.n_channels; + if (idx >= phy->sband_2g.sband.n_channels + + phy->sband_5g.sband.n_channels) { + idx -= (phy->sband_2g.sband.n_channels + + phy->sband_5g.sband.n_channels); + sband = &phy->sband_6g; + } else if (idx >= phy->sband_2g.sband.n_channels) { + idx -= phy->sband_2g.sband.n_channels; sband = &phy->sband_5g; + } else { + sband = &phy->sband_2g; } if (idx >= sband->sband.n_channels) { @@ -777,10 +902,17 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, return; wcid->rx_check_pn = true; + + /* data frame */ for (i = 0; i < IEEE80211_NUM_TIDS; i++) { ieee80211_get_key_rx_seq(key, i, &seq); memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); } + + /* robust 
management frame */ + ieee80211_get_key_rx_seq(key, -1, &seq); + memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); + } EXPORT_SYMBOL(mt76_wcid_key_setup); @@ -790,6 +922,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_sta **sta) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_rx_status mstat; mstat = *((struct mt76_rx_status *)skb->cb); @@ -812,6 +945,10 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, status->device_timestamp = mstat.timestamp; status->mactime = mstat.timestamp; + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + status->boottime_ns = ktime_get_boottime_ns(); + BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal)); @@ -828,7 +965,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; struct ieee80211_hdr *hdr; - u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; + int security_idx; int ret; if (!(status->flag & RX_FLAG_DECRYPTED)) @@ -837,24 +974,39 @@ mt76_check_ccmp_pn(struct sk_buff *skb) if (!wcid || !wcid->rx_check_pn) return 0; + security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; + if (status->flag & RX_FLAG_8023) + goto skip_hdr_check; + + hdr = mt76_skb_get_hdr(skb); if (!(status->flag & RX_FLAG_IV_STRIPPED)) { /* * Validate the first fragment both here and in mac80211 * All further fragments will be validated by mac80211 only. */ - hdr = mt76_skb_get_hdr(skb); if (ieee80211_is_frag(hdr) && !ieee80211_is_first_frag(hdr->frame_control)) return 0; } + /* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c): + * + * the recipient shall maintain a single replay counter for received + * individually addressed robust Management frames that are received + * with the To DS subfield equal to 0, [...] 
+ */ + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_has_tods(hdr->frame_control)) + security_idx = IEEE80211_NUM_TIDS; + +skip_hdr_check: BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0])); - ret = memcmp(status->iv, wcid->rx_key_pn[tidno], + ret = memcmp(status->iv, wcid->rx_key_pn[security_idx], sizeof(status->iv)); if (ret <= 0) return -EINVAL; /* replay */ - memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv)); + memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv)); if (status->flag & RX_FLAG_IV_STRIPPED) status->flag |= RX_FLAG_PN_VALIDATED; @@ -1109,6 +1261,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif, wcid->ext_phy = ext_phy; rcu_assign_pointer(dev->wcid[wcid->idx], wcid); + mt76_packet_id_init(wcid); out: mutex_unlock(&dev->mutex); @@ -1127,7 +1280,8 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); - mt76_tx_status_check(dev, wcid, true); + mt76_packet_id_flush(dev, wcid); + mt76_wcid_mask_clear(dev->wcid_mask, idx); mt76_wcid_mask_clear(dev->wcid_phy_mask, idx); } @@ -1270,7 +1424,7 @@ int mt76_get_rate(struct mt76_dev *dev, int i, offset = 0, len = sband->n_bitrates; if (cck) { - if (sband == &dev->phy.sband_5g.sband) + if (sband != &dev->phy.sband_2g.sband) return 0; idx &= ~BIT(2); /* short preamble */ @@ -1336,3 +1490,49 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, return hwq; } EXPORT_SYMBOL_GPL(mt76_init_queue); + +u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx) +{ + int offset = 0; + struct ieee80211_rate *rate; + + if (phy->chandef.chan->band != NL80211_BAND_2GHZ) + offset = 4; + + /* pick the lowest rate for hidden nodes */ + if (rateidx < 0) + rateidx = 0; + + rate = &mt76_rates[offset + rateidx]; + + return rate->hw_value; +} +EXPORT_SYMBOL_GPL(mt76_calculate_default_rate); + +void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi, + struct mt76_sta_stats *stats) +{ + int i, ei = wi->initial_stat_idx; + u64 *data = wi->data; + + wi->sta_count++; + + data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB]; + data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU]; + + for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++) + data[ei++] += stats->tx_bw[i]; + + for (i = 0; i < 12; i++) + data[ei++] += stats->tx_mcs[i]; + + wi->worker_stat_count = ei - wi->initial_stat_idx; +} +EXPORT_SYMBOL_GPL(mt76_ethtool_worker); diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index d3a5e2c4f12a3d..3f94c37251df15 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -106,13 +106,13 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(mt76_mcu_skb_send_and_get_msg); -int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, - int len) +int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, + int len, int max_len) { int err, cur_len; while (len > 0) { - cur_len = min_t(int, 4096 - dev->mcu_ops->headroom, len); + cur_len = min_t(int, max_len, len); err = 
mt76_mcu_send_msg(dev, cmd, data, cur_len, false); if (err) @@ -129,4 +129,4 @@ int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, return 0; } -EXPORT_SYMBOL_GPL(mt76_mcu_send_firmware); +EXPORT_SYMBOL_GPL(__mt76_mcu_send_firmware); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 25c5ceef525773..e2da720a91b619 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -29,6 +29,7 @@ struct mt76_dev; struct mt76_phy; struct mt76_wcid; +struct mt76s_intr; struct mt76_reg_pair { u32 reg; @@ -244,6 +245,8 @@ struct mt76_wcid { struct ewma_signal rssi; int inactive_count; + struct rate_info rate; + u16 idx; u8 hw_key_idx; u8 hw_key_idx2; @@ -253,13 +256,14 @@ struct mt76_wcid { u8 amsdu:1; u8 rx_check_pn; - u8 rx_key_pn[IEEE80211_NUM_TIDS][6]; + u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6]; u16 cipher; u32 tx_info; bool sw_iv; - u8 packet_id; + struct list_head list; + struct idr pktid; }; struct mt76_txq { @@ -305,8 +309,13 @@ struct mt76_rx_tid { #define MT_PACKET_ID_NO_SKB 1 #define MT_PACKET_ID_FIRST 2 #define MT_PACKET_ID_HAS_RATE BIT(7) - -#define MT_TX_STATUS_SKB_TIMEOUT HZ +/* This is timer for when to give up when waiting for TXS callback, + * with starting time being the time at which the DMA_DONE callback + * was seen (so, we know packet was processed then, it should not take + * long after that for firmware to send the TXS callback if it is going + * to do so.) + */ +#define MT_TX_STATUS_SKB_TIMEOUT (HZ / 4) struct mt76_tx_cb { unsigned long jiffies; @@ -344,7 +353,6 @@ struct mt76_hw_cap { #define MT_DRV_SW_RX_AIRTIME BIT(2) #define MT_DRV_RX_DMA_HDR BIT(3) #define MT_DRV_HW_MGMT_TXQ BIT(4) -#define MT_DRV_AMSDU_OFFLOAD BIT(5) struct mt76_driver_ops { u32 drv_flags; @@ -498,13 +506,18 @@ struct mt76_sdio { struct sdio_func *func; void *intr_data; + u8 hw_ver; + wait_queue_head_t wait; struct { int pse_data_quota; int ple_data_quota; int pse_mcu_quota; + int pse_page_size; int deficit; } sched; + + int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr); }; struct mt76_mmio { @@ -545,6 +558,11 @@ struct mt76_rx_status { s8 chain_signal[IEEE80211_MAX_CHAINS]; }; +struct mt76_freq_range_power { + const struct cfg80211_sar_freq_ranges *range; + s8 power; +}; + struct mt76_testmode_ops { int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state); int (*set_params)(struct mt76_phy *phy, struct nlattr **tb, @@ -617,6 +635,7 @@ struct mt76_phy { struct mt76_hw_cap cap; struct mt76_sband sband_2g; struct mt76_sband sband_5g; + struct mt76_sband sband_6g; u8 macaddr[ETH_ALEN]; @@ -636,6 +655,8 @@ struct mt76_phy { struct sk_buff **tail; u16 seqno; } rx_amsdu[__MT_RXQ_MAX]; + + struct mt76_freq_range_power *frp; }; struct mt76_dev { @@ -683,7 +704,8 @@ struct mt76_dev { int token_count; wait_queue_head_t tx_wait; - struct sk_buff_head status_list; + /* spinclock used to protect wcid pktid linked list */ + spinlock_t status_lock; u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; @@ -692,6 +714,7 @@ struct mt76_dev { struct mt76_wcid global_wcid; struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; + struct list_head wcid_list; u32 rev; @@ -753,6 +776,22 @@ enum mt76_phy_type { MT_PHY_TYPE_HE_EXT_SU, MT_PHY_TYPE_HE_TB, MT_PHY_TYPE_HE_MU, + __MT_PHY_TYPE_HE_MAX, +}; + +struct mt76_sta_stats { + u64 tx_mode[__MT_PHY_TYPE_HE_MAX]; + u64 tx_bw[4]; /* 20, 40, 80, 160 */ + u64 tx_nss[4]; /* 1, 2, 3, 4 */ + u64 
tx_mcs[16]; /* mcs idx */ +}; + +struct mt76_ethtool_worker_info { + u64 *data; + int idx; + int initial_stat_idx; + int worker_stat_count; + int sta_count; }; #define CCK_RATE(_idx, _rate) { \ @@ -769,6 +808,7 @@ enum mt76_phy_type { } extern struct ieee80211_rate mt76_rates[12]; +extern const struct cfg80211_sar_capa mt76_sar_capa; #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) @@ -869,7 +909,13 @@ struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size, int mt76_register_phy(struct mt76_phy *phy, bool vht, struct ieee80211_rate *rates, int n_rates); -struct dentry *mt76_register_debugfs(struct mt76_dev *dev); +struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy, + const struct file_operations *ops); +static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev) +{ + return mt76_register_debugfs_fops(&dev->phy, NULL); +} + int mt76_queues_read(struct seq_file *s, void *data); void mt76_seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len); @@ -881,6 +927,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, int ring_base); +u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx); static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, int n_desc, int ring_base) { @@ -1077,9 +1124,9 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key); void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) - __acquires(&dev->status_list.lock); + __acquires(&dev->status_lock); void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) - __releases(&dev->status_list.lock); + __releases(&dev->status_lock); int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, struct sk_buff *skb); @@ -1096,8 +1143,7 @@ mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb) __mt76_tx_complete_skb(dev, wcid, skb, NULL); } -void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, - bool flush); +void mt76_tx_status_check(struct mt76_dev *dev, bool flush); int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, @@ -1203,6 +1249,8 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); } +void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi, + struct mt76_sta_stats *stats); int mt76_skb_adjust_pad(struct sk_buff *skb, int pad); int mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, @@ -1220,8 +1268,27 @@ void mt76u_queues_deinit(struct mt76_dev *dev); int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, const struct mt76_bus_ops *bus_ops); -int mt76s_alloc_queues(struct mt76_dev *dev); +int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid); +int mt76s_alloc_tx(struct mt76_dev *dev); void mt76s_deinit(struct mt76_dev *dev); +void mt76s_sdio_irq(struct sdio_func *func); +void mt76s_txrx_worker(struct mt76_sdio *sdio); +bool mt76s_txqs_empty(struct mt76_dev *dev); +int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, + int hw_ver); +u32 mt76s_rr(struct mt76_dev *dev, u32 offset); +void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val); +u32 mt76s_rmw(struct mt76_dev *dev, 
u32 offset, u32 mask, u32 val); +u32 mt76s_read_pcr(struct mt76_dev *dev); +void mt76s_write_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len); +void mt76s_read_copy(struct mt76_dev *dev, u32 offset, + void *data, int len); +int mt76s_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, + int len); +int mt76s_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, int len); struct sk_buff * mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, @@ -1233,8 +1300,17 @@ int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data, int len, bool wait_resp, struct sk_buff **ret); int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, bool wait_resp, struct sk_buff **ret); -int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, - int len); +int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, + int len, int max_len); +static inline int +mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, + int len) +{ + int max_len = 4096 - dev->mcu_ops->headroom; + + return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len); +} + static inline int mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len, bool wait_resp) @@ -1293,14 +1369,22 @@ mt76_token_put(struct mt76_dev *dev, int token) return txwi; } -static inline int -mt76_get_next_pkt_id(struct mt76_wcid *wcid) +static inline void mt76_packet_id_init(struct mt76_wcid *wcid) { - wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK; - if (wcid->packet_id == MT_PACKET_ID_NO_ACK || - wcid->packet_id == MT_PACKET_ID_NO_SKB) - wcid->packet_id = MT_PACKET_ID_FIRST; + INIT_LIST_HEAD(&wcid->list); + idr_init(&wcid->pktid); +} - return wcid->packet_id; +static inline void +mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid) +{ + struct sk_buff_head list; + + mt76_tx_status_lock(dev, &list); + mt76_tx_status_skb_get(dev, wcid, -1, &list); + mt76_tx_status_unlock(dev, &list); + + idr_destroy(&wcid->pktid); } + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 3972c56136a200..fe03e31989bb14 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1458,7 +1458,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt76_queue_rx_reset(dev, i); } - mt76_tx_status_check(&dev->mt76, NULL, true); + mt76_tx_status_check(&dev->mt76, true); mt7603_dma_sched_reset(dev); @@ -1471,17 +1471,20 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mutex_unlock(&dev->mt76.mutex); mt76_worker_enable(&dev->mt76.tx_worker); - napi_enable(&dev->mt76.tx_napi); - napi_schedule(&dev->mt76.tx_napi); tasklet_enable(&dev->mt76.pre_tbtt_tasklet); mt7603_beacon_set_timer(dev, -1, beacon_int); + local_bh_disable(); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + napi_enable(&dev->mt76.napi[0]); napi_schedule(&dev->mt76.napi[0]); napi_enable(&dev->mt76.napi[1]); napi_schedule(&dev->mt76.napi[1]); + local_bh_enable(); ieee80211_wake_queues(dev->mt76.hw); mt76_txq_schedule_all(&dev->mphy); @@ -1814,7 +1817,7 @@ void mt7603_mac_work(struct work_struct *work) bool reset = false; int i, idx; - mt76_tx_status_check(&dev->mt76, NULL, false); + mt76_tx_status_check(&dev->mt76, false); mutex_lock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c 
b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 8edea1e7a602f7..7ac4cd247a730c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -69,6 +69,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.hw_key_idx = -1; + mt76_packet_id_init(&mvif->sta.wcid); eth_broadcast_addr(bc_addr); mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); @@ -107,6 +108,8 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mutex_lock(&dev->mt76.mutex); dev->mt76.vif_mask &= ~BIT(mvif->idx); mutex_unlock(&dev->mt76.mutex); + + mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); } void mt7603_init_edcca(struct mt7603_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c index aa6cb668b012e3..3d94cdb4314ae6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -28,7 +28,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index 83f9861ff5226b..2b97b9dde477bc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -17,4 +17,4 @@ mt7615e-$(CONFIG_MT7622_WMAC) += soc.o mt7663-usb-sdio-common-y := usb_sdio.o mt7663u-y := usb.o usb_mcu.o -mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o +mt7663s-y := sdio.o sdio_mcu.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index cb4659771fd973..6fd6f067da49f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -2,6 +2,33 @@ #include "mt7615.h" +static int +mt7615_reg_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + mt7615_mutex_acquire(dev); + mt76_wr(dev, dev->mt76.debugfs_reg, val); + mt7615_mutex_release(dev); + + return 0; +} + +static int +mt7615_reg_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + mt7615_mutex_acquire(dev); + *val = mt76_rr(dev, dev->mt76.debugfs_reg); + mt7615_mutex_release(dev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7615_reg_get, mt7615_reg_set, + "0x%08llx\n"); + static int mt7615_radar_pattern_set(void *data, u64 val) { @@ -506,7 +533,7 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) { struct dentry *dir; - dir = mt76_register_debugfs(&dev->mt76); + dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval); if (!dir) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 2f1ac644e018e4..47f23ac905a3cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -49,12 +49,14 @@ int mt7615_thermal_init(struct mt7615_dev *dev) { struct wiphy *wiphy = mt76_hw(dev)->wiphy; struct device *hwmon; + const char *name; if (!IS_REACHABLE(CONFIG_HWMON)) return 0; - hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, - wiphy_name(wiphy), dev, + name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s", + wiphy_name(wiphy)); + hwmon = 
devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); if (IS_ERR(hwmon)) return PTR_ERR(hwmon); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index ff3f85e4087c9c..423f69015e3ec1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -755,12 +755,15 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, if (info->flags & IEEE80211_TX_CTL_NO_ACK) txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); - txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | - FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | - FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); - if (!is_mmio) - txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | - FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); + val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); + txwi[7] = cpu_to_le32(val); + if (!is_mmio) { + val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | + FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); + txwi[8] = cpu_to_le32(val); + } return 0; } @@ -1494,32 +1497,41 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) } static void -mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) +mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi) { struct mt76_dev *mdev = &dev->mt76; - struct mt76_txwi_cache *txwi; __le32 *txwi_data; u32 val; u8 wcid; - trace_mac_tx_free(dev, token); - txwi = mt76_token_put(mdev, token); - if (!txwi) - return; + mt7615_txp_skb_unmap(mdev, txwi); + if (!txwi->skb) + goto out; txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); val = le32_to_cpu(txwi_data[1]); wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); + mt76_tx_complete_skb(mdev, wcid, txwi->skb); - mt7615_txp_skb_unmap(mdev, txwi); - if (txwi->skb) { - mt76_tx_complete_skb(mdev, wcid, txwi->skb); - txwi->skb = NULL; - } - +out: + txwi->skb = NULL; mt76_put_txwi(mdev, txwi); } +static void +mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) +{ + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + + trace_mac_tx_free(dev, token); + txwi = mt76_token_put(mdev, token); + if (!txwi) + return; + + mt7615_txwi_free(dev, txwi); +} + static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data; @@ -2014,7 +2026,7 @@ void mt7615_mac_work(struct work_struct *work) mt7615_mutex_release(phy->dev); - mt76_tx_status_check(mphy->dev, NULL, false); + mt76_tx_status_check(mphy->dev, false); timeout = mt7615_get_macwork_timeout(phy->dev); ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout); @@ -2026,16 +2038,8 @@ void mt7615_tx_token_put(struct mt7615_dev *dev) int id; spin_lock_bh(&dev->mt76.token_lock); - idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7615_txp_skb_unmap(&dev->mt76, txwi); - if (txwi->skb) { - struct ieee80211_hw *hw; - - hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); - ieee80211_free_txskb(hw, txwi->skb); - } - mt76_put_txwi(&dev->mt76, txwi); - } + idr_for_each_entry(&dev->mt76.token, txwi, id) + mt7615_txwi_free(dev, txwi); spin_unlock_bh(&dev->mt76.token_lock); idr_destroy(&dev->mt76.token); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index dada43d6d879e6..890d9b07e1563c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -135,8 +135,6 @@ static int 
get_omac_idx(enum nl80211_iftype type, u64 mask) int i; switch (type) { - case NL80211_IFTYPE_MESH_POINT: - case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: /* prefer hw bssid slot 1-3 */ i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3); @@ -160,6 +158,8 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask) return HW_BSSID_0; break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: /* ap uses hw bssid 0 and ext bssid */ @@ -231,6 +231,8 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mvif->sta.wcid.idx = idx; mvif->sta.wcid.ext_phy = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; + mt76_packet_id_init(&mvif->sta.wcid); + mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -281,6 +283,8 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); spin_unlock_bh(&dev->sta_poll_lock); + + mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); } static void mt7615_init_dfs_state(struct mt7615_phy *phy) @@ -567,8 +571,8 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, mt7615_mcu_add_bss_info(phy, vif, NULL, true); mt7615_mcu_sta_add(phy, vif, NULL, true); - if (vif->p2p) - mt7615_mcu_set_p2p_oppps(hw, vif); + if (mt7615_firmware_offload(dev) && vif->p2p) + mt76_connac_mcu_set_p2p_oppps(hw, vif); } if (changed & (BSS_CHANGED_BEACON | @@ -858,8 +862,6 @@ mt7615_get_stats(struct ieee80211_hw *hw, stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; - memset(mib, 0, sizeof(*mib)); - mt7615_mutex_release(phy->dev); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index f8a09692d3e4c0..25f9cbe2cd6102 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -808,7 +808,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) static int mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) + struct ieee80211_sta *sta, struct mt7615_phy *phy, + bool enable) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; u32 type = vif->p2p ? 
NETWORK_P2P : NETWORK_INFRA; @@ -821,6 +822,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MONITOR: break; case NL80211_IFTYPE_STATION: /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */ @@ -840,14 +842,19 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } bss = (struct bss_info_basic *)tlv; - memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN); - bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); bss->network_type = cpu_to_le32(type); - bss->dtim_period = vif->bss_conf.dtim_period; bss->bmc_tx_wlan_idx = wlan_idx; bss->wmm_idx = mvif->mt76.wmm_idx; bss->active = enable; + if (vif->type != NL80211_IFTYPE_MONITOR) { + memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN); + bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); + bss->dtim_period = vif->bss_conf.dtim_period; + } else { + memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN); + } + return 0; } @@ -863,6 +870,7 @@ mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac)); switch (vif->type) { + case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: if (vif->p2p) @@ -929,7 +937,7 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, if (enable) mt7615_mcu_bss_omac_tlv(skb, vif); - mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable); + mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable); if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START && mvif->mt76.omac_idx < REPEATER_BSSID_START) @@ -1690,6 +1698,19 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl) sizeof(data), true); } +static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev) +{ + struct { + bool cache_enable; + u8 pad[3]; + } data = { + .cache_enable = true + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CAL_CACHE, &data, + sizeof(data), false); +} + static int mt7663_load_n9(struct mt7615_dev *dev, const char *name) { u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL; @@ -1898,9 +1919,14 @@ int mt7615_mcu_init(struct mt7615_dev *dev) mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); dev_dbg(dev->mt76.dev, "Firmware init done\n"); set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - mt7615_mcu_fw_log_2_host(dev, 0); - return 0; + if (dev->dbdc_support) { + ret = mt7615_mcu_cal_cache_apply(dev); + if (ret) + return ret; + } + + return mt7615_mcu_fw_log_2_host(dev, 0); } EXPORT_SYMBOL_GPL(mt7615_mcu_init); @@ -2761,53 +2787,3 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req, sizeof(req), false); } - -int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; - struct mt7615_dev *dev = mt7615_hw_dev(hw); - struct { - __le32 ct_win; - u8 bss_idx; - u8 rsv[3]; - } __packed req = { - .ct_win = cpu_to_le32(ct_window), - .bss_idx = mvif->mt76.idx, - }; - - if (!mt7615_firmware_offload(dev)) - return -ENOTSUPP; - - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, &req, - sizeof(req), false); -} - -u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset) -{ - struct { - __le32 addr; - __le32 val; - } __packed req = { - .addr = cpu_to_le32(offset), - }; - - return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, 
&req, sizeof(req), - true); -} -EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr); - -void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val) -{ - struct { - __le32 addr; - __le32 val; - } __packed req = { - .addr = cpu_to_le32(offset), - .val = cpu_to_le32(val), - }; - - mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false); -} -EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index d0c64a9b09cfb5..6ff6d580091882 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -107,6 +107,18 @@ struct mt7615_wtbl_rate_desc { struct mt7615_sta *sta; }; +struct mt7663s_intr { + u32 isr; + struct { + u32 wtqcr[8]; + } tx; + struct { + u16 num[2]; + u16 len[2][16]; + } rx; + u32 rec_mb[2]; +} __packed; + struct mt7615_sta { struct mt76_wcid wcid; /* must be first */ @@ -541,8 +553,6 @@ int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy); -int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration); @@ -555,8 +565,6 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable); int __mt7663_load_firmware(struct mt7615_dev *dev); -u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); -void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); void mt7615_coredump_work(struct work_struct *work); void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en); @@ -573,10 +581,6 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); int mt7663u_mcu_init(struct mt7615_dev *dev); /* sdio */ -u32 mt7663s_read_pcr(struct mt7615_dev *dev); int mt7663s_mcu_init(struct mt7615_dev *dev); -void mt7663s_txrx_worker(struct mt76_worker *w); -void mt7663s_rx_work(struct work_struct *work); -void mt7663s_sdio_irq(struct sdio_func *func); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index 11f169cdd6036a..b808248943ea00 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -39,7 +39,7 @@ static int mt7615_pci_probe(struct pci_dev *pdev, if (ret < 0) return ret; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) goto error; @@ -164,12 +164,14 @@ static int mt7615_pci_resume(struct pci_dev *pdev) dev_err(mdev->dev, "PDMA engine must be reinitialized\n"); mt76_worker_enable(&mdev->tx_worker); + local_bh_disable(); mt76_for_each_q_rx(mdev, i) { napi_enable(&mdev->napi[i]); napi_schedule(&mdev->napi[i]); } napi_enable(&mdev->tx_napi); napi_schedule(&mdev->tx_napi); + local_bh_enable(); if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && mt7615_firmware_offload(dev)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index da87c02a73eb0c..5ee52cd70a4b45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -198,7 +198,7 @@ void mt7615_dma_reset(struct mt7615_dev *dev) mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_rx_reset(dev, i); - 
mt76_tx_status_check(&dev->mt76, NULL, true); + mt76_tx_status_check(&dev->mt76, true); mt7615_dma_start(dev); } @@ -326,6 +326,8 @@ void mt7615_mac_reset_work(struct work_struct *work) clear_bit(MT76_RESET, &phy2->mt76->state); mt76_worker_enable(&dev->mt76.tx_worker); + + local_bh_disable(); napi_enable(&dev->mt76.tx_napi); napi_schedule(&dev->mt76.tx_napi); @@ -334,6 +336,7 @@ void mt7615_mac_reset_work(struct work_struct *work) napi_enable(&dev->mt76.napi[1]); napi_schedule(&dev->mt76.napi[1]); + local_bh_enable(); ieee80211_wake_queues(mt76_hw(dev)); if (ext_phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 305bb8597531b0..31c4a76b7f91a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -14,8 +14,8 @@ #include #include +#include "../sdio.h" #include "mt7615.h" -#include "sdio.h" #include "mac.h" #include "mcu.h" @@ -24,200 +24,19 @@ static const struct sdio_device_id mt7663s_table[] = { { } /* Terminating entry */ }; -static u32 mt7663s_read_whisr(struct mt76_dev *dev) +static void mt7663s_txrx_worker(struct mt76_worker *w) { - return sdio_readl(dev->sdio.func, MCR_WHISR, NULL); -} - -u32 mt7663s_read_pcr(struct mt7615_dev *dev) -{ - struct mt76_sdio *sdio = &dev->mt76.sdio; - - return sdio_readl(sdio->func, MCR_WHLPCR, NULL); -} - -static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset) -{ - struct sdio_func *func = dev->sdio.func; - u32 val = ~0, status; - int err; - - sdio_claim_host(func); - - sdio_writel(func, offset, MCR_H2DSM0R, &err); - if (err < 0) { - dev_err(dev->dev, "failed setting address [err=%d]\n", err); - goto out; - } - - sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err); - if (err < 0) { - dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); - goto out; - } - - err = readx_poll_timeout(mt7663s_read_whisr, dev, status, - status & H2D_SW_INT_READ, 0, 1000000); - if (err < 0) { - dev_err(dev->dev, "query whisr timeout\n"); - goto out; - } - - sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err); - if (err < 0) { - dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); - goto out; - } - - val = sdio_readl(func, MCR_H2DSM0R, &err); - if (err < 0) { - dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); - goto out; - } - - if (val != offset) { - dev_err(dev->dev, "register mismatch\n"); - val = ~0; - goto out; - } - - val = sdio_readl(func, MCR_D2HRM1R, &err); - if (err < 0) - dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err); - -out: - sdio_release_host(func); - - return val; -} - -static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val) -{ - struct sdio_func *func = dev->sdio.func; - u32 status; - int err; - - sdio_claim_host(func); - - sdio_writel(func, offset, MCR_H2DSM0R, &err); - if (err < 0) { - dev_err(dev->dev, "failed setting address [err=%d]\n", err); - goto out; - } - - sdio_writel(func, val, MCR_H2DSM1R, &err); - if (err < 0) { - dev_err(dev->dev, - "failed setting write value [err=%d]\n", err); - goto out; - } - - sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err); - if (err < 0) { - dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); - goto out; - } - - err = readx_poll_timeout(mt7663s_read_whisr, dev, status, - status & H2D_SW_INT_WRITE, 0, 1000000); - if (err < 0) { - dev_err(dev->dev, "query whisr timeout\n"); - goto out; - } - - sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err); - if (err < 0) { - dev_err(dev->dev, "failed 
setting write mode [err=%d]\n", err); - goto out; - } - - val = sdio_readl(func, MCR_H2DSM0R, &err); - if (err < 0) { - dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); - goto out; - } - - if (val != offset) - dev_err(dev->dev, "register mismatch\n"); - -out: - sdio_release_host(func); -} - -static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset) -{ - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) - return dev->mcu_ops->mcu_rr(dev, offset); - else - return mt7663s_read_mailbox(dev, offset); -} - -static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val) -{ - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) - dev->mcu_ops->mcu_wr(dev, offset, val); - else - mt7663s_write_mailbox(dev, offset, val); -} + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + txrx_worker); + struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); -static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) -{ - val |= mt7663s_rr(dev, offset) & ~mask; - mt7663s_wr(dev, offset, val); - - return val; -} - -static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset, - const void *data, int len) -{ - const u32 *val = data; - int i; - - for (i = 0; i < len / sizeof(u32); i++) { - mt7663s_wr(dev, offset, val[i]); - offset += sizeof(u32); - } -} - -static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset, - void *data, int len) -{ - u32 *val = data; - int i; - - for (i = 0; i < len / sizeof(u32); i++) { - val[i] = mt7663s_rr(dev, offset); - offset += sizeof(u32); - } -} - -static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base, - const struct mt76_reg_pair *data, - int len) -{ - int i; - - for (i = 0; i < len; i++) { - mt7663s_wr(dev, data->reg, data->value); - data++; - } - - return 0; -} - -static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base, - struct mt76_reg_pair *data, - int len) -{ - int i; - - for (i = 0; i < len; i++) { - data->value = mt7663s_rr(dev, data->reg); - data++; + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + queue_work(mdev->wq, &dev->pm.wake_work); + return; } - - return 0; + mt76s_txrx_worker(sdio); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); } static void mt7663s_init_work(struct work_struct *work) @@ -231,64 +50,24 @@ static void mt7663s_init_work(struct work_struct *work) mt7615_init_work(dev); } -static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func) +static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr) { - u32 status, ctrl; - int ret; - - sdio_claim_host(func); - - ret = sdio_enable_func(func); - if (ret < 0) - goto release; + struct mt76_sdio *sdio = &dev->sdio; + struct mt7663s_intr *irq_data = sdio->intr_data; + int i, err; - /* Get ownership from the device */ - sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR, - MCR_WHLPCR, &ret); - if (ret < 0) - goto disable_func; - - ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, - status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); - if (ret < 0) { - dev_err(dev->mt76.dev, "Cannot get ownership from device"); - goto disable_func; - } - - ret = sdio_set_block_size(func, 512); - if (ret < 0) - goto disable_func; - - /* Enable interrupt */ - sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret); - if (ret < 0) - goto disable_func; - - ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN; - sdio_writel(func, ctrl, MCR_WHIER, &ret); - if (ret < 0) - goto disable_func; - - /* set WHISR as read clear and Rx aggregation number as 16 */ - 
ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16); - sdio_writel(func, ctrl, MCR_WHCR, &ret); - if (ret < 0) - goto disable_func; - - ret = sdio_claim_irq(func, mt7663s_sdio_irq); - if (ret < 0) - goto disable_func; + err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data)); + if (err) + return err; - sdio_release_host(func); + intr->isr = irq_data->isr; + intr->rec_mb = irq_data->rec_mb; + intr->tx.wtqcr = irq_data->tx.wtqcr; + intr->rx.num = irq_data->rx.num; + for (i = 0; i < 2 ; i++) + intr->rx.len[i] = irq_data->rx.len[i]; return 0; - -disable_func: - sdio_disable_func(func); -release: - sdio_release_host(func); - - return ret; } static int mt7663s_probe(struct sdio_func *func, @@ -307,13 +86,13 @@ static int mt7663s_probe(struct sdio_func *func, .update_survey = mt7615_update_channel, }; static const struct mt76_bus_ops mt7663s_ops = { - .rr = mt7663s_rr, - .rmw = mt7663s_rmw, - .wr = mt7663s_wr, - .write_copy = mt7663s_write_copy, - .read_copy = mt7663s_read_copy, - .wr_rp = mt7663s_wr_rp, - .rd_rp = mt7663s_rd_rp, + .rr = mt76s_rr, + .rmw = mt76s_rmw, + .wr = mt76s_wr, + .write_copy = mt76s_write_copy, + .read_copy = mt76s_read_copy, + .wr_rp = mt76s_wr_rp, + .rd_rp = mt76s_rd_rp, .type = MT76_BUS_SDIO, }; struct ieee80211_ops *ops; @@ -341,7 +120,7 @@ static int mt7663s_probe(struct sdio_func *func, if (ret < 0) goto error; - ret = mt7663s_hw_init(dev, func); + ret = mt76s_hw_init(mdev, func, MT76_CONNAC_SDIO); if (ret) goto error; @@ -349,8 +128,9 @@ static int mt7663s_probe(struct sdio_func *func, (mt76_rr(dev, MT_HW_REV) & 0xff); dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + mdev->sdio.parse_irq = mt7663s_parse_intr; mdev->sdio.intr_data = devm_kmalloc(mdev->dev, - sizeof(struct mt76s_intr), + sizeof(struct mt7663s_intr), GFP_KERNEL); if (!mdev->sdio.intr_data) { ret = -ENOMEM; @@ -367,7 +147,11 @@ static int mt7663s_probe(struct sdio_func *func, } } - ret = mt76s_alloc_queues(&dev->mt76); + ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN); + if (ret) + goto error; + + ret = mt76s_alloc_tx(mdev); if (ret) goto error; @@ -432,7 +216,7 @@ static int mt7663s_suspend(struct device *dev) cancel_work_sync(&mdev->mt76.sdio.stat_work); clear_bit(MT76_READING_STATS, &mdev->mphy.state); - mt76_tx_status_check(&mdev->mt76, NULL, true); + mt76_tx_status_check(&mdev->mt76, true); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c index 45c1cd3b9f4989..dc9a2f0b45a528 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c @@ -10,11 +10,11 @@ #include #include +#include "../sdio.h" #include "mt7615.h" #include "mac.h" #include "mcu.h" #include "regs.h" -#include "sdio.h" static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) { @@ -27,6 +27,7 @@ static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) MT_HIF1_MIN_QUOTA); sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + sdio->sched.pse_page_size = MT_PSE_PAGE_SZ; txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT, MT_PP_TXDWCNT_TX1_ADD_DW_CNT); sdio->sched.deficit = txdwcnt << 2; @@ -63,7 +64,7 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev) sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL); - ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, + ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); if (ret < 0) { dev_err(dev->mt76.dev, "Cannot get ownership 
from device"); @@ -111,7 +112,7 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev) sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL); - ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, + ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); if (ret < 0) { dev_err(dev->mt76.dev, "Cannot set ownership to device"); @@ -137,8 +138,8 @@ int mt7663s_mcu_init(struct mt7615_dev *dev) .mcu_skb_send_msg = mt7663s_mcu_send_message, .mcu_parse_response = mt7615_mcu_parse_response, .mcu_restart = mt7615_mcu_restart, - .mcu_rr = mt7615_mcu_reg_rr, - .mcu_wr = mt7615_mcu_reg_wr, + .mcu_rr = mt76_connac_mcu_reg_rr, + .mcu_wr = mt76_connac_mcu_reg_wr, }; struct mt7615_mcu_ops *mcu_ops; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 996d48cca18a61..bd2939ebcbf484 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -169,7 +169,7 @@ bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) mt7615_mac_sta_poll(dev); mt7615_mutex_release(dev); - return 0; + return false; } EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index f49d97d0a1c51d..e7f01c2978a28d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -85,9 +85,14 @@ struct mt76_connac_coredump { extern const struct wiphy_wowlan_support mt76_connac_wowlan_support; +static inline bool is_mt7922(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7922; +} + static inline bool is_mt7921(struct mt76_dev *dev) { - return mt76_chip(dev) == 0x7961; + return mt76_chip(dev) == 0x7961 || is_mt7922(dev); } static inline bool is_mt7663(struct mt76_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 5c3a81e5f559dd..26b4b875dcc024 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -74,7 +74,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_init_download); int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy) { - struct mt76_dev *dev = phy->dev; + int len, i, n_max_channels, n_2ch = 0, n_5ch = 0, n_6ch = 0; struct mt76_connac_mcu_channel_domain { u8 alpha2[4]; /* regulatory_request.alpha2 */ u8 bw_2g; /* BW_20_40M 0 @@ -84,25 +84,29 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy) * BW_20_40_80_8080M 4 */ u8 bw_5g; - __le16 pad; + u8 bw_6g; + u8 pad; u8 n_2ch; u8 n_5ch; - __le16 pad2; + u8 n_6ch; + u8 pad2; } __packed hdr = { .bw_2g = 0, - .bw_5g = 3, + .bw_5g = 3, /* BW_20_40_80_160M */ + .bw_6g = 3, }; struct mt76_connac_mcu_chan { __le16 hw_value; __le16 pad; __le32 flags; } __packed channel; - int len, i, n_max_channels, n_2ch = 0, n_5ch = 0; + struct mt76_dev *dev = phy->dev; struct ieee80211_channel *chan; struct sk_buff *skb; n_max_channels = phy->sband_2g.sband.n_channels + - phy->sband_5g.sband.n_channels; + phy->sband_5g.sband.n_channels + + phy->sband_6g.sband.n_channels; len = sizeof(hdr) + n_max_channels * sizeof(channel); skb = mt76_mcu_msg_alloc(dev, NULL, len); @@ -135,11 +139,24 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy) skb_put_data(skb, &channel, sizeof(channel)); n_5ch++; } + for (i = 0; i < phy->sband_6g.sband.n_channels; 
i++) { + chan = &phy->sband_6g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_6ch++; + } BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(hdr.alpha2)); memcpy(hdr.alpha2, dev->alpha2, sizeof(dev->alpha2)); hdr.n_2ch = n_2ch; hdr.n_5ch = n_5ch; + hdr.n_6ch = n_6ch; memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); @@ -689,9 +706,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (ht_cap->ht_supported) mode |= PHY_TYPE_BIT_HT; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; - } else if (band == NL80211_BAND_5GHZ) { + } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { mode |= PHY_TYPE_BIT_OFDM; if (ht_cap->ht_supported) @@ -700,7 +717,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (vht_cap->vht_supported) mode |= PHY_TYPE_BIT_VHT; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; } @@ -719,6 +736,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct sta_rec_state *state; struct sta_rec_phy *phy; struct tlv *tlv; + u16 supp_rates; /* starec ht */ if (sta->ht_cap.ht_supported) { @@ -748,12 +766,22 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, if (!is_mt7921(dev)) return; - if (sta->ht_cap.ht_supported) + if (sta->ht_cap.ht_supported || sta->he_cap.has_he) mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif); /* starec he */ - if (sta->he_cap.has_he) + if (sta->he_cap.has_he) { mt76_connac_mcu_sta_he_tlv(skb, sta); + if (band == NL80211_BAND_6GHZ && + sta_state == MT76_STA_INFO_STATE_ASSOC) { + struct sta_rec_he_6g_capa *he_6g_capa; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, + sizeof(*he_6g_capa)); + he_6g_capa = (struct sta_rec_he_6g_capa *)tlv; + he_6g_capa->capa = sta->he_6ghz_capa.capa; + } + } tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); phy = (struct sta_rec_phy *)tlv; @@ -767,7 +795,15 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; - ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]); + + supp_rates = sta->supp_rates[band]; + if (band == NL80211_BAND_2GHZ) + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | + FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); + else + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates); + + ra_info->legacy = cpu_to_le16(supp_rates); if (sta->ht_cap.ht_supported) memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask, @@ -1145,7 +1181,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (he_cap->has_he) mode |= PHY_MODE_AX_24G; - } else if (band == NL80211_BAND_5GHZ) { + } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { mode |= PHY_MODE_A; if (ht_cap->ht_supported) @@ -1154,8 +1190,12 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (vht_cap->vht_supported) mode |= PHY_MODE_AC; - if (he_cap->has_he) - mode |= PHY_MODE_AX_5G; + if (he_cap->has_he) { + if (band == NL80211_BAND_6GHZ) + mode |= PHY_MODE_AX_6G; + else + mode |= PHY_MODE_AX_5G; + } } return mode; @@ -1252,7 +1292,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, u8 short_st; u8 ht_op_info; u8 sco; - u8 pad[3]; + u8 band; + u8 pad[2]; } 
__packed rlm; } __packed rlm_req = { .hdr = { @@ -1268,13 +1309,19 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, .ht_op_info = 4, /* set HT 40M allowed */ .rx_streams = phy->chainmask, .short_st = true, + .band = band, }, }; int err, conn_type; - u8 idx; + u8 idx, basic_phy; idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; basic_req.basic.hw_bss_idx = idx; + if (band == NL80211_BAND_6GHZ) + basic_req.basic.phymode_ext = BIT(0); + + basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, NULL); + basic_req.basic.nonht_basic_phy = cpu_to_le16(basic_phy); switch (vif->type) { case NL80211_IFTYPE_MESH_POINT: @@ -1445,7 +1492,17 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, else chan = &req->channels[i]; - chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2; + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } chan->channel_num = scan_list[i]->hw_value; } req->channel_type = sreq->n_channels ? 4 : 0; @@ -1531,8 +1588,10 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, get_random_mask_addr(addr, sreq->mac_addr, sreq->mac_addr_mask); } - if (is_mt7921(phy->dev)) + if (is_mt7921(phy->dev)) { req->mt7921.bss_idx = mvif->idx; + req->mt7921.delay = cpu_to_le32(sreq->delay); + } req->ssids_num = sreq->n_ssids; for (i = 0; i < req->ssids_num; i++) { @@ -1554,7 +1613,18 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, req->channels_num = min_t(u8, sreq->n_channels, 64); for (i = 0; i < req->channels_num; i++) { chan = &req->channels[i]; - chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2; + + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } chan->channel_num = scan_list[i]->hw_value; } @@ -1652,6 +1722,61 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event); +static void mt76_connac_mcu_parse_tx_resource(struct mt76_dev *dev, + struct sk_buff *skb) +{ + struct mt76_sdio *sdio = &dev->sdio; + struct mt76_connac_tx_resource { + __le32 version; + __le32 pse_data_quota; + __le32 pse_mcu_quota; + __le32 ple_data_quota; + __le32 ple_mcu_quota; + __le16 pse_page_size; + __le16 ple_page_size; + u8 pp_padding; + u8 pad[3]; + } __packed * tx_res; + + tx_res = (struct mt76_connac_tx_resource *)skb->data; + sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota); + sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota); + sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota); + sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size); + sdio->sched.deficit = tx_res->pp_padding; +} + +static void mt76_connac_mcu_parse_phy_cap(struct mt76_dev *dev, + struct sk_buff *skb) +{ + struct mt76_connac_phy_cap { + u8 ht; + u8 vht; + u8 _5g; + u8 max_bw; + u8 nss; + u8 dbdc; + u8 tx_ldpc; + u8 rx_ldpc; + u8 tx_stbc; + u8 rx_stbc; + u8 hw_path; + u8 he; + } __packed * cap; + + enum { + WF0_24G, + WF0_5G + }; + + cap = (struct mt76_connac_phy_cap *)skb->data; + + dev->phy.antenna_mask = BIT(cap->nss) - 1; + dev->phy.chainmask = dev->phy.antenna_mask; + dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); + dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); +} + int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy) { struct mt76_connac_cap_hdr { @@ 
-1694,6 +1819,17 @@ int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy) case MT_NIC_CAP_6G: phy->cap.has_6ghz = skb->data[0]; break; + case MT_NIC_CAP_MAC_ADDR: + memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN); + break; + case MT_NIC_CAP_PHY: + mt76_connac_mcu_parse_phy_cap(phy->dev, skb); + break; + case MT_NIC_CAP_TX_RESOURCE: + if (mt76_is_sdio(phy->dev)) + mt76_connac_mcu_parse_tx_resource(phy->dev, + skb); + break; default: break; } @@ -1749,6 +1885,70 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, } } +static s8 mt76_connac_get_sar_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power) +{ + const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa; + struct mt76_freq_range_power *frp = phy->frp; + int freq, i; + + if (!capa || !frp) + return target_power; + + freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band); + for (i = 0 ; i < capa->num_freq_ranges; i++) { + if (frp[i].range && + freq >= frp[i].range->start_freq && + freq < frp[i].range->end_freq) { + target_power = min_t(s8, frp[i].power, target_power); + break; + } + } + + return target_power; +} + +static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power) +{ + struct mt76_dev *dev = phy->dev; + struct ieee80211_supported_band *sband; + int i; + + switch (chan->band) { + case NL80211_BAND_2GHZ: + sband = &phy->sband_2g.sband; + break; + case NL80211_BAND_5GHZ: + sband = &phy->sband_5g.sband; + break; + case NL80211_BAND_6GHZ: + sband = &phy->sband_6g.sband; + break; + default: + return target_power; + } + + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *ch = &sband->channels[i]; + + if (ch->hw_value == chan->hw_value) { + if (!(ch->flags & IEEE80211_CHAN_DISABLED)) { + int power = 2 * ch->max_reg_power; + + if (is_mt7663(dev) && (power > 63 || power < -64)) + power = 63; + target_power = min_t(s8, power, target_power); + } + break; + } + } + + return target_power; +} + static int mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, enum nl80211_band band) @@ -1768,6 +1968,24 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 142, 144, 149, 151, 153, 155, 157, 159, 161, 165 }; + static const u8 chan_list_6ghz[] = { + 1, 3, 5, 7, 9, 11, 13, + 15, 17, 19, 21, 23, 25, 27, + 29, 33, 35, 37, 39, 41, 43, + 45, 47, 49, 51, 53, 55, 57, + 59, 61, 65, 67, 69, 71, 73, + 75, 77, 79, 81, 83, 85, 87, + 89, 91, 93, 97, 99, 101, 103, + 105, 107, 109, 111, 113, 115, 117, + 119, 121, 123, 125, 129, 131, 133, + 135, 137, 139, 141, 143, 145, 147, + 149, 151, 153, 155, 157, 161, 163, + 165, 167, 169, 171, 173, 175, 177, + 179, 181, 183, 185, 187, 189, 193, + 195, 197, 199, 201, 203, 205, 207, + 209, 211, 213, 215, 217, 219, 221, + 225, 227, 229, 233 + }; int i, n_chan, batch_size, idx = 0, tx_power, last_ch; struct mt76_connac_sku_tlv sku_tlbv; struct mt76_power_limits limits; @@ -1781,6 +1999,9 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, if (band == NL80211_BAND_2GHZ) { n_chan = ARRAY_SIZE(chan_list_2ghz); ch_list = chan_list_2ghz; + } else if (band == NL80211_BAND_6GHZ) { + n_chan = ARRAY_SIZE(chan_list_6ghz); + ch_list = chan_list_6ghz; } else { n_chan = ARRAY_SIZE(chan_list_5ghz); ch_list = chan_list_5ghz; @@ -1789,13 +2010,13 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, if (!phy->cap.has_5ghz) last_ch = chan_list_2ghz[n_chan - 1]; + else if (phy->cap.has_6ghz) + last_ch = chan_list_6ghz[n_chan - 1]; else last_ch = chan_list_5ghz[n_chan - 1]; for (i = 0; i < 
batch_size; i++) { - struct mt76_connac_tx_power_limit_tlv tx_power_tlv = { - .band = band == NL80211_BAND_2GHZ ? 1 : 2, - }; + struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {}; int j, err, msg_len, num_ch; struct sk_buff *skb; @@ -1811,14 +2032,32 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2)); tx_power_tlv.n_chan = num_ch; + switch (band) { + case NL80211_BAND_2GHZ: + tx_power_tlv.band = 1; + break; + case NL80211_BAND_6GHZ: + tx_power_tlv.band = 3; + break; + default: + tx_power_tlv.band = 2; + break; + } + for (j = 0; j < num_ch; j++, idx++) { struct ieee80211_channel chan = { .hw_value = ch_list[idx], .band = band, }; + s8 reg_power, sar_power; + + reg_power = mt76_connac_get_ch_power(phy, &chan, + tx_power); + sar_power = mt76_connac_get_sar_power(phy, &chan, + reg_power); mt76_get_rate_power_limits(phy, &chan, &limits, - tx_power); + sar_power); tx_power_tlv.last_msg = ch_list[idx] == last_ch; sku_tlbv.channel = ch_list[idx]; @@ -1855,6 +2094,12 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy) if (err < 0) return err; } + if (phy->cap.has_6ghz) { + err = mt76_connac_mcu_rate_txpower_band(phy, + NL80211_BAND_6GHZ); + if (err < 0) + return err; + } return 0; } @@ -1902,6 +2147,26 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter); +int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; + struct mt76_phy *phy = hw->priv; + struct { + __le32 ct_win; + u8 bss_idx; + u8 rsv[3]; + } __packed req = { + .ct_win = cpu_to_le32(ct_window), + .bss_idx = mvif->idx, + }; + + return mt76_mcu_send_msg(phy->dev, MCU_CMD_SET_P2P_OPPPS, &req, + sizeof(req), false); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_p2p_oppps); + #ifdef CONFIG_PM const struct wiphy_wowlan_support mt76_connac_wowlan_support = { @@ -1929,19 +2194,22 @@ mt76_connac_mcu_key_iter(struct ieee80211_hw *hw, key->cipher != WLAN_CIPHER_SUITE_TKIP) return; - if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { - gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1); + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) cipher = BIT(3); - } else { - gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2); + else cipher = BIT(4); - } /* we are assuming here to have a single pairwise key */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) + gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1); + else + gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2); + gtk_tlv->pairwise_cipher = cpu_to_le32(cipher); - gtk_tlv->group_cipher = cpu_to_le32(cipher); gtk_tlv->keyid = key->keyidx; + } else { + gtk_tlv->group_cipher = cpu_to_le32(cipher); } } @@ -2209,8 +2477,35 @@ void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, mt76_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_iter); - #endif /* CONFIG_PM */ +u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset) +{ + struct { + __le32 addr; + __le32 val; + } __packed req = { + .addr = cpu_to_le32(offset), + }; + + return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req), + true); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_rr); + +void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + struct { + __le32 addr; + __le32 val; + } __packed req = { + .addr = cpu_to_le32(offset), + .val 
= cpu_to_le32(val), + }; + + mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr); + MODULE_AUTHOR("Lorenzo Bianconi "); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 1c73beb2267719..4e2c9dafd77656 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -124,6 +124,8 @@ struct sta_rec_state { u8 rsv[1]; } __packed; +#define RA_LEGACY_OFDM GENMASK(13, 6) +#define RA_LEGACY_CCK GENMASK(3, 0) #define HT_MCS_MASK_NUM 10 struct sta_rec_ra_info { __le16 tag; @@ -143,6 +145,13 @@ struct sta_rec_phy { u8 rsv[2]; } __packed; +struct sta_rec_he_6g_capa { + __le16 tag; + __le16 len; + __le16 capa; + u8 rsv[2]; +} __packed; + /* wtbl_rec */ struct wtbl_req_hdr { @@ -301,6 +310,7 @@ struct wtbl_raw { sizeof(struct sta_rec_vht) + \ sizeof(struct sta_rec_uapsd) + \ sizeof(struct sta_rec_amsdu) + \ + sizeof(struct sta_rec_he_6g_capa) + \ sizeof(struct tlv) + \ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) @@ -327,6 +337,7 @@ enum { STA_REC_MUEDCA, STA_REC_BFEE, STA_REC_PHY = 0x15, + STA_REC_HE_6G = 0x17, STA_REC_MAX_NUM }; @@ -520,6 +531,7 @@ enum { MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, MCU_EXT_CMD_RXDCOC_CAL = 0x59, MCU_EXT_CMD_TXDPD_CAL = 0x60, + MCU_EXT_CMD_CAL_CACHE = 0x67, MCU_EXT_CMD_SET_RDD_TH = 0x7c, MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, }; @@ -548,6 +560,7 @@ enum { /* offload mcu commands */ enum { + MCU_CMD_TEST_CTRL = MCU_CE_PREFIX | 0x01, MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03, MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05, MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f, @@ -560,6 +573,7 @@ enum { MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61, MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62, MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a, + MCU_CMD_SET_MU_EDCA_PARMS = MCU_CE_PREFIX | 0xb0, MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0, MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0, MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca, @@ -656,10 +670,14 @@ struct mt76_connac_bss_basic_tlv { * bit(3): GN * bit(4): AN * bit(5): AC + * bit(6): AX2 + * bit(7): AX5 + * bit(8): AX6 */ __le16 sta_idx; - u8 nonht_basic_phy; - u8 pad[3]; + __le16 nonht_basic_phy; + u8 phymode_ext; /* bit(0) AX_6G */ + u8 pad[1]; } __packed; struct mt76_connac_bss_qos_tlv { @@ -802,7 +820,9 @@ struct mt76_connac_sched_scan_req { } mt7663; struct { u8 bss_idx; - u8 pad2[19]; + u8 pad1[3]; + __le32 delay; + u8 pad2[12]; u8 random_mac[ETH_ALEN]; u8 pad3[38]; } mt7921; @@ -844,14 +864,14 @@ struct mt76_connac_gtk_rekey_tlv { * 2: rekey update */ u8 keyid; - u8 pad[2]; + u8 option; /* 1: rekey data update without enabling offload */ + u8 pad[1]; __le32 proto; /* WPA-RSN-WAPI-OPSN */ __le32 pairwise_cipher; __le32 group_cipher; __le32 key_mgmt; /* NONE-PSK-IEEE802.1X */ __le32 mgmt_group_cipher; - u8 option; /* 1: rekey data update without enabling offload */ - u8 reserverd[3]; + u8 reserverd[4]; } __packed; #define MT76_CONNAC_WOW_MASK_MAX_LEN 16 @@ -961,7 +981,7 @@ struct mt76_connac_tx_power_limit_tlv { __le16 len; /* DW1 - cmd hint */ u8 n_chan; /* # channel */ - u8 band; /* 2.4GHz - 5GHz */ + u8 band; /* 2.4GHz - 5GHz - 6GHz */ u8 last_msg; u8 pad1; /* DW3 */ @@ -1093,4 +1113,8 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable); void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_connac_coredump *coredump); int 
mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy); +int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset); +void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); #endif /* __MT76_CONNAC_MCU_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index cea24213186c74..da2ca2563ac9f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -201,7 +201,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev, t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8); /* vht mcs 8, 9 5GHz */ - val = mt76x02_eeprom_get(dev, 0x132); + val = mt76x02_eeprom_get(dev, 0x12c); t->vht[8] = s6_to_s8(val); t->vht[9] = s6_to_s8(val >> 8); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index b795e7245c0750..f19228fc5a7087 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -176,7 +176,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -276,6 +276,7 @@ static int mt76x0e_resume(struct pci_dev *pdev) mt76_worker_enable(&mdev->tx_worker); + local_bh_disable(); mt76_for_each_q_rx(mdev, i) { mt76_queue_rx_reset(dev, i); napi_enable(&mdev->napi[i]); @@ -284,6 +285,7 @@ static int mt76x0e_resume(struct pci_dev *pdev) napi_enable(&mdev->tx_napi); napi_schedule(&mdev->tx_napi); + local_bh_enable(); return mt76x0e_init_hardware(dev, true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index c32e6dc6877393..a404fd7ea9684b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -176,7 +176,7 @@ void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop) mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); } -static __le16 +static u16 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { @@ -222,14 +222,14 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, rateval |= MT_RXWI_RATE_SGI; *nss_val = nss; - return cpu_to_le16(rateval); + return rateval; } void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, const struct ieee80211_tx_rate *rate) { s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); - __le16 rateval; + u16 rateval; u32 tx_info; s8 nss; @@ -342,7 +342,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, struct ieee80211_key_conf *key = info->control.hw_key; u32 wcid_tx_info; u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); - u16 txwi_flags = 0; + u16 txwi_flags = 0, rateval; u8 nss; s8 txpwr_adj, max_txpwr_adj; u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf; @@ -380,14 +380,15 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, if (wcid && (rate->idx < 0 || !rate->count)) { wcid_tx_info = wcid->tx_info; - txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info); + rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info); max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ, wcid_tx_info); nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info); } else { - 
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss); + rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss); max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); } + txwi->rate = cpu_to_le16(rateval); txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf, max_txpwr_adj); @@ -1185,7 +1186,7 @@ void mt76x02_mac_work(struct work_struct *work) mutex_unlock(&dev->mt76.mutex); - mt76_tx_status_check(&dev->mt76, NULL, false); + mt76_tx_status_check(&dev->mt76, false); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, MT_MAC_WORK_INTERVAL); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index b50084bbe83de9..ec0de691129af3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -53,7 +53,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_skb_set_moredata(data.tail[i], false); } - spin_lock_bh(&q->lock); + spin_lock(&q->lock); while ((skb = __skb_dequeue(&data.q)) != NULL) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; @@ -61,7 +61,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL); } - spin_unlock_bh(&q->lock); + spin_unlock(&q->lock); } static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en) @@ -472,7 +472,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) mt76_queue_rx_reset(dev, i); } - mt76_tx_status_check(&dev->mt76, NULL, true); + mt76_tx_status_check(&dev->mt76, true); mt76x02_mac_start(dev); @@ -491,15 +491,17 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) clear_bit(MT76_RESET, &dev->mphy.state); mt76_worker_enable(&dev->mt76.tx_worker); + tasklet_enable(&dev->mt76.pre_tbtt_tasklet); + + local_bh_disable(); napi_enable(&dev->mt76.tx_napi); napi_schedule(&dev->mt76.tx_napi); - tasklet_enable(&dev->mt76.pre_tbtt_tasklet); - mt76_for_each_q_rx(&dev->mt76, i) { napi_enable(&dev->mt76.napi[i]); napi_schedule(&dev->mt76.napi[i]); } + local_bh_enable(); if (restart) { set_bit(MT76_RESTART, &dev->mphy.state); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ccdbab34127146..1f17d86ff755e4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -287,6 +287,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; + mt76_packet_id_init(&mvif->group_wcid); + mtxq = (struct mt76_txq *)vif->txq->drv_priv; mtxq->wcid = &mvif->group_wcid; } @@ -341,6 +343,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; dev->mt76.vif_mask &= ~BIT(mvif->idx); + mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index adf288e50e2127..8a22ee5816748c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -47,7 +47,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -149,12 
+149,15 @@ mt76x2e_resume(struct pci_dev *pdev) pci_restore_state(pdev); mt76_worker_enable(&mdev->tx_worker); + + local_bh_disable(); mt76_for_each_q_rx(mdev, i) { napi_enable(&mdev->napi[i]); napi_schedule(&mdev->napi[i]); } napi_enable(&mdev->tx_napi); napi_schedule(&mdev->tx_napi); + local_bh_enable(); return mt76x2_resume_device(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 64048243e34b22..a15aa256d0cfd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -7,6 +7,13 @@ /** global debugfs **/ +struct hw_queue_map { + const char *name; + u8 index; + u8 pid; + u8 qid; +}; + static int mt7915_implicit_txbf_set(void *data, u64 val) { @@ -75,7 +82,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, mt7915_radar_trigger, "%lld\n"); static int -mt7915_fw_debug_set(void *data, u64 val) +mt7915_fw_debug_wm_set(void *data, u64 val) { struct mt7915_dev *dev = data; enum { @@ -85,41 +92,112 @@ mt7915_fw_debug_set(void *data, u64 val) DEBUG_SPL, DEBUG_RPT_RX, } debug; + int ret; - dev->fw_debug = !!val; + dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0; - mt7915_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0); + ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm); + if (ret) + return ret; - for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) - mt7915_mcu_fw_dbg_ctrl(dev, debug, dev->fw_debug); + for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) { + ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm); + if (ret) + return ret; + } + + /* WM CPU info record control */ + mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0)); + mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm); + mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5)); + mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5)); return 0; } static int -mt7915_fw_debug_get(void *data, u64 *val) +mt7915_fw_debug_wm_get(void *data, u64 *val) { struct mt7915_dev *dev = data; - *val = dev->fw_debug; + *val = dev->fw_debug_wm; return 0; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7915_fw_debug_get, - mt7915_fw_debug_set, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wm, mt7915_fw_debug_wm_get, + mt7915_fw_debug_wm_set, "%lld\n"); + +static int +mt7915_fw_debug_wa_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + int ret; + + dev->fw_debug_wa = val ? 
MCU_FW_LOG_TO_HOST : 0; + + ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa); + if (ret) + return ret; + + return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX, + !!dev->fw_debug_wa, 0); +} + +static int +mt7915_fw_debug_wa_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + + *val = dev->fw_debug_wa; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get, + mt7915_fw_debug_wa_set, "%lld\n"); + +static int +mt7915_fw_util_wm_show(struct seq_file *file, void *data) +{ + struct mt7915_dev *dev = file->private; + + if (dev->fw_debug_wm) { + seq_printf(file, "Busy: %u%% Peak busy: %u%%\n", + mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT), + mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT)); + seq_printf(file, "Idle count: %u Peak idle count: %u\n", + mt76_rr(dev, MT_CPU_UTIL_IDLE_CNT), + mt76_rr(dev, MT_CPU_UTIL_PEAK_IDLE_CNT)); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wm); + +static int +mt7915_fw_util_wa_show(struct seq_file *file, void *data) +{ + struct mt7915_dev *dev = file->private; + + if (dev->fw_debug_wa) + return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY), + MCU_WA_PARAM_CPU_UTIL, 0, 0); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wa); static void mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, struct seq_file *file) { - struct mt7915_dev *dev = file->private; + struct mt7915_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; int bound[15], range[4], i, n; - if (!phy) - return; - /* Tx ampdu stat */ for (i = 0; i < ARRAY_SIZE(range); i++) range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i)); @@ -146,56 +224,46 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, static void mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) { - struct mt7915_dev *dev = s->private; - bool ext_phy = phy != &dev->phy; static const char * const bw[] = { "BW20", "BW40", "BW80", "BW160" }; - int cnt; - - if (!phy) - return; + struct mib_stats *mib = &phy->mib; /* Tx Beamformer monitor */ seq_puts(s, "\nTx Beamformer applied PPDU counts: "); - cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy)); - seq_printf(s, "iBF: %ld, eBF: %ld\n", - FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt), - FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt)); + seq_printf(s, "iBF: %d, eBF: %d\n", + mib->tx_bf_ibf_ppdu_cnt, + mib->tx_bf_ebf_ppdu_cnt); /* Tx Beamformer Rx feedback monitor */ seq_puts(s, "Tx Beamformer Rx feedback statistics: "); - cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy)); - seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld, ", - FIELD_GET(MT_ETBF_RX_FB_ALL, cnt), - FIELD_GET(MT_ETBF_RX_FB_HE, cnt), - FIELD_GET(MT_ETBF_RX_FB_VHT, cnt), - FIELD_GET(MT_ETBF_RX_FB_HT, cnt)); - cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy)); - seq_printf(s, "%s, NC: %ld, NR: %ld\n", - bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)], - FIELD_GET(MT_ETBF_RX_FB_NC, cnt), - FIELD_GET(MT_ETBF_RX_FB_NR, cnt)); + seq_printf(s, "All: %d, HE: %d, VHT: %d, HT: %d, ", + mib->tx_bf_rx_fb_all_cnt, + mib->tx_bf_rx_fb_he_cnt, + mib->tx_bf_rx_fb_vht_cnt, + mib->tx_bf_rx_fb_ht_cnt); + + seq_printf(s, "%s, NC: %d, NR: %d\n", + bw[mib->tx_bf_rx_fb_bw], + mib->tx_bf_rx_fb_nc_cnt, + mib->tx_bf_rx_fb_nr_cnt); /* Tx Beamformee Rx NDPA & Tx feedback report */ - cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy)); - seq_printf(s, "Tx Beamformee successful feedback frames: %ld\n", - FIELD_GET(MT_ETBF_TX_FB_CPL, cnt)); - seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n", - FIELD_GET(MT_ETBF_TX_FB_TRI, cnt)); + seq_printf(s, "Tx Beamformee successful feedback 
frames: %d\n", + mib->tx_bf_fb_cpl_cnt); + seq_printf(s, "Tx Beamformee feedback triggered counts: %d\n", + mib->tx_bf_fb_trig_cnt); /* Tx SU & MU counters */ - cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy)); - seq_printf(s, "Tx multi-user Beamforming counts: %ld\n", - FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt)); - cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy)); - seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt); - cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy)); - seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt); - cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); - seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt); + seq_printf(s, "Tx multi-user Beamforming counts: %d\n", + mib->tx_bf_cnt); + seq_printf(s, "Tx multi-user MPDU counts: %d\n", mib->tx_mu_mpdu_cnt); + seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", + mib->tx_mu_acked_mpdu_cnt); + seq_printf(s, "Tx single-user successful MPDU counts: %d\n", + mib->tx_su_acked_mpdu_cnt); seq_puts(s, "\n"); } @@ -203,91 +271,189 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) static int mt7915_tx_stats_show(struct seq_file *file, void *data) { - struct mt7915_dev *dev = file->private; - int stat[8], i, n; + struct mt7915_phy *phy = file->private; + struct mt7915_dev *dev = phy->dev; + struct mib_stats *mib = &phy->mib; + int i; - mt7915_ampdu_stat_read_phy(&dev->phy, file); - mt7915_txbf_stat_read_phy(&dev->phy, file); + mutex_lock(&dev->mt76.mutex); - mt7915_ampdu_stat_read_phy(mt7915_ext_phy(dev), file); - mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file); + mt7915_ampdu_stat_read_phy(phy, file); + mt7915_mac_update_stats(phy); + mt7915_txbf_stat_read_phy(phy, file); /* Tx amsdu info */ seq_puts(file, "Tx MSDU statistics:\n"); - for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) { - stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); - n += stat[i]; - } - - for (i = 0; i < ARRAY_SIZE(stat); i++) { - seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ", - i + 1, stat[i]); - if (n != 0) - seq_printf(file, "(%d%%)\n", stat[i] * 100 / n); + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ", + i + 1, mib->tx_amsdu[i]); + if (mib->tx_amsdu_cnt) + seq_printf(file, "(%3d%%)\n", + mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt); else seq_puts(file, "\n"); } + mutex_unlock(&dev->mt76.mutex); + return 0; } DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats); -static int -mt7915_queues_acq(struct seq_file *s, void *data) +static void +mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size, + const struct hw_queue_map *map) { - struct mt7915_dev *dev = dev_get_drvdata(s->private); - int i; + struct mt7915_phy *phy = s->private; + struct mt7915_dev *dev = phy->dev; + u32 i, val; - for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; - u32 ctrl, val, qlen = 0; + val = mt76_rr(dev, base + MT_FL_Q_EMPTY); + for (i = 0; i < size; i++) { + u32 ctrl, head, tail, queued; + + if (val & BIT(map[i].index)) + continue; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); - ctrl = BIT(31) | BIT(15) | (acs << 8); + ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24); + mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl); - for (j = 0; j < 32; j++) { - if (val & BIT(j)) - continue; + head = mt76_get_field(dev, base + MT_FL_Q2_CTRL, + GENMASK(11, 0)); + tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL, + GENMASK(27, 16)); + queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL, + GENMASK(11, 0)); - mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); - qlen += 
mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, - GENMASK(11, 0)); - } - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + seq_printf(s, "\t%s: ", map[i].name); + seq_printf(s, "queued:0x%03x head:0x%03x tail:0x%03x\n", + queued, head, tail); } +} + +static void +mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_dev *dev = msta->vif->phy->dev; + struct seq_file *s = data; + u8 ac; + + for (ac = 0; ac < 4; ac++) { + u32 qlen, ctrl, val; + u32 idx = msta->wcid.idx >> 5; + u8 offs = msta->wcid.idx & GENMASK(4, 0); + + ctrl = BIT(31) | BIT(11) | (ac << 24); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx)); + + if (val & BIT(offs)) + continue; + + mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx); + qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL, + GENMASK(11, 0)); + seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n", + sta->addr, msta->wcid.idx, msta->vif->wmm_idx, + ac, qlen); + } +} + +static int +mt7915_hw_queues_show(struct seq_file *file, void *data) +{ + struct mt7915_phy *phy = file->private; + struct mt7915_dev *dev = phy->dev; + static const struct hw_queue_map ple_queue_map[] = { + { "CPU_Q0", 0, 1, MT_CTX0 }, + { "CPU_Q1", 1, 1, MT_CTX0 + 1 }, + { "CPU_Q2", 2, 1, MT_CTX0 + 2 }, + { "CPU_Q3", 3, 1, MT_CTX0 + 3 }, + { "ALTX_Q0", 8, 2, MT_LMAC_ALTX0 }, + { "BMC_Q0", 9, 2, MT_LMAC_BMC0 }, + { "BCN_Q0", 10, 2, MT_LMAC_BCN0 }, + { "PSMP_Q0", 11, 2, MT_LMAC_PSMP0 }, + { "ALTX_Q1", 12, 2, MT_LMAC_ALTX0 + 4 }, + { "BMC_Q1", 13, 2, MT_LMAC_BMC0 + 4 }, + { "BCN_Q1", 14, 2, MT_LMAC_BCN0 + 4 }, + { "PSMP_Q1", 15, 2, MT_LMAC_PSMP0 + 4 }, + }; + static const struct hw_queue_map pse_queue_map[] = { + { "CPU Q0", 0, 1, MT_CTX0 }, + { "CPU Q1", 1, 1, MT_CTX0 + 1 }, + { "CPU Q2", 2, 1, MT_CTX0 + 2 }, + { "CPU Q3", 3, 1, MT_CTX0 + 3 }, + { "HIF_Q0", 8, 0, MT_HIF0 }, + { "HIF_Q1", 9, 0, MT_HIF0 + 1 }, + { "HIF_Q2", 10, 0, MT_HIF0 + 2 }, + { "HIF_Q3", 11, 0, MT_HIF0 + 3 }, + { "HIF_Q4", 12, 0, MT_HIF0 + 4 }, + { "HIF_Q5", 13, 0, MT_HIF0 + 5 }, + { "LMAC_Q", 16, 2, 0 }, + { "MDP_TXQ", 17, 2, 1 }, + { "MDP_RXQ", 18, 2, 2 }, + { "SEC_TXQ", 19, 2, 3 }, + { "SEC_RXQ", 20, 2, 4 }, + }; + u32 val, head, tail; + + /* ple queue */ + val = mt76_rr(dev, MT_PLE_FREEPG_CNT); + head = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(11, 0)); + tail = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(27, 16)); + seq_puts(file, "PLE page info:\n"); + seq_printf(file, + "\tTotal free page: 0x%08x head: 0x%03x tail: 0x%03x\n", + val, head, tail); + + val = mt76_rr(dev, MT_PLE_PG_HIF_GROUP); + head = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(11, 0)); + tail = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(27, 16)); + seq_printf(file, "\tHIF free page: 0x%03x res: 0x%03x used: 0x%03x\n", + val, head, tail); + + seq_puts(file, "PLE non-empty queue info:\n"); + mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map), + &ple_queue_map[0]); + + /* iterate per-sta ple queue */ + ieee80211_iterate_stations_atomic(phy->mt76->hw, + mt7915_sta_hw_queue_read, file); + /* pse queue */ + seq_puts(file, "PSE non-empty queue info:\n"); + mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map), + &pse_queue_map[0]); return 0; } +DEFINE_SHOW_ATTRIBUTE(mt7915_hw_queues); + static int -mt7915_queues_read(struct seq_file *s, void *data) +mt7915_xmit_queues_show(struct seq_file *file, void *data) { - struct mt7915_dev *dev = dev_get_drvdata(s->private); - struct mt76_phy *mphy_ext = 
dev->mt76.phy2; - struct mt76_queue *ext_q = mphy_ext ? mphy_ext->q_tx[MT_TXQ_BE] : NULL; + struct mt7915_phy *phy = file->private; + struct mt7915_dev *dev = phy->dev; struct { struct mt76_queue *q; char *queue; } queue_map[] = { - { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, - { ext_q, "WFDMA1" }, - { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, - { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" }, - { dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" }, - { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, + { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" }, + { dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" }, + { dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" }, + { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" }, }; int i; + seq_puts(file, " queue | hw-queued | head | tail |\n"); for (i = 0; i < ARRAY_SIZE(queue_map); i++) { struct mt76_queue *q = queue_map[i].q; if (!q) continue; - seq_printf(s, - "%s: queued=%d head=%d tail=%d\n", + seq_printf(file, " %s | %9d | %9d | %9d |\n", queue_map[i].queue, q->queued, q->head, q->tail); } @@ -295,8 +461,10 @@ mt7915_queues_read(struct seq_file *s, void *data) return 0; } -static void -mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy) +DEFINE_SHOW_ATTRIBUTE(mt7915_xmit_queues); + +static int +mt7915_rate_txpower_show(struct seq_file *file, void *data) { static const char * const sku_group_name[] = { "CCK", "OFDM", "HT20", "HT40", @@ -304,14 +472,11 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy) "RU26", "RU52", "RU106", "RU242/SU20", "RU484/SU40", "RU996/SU80", "RU2x996/SU160" }; + struct mt7915_phy *phy = file->private; s8 txpower[MT7915_SKU_RATE_NUM], *buf; int i; - if (!phy) - return; - - seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy); - + seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy); mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower)); for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { u8 mcs_num = mt7915_sku_group_len[i]; @@ -319,45 +484,75 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy) if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) mcs_num = 10; - mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num); + mt76_seq_puts_array(file, sku_group_name[i], buf, mcs_num); buf += mt7915_sku_group_len[i]; } + + return 0; } +DEFINE_SHOW_ATTRIBUTE(mt7915_rate_txpower); + static int -mt7915_read_rate_txpower(struct seq_file *s, void *data) +mt7915_twt_stats(struct seq_file *s, void *data) { struct mt7915_dev *dev = dev_get_drvdata(s->private); + struct mt7915_twt_flow *iter; + + rcu_read_lock(); - mt7915_puts_rate_txpower(s, &dev->phy); - mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev)); + seq_puts(s, " wcid | id | flags | exp | mantissa"); + seq_puts(s, " | duration | tsf |\n"); + list_for_each_entry_rcu(iter, &dev->twt_list, list) + seq_printf(s, + "%9d | %8d | %5c%c%c%c | %8d | %8d | %8d | %14lld |\n", + iter->wcid, iter->id, + iter->sched ? 's' : 'u', + iter->protection ? 'p' : '-', + iter->trigger ? 't' : '-', + iter->flowtype ? 
'-' : 'a', + iter->exp, iter->mantissa, + iter->duration, iter->tsf); + + rcu_read_unlock(); return 0; } -int mt7915_init_debugfs(struct mt7915_dev *dev) +int mt7915_init_debugfs(struct mt7915_phy *phy) { + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; struct dentry *dir; - dir = mt76_register_debugfs(&dev->mt76); + dir = mt76_register_debugfs_fops(phy->mt76, NULL); if (!dir) return -ENOMEM; - debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, - mt7915_queues_read); - debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, - mt7915_queues_acq); - debugfs_create_file("tx_stats", 0400, dir, dev, &mt7915_tx_stats_fops); - debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + debugfs_create_file("hw-queues", 0400, dir, phy, + &mt7915_hw_queues_fops); + debugfs_create_file("xmit-queues", 0400, dir, phy, + &mt7915_xmit_queues_fops); + debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops); + debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm); + debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa); + debugfs_create_file("fw_util_wm", 0400, dir, dev, + &mt7915_fw_util_wm_fops); + debugfs_create_file("fw_util_wa", 0400, dir, dev, + &mt7915_fw_util_wa_fops); debugfs_create_file("implicit_txbf", 0600, dir, dev, &fops_implicit_txbf); - debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); - /* test knobs */ - debugfs_create_file("radar_trigger", 0200, dir, dev, - &fops_radar_trigger); + debugfs_create_file("txpower_sku", 0400, dir, phy, + &mt7915_rate_txpower_fops); + debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir, + mt7915_twt_stats); debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger); - debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, - mt7915_read_rate_txpower); + if (!dev->dbdc_support || ext_phy) { + debugfs_create_u32("dfs_hw_pattern", 0400, dir, + &dev->hw_pattern); + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_trigger); + } return 0; } @@ -365,68 +560,89 @@ int mt7915_init_debugfs(struct mt7915_dev *dev) #ifdef CONFIG_MAC80211_DEBUGFS /** per-station debugfs **/ -/* usage: */ -static int mt7915_sta_fixed_rate_set(void *data, u64 rate) +static ssize_t mt7915_sta_fixed_rate_set(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { - struct ieee80211_sta *sta = data; + struct ieee80211_sta *sta = file->private_data; struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_dev *dev = msta->vif->phy->dev; + struct ieee80211_vif *vif; + struct sta_phy phy = {}; + char buf[100]; + int ret; + u32 field; + u8 i, gi, he_ltf; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9 + * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3 + * nss - vht: 1~4, he: 1~4, others: ignore + * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2 + * gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2 + * ldpc - off: 0, on: 1 + * stbc - off: 0, on: 1 + * he_ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2 + */ + if (sscanf(buf, "%hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu", + &phy.type, &phy.bw, &phy.nss, &phy.mcs, &gi, + &phy.ldpc, &phy.stbc, &he_ltf) != 8) { + dev_warn(dev->mt76.dev, + "format: Mode BW NSS MCS (HE)GI LDPC STBC HE_LTF\n"); + field = RATE_PARAM_AUTO; + goto 
out; + } + + phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0); + for (i = 0; i <= phy.bw; i++) { + phy.sgi |= gi << (i << sta->he_cap.has_he); + phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he); + } + field = RATE_PARAM_FIXED; + +out: + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, field); + if (ret) + return -EFAULT; - return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate); + return count; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL, - mt7915_sta_fixed_rate_set, "%llx\n"); +static const struct file_operations fops_fixed_rate = { + .write = mt7915_sta_fixed_rate_set, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; static int -mt7915_sta_stats_show(struct seq_file *s, void *data) +mt7915_queues_show(struct seq_file *s, void *data) { struct ieee80211_sta *sta = s->private; - struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct mt7915_sta_stats *stats = &msta->stats; - struct rate_info *rate = &stats->prob_rate; - static const char * const bw[] = { - "BW20", "BW5", "BW10", "BW40", - "BW80", "BW160", "BW_HE_RU" - }; - - if (!rate->legacy && !rate->flags) - return 0; - seq_puts(s, "Probing rate - "); - if (rate->flags & RATE_INFO_FLAGS_MCS) - seq_puts(s, "HT "); - else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) - seq_puts(s, "VHT "); - else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) - seq_puts(s, "HE "); - else - seq_printf(s, "Bitrate %d\n", rate->legacy); - - if (rate->flags) { - seq_printf(s, "%s NSS%d MCS%d ", - bw[rate->bw], rate->nss, rate->mcs); - - if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) - seq_puts(s, "SGI "); - else if (rate->he_gi) - seq_puts(s, "HE GI "); - - if (rate->he_dcm) - seq_puts(s, "DCM "); - } - - seq_printf(s, "\nPPDU PER: %ld.%1ld%%\n", - stats->per / 10, stats->per % 10); + mt7915_sta_hw_queue_read(s, sta); return 0; } -DEFINE_SHOW_ATTRIBUTE(mt7915_sta_stats); +DEFINE_SHOW_ATTRIBUTE(mt7915_queues); void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate); - debugfs_create_file("stats", 0400, dir, sta, &mt7915_sta_stats_fops); + debugfs_create_file("hw-queues", 0400, dir, sta, &mt7915_queues_fops); } + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 4798d6344305d5..4fa8e7ba93e6dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -42,13 +42,17 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; -static ssize_t mt7915_thermal_show_temp(struct device *dev, +static ssize_t mt7915_thermal_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mt7915_phy *phy = dev_get_drvdata(dev); + int i = to_sensor_dev_attr(attr)->index; int temperature; + if (i) + return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000); + temperature = mt7915_mcu_get_temperature(phy); if (temperature < 0) return temperature; @@ -57,11 +61,34 @@ static ssize_t mt7915_thermal_show_temp(struct device *dev, return sprintf(buf, "%u\n", temperature * 1000); } -static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp, - NULL, 0); +static ssize_t mt7915_thermal_temp_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mt7915_phy *phy = dev_get_drvdata(dev); + int ret, i 
= to_sensor_dev_attr(attr)->index; + long val; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) + return ret; + + mutex_lock(&phy->dev->mt76.mutex); + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130); + phy->throttle_temp[i - 1] = val; + mutex_unlock(&phy->dev->mt76.mutex); + + return count; +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0); +static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1); +static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2); static struct attribute *mt7915_hwmon_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, NULL, }; ATTRIBUTE_GROUPS(mt7915_hwmon); @@ -96,6 +123,9 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev, if (state > MT7915_THERMAL_THROTTLE_MAX) return -EINVAL; + if (phy->throttle_temp[0] > phy->throttle_temp[1]) + return 0; + if (state == phy->throttle_state) return 0; @@ -130,9 +160,12 @@ static int mt7915_thermal_init(struct mt7915_phy *phy) struct wiphy *wiphy = phy->mt76->hw->wiphy; struct thermal_cooling_device *cdev; struct device *hwmon; + const char *name; - cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy, - &mt7915_thermal_ops); + name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s", + wiphy_name(wiphy)); + + cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops); if (!IS_ERR(cdev)) { if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj, "cooling_device") < 0) @@ -144,15 +177,76 @@ static int mt7915_thermal_init(struct mt7915_phy *phy) if (!IS_REACHABLE(CONFIG_HWMON)) return 0; - hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, - wiphy_name(wiphy), phy, + hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7915_hwmon_groups); if (IS_ERR(hwmon)) return PTR_ERR(hwmon); + /* initialize critical/maximum high temperature */ + phy->throttle_temp[0] = 110; + phy->throttle_temp[1] = 120; + + return 0; +} + +static void mt7915_led_set_config(struct led_classdev *led_cdev, + u8 delay_on, u8 delay_off) +{ + struct mt7915_dev *dev; + struct mt76_dev *mt76; + u32 val; + + mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); + dev = container_of(mt76, struct mt7915_dev, mt76); + + /* select TX blink mode, 2: only data frames */ + mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2); + + /* enable LED */ + mt76_wr(dev, MT_LED_EN(0), 1); + + /* set LED Tx blink on/off time */ + val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) | + FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off); + mt76_wr(dev, MT_LED_TX_BLINK(0), val); + + /* control LED */ + val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK; + if (dev->mt76.led_al) + val |= MT_LED_CTRL_POLARITY; + + mt76_wr(dev, MT_LED_CTRL(0), val); + mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK); +} + +static int mt7915_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + u16 delta_on = 0, delta_off = 0; + +#define HW_TICK 10 +#define TO_HW_TICK(_t) (((_t) > HW_TICK) ? 
((_t) / HW_TICK) : HW_TICK) + + if (*delay_on) + delta_on = TO_HW_TICK(*delay_on); + if (*delay_off) + delta_off = TO_HW_TICK(*delay_off); + + mt7915_led_set_config(led_cdev, delta_on, delta_off); + return 0; } +static void mt7915_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + if (!brightness) + mt7915_led_set_config(led_cdev, 0, 0xff); + else + mt7915_led_set_config(led_cdev, 0xff, 0); +} + static void mt7915_init_txpower(struct mt7915_dev *dev, struct ieee80211_supported_band *sband) @@ -232,7 +326,12 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) wiphy->reg_notifier = mt7915_regd_notifier; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); @@ -287,9 +386,7 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band) FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF); mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set); - mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); - - mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536); + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680); /* disable rx rate report by default due to hw issues */ mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); } @@ -298,7 +395,7 @@ static void mt7915_mac_init(struct mt7915_dev *dev) { int i; - mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); + mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400); /* enable hardware de-agg */ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); @@ -307,6 +404,11 @@ static void mt7915_mac_init(struct mt7915_dev *dev) MT_WTBL_UPDATE_ADM_COUNT_CLEAR); for (i = 0; i < 2; i++) mt7915_mac_init_band(dev, i); + + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + i = dev->mt76.led_pin ? 
MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2; + mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4); + } } static int mt7915_txbf_init(struct mt7915_dev *dev) @@ -350,7 +452,6 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask; mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1; - INIT_LIST_HEAD(&phy->stats_list); INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work); mt7915_eeprom_parse_band_config(phy); @@ -374,6 +475,10 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) if (ret) goto error; + ret = mt7915_init_debugfs(phy); + if (ret) + goto error; + return 0; error: @@ -480,7 +585,7 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) } /* Beacon and mgmt frames should occupy wcid 0 */ - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA); if (idx) return -ENOSPC; @@ -525,7 +630,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, int vif, int nss) { struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; - struct ieee80211_he_mcs_nss_supp *mcs = &he_cap->he_mcs_nss_supp; u8 c; #ifdef CONFIG_MAC80211_MESH @@ -577,8 +681,11 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; - /* num_snd_dim */ - c = (nss - 1) | (max_t(int, le16_to_cpu(mcs->tx_mcs_160), 1) << 3); + /* num_snd_dim + * for mt7915, max supported nss is 2 for bw > 80MHz + */ + c = (nss - 1) | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2; elem->phy_cap_info[5] |= c; c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | @@ -613,12 +720,19 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, { int i, idx = 0, nss = hweight8(phy->mt76->chainmask); u16 mcs_map = 0; + u16 mcs_map_160 = 0; for (i = 0; i < 8; i++) { if (i < nss) mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2)); else mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2)); + + /* Can do 1/2 of NSS streams in 160Mhz mode. 
*/ + if (i < nss / 2) + mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2)); + else + mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2)); } for (i = 0; i < NUM_NL80211_IFTYPES; i++) { @@ -667,6 +781,8 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, switch (i) { case NL80211_IFTYPE_AP: + he_cap_elem->mac_cap_info[0] |= + IEEE80211_HE_MAC_CAP0_TWT_RES; he_cap_elem->mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BSR; he_cap_elem->mac_cap_info[4] |= @@ -677,7 +793,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[1] |= @@ -720,10 +840,10 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); - he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map); - he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map_160); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map_160); + he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160); + he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160); mt7915_set_stream_he_txbf_caps(he_cap, i, nss); @@ -787,11 +907,11 @@ int mt7915_register_device(struct mt7915_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; - INIT_LIST_HEAD(&dev->phy.stats_list); INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7915_mac_work); INIT_LIST_HEAD(&dev->sta_rc_list); INIT_LIST_HEAD(&dev->sta_poll_list); + INIT_LIST_HEAD(&dev->twt_list); spin_lock_init(&dev->sta_poll_lock); init_waitqueue_head(&dev->reset_wait); @@ -816,6 +936,12 @@ int mt7915_register_device(struct mt7915_dev *dev) dev->mt76.test_ops = &mt7915_testmode_ops; #endif + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt7915_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt7915_led_set_blink; + } + ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) @@ -831,7 +957,7 @@ int mt7915_register_device(struct mt7915_dev *dev) if (ret) return ret; - return mt7915_init_debugfs(dev); + return mt7915_init_debugfs(&dev->phy); } void mt7915_unregister_device(struct mt7915_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 2462704094b0a8..5fcf35f2d9fbe4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -6,6 +6,7 @@ #include "mt7915.h" #include "../dma.h" #include "mac.h" +#include "mcu.h" #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) @@ -88,15 +89,14 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) 0, 5000); } -static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid) +u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw) { mt76_wr(dev, MT_WTBLON_TOP_WDUCR, FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); - return MT_WTBL_LMAC_OFFS(wcid, 0); + return MT_WTBL_LMAC_OFFS(wcid, dw); 
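As an aside on the HE MCS maps built in mt7915_init_he_caps() above: each map packs a 2-bit support code per spatial stream, and the new mcs_map_160 only advertises half of the chains. A minimal standalone sketch of that packing follows; the 2-bit codes match the ieee80211_he_mcs_support values, and the 4-chain PHY is assumed purely for illustration.

  #include <stdio.h>

  /* 2-bit per-stream codes, same values as enum ieee80211_he_mcs_support */
  #define HE_MCS_SUPPORT_0_11    2
  #define HE_MCS_NOT_SUPPORTED   3

  int main(void)
  {
      unsigned int nss = 4, i;    /* assumed 4-chain PHY, for illustration */
      unsigned short mcs_map = 0, mcs_map_160 = 0;

      for (i = 0; i < 8; i++) {
          mcs_map |= (i < nss ? HE_MCS_SUPPORT_0_11 :
                                HE_MCS_NOT_SUPPORTED) << (i * 2);
          /* only half of the streams are usable at 160 MHz */
          mcs_map_160 |= (i < nss / 2 ? HE_MCS_SUPPORT_0_11 :
                                        HE_MCS_NOT_SUPPORTED) << (i * 2);
      }

      /* prints 0xffaa / 0xfffa for nss = 4 */
      printf("80MHz 0x%04x, 160MHz 0x%04x\n", mcs_map, mcs_map_160);
      return 0;
  }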
} -/* TODO: use txfree airtime info to avoid runtime accessing in the long run */ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) { static const u8 ac_to_tid[] = { @@ -107,6 +107,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) }; struct ieee80211_sta *sta; struct mt7915_sta *msta; + struct rate_info *rate; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); int i; @@ -119,8 +120,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) while (true) { bool clear = false; - u32 addr; + u32 addr, val; u16 idx; + u8 bw; spin_lock_bh(&dev->sta_poll_lock); if (list_empty(&sta_poll_list)) { @@ -133,7 +135,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) spin_unlock_bh(&dev->sta_poll_lock); idx = msta->wcid.idx; - addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4; + addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20); for (i = 0; i < IEEE80211_NUM_ACS; i++) { u32 tx_last = msta->airtime_ac[i]; @@ -174,6 +176,43 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur); } + + /* + * We don't support reading GI info from txs packets. + * For accurate tx status reporting and AQL improvement, + * we need to make sure that flags match so polling GI + * from per-sta counters directly. + */ + rate = &msta->wcid.rate; + addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); + val = mt76_rr(dev, addr); + + switch (rate->bw) { + case RATE_INFO_BW_160: + bw = IEEE80211_STA_RX_BW_160; + break; + case RATE_INFO_BW_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case RATE_INFO_BW_40: + bw = IEEE80211_STA_RX_BW_40; + break; + default: + bw = IEEE80211_STA_RX_BW_20; + break; + } + + if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { + u8 offs = 24 + 2 * bw; + + rate->he_gi = (val & (0x3 << offs)) >> offs; + } else if (rate->flags & + (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { + if (val & BIT(12 + bw)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + else + rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; + } } rcu_read_unlock(); @@ -228,12 +267,51 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); } +static void +mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, + struct mt76_rx_status *status, + __le32 *rxv) +{ + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | + HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN), + .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), + }; + struct ieee80211_radiotap_he_mu *he_mu = NULL; + + he_mu = skb_push(skb, sizeof(mu_known)); + memcpy(he_mu, &mu_known, sizeof(mu_known)); + +#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) + + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); + if (status->he_dcm) + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); + + he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | + MU_PREP(FLAGS2_SIG_B_SYMS_USERS, + le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER)); + + he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0); + + if (status->bw >= RATE_INFO_BW_40) { + he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); + he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1); + } + + if (status->bw >= RATE_INFO_BW_80) { + he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2); + he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3); + } +} + static void mt7915_mac_decode_he_radiotap(struct sk_buff *skb, struct 
mt76_rx_status *status, __le32 *rxv, u32 phy) { - /* TODO: struct ieee80211_radiotap_he_mu */ static const struct ieee80211_radiotap_he known = { .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | HE_BITS(DATA1_DATA_DCM_KNOWN) | @@ -241,6 +319,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, HE_BITS(DATA1_CODING_KNOWN) | HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | HE_BITS(DATA1_DOPPLER_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN) | HE_BITS(DATA1_BSS_COLOR_KNOWN), .data2 = HE_BITS(DATA2_GI_KNOWN) | HE_BITS(DATA2_TXBF_KNOWN) | @@ -255,9 +334,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) | HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]); + he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) | le16_encode_bits(ltf_size, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) + he->data5 |= HE_BITS(DATA5_TXBF); he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); @@ -265,12 +347,10 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, case MT_PHY_TYPE_HE_SU: he->data1 |= HE_BITS(DATA1_FORMAT_SU) | HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN); + HE_BITS(DATA1_BEAM_CHANGE_KNOWN); he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) | HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); break; case MT_PHY_TYPE_HE_EXT_SU: he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | @@ -280,23 +360,20 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, break; case MT_PHY_TYPE_HE_MU: he->data1 |= HE_BITS(DATA1_FORMAT_MU) | - HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN); + HE_BITS(DATA1_UL_DL_KNOWN); he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); + he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]); mt7915_mac_decode_he_radiotap_ru(status, he, rxv); break; case MT_PHY_TYPE_HE_TB: he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN) | HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | HE_BITS(DATA1_SPTL_REUSE4_KNOWN); - he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) | - HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) | + he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) | HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) | HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]); @@ -610,8 +687,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_8023; } - if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) { mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + mt7915_mac_decode_he_mu_radiotap(skb, status, rxv); + } if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; @@ -825,17 +905,19 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi, static void mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, - struct sk_buff *skb, struct ieee80211_key_conf *key) + struct sk_buff *skb, struct ieee80211_key_conf *key, + bool *mcast) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - bool multicast = is_multicast_ether_addr(hdr->addr1); u8 tid = skb->priority & 
IEEE80211_QOS_CTL_TID_MASK; __le16 fc = hdr->frame_control; u8 fc_type, fc_stype; u32 val; + *mcast = is_multicast_ether_addr(hdr->addr1); + if (ieee80211_is_action(fc) && mgmt->u.action.category == WLAN_CATEGORY_BACK && mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { @@ -861,15 +943,16 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | - FIELD_PREP(MT_TXD2_MULTICAST, multicast); + FIELD_PREP(MT_TXD2_MULTICAST, *mcast); - if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && + if (key && *mcast && ieee80211_is_robust_mgmt_frame(skb) && key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { val |= MT_TXD2_BIP; txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); } - if (!ieee80211_is_data(fc) || multicast) + if (!ieee80211_is_data(fc) || *mcast || + info->flags & IEEE80211_TX_CTL_USE_MINRATE) val |= MT_TXD2_FIX_RATE; txwi[2] |= cpu_to_le32(val); @@ -899,6 +982,51 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, txwi[7] |= cpu_to_le32(val); } +static u16 +mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif, + bool beacon, bool mcast) +{ + u8 mode = 0, band = mphy->chandef.chan->band; + int rateidx = 0, mcast_rate; + + if (beacon) { + struct cfg80211_bitrate_mask *mask; + + mask = &vif->bss_conf.beacon_tx_rate; + if (hweight16(mask->control[band].he_mcs[0]) == 1) { + rateidx = ffs(mask->control[band].he_mcs[0]) - 1; + mode = MT_PHY_TYPE_HE_SU; + goto out; + } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) { + rateidx = ffs(mask->control[band].vht_mcs[0]) - 1; + mode = MT_PHY_TYPE_VHT; + goto out; + } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) { + rateidx = ffs(mask->control[band].ht_mcs[0]) - 1; + mode = MT_PHY_TYPE_HT; + goto out; + } else if (hweight32(mask->control[band].legacy) == 1) { + rateidx = ffs(mask->control[band].legacy) - 1; + goto legacy; + } + } + + mcast_rate = vif->bss_conf.mcast_rate[band]; + if (mcast && mcast_rate > 0) + rateidx = mcast_rate - 1; + else + rateidx = ffs(vif->bss_conf.basic_rates) - 1; + +legacy: + rateidx = mt76_calculate_default_rate(mphy, rateidx); + mode = rateidx >> 8; + rateidx &= GENMASK(7, 0); + +out: + return FIELD_PREP(MT_TX_RATE_IDX, rateidx) | + FIELD_PREP(MT_TX_RATE_MODE, mode); +} + void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, int pid, struct ieee80211_key_conf *key, bool beacon) @@ -909,6 +1037,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + bool mcast = false; u16 tx_count = 15; u32 val; @@ -939,7 +1068,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, FIELD_PREP(MT_TXD0_Q_IDX, q_idx); txwi[0] = cpu_to_le32(val); - val = MT_TXD1_LONG_FORMAT | + val = MT_TXD1_LONG_FORMAT | MT_TXD1_VTA | FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); @@ -971,19 +1100,14 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, if (is_8023) mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid); else - mt7915_mac_write_txwi_80211(dev, txwi, skb, key); + mt7915_mac_write_txwi_80211(dev, txwi, skb, key, &mcast); if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) { - u16 rate; + u16 rate = mt7915_mac_tx_rate_val(mphy, vif, beacon, mcast); /* hardware won't add HTC for mgmt/ctrl frame */ 
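The fixed-rate value returned by mt7915_mac_tx_rate_val() above and later programmed through MT_TXD6_TX_RATE is simply the rate index and PHY mode packed with the MT_TX_RATE_IDX/MT_TX_RATE_MODE masks that mac.h now defines as GENMASK(5, 0) and GENMASK(9, 6) (with VHT/HE still confined to bits 0-3 of the index). A minimal userspace sketch of that packing; the mode/index inputs are arbitrary example values, not taken from the patch.

  #include <stdio.h>

  #define RATE_IDX_MASK   0x003fu     /* MT_TX_RATE_IDX,  GENMASK(5, 0) */
  #define RATE_MODE_MASK  0x03c0u     /* MT_TX_RATE_MODE, GENMASK(9, 6) */

  static unsigned short pack_tx_rate(unsigned int mode, unsigned int idx)
  {
      /* same layout the driver builds with FIELD_PREP() */
      return ((mode << 6) & RATE_MODE_MASK) | (idx & RATE_IDX_MASK);
  }

  int main(void)
  {
      /* e.g. PHY mode 4 at rate index 7 -> 0x0107 (values are illustrative) */
      printf("0x%04x\n", pack_tx_rate(4, 7));
      return 0;
  }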
txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD); - if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) - rate = MT7915_5G_RATE_DEFAULT; - else - rate = MT7915_2G_RATE_DEFAULT; - val = MT_TXD6_FIXED_BW | FIELD_PREP(MT_TXD6_TX_RATE, rate); txwi[6] |= cpu_to_le32(val); @@ -1016,6 +1140,17 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; + if (sta) { + struct mt7915_sta *msta; + + msta = (struct mt7915_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->jiffies + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->jiffies = jiffies; + } + } + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, @@ -1162,7 +1297,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); for (i = 0; i < count; i++) { u32 msdu, info = le32_to_cpu(free->info[i]); - u8 stat; /* * 1'b1: new wcid pair. @@ -1170,7 +1304,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) */ if (info & MT_TX_FREE_PAIR) { struct mt7915_sta *msta; - struct mt7915_phy *phy; struct mt76_wcid *wcid; u16 idx; @@ -1182,10 +1315,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) continue; msta = container_of(wcid, struct mt7915_sta, wcid); - phy = msta->vif->phy; spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->stats_list)) - list_add_tail(&msta->stats_list, &phy->stats_list); if (list_empty(&msta->poll_list)) list_add_tail(&msta->poll_list, &dev->sta_poll_list); spin_unlock_bh(&dev->sta_poll_lock); @@ -1193,8 +1323,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) } msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); - stat = FIELD_GET(MT_TX_FREE_STATUS, info); - txwi = mt76_token_release(mdev, msdu, &wake); if (!txwi) continue; @@ -1219,20 +1347,27 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) static bool mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid, - __le32 *txs_data) + __le32 *txs_data, struct mt76_sta_stats *stats) { + struct ieee80211_supported_band *sband; struct mt76_dev *mdev = &dev->mt76; + struct mt76_phy *mphy; struct ieee80211_tx_info *info; struct sk_buff_head list; + struct rate_info rate = {}; struct sk_buff *skb; + bool cck = false; + u32 txrate, txs, mode; mt76_tx_status_lock(mdev, &list); skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); if (!skb) - goto out; + goto out_no_skb; + + txs = le32_to_cpu(txs_data[0]); info = IEEE80211_SKB_CB(skb); - if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK))) + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) info->flags |= IEEE80211_TX_STAT_ACK; info->status.ampdu_len = 1; @@ -1240,9 +1375,92 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid, IEEE80211_TX_STAT_ACK); info->status.rates[0].idx = -1; - mt76_tx_status_skb_done(mdev, skb, &list); + + txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); + + rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate); + rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1; + + if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss)) + stats->tx_nss[rate.nss - 1]++; + if (rate.mcs < ARRAY_SIZE(stats->tx_mcs)) + stats->tx_mcs[rate.mcs]++; + + mode = FIELD_GET(MT_TX_RATE_MODE, txrate); + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + mphy = &dev->mphy; + if (wcid->ext_phy && dev->mt76.phy2) + mphy = dev->mt76.phy2; + + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = 
&mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck); + rate.legacy = sband->bitrates[rate.mcs].bitrate; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + rate.mcs += (rate.nss - 1) * 8; + if (rate.mcs > 31) + goto out; + + rate.flags = RATE_INFO_FLAGS_MCS; + if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) + rate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + if (rate.mcs > 9) + goto out; + + rate.flags = RATE_INFO_FLAGS_VHT_MCS; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + if (rate.mcs > 11) + goto out; + + rate.he_gi = wcid->rate.he_gi; + rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate); + rate.flags = RATE_INFO_FLAGS_HE_MCS; + break; + default: + goto out; + } + + stats->tx_mode[mode]++; + + switch (FIELD_GET(MT_TXS0_BW, txs)) { + case IEEE80211_STA_RX_BW_160: + rate.bw = RATE_INFO_BW_160; + stats->tx_bw[3]++; + break; + case IEEE80211_STA_RX_BW_80: + rate.bw = RATE_INFO_BW_80; + stats->tx_bw[2]++; + break; + case IEEE80211_STA_RX_BW_40: + rate.bw = RATE_INFO_BW_40; + stats->tx_bw[1]++; + break; + default: + rate.bw = RATE_INFO_BW_20; + stats->tx_bw[0]++; + break; + } + wcid->rate = rate; out: + mt76_tx_status_skb_done(mdev, skb, &list); + +out_no_skb: mt76_tx_status_unlock(mdev, &list); return !!skb; @@ -1279,12 +1497,13 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) if (!wcid) goto out; - mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data); + msta = container_of(wcid, struct mt7915_sta, wcid); + + mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats); if (!wcid->sta) goto out; - msta = container_of(wcid, struct mt7915_sta, wcid); spin_lock_bh(&dev->sta_poll_lock); if (list_empty(&msta->poll_list)) list_add_tail(&msta->poll_list, &dev->sta_poll_list); @@ -1333,15 +1552,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) { - struct mt7915_dev *dev; - if (!e->txwi) { dev_kfree_skb_any(e->skb); return; } - dev = container_of(mdev, struct mt7915_dev, mt76); - /* error path */ if (e->skb == DMA_DUMMY_DATA) { struct mt76_txwi_cache *t; @@ -1403,17 +1618,12 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); - int sifs, offset; + int offset; bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) return; - if (is_5ghz) - sifs = 16; - else - sifs = 10; - if (ext_phy) { coverage_class = max_t(s16, dev->phy.coverage_class, coverage_class); @@ -1435,11 +1645,14 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset); mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset); mt76_wr(dev, MT_TMAC_ICR0(ext_phy), - FIELD_PREP(MT_IFS_EIFS, 360) | + FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 
84 : 78) | FIELD_PREP(MT_IFS_RIFS, 2) | - FIELD_PREP(MT_IFS_SIFS, sifs) | + FIELD_PREP(MT_IFS_SIFS, 10) | FIELD_PREP(MT_IFS_SLOT, phy->slottime)); + mt76_wr(dev, MT_TMAC_ICR1(ext_phy), + FIELD_PREP(MT_IFS_EIFS_CCK, 314)); + if (phy->slottime < 20 || is_5ghz) val = MT7915_CFEND_RATE_DEFAULT; else @@ -1580,7 +1793,7 @@ mt7915_dma_reset(struct mt7915_dev *dev) mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_rx_reset(dev, i); - mt76_tx_status_check(&dev->mt76, NULL, true); + mt76_tx_status_check(&dev->mt76, true); /* re-init prefetch settings after reset */ mt7915_dma_prefetch(dev); @@ -1668,6 +1881,7 @@ void mt7915_mac_reset_work(struct work_struct *work) if (phy2) clear_bit(MT76_RESET, &phy2->mt76->state); + local_bh_disable(); napi_enable(&dev->mt76.napi[0]); napi_schedule(&dev->mt76.napi[0]); @@ -1676,6 +1890,8 @@ void mt7915_mac_reset_work(struct work_struct *work) napi_enable(&dev->mt76.napi[2]); napi_schedule(&dev->mt76.napi[2]); + local_bh_enable(); + tasklet_schedule(&dev->irq_tasklet); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); @@ -1702,17 +1918,117 @@ void mt7915_mac_reset_work(struct work_struct *work) MT7915_WATCHDOG_TIME); } -static void -mt7915_mac_update_stats(struct mt7915_phy *phy) +void mt7915_mac_update_stats(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; struct mib_stats *mib = &phy->mib; bool ext_phy = phy != &dev->phy; - int i, aggr0, aggr1; + int i, aggr0, aggr1, cnt; mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), MT_MIB_SDR3_FCS_ERR_MASK); + cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy)); + mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy)); + mib->rx_mpdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy)); + mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy)); + mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy)); + mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy)); + mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy)); + mib->tx_ampdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy)); + mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy)); + mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy)); + mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy)); + mib->rx_ampdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy)); + mib->rx_ampdu_bytes_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy)); + mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy)); + mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy)); + mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy)); + mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy)); + mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy)); + 
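The statements above and below all follow the same pattern: read a MIB register once, extract the counter field, and accumulate it into the software struct mib_stats so the totals keep growing across reads (mt7915_get_stats() no longer memsets the mib). A small standalone sketch of that read-mask-accumulate step, using stand-in GENMASK/FIELD_GET helpers and a made-up 16-bit field rather than any real MT_MIB_SDR* layout.

  #include <stdio.h>

  /* minimal stand-ins for GENMASK()/FIELD_GET() from the kernel headers */
  #define GENMASK32(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
  #define FIELD_GET32(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

  struct soft_mib { unsigned int rx_fifo_full_cnt; };

  /* accumulate one (hypothetical) 16-bit field of a 32-bit MIB register */
  static void accumulate(struct soft_mib *mib, unsigned int reg_val)
  {
      mib->rx_fifo_full_cnt += FIELD_GET32(GENMASK32(15, 0), reg_val);
  }

  int main(void)
  {
      struct soft_mib mib = { 0 };

      accumulate(&mib, 0x00120034);   /* field value 0x34 */
      accumulate(&mib, 0x00560078);   /* field value 0x78 */
      printf("%u\n", mib.rx_fifo_full_cnt);   /* 172 */
      return 0;
  }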
mib->rx_vec_queue_overflow_drop_cnt += + FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy)); + mib->rx_ba_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy)); + mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy)); + mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt); + + cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy)); + mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt); + + cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy)); + mib->tx_mu_mpdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy)); + mib->tx_mu_acked_mpdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); + mib->tx_su_acked_mpdu_cnt += cnt; + + cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy)); + mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt); + mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt); + + cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy)); + mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt); + mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt); + mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt); + mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt); + + cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy)); + mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt); + mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt); + mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt); + + cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy)); + mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt); + mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt); + + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); + mib->tx_amsdu[i] += cnt; + mib->tx_amsdu_cnt += cnt; + } + aggr0 = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { u32 val; @@ -1737,30 +2053,6 @@ mt7915_mac_update_stats(struct mt7915_phy *phy) } } -static void -mt7915_mac_sta_stats_work(struct mt7915_phy *phy) -{ - struct mt7915_dev *dev = phy->dev; - struct mt7915_sta *msta; - LIST_HEAD(list); - - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&phy->stats_list, &list); - - while (!list_empty(&list)) { - msta = list_first_entry(&list, struct mt7915_sta, stats_list); - list_del_init(&msta->stats_list); - spin_unlock_bh(&dev->sta_poll_lock); - - /* use MT_TX_FREE_RATE to report Tx rate for further devices */ - mt7915_mcu_get_tx_rate(dev, RATE_CTRL_RU_INFO, msta->wcid.idx); - - spin_lock_bh(&dev->sta_poll_lock); - } - - spin_unlock_bh(&dev->sta_poll_lock); -} - void mt7915_mac_sta_rc_work(struct work_struct *work) { struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); @@ -1776,8 +2068,8 @@ void mt7915_mac_sta_rc_work(struct work_struct *work) while (!list_empty(&list)) { msta = list_first_entry(&list, struct mt7915_sta, rc_list); list_del_init(&msta->rc_list); - changed = msta->stats.changed; - msta->stats.changed = 0; + changed = msta->changed; + msta->changed = 0; spin_unlock_bh(&dev->sta_poll_lock); sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); @@ -1785,10 +2077,8 @@ void mt7915_mac_sta_rc_work(struct work_struct *work) if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED | - IEEE80211_RC_BW_CHANGED)) { - mt7915_mcu_add_he(dev, vif, sta); - mt7915_mcu_add_rate_ctrl(dev, vif, sta); - } + IEEE80211_RC_BW_CHANGED)) + mt7915_mcu_add_rate_ctrl(dev, vif, sta, true); if (changed & IEEE80211_RC_SMPS_CHANGED) mt7915_mcu_add_smps(dev, vif, sta); @@ -1817,14 +2107,9 @@ void mt7915_mac_work(struct work_struct *work) mt7915_mac_update_stats(phy); } - if (++phy->sta_work_count == 10) { - phy->sta_work_count = 0; - mt7915_mac_sta_stats_work(phy); - } - mutex_unlock(&mphy->dev->mutex); - mt76_tx_status_check(mphy->dev, NULL, false); + mt76_tx_status_check(mphy->dev, false); ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT7915_WATCHDOG_TIME); @@ -1961,3 +2246,182 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) mt7915_dfs_stop_radar_detector(phy); return 0; } + +static int +mt7915_mac_twt_duration_align(int duration) +{ + return duration << 8; +} + +static u64 +mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev, + struct mt7915_twt_flow *flow) +{ + struct mt7915_twt_flow *iter, *iter_next; + u32 duration = flow->duration << 8; + u64 start_tsf; + + iter = list_first_entry_or_null(&dev->twt_list, + struct mt7915_twt_flow, list); + if (!iter || !iter->sched || iter->start_tsf > duration) { + /* add flow as first entry in the list */ + list_add(&flow->list, &dev->twt_list); + return 0; + } + + list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) { + start_tsf = iter->start_tsf + + mt7915_mac_twt_duration_align(iter->duration); + if (list_is_last(&iter->list, &dev->twt_list)) + break; + + if (!iter_next->sched || + iter_next->start_tsf > start_tsf + duration) { + list_add(&flow->list, &iter->list); + goto out; + } + } + + /* add flow as last entry in the list */ + list_add_tail(&flow->list, &dev->twt_list); +out: + return start_tsf; +} + +static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt) +{ + struct ieee80211_twt_params *twt_agrt; + u64 interval, duration; + u16 mantissa; + u8 exp; + + /* only individual agreement supported */ + if (twt->control & 
IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) + return -EOPNOTSUPP; + + /* only 256us unit supported */ + if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) + return -EOPNOTSUPP; + + twt_agrt = (struct ieee80211_twt_params *)twt->params; + + /* explicit agreement not supported */ + if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT))) + return -EOPNOTSUPP; + + exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, + le16_to_cpu(twt_agrt->req_type)); + mantissa = le16_to_cpu(twt_agrt->mantissa); + duration = twt_agrt->min_twt_dur << 8; + + interval = (u64)mantissa << exp; + if (interval < duration) + return -EOPNOTSUPP; + + return 0; +} + +void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt) +{ + enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct ieee80211_twt_params *twt_agrt = (void *)twt->params; + u16 req_type = le16_to_cpu(twt_agrt->req_type); + enum ieee80211_twt_setup_cmd sta_setup_cmd; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_twt_flow *flow; + int flowid, table_id; + u8 exp; + + if (mt7915_mac_check_twt_req(twt)) + goto out; + + mutex_lock(&dev->mt76.mutex); + + if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT) + goto unlock; + + if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow)) + goto unlock; + + flowid = ffs(~msta->twt.flowid_mask) - 1; + le16p_replace_bits(&twt_agrt->req_type, flowid, + IEEE80211_TWT_REQTYPE_FLOWID); + + table_id = ffs(~dev->twt.table_mask) - 1; + exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type); + sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type); + + flow = &msta->twt.flow[flowid]; + memset(flow, 0, sizeof(*flow)); + INIT_LIST_HEAD(&flow->list); + flow->wcid = msta->wcid.idx; + flow->table_id = table_id; + flow->id = flowid; + flow->duration = twt_agrt->min_twt_dur; + flow->mantissa = twt_agrt->mantissa; + flow->exp = exp; + flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION); + flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE); + flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER); + + if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST || + sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) { + u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp; + u64 flow_tsf, curr_tsf; + u32 rem; + + flow->sched = true; + flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow); + curr_tsf = __mt7915_get_tsf(hw, msta->vif); + div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem); + flow_tsf = curr_tsf + interval - rem; + twt_agrt->twt = cpu_to_le64(flow_tsf); + } else { + list_add_tail(&flow->list, &dev->twt_list); + } + flow->tsf = le64_to_cpu(twt_agrt->twt); + + if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD)) + goto unlock; + + setup_cmd = TWT_SETUP_CMD_ACCEPT; + dev->twt.table_mask |= BIT(table_id); + msta->twt.flowid_mask |= BIT(flowid); + dev->twt.n_agrt++; + +unlock: + mutex_unlock(&dev->mt76.mutex); +out: + le16p_replace_bits(&twt_agrt->req_type, setup_cmd, + IEEE80211_TWT_REQTYPE_SETUP_CMD); + twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) | + (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED); +} + +void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev, + struct mt7915_sta *msta, + u8 flowid) +{ + struct mt7915_twt_flow *flow; + + lockdep_assert_held(&dev->mt76.mutex); + + if (flowid >= ARRAY_SIZE(msta->twt.flow)) + return; + + if (!(msta->twt.flowid_mask & 
BIT(flowid))) + return; + + flow = &msta->twt.flow[flowid]; + if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, + MCU_TWT_AGRT_DELETE)) + return; + + list_del_init(&flow->list); + msta->twt.flowid_mask &= ~BIT(flowid); + dev->twt.table_mask &= ~BIT(flow->table_id); + dev->twt.n_agrt--; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index eb1885f4bd8eb3..7a2c740d1464ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -117,6 +117,7 @@ enum rx_pkt_type { #define MT_PRXV_TX_DCM BIT(4) #define MT_PRXV_TX_ER_SU_106T BIT(5) #define MT_PRXV_NSTS GENMASK(9, 7) +#define MT_PRXV_TXBF BIT(10) #define MT_PRXV_HT_AD_CODE BIT(11) #define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28) #define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0) @@ -133,7 +134,14 @@ enum rx_pkt_type { #define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17) #define MT_CRXV_HE_LDPC_EXT_SYM BIT(20) #define MT_CRXV_HE_PE_DISAMBIG BIT(23) +#define MT_CRXV_HE_NUM_USER GENMASK(30, 24) #define MT_CRXV_HE_UPLINK BIT(31) +#define MT_CRXV_HE_RU0 GENMASK(7, 0) +#define MT_CRXV_HE_RU1 GENMASK(15, 8) +#define MT_CRXV_HE_RU2 GENMASK(23, 16) +#define MT_CRXV_HE_RU3 GENMASK(31, 24) + +#define MT_CRXV_HE_MU_AID GENMASK(30, 20) #define MT_CRXV_HE_SR_MASK GENMASK(11, 8) #define MT_CRXV_HE_SR1_MASK GENMASK(16, 12) @@ -272,7 +280,8 @@ enum tx_mcu_port_q_idx { #define MT_TX_RATE_MODE GENMASK(9, 6) #define MT_TX_RATE_SU_EXT_TONE BIT(5) #define MT_TX_RATE_DCM BIT(4) -#define MT_TX_RATE_IDX GENMASK(3, 0) +/* VHT/HE only use bits 0-3 */ +#define MT_TX_RATE_IDX GENMASK(5, 0) #define MT_TXP_MAX_BUF_NUM 6 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index c25f8da590dd91..057ab27b70832a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -172,6 +172,9 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif) int i; for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) { + mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI; + mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0); + mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0); mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0); memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0), sizeof(mvif->bitrate_mask.control[i].ht_mcs)); @@ -215,7 +218,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, mvif->phy = phy; mvif->band_idx = ext_phy; - if (ext_phy) + if (dev->mt76.phy2) mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) + mvif->idx % (MT7915_MAX_WMM_SETS / 2); else @@ -231,12 +234,13 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, idx = MT7915_WTBL_RESERVED - mvif->idx; INIT_LIST_HEAD(&mvif->sta.rc_list); - INIT_LIST_HEAD(&mvif->sta.stats_list); INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.ext_phy = mvif->band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_packet_id_init(&mvif->sta.wcid); + mt7915_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -252,6 +256,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; mt7915_init_bitrate_mask(vif); + memset(&mvif->cap, -1, sizeof(mvif->cap)); out: mutex_unlock(&dev->mt76.mutex); @@ -291,6 +296,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, if (!list_empty(&msta->poll_list)) 
list_del_init(&msta->poll_list); spin_unlock_bh(&dev->sta_poll_lock); + + mt76_packet_id_flush(&dev->mt76, &msta->wcid); } static void mt7915_init_dfs_state(struct mt7915_phy *phy) @@ -538,6 +545,29 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, mutex_unlock(&dev->mt76.mutex); } +static void +mt7915_update_bss_color(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_he_bss_color *bss_color) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + + switch (vif->type) { + case NL80211_IFTYPE_AP: { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + + if (mvif->omac_idx > HW_BSSID_MAX) + return; + fallthrough; + } + case NL80211_IFTYPE_STATION: + mt7915_mcu_update_bss_color(dev, vif, bss_color); + break; + default: + break; + } +} + static void mt7915_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -586,6 +616,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_HE_OBSS_PD) mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable); + if (changed & BSS_CHANGED_HE_BSS_COLOR) + mt7915_update_bss_color(hw, vif, &info->he_bss_color); + if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) mt7915_mcu_add_beacon(hw, vif, info->enable_beacon); @@ -613,19 +646,18 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; int ret, idx; - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA); if (idx < 0) return -ENOSPC; INIT_LIST_HEAD(&msta->rc_list); - INIT_LIST_HEAD(&msta->stats_list); INIT_LIST_HEAD(&msta->poll_list); msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; msta->wcid.ext_phy = mvif->band_idx; msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->stats.jiffies = jiffies; + msta->jiffies = jiffies; mt7915_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -634,7 +666,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (ret) return ret; - return mt7915_mcu_add_sta_adv(dev, vif, sta, true); + return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false); } void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -642,18 +674,19 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, { struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + int i; - mt7915_mcu_add_sta_adv(dev, vif, sta, false); mt7915_mcu_add_sta(dev, vif, sta, false); mt7915_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) + mt7915_mac_twt_teardown_flow(dev, msta, i); + spin_lock_bh(&dev->sta_poll_lock); if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); - if (!list_empty(&msta->stats_list)) - list_del_init(&msta->stats_list); if (!list_empty(&msta->rc_list)) list_del_init(&msta->rc_list); spin_unlock_bh(&dev->sta_poll_lock); @@ -781,22 +814,19 @@ mt7915_get_stats(struct ieee80211_hw *hw, struct mib_stats *mib = &phy->mib; mutex_lock(&dev->mt76.mutex); + stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; - memset(mib, 0, sizeof(*mib)); - mutex_unlock(&dev->mt76.mutex); return 0; } -static u64 -mt7915_get_tsf(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif) +u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif) { - struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); bool band = phy != &dev->phy; @@ -806,7 +836,7 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } tsf; u16 n; - mutex_lock(&dev->mt76.mutex); + lockdep_assert_held(&dev->mt76.mutex); n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; /* TSF software read */ @@ -815,9 +845,21 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band)); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band)); + return tsf.t64; +} + +static u64 +mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + u64 ret; + + mutex_lock(&dev->mt76.mutex); + ret = __mt7915_get_tsf(hw, mvif); mutex_unlock(&dev->mt76.mutex); - return tsf.t64; + return ret; } static void @@ -926,7 +968,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, { struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct mt7915_sta_stats *stats = &msta->stats; + struct rate_info *txrate = &msta->wcid.rate; struct rate_info rxrate = {}; if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) { @@ -934,20 +976,20 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } - if (!stats->tx_rate.legacy && !stats->tx_rate.flags) + if (!txrate->legacy && !txrate->flags) return; - if (stats->tx_rate.legacy) { - sinfo->txrate.legacy = stats->tx_rate.legacy; + if (txrate->legacy) { + sinfo->txrate.legacy = txrate->legacy; } else { - sinfo->txrate.mcs = stats->tx_rate.mcs; - sinfo->txrate.nss = stats->tx_rate.nss; - sinfo->txrate.bw = stats->tx_rate.bw; - sinfo->txrate.he_gi = stats->tx_rate.he_gi; - sinfo->txrate.he_dcm = stats->tx_rate.he_dcm; - sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc; + sinfo->txrate.mcs = txrate->mcs; + sinfo->txrate.nss = txrate->nss; + sinfo->txrate.bw = txrate->bw; + sinfo->txrate.he_gi = txrate->he_gi; + sinfo->txrate.he_dcm = txrate->he_dcm; + sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; } - sinfo->txrate.flags = stats->tx_rate.flags; + sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } @@ -955,16 +997,13 @@ static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta) { struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_dev *dev = msta->vif->phy->dev; - struct ieee80211_hw *hw = msta->vif->phy->mt76->hw; u32 *changed = data; spin_lock_bh(&dev->sta_poll_lock); - msta->stats.changed |= *changed; + msta->changed |= *changed; if (list_empty(&msta->rc_list)) list_add_tail(&msta->rc_list, &dev->sta_rc_list); spin_unlock_bh(&dev->sta_poll_lock); - - ieee80211_queue_work(hw, &dev->rc_work); } static void mt7915_sta_rc_update(struct ieee80211_hw *hw, @@ -972,7 +1011,11 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u32 changed) { + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + mt7915_sta_rc_work(&changed, sta); + ieee80211_queue_work(hw, &dev->rc_work); } static int @@ -980,22 +1023,22 @@ mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) 
{ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - enum nl80211_band band = mvif->phy->mt76->chandef.chan->band; - u32 changed; - - if (mask->control[band].gi == NL80211_TXRATE_FORCE_LGI) - return -EINVAL; + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED; - changed = IEEE80211_RC_SUPP_RATES_CHANGED; mvif->bitrate_mask = *mask; - /* Update firmware rate control to add a boundary on top of table - * to limit the rate selection for each peer, so when set bitrates - * vht-mcs-5 1:9, which actually means nss = 1 mcs = 0~9. This only - * applies to data frames as for the other mgmt, mcast, bcast still - * use legacy rates as it is. + /* if multiple rates across different preambles are given we can + * reconfigure this info with all peers using sta_rec command with + * the below exception cases. + * - single rate : if a rate is passed along with different preambles, + * we select the highest one as fixed rate. i.e VHT MCS for VHT peers. + * - multiple rates: if it's not in range format i.e 0-{7,8,9} for VHT + * then multiple MCS setting (MCS 4,5,6) is not supported. */ ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed); + ieee80211_queue_work(hw, &dev->rc_work); return 0; } @@ -1032,6 +1075,240 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw, mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); } +static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx_ampdu_cnt", + "tx_stop_q_empty_cnt", + "tx_mpdu_attempts", + "tx_mpdu_success", + "tx_rwp_fail_cnt", + "tx_rwp_need_cnt", + "tx_pkt_ebf_cnt", + "tx_pkt_ibf_cnt", + "tx_ampdu_len:0-1", + "tx_ampdu_len:2-10", + "tx_ampdu_len:11-19", + "tx_ampdu_len:20-28", + "tx_ampdu_len:29-37", + "tx_ampdu_len:38-46", + "tx_ampdu_len:47-55", + "tx_ampdu_len:56-79", + "tx_ampdu_len:80-103", + "tx_ampdu_len:104-127", + "tx_ampdu_len:128-151", + "tx_ampdu_len:152-175", + "tx_ampdu_len:176-199", + "tx_ampdu_len:200-223", + "tx_ampdu_len:224-247", + "ba_miss_count", + "tx_beamformer_ppdu_iBF", + "tx_beamformer_ppdu_eBF", + "tx_beamformer_rx_feedback_all", + "tx_beamformer_rx_feedback_he", + "tx_beamformer_rx_feedback_vht", + "tx_beamformer_rx_feedback_ht", + "tx_beamformer_rx_feedback_bw", /* zero based idx: 20, 40, 80, 160 */ + "tx_beamformer_rx_feedback_nc", + "tx_beamformer_rx_feedback_nr", + "tx_beamformee_ok_feedback_pkts", + "tx_beamformee_feedback_trig", + "tx_mu_beamforming", + "tx_mu_mpdu", + "tx_mu_successful_mpdu", + "tx_su_successful_mpdu", + "tx_msdu_pack_1", + "tx_msdu_pack_2", + "tx_msdu_pack_3", + "tx_msdu_pack_4", + "tx_msdu_pack_5", + "tx_msdu_pack_6", + "tx_msdu_pack_7", + "tx_msdu_pack_8", + + /* rx counters */ + "rx_fifo_full_cnt", + "rx_mpdu_cnt", + "channel_idle_cnt", + "rx_vector_mismatch_cnt", + "rx_delimiter_fail_cnt", + "rx_len_mismatch_cnt", + "rx_ampdu_cnt", + "rx_ampdu_bytes_cnt", + "rx_ampdu_valid_subframe_cnt", + "rx_ampdu_valid_subframe_b_cnt", + "rx_pfdrop_cnt", + "rx_vec_queue_overflow_drop_cnt", + "rx_ba_cnt", + + /* per vif counters */ + "v_tx_mode_cck", + "v_tx_mode_ofdm", + "v_tx_mode_ht", + "v_tx_mode_ht_gf", + "v_tx_mode_vht", + "v_tx_mode_he_su", + "v_tx_mode_he_ext_su", + "v_tx_mode_he_tb", + "v_tx_mode_he_mu", + "v_tx_bw_20", + "v_tx_bw_40", + "v_tx_bw_80", + "v_tx_bw_160", + "v_tx_mcs_0", + "v_tx_mcs_1", + "v_tx_mcs_2", + "v_tx_mcs_3", + "v_tx_mcs_4", + "v_tx_mcs_5", + "v_tx_mcs_6", + "v_tx_mcs_7", + "v_tx_mcs_8", + "v_tx_mcs_9", + "v_tx_mcs_10", + "v_tx_mcs_11", +}; + +#define 
MT7915_SSTATS_LEN ARRAY_SIZE(mt7915_gstrings_stats) + +/* Ethtool related API */ +static +void mt7915_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *mt7915_gstrings_stats, + sizeof(mt7915_gstrings_stats)); +} + +static +int mt7915_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return MT7915_SSTATS_LEN; + + return 0; +} + +static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) +{ + struct mt76_ethtool_worker_info *wi = wi_data; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + if (msta->vif->idx != wi->idx) + return; + + mt76_ethtool_worker(wi, &msta->stats); +} + +static +void mt7915_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt76_ethtool_worker_info wi = { + .data = data, + .idx = mvif->idx, + }; + struct mib_stats *mib = &phy->mib; + /* See mt7915_ampdu_stat_read_phy, etc */ + bool ext_phy = phy != &dev->phy; + int i, n, ei = 0; + + mutex_lock(&dev->mt76.mutex); + + mt7915_mac_update_stats(phy); + + data[ei++] = mib->tx_ampdu_cnt; + data[ei++] = mib->tx_stop_q_empty_cnt; + data[ei++] = mib->tx_mpdu_attempts_cnt; + data[ei++] = mib->tx_mpdu_success_cnt; + data[ei++] = mib->tx_rwp_fail_cnt; + data[ei++] = mib->tx_rwp_need_cnt; + data[ei++] = mib->tx_pkt_ebf_cnt; + data[ei++] = mib->tx_pkt_ibf_cnt; + + /* Tx ampdu stat */ + n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; + for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++) + data[ei++] = dev->mt76.aggr_stats[i + n]; + + data[ei++] = phy->mib.ba_miss_cnt; + + /* Tx Beamformer monitor */ + data[ei++] = mib->tx_bf_ibf_ppdu_cnt; + data[ei++] = mib->tx_bf_ebf_ppdu_cnt; + + /* Tx Beamformer Rx feedback monitor */ + data[ei++] = mib->tx_bf_rx_fb_all_cnt; + data[ei++] = mib->tx_bf_rx_fb_he_cnt; + data[ei++] = mib->tx_bf_rx_fb_vht_cnt; + data[ei++] = mib->tx_bf_rx_fb_ht_cnt; + + data[ei++] = mib->tx_bf_rx_fb_bw; + data[ei++] = mib->tx_bf_rx_fb_nc_cnt; + data[ei++] = mib->tx_bf_rx_fb_nr_cnt; + + /* Tx Beamformee Rx NDPA & Tx feedback report */ + data[ei++] = mib->tx_bf_fb_cpl_cnt; + data[ei++] = mib->tx_bf_fb_trig_cnt; + + /* Tx SU & MU counters */ + data[ei++] = mib->tx_bf_cnt; + data[ei++] = mib->tx_mu_mpdu_cnt; + data[ei++] = mib->tx_mu_acked_mpdu_cnt; + data[ei++] = mib->tx_su_acked_mpdu_cnt; + + /* Tx amsdu info (pack-count histogram) */ + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) + data[ei++] = mib->tx_amsdu[i]; + + /* rx counters */ + data[ei++] = mib->rx_fifo_full_cnt; + data[ei++] = mib->rx_mpdu_cnt; + data[ei++] = mib->channel_idle_cnt; + data[ei++] = mib->rx_vector_mismatch_cnt; + data[ei++] = mib->rx_delimiter_fail_cnt; + data[ei++] = mib->rx_len_mismatch_cnt; + data[ei++] = mib->rx_ampdu_cnt; + data[ei++] = mib->rx_ampdu_bytes_cnt; + data[ei++] = mib->rx_ampdu_valid_subframe_cnt; + data[ei++] = mib->rx_ampdu_valid_subframe_bytes_cnt; + data[ei++] = mib->rx_pfdrop_cnt; + data[ei++] = mib->rx_vec_queue_overflow_drop_cnt; + data[ei++] = mib->rx_ba_cnt; + + /* Add values for all stations owned by this vif */ + wi.initial_stat_idx = ei; + ieee80211_iterate_stations_atomic(hw, mt7915_ethtool_worker, &wi); + + mutex_unlock(&dev->mt76.mutex); + + if (wi.sta_count == 0) + return; + + ei += wi.worker_stat_count; + 
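The ei != MT7915_SSTATS_LEN warning just after this point exists because the strings table, the count returned by mt7915_get_et_sset_count() and the order in which data[] is filled (including the per-vif counters appended by the station worker) must stay in lockstep, or `ethtool -S <interface>` reports values against the wrong names. A trimmed standalone sketch of that invariant, with made-up names and values:

  #include <stdio.h>

  #define GSTRING_LEN 32

  /* names and fill order must match one-to-one, as the driver checks with ei */
  static const char stats_strings[][GSTRING_LEN] = {
      "tx_ampdu_cnt",
      "rx_mpdu_cnt",
  };
  #define N_STATS (sizeof(stats_strings) / sizeof(stats_strings[0]))

  static int fill_stats(unsigned long long *data)
  {
      int ei = 0;

      data[ei++] = 123;   /* tx_ampdu_cnt */
      data[ei++] = 456;   /* rx_mpdu_cnt */
      return ei;          /* caller compares against N_STATS */
  }

  int main(void)
  {
      unsigned long long data[N_STATS];
      int ei = fill_stats(data);

      if (ei != (int)N_STATS)
          fprintf(stderr, "stat count mismatch: %d vs %zu\n", ei, N_STATS);
      printf("%s=%llu %s=%llu\n", stats_strings[0], data[0],
             stats_strings[1], data[1]);
      return 0;
  }

Once the ops are registered, these counters show up in the `ethtool -S` output for the interface.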
if (ei != MT7915_SSTATS_LEN) + dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d", + ei, (int)MT7915_SSTATS_LEN); +} + +static void +mt7915_twt_teardown_request(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 flowid) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mt7915_mac_twt_teardown_flow(dev, msta, flowid); + mutex_unlock(&dev->mt76.mutex); +} + const struct ieee80211_ops mt7915_ops = { .tx = mt7915_tx, .start = mt7915_start, @@ -1056,6 +1333,9 @@ const struct ieee80211_ops mt7915_ops = { .get_txpower = mt76_get_txpower, .channel_switch_beacon = mt7915_channel_switch_beacon, .get_stats = mt7915_get_stats, + .get_et_sset_count = mt7915_get_et_sset_count, + .get_et_stats = mt7915_get_et_stats, + .get_et_strings = mt7915_get_et_strings, .get_tsf = mt7915_get_tsf, .set_tsf = mt7915_set_tsf, .offset_tsf = mt7915_offset_tsf, @@ -1067,6 +1347,8 @@ const struct ieee80211_ops mt7915_ops = { .sta_statistics = mt7915_sta_statistics, .sta_set_4addr = mt7915_sta_set_4addr, .sta_set_decap_offload = mt7915_sta_set_decap_offload, + .add_twt_setup = mt7915_mac_add_twt_setup, + .twt_teardown_request = mt7915_twt_teardown_request, CFG80211_TESTMODE_CMD(mt76_testmode_cmd) CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 43960770a9af20..899957b9d0f19c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -416,8 +416,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0); } -static void -mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) +int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) { struct { __le32 args[3]; @@ -429,7 +428,7 @@ mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) }, }; - mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false); } static void @@ -488,152 +487,6 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) dev->hw_pattern++; } -static int -mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, - struct rate_info *rate, u16 r) -{ - struct ieee80211_supported_band *sband; - u16 ru_idx = le16_to_cpu(ra->ru_idx); - bool cck = false; - - rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r); - rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1; - - switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) { - case MT_PHY_TYPE_CCK: - cck = true; - fallthrough; - case MT_PHY_TYPE_OFDM: - if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) - sband = &mphy->sband_5g.sband; - else - sband = &mphy->sband_2g.sband; - - rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck); - rate->legacy = sband->bitrates[rate->mcs].bitrate; - break; - case MT_PHY_TYPE_HT: - case MT_PHY_TYPE_HT_GF: - rate->mcs += (rate->nss - 1) * 8; - if (rate->mcs > 31) - return -EINVAL; - - rate->flags = RATE_INFO_FLAGS_MCS; - if (ra->gi) - rate->flags |= RATE_INFO_FLAGS_SHORT_GI; - break; - case MT_PHY_TYPE_VHT: - if (rate->mcs > 9) - return -EINVAL; - - rate->flags = RATE_INFO_FLAGS_VHT_MCS; - if (ra->gi) - rate->flags |= RATE_INFO_FLAGS_SHORT_GI; - break; - case MT_PHY_TYPE_HE_SU: - case MT_PHY_TYPE_HE_EXT_SU: - case MT_PHY_TYPE_HE_TB: - case MT_PHY_TYPE_HE_MU: 
- if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) - return -EINVAL; - - rate->he_gi = ra->gi; - rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r); - rate->flags = RATE_INFO_FLAGS_HE_MCS; - break; - default: - return -EINVAL; - } - - if (ru_idx) { - switch (ru_idx) { - case 1 ... 2: - rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - case 3 ... 6: - rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_484; - break; - case 7 ... 14: - rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242; - break; - default: - rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; - break; - } - rate->bw = RATE_INFO_BW_HE_RU; - } else { - u8 bw = mt7915_mcu_chan_bw(&mphy->chandef) - - FIELD_GET(MT_RA_RATE_BW, r); - - switch (bw) { - case IEEE80211_STA_RX_BW_160: - rate->bw = RATE_INFO_BW_160; - break; - case IEEE80211_STA_RX_BW_80: - rate->bw = RATE_INFO_BW_80; - break; - case IEEE80211_STA_RX_BW_40: - rate->bw = RATE_INFO_BW_40; - break; - default: - rate->bw = RATE_INFO_BW_20; - break; - } - } - - return 0; -} - -static void -mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) -{ - struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data; - struct rate_info rate = {}, prob_rate = {}; - u16 probe = le16_to_cpu(ra->prob_up_rate); - u16 attempts = le16_to_cpu(ra->attempts); - u16 curr = le16_to_cpu(ra->curr_rate); - u16 wcidx = le16_to_cpu(ra->wlan_idx); - struct ieee80211_tx_status status = {}; - struct mt76_phy *mphy = &dev->mphy; - struct mt7915_sta_stats *stats; - struct mt7915_sta *msta; - struct mt76_wcid *wcid; - - if (wcidx >= MT76_N_WCIDS) - return; - - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); - if (!wcid) - return; - - msta = container_of(wcid, struct mt7915_sta, wcid); - stats = &msta->stats; - - if (msta->wcid.ext_phy && dev->mt76.phy2) - mphy = dev->mt76.phy2; - - /* current rate */ - if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr)) - stats->tx_rate = rate; - - /* probing rate */ - if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe)) - stats->prob_rate = prob_rate; - - if (attempts) { - u16 success = le16_to_cpu(ra->success); - - stats->per = 1000 * (attempts - success) / attempts; - } - - status.sta = wcid_to_sta(wcid); - if (!status.sta) - return; - - status.rate = &stats->tx_rate; - ieee80211_tx_status_ext(mphy->hw, &status); -} - static void mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) { @@ -657,6 +510,15 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) (int)(skb->len - sizeof(*rxd)), data); } +static void +mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (!vif->color_change_active) + return; + + ieee80211_color_change_finish(vif); +} + static void mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) { @@ -672,12 +534,14 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) case MCU_EXT_EVENT_CSA_NOTIFY: mt7915_mcu_rx_csa_notify(dev, skb); break; - case MCU_EXT_EVENT_RATE_REPORT: - mt7915_mcu_tx_rate_report(dev, skb); - break; case MCU_EXT_EVENT_FW_LOG_2_HOST: mt7915_mcu_rx_log_message(dev, skb); break; + case MCU_EXT_EVENT_BCC_NOTIFY: + ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7915_mcu_cca_finish, dev); + break; default: break; } @@ -706,7 +570,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb) rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || - 
rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT || + rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY || !rxd->seq) mt7915_mcu_rx_unsolicited_event(dev, skb); else @@ -721,7 +585,7 @@ mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif, .bss_idx = mvif->idx, .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0, .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0, - .muar_idx = msta ? mvif->omac_idx : 0, + .muar_idx = msta && msta->wcid.sta ? mvif->omac_idx : 0xe, .is_tlv_append = 1, }; struct sk_buff *skb; @@ -757,7 +621,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta, } if (sta_hdr) - sta_hdr->len = cpu_to_le16(sizeof(hdr)); + le16_add_cpu(&sta_hdr->len, sizeof(hdr)); return skb_put_data(nskb, &hdr, sizeof(hdr)); } @@ -923,12 +787,15 @@ static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, struct mt7915_he_obss_narrow_bw_ru_data *data = _data; const struct element *elem; + rcu_read_lock(); elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY); - if (!elem || elem->datalen < 10 || + if (!elem || elem->datalen <= 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) data->tolerated = false; + + rcu_read_unlock(); } static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, @@ -1201,7 +1068,7 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb, u8 cipher; cipher = mt7915_mcu_get_cipher(key->cipher); - if (cipher == MT_CIPHER_NONE) + if (cipher == MCU_CIPHER_NONE) return -EOPNOTSUPP; sec_key = &sec->key[0]; @@ -1317,7 +1184,7 @@ mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb, ba->rst_ba_sb = 1; } - if (enable && tx) + if (enable) ba->ba_winsize = cpu_to_le16(params->buf_size); } @@ -1469,17 +1336,21 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } static void -mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) { struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; - struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem; enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band; const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs; struct sta_rec_he *he; struct tlv *tlv; u32 cap = 0; + if (!sta->he_cap.has_he) + return; + tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he)); he = (struct sta_rec_he *)tlv; @@ -1504,8 +1375,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G)) cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT; - if (elem->phy_cap_info[1] & - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) + if (mvif->cap.ldpc && (elem->phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) cap |= STA_REC_HE_CAP_LDPC; if (elem->phy_cap_info[1] & @@ -1524,6 +1395,10 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC; + if (elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB) + cap |= STA_REC_HE_CAP_TRIG_CQI_FK; + if (elem->phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE; @@ -1548,10 +1423,6 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) 
IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI) cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI; - if (elem->phy_cap_info[9] & - IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK) - cap |= STA_REC_HE_CAP_TRIG_CQI_FK; - if (elem->phy_cap_info[9] & IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU) cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242; @@ -1640,19 +1511,45 @@ mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, } static void -mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) { - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; - struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem; struct sta_rec_muru *muru; struct tlv *tlv; + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return; + + if (!sta->vht_cap.vht_supported) + return; + tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru)); muru = (struct sta_rec_muru *)tlv; - muru->cfg.ofdma_dl_en = true; - muru->cfg.mimo_dl_en = true; + muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer || + mvif->cap.vht_mu_ebfer || + mvif->cap.vht_mu_ebfee; + + muru->mimo_dl.vht_mu_bfee = + !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + + if (!sta->he_cap.has_he) + return; + + muru->mimo_dl.partial_bw_dl_mimo = + HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]); + + muru->cfg.mimo_ul_en = true; + muru->mimo_ul.full_ul_mimo = + HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]); + muru->mimo_ul.partial_ul_mimo = + HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); + + muru->cfg.ofdma_dl_en = true; muru->ofdma_dl.punc_pream_rx = HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]); muru->ofdma_dl.he_20m_in_40m_2g = @@ -1661,9 +1558,6 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); muru->ofdma_dl.he_80m_in_160m = HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); - muru->ofdma_dl.lt16_sigb = 0; - muru->ofdma_dl.rx_su_comp_sigb = 0; - muru->ofdma_dl.rx_su_non_comp_sigb = 0; muru->ofdma_ul.t_frame_dur = HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]); @@ -1671,18 +1565,18 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); muru->ofdma_ul.uo_ra = HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); - muru->ofdma_ul.he_2x996_tone = 0; - muru->ofdma_ul.rx_t_frame_11ac = 0; +} - muru->mimo_dl.vht_mu_bfee = - !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); - muru->mimo_dl.partial_bw_dl_mimo = - HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]); +static void +mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_ht *ht; + struct tlv *tlv; - muru->mimo_ul.full_ul_mimo = - HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]); - muru->mimo_ul.partial_ul_mimo = - HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); + tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); + + ht = (struct sta_rec_ht *)tlv; + ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); } static void @@ -1691,6 +1585,9 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) struct sta_rec_vht *vht; struct tlv *tlv; + if (!sta->vht_cap.vht_supported) + return; + tlv = 
mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); vht = (struct sta_rec_vht *)tlv; @@ -1700,12 +1597,17 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) } static void -mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct sta_rec_amsdu *amsdu; struct tlv *tlv; + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return; + if (!sta->max_amsdu_len) return; @@ -1718,44 +1620,6 @@ mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) msta->wcid.amsdu = true; } -static bool -mt7915_hw_amsdu_supported(struct ieee80211_vif *vif) -{ - switch (vif->type) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_STATION: - return true; - default: - return false; - } -} - -static void -mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, - struct ieee80211_sta *sta, struct ieee80211_vif *vif) -{ - struct tlv *tlv; - - /* starec ht */ - if (sta->ht_cap.ht_supported) { - struct sta_rec_ht *ht; - - tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); - ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); - - if (mt7915_hw_amsdu_supported(vif)) - mt7915_mcu_sta_amsdu_tlv(skb, sta); - } - - /* starec he */ - if (sta->he_cap.has_he) - mt7915_mcu_sta_he_tlv(skb, sta); - - /* starec uapsd */ - mt7915_mcu_sta_uapsd_tlv(skb, sta, vif); -} - static void mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, void *sta_wtbl, void *wtbl_tlv) @@ -1766,15 +1630,15 @@ mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps), wtbl_tlv, sta_wtbl); smps = (struct wtbl_smps *)tlv; - - if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) - smps->smps = true; + smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC); } static void -mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - void *sta_wtbl, void *wtbl_tlv) +mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *sta_wtbl, + void *wtbl_tlv) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct wtbl_ht *ht = NULL; struct tlv *tlv; @@ -1783,7 +1647,8 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), wtbl_tlv, sta_wtbl); ht = (struct wtbl_ht *)tlv; - ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); + ht->ldpc = mvif->cap.ldpc && + (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); ht->af = sta->ht_cap.ampdu_factor; ht->mm = sta->ht_cap.ampdu_density; ht->ht = true; @@ -1797,7 +1662,8 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht), wtbl_tlv, sta_wtbl); vht = (struct wtbl_vht *)tlv; - vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + vht->ldpc = mvif->cap.ldpc && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); vht->vht = true; af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, @@ -1838,6 +1704,32 @@ mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } } +static int +mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct 
mt7915_sta *msta; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *tlv; + + msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; + + tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, + tlv, &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr); + mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr); + + if (sta) + mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr); + + return 0; +} + int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -1887,10 +1779,48 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, MCU_EXT_CMD(STA_REC_UPDATE), true); } +static inline bool +mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool bfee) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + int tx_ant = hweight8(phy->mt76->chainmask) - 1; + + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return false; + + if (!bfee && tx_ant < 2) + return false; + + if (sta->he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem; + + if (bfee) + return mvif->cap.he_su_ebfee && + HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]); + else + return mvif->cap.he_su_ebfer && + HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]); + } + + if (sta->vht_cap.vht_supported) { + u32 cap = sta->vht_cap.cap; + + if (bfee) + return mvif->cap.vht_su_ebfee && + (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); + else + return mvif->cap.vht_su_ebfer && + (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); + } + + return false; +} + static void mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf) { - bf->bf_cap = MT_EBF; bf->sounding_phy = MT_PHY_TYPE_OFDM; bf->ndp_rate = 0; /* mcs0 */ bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */ @@ -1905,9 +1835,8 @@ mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy, u8 n = 0; bf->tx_mode = MT_PHY_TYPE_HT; - bf->bf_cap = MT_IBF; - if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF && + if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) && (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED)) n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK, mcs->tx_params); @@ -1918,8 +1847,8 @@ mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy, else if (mcs->rx_mask[1]) n = 1; - bf->nr = hweight8(phy->mt76->chainmask) - 1; - bf->nc = min_t(u8, bf->nr, n); + bf->nrow = hweight8(phy->mt76->chainmask) - 1; + bf->ncol = min_t(u8, bf->nrow, n); bf->ibf_ncol = n; } @@ -1936,23 +1865,23 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy, bf->tx_mode = MT_PHY_TYPE_VHT; if (explicit) { - u8 bfee_nr, bfer_nr; + u8 sts, snd_dim; mt7915_mcu_sta_sounding_rate(bf); - bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, - pc->cap); - bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + + sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, + pc->cap); + snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, vc->cap); - bf->nr = min_t(u8, min_t(u8, bfer_nr, bfee_nr), tx_ant); - bf->nc = min_t(u8, nss_mcs, bf->nr); - bf->ibf_ncol = bf->nc; + bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant); + bf->ncol = min_t(u8, nss_mcs, bf->nrow); + bf->ibf_ncol = bf->ncol; if (sta->bandwidth == IEEE80211_STA_RX_BW_160) - bf->nr = 1; + bf->nrow = 1; } else { - bf->bf_cap = 
MT_IBF; - bf->nr = tx_ant; - bf->nc = min_t(u8, nss_mcs, bf->nr); + bf->nrow = tx_ant; + bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = nss_mcs; if (sta->bandwidth == IEEE80211_STA_RX_BW_160) @@ -1970,21 +1899,23 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem; u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80); u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); - u8 bfee_nr, bfer_nr; + u8 snd_dim, sts; bf->tx_mode = MT_PHY_TYPE_HE_SU; + mt7915_mcu_sta_sounding_rate(bf); + bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB, pe->phy_cap_info[6]); bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB, pe->phy_cap_info[6]); - bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, + snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, ve->phy_cap_info[5]); - bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK, - pe->phy_cap_info[4]); - bf->nr = min_t(u8, bfer_nr, bfee_nr); - bf->nc = min_t(u8, nss_mcs, bf->nr); - bf->ibf_ncol = bf->nc; + sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK, + pe->phy_cap_info[4]); + bf->nrow = min_t(u8, snd_dim, sts); + bf->ncol = min_t(u8, nss_mcs, bf->nrow); + bf->ibf_ncol = bf->ncol; if (sta->bandwidth != IEEE80211_STA_RX_BW_160) return; @@ -1995,7 +1926,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160); nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); - bf->nc_bw160 = nss_mcs; + bf->ncol_bw160 = nss_mcs; } if (pe->phy_cap_info[0] & @@ -2003,25 +1934,27 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80); nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); - if (bf->nc_bw160) - bf->nc_bw160 = min_t(u8, bf->nc_bw160, nss_mcs); + if (bf->ncol_bw160) + bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs); else - bf->nc_bw160 = nss_mcs; + bf->ncol_bw160 = nss_mcs; } - bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK, + snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK, ve->phy_cap_info[5]); - bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK, - pe->phy_cap_info[4]); + sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK, + pe->phy_cap_info[4]); - bf->nr_bw160 = min_t(int, bfer_nr, bfee_nr); + bf->nrow_bw160 = min_t(int, snd_dim, sts); } static void -mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif *vif, struct mt7915_phy *phy, - bool enable, bool explicit) +mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_phy *phy = + mvif->band_idx ? 
mt7915_ext_phy(dev) : &dev->phy; int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; @@ -2031,43 +1964,42 @@ mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */ }; + bool ebf; -#define MT_BFER_FREE cpu_to_le16(GENMASK(15, 0)) + ebf = mt7915_is_ebf_supported(phy, vif, sta, false); + if (!ebf && !dev->ibf) + return; tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf)); bf = (struct sta_rec_bf *)tlv; - if (!enable) { - bf->pfmu = MT_BFER_FREE; - return; - } - /* he: eBF only, in accordance with spec * vht: support eBF and iBF * ht: iBF only, since mac80211 lacks of eBF support */ - if (sta->he_cap.has_he && explicit) + if (sta->he_cap.has_he && ebf) mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); else if (sta->vht_cap.vht_supported) - mt7915_mcu_sta_bfer_vht(sta, phy, bf, explicit); + mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf); else if (sta->ht_cap.ht_supported) mt7915_mcu_sta_bfer_ht(sta, phy, bf); else return; + bf->bf_cap = ebf ? ebf : dev->ibf << 1; bf->bw = sta->bandwidth; bf->ibf_dbw = sta->bandwidth; bf->ibf_nrow = tx_ant; - if (!explicit && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc) + if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) bf->ibf_timeout = 0x48; else bf->ibf_timeout = 0x18; - if (explicit && bf->nr != tx_ant) - bf->mem_20m = matrix[tx_ant][bf->nc]; + if (ebf && bf->nrow != tx_ant) + bf->mem_20m = matrix[tx_ant][bf->ncol]; else - bf->mem_20m = matrix[bf->nr][bf->nc]; + bf->mem_20m = matrix[bf->nrow][bf->ncol]; switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: @@ -2084,13 +2016,19 @@ mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, } static void -mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - struct mt7915_phy *phy) +mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_phy *phy = + mvif->band_idx ? 
mt7915_ext_phy(dev) : &dev->phy; int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bfee *bfee; struct tlv *tlv; - u8 nr = 0; + u8 nrow = 0; + + if (!mt7915_is_ebf_supported(phy, vif, sta, true)) + return; tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); bfee = (struct sta_rec_bfee *)tlv; @@ -2098,94 +2036,137 @@ mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, if (sta->he_cap.has_he) { struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem; - nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, - pe->phy_cap_info[5]); + nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, + pe->phy_cap_info[5]); } else if (sta->vht_cap.vht_supported) { struct ieee80211_sta_vht_cap *pc = &sta->vht_cap; - nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, - pc->cap); + nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + pc->cap); } /* reply with identity matrix to avoid 2x2 BF negative gain */ - bfee->fb_identity_matrix = !!(nr == 1 && tx_ant == 2); + bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2); } -static int -mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) +int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + void *data, u32 field) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct mt7915_phy *phy; + struct sta_phy *phy = data; + struct sta_rec_ra_fixed *ra; struct sk_buff *skb; - int r, len; - bool ebfee = 0, ebf = 0; - - if (vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_AP) - return 0; - - phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy; + struct tlv *tlv; + int len = sizeof(struct sta_req_hdr) + sizeof(*ra); - if (sta->he_cap.has_he) { - struct ieee80211_he_cap_elem *pe; - const struct ieee80211_he_cap_elem *ve; - const struct ieee80211_sta_he_cap *vc; - - pe = &sta->he_cap.he_cap_elem; - vc = mt7915_get_he_phy_cap(phy, vif); - ve = &vc->he_cap_elem; - - ebfee = !!(HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) && - HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4])); - ebf = !!(HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) && - HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4])); - } else if (sta->vht_cap.vht_supported) { - struct ieee80211_sta_vht_cap *pc; - struct ieee80211_sta_vht_cap *vc; + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); - pc = &sta->vht_cap; - vc = &phy->mt76->sband_5g.sband.vht_cap; + tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); + ra = (struct sta_rec_ra_fixed *)tlv; - ebfee = !!((pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) && - (vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); - ebf = !!((vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) && - (pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); + switch (field) { + case RATE_PARAM_AUTO: + break; + case RATE_PARAM_FIXED: + case RATE_PARAM_FIXED_MCS: + case RATE_PARAM_FIXED_GI: + case RATE_PARAM_FIXED_HE_LTF: + ra->phy = *phy; + break; + default: + break; } + ra->field = cpu_to_le32(field); - /* must keep each tag independent */ + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); +} - /* starec bf */ - if (ebf || dev->ibf) { - len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf); +static int +mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct 
ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; + struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; + enum nl80211_band band = chandef->chan->band; + struct sta_phy phy = {}; + int ret, nrates = 0; + +#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \ + do { \ + u8 i, gi = mask->control[band]._gi; \ + gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \ + for (i = 0; i <= sta->bandwidth; i++) { \ + phy.sgi |= gi << (i << (_he)); \ + phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\ + } \ + for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \ + nrates += hweight16(mask->control[band]._mcs[i]); \ + phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \ + } while (0) - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (sta->he_cap.has_he) { + __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1); + } else if (sta->vht_cap.vht_supported) { + __sta_phy_bitrate_mask_check(vht_mcs, gi, 0); + } else if (sta->ht_cap.ht_supported) { + __sta_phy_bitrate_mask_check(ht_mcs, gi, 0); + } else { + nrates = hweight32(mask->control[band].legacy); + phy.mcs = ffs(mask->control[band].legacy) - 1; + } +#undef __sta_phy_bitrate_mask_check - mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable, ebf); + /* fall back to auto rate control */ + if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && + mask->control[band].he_gi == GENMASK(7, 0) && + mask->control[band].he_ltf == GENMASK(7, 0) && + nrates != 1) + return 0; - r = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD(STA_REC_UPDATE), true); - if (r) - return r; + /* fixed single rate */ + if (nrates == 1) { + ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, + RATE_PARAM_FIXED_MCS); + if (ret) + return ret; } - /* starec bfee */ - if (ebfee) { - len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee); - - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); - if (IS_ERR(skb)) - return PTR_ERR(skb); + /* fixed GI */ + if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || + mask->control[band].he_gi != GENMASK(7, 0)) { + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + u32 addr; + + /* firmware updates only TXCMD but doesn't take WTBL into + * account, so driver should update here to reflect the + * actual txrate hardware sends out. 
+		 */
+		addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
+		if (sta->he_cap.has_he)
+			mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
+		else
+			mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
-		mt7915_mcu_sta_bfee_tlv(skb, sta, phy);
+		ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+						     RATE_PARAM_FIXED_GI);
+		if (ret)
+			return ret;
+	}
-		r = mt76_mcu_skb_send_msg(&dev->mt76, skb,
-					  MCU_EXT_CMD(STA_REC_UPDATE), true);
-		if (r)
-			return r;
+	/* fixed HE_LTF */
+	if (mask->control[band].he_ltf != GENMASK(7, 0)) {
+		ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+						     RATE_PARAM_FIXED_HE_LTF);
+		if (ret)
+			return ret;
 	}

 	return 0;
@@ -2233,8 +2214,6 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
 	}

 	if (sta->ht_cap.ht_supported) {
-		const u8 *mcs_mask = mask->control[band].ht_mcs;
-
 		ra->supp_mode |= MODE_HT;
 		ra->af = sta->ht_cap.ampdu_factor;
 		ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
@@ -2248,15 +2227,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
 			cap |= STA_CAP_TX_STBC;
 		if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
 			cap |= STA_CAP_RX_STBC;
-		if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+		if (mvif->cap.ldpc &&
+		    (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
 			cap |= STA_CAP_LDPC;

-		mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, mcs_mask);
+		mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+					  mask->control[band].ht_mcs);
 		ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
 	}

 	if (sta->vht_cap.vht_supported) {
-		const u16 *mcs_mask = mask->control[band].vht_mcs;
 		u8 af;

 		ra->supp_mode |= MODE_VHT;
@@ -2273,10 +2253,12 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
 			cap |= STA_CAP_VHT_TX_STBC;
 		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
 			cap |= STA_CAP_VHT_RX_STBC;
-		if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+		if (mvif->cap.ldpc &&
+		    (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
 			cap |= STA_CAP_VHT_LDPC;

-		mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mcs_mask);
+		mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+					   mask->control[band].vht_mcs);
 	}

 	if (sta->he_cap.has_he) {
@@ -2288,44 +2270,40 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
 }

 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-			     struct ieee80211_sta *sta)
+			     struct ieee80211_sta *sta, bool changed)
 {
 	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
 	struct sk_buff *skb;
-	int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_ra);
+	int ret;

-	skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+	skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+				       MT7915_STA_UPDATE_MAX_SIZE);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);

-	mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
-
-	return mt76_mcu_skb_send_msg(&dev->mt76, skb,
-				     MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-		      struct ieee80211_sta *sta)
-{
-	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-	struct sk_buff *skb;
-	int len;
-
-	if (!sta->he_cap.has_he)
-		return 0;
-
-	len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_he);
+	/* firmware rc algorithm refers to sta_rec_he for HE control.
+	 * once dev->rc_work changes the settings driver should also
+	 * update sta_rec_he here.
+ */ + if (sta->he_cap.has_he && changed) + mt7915_mcu_sta_he_tlv(skb, sta, vif); - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); - if (IS_ERR(skb)) - return PTR_ERR(skb); + /* sta_rec_ra accommodates BW, NSS and only MCS range format + * i.e 0-{7,8,9} for VHT. + */ + mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); - mt7915_mcu_sta_he_tlv(skb, sta); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); + if (ret) + return ret; - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD(STA_REC_UPDATE), true); + /* sta_rec_ra_fixed accommodates single rate, (HE)GI and HE_LTE, + * and updates as peer fixed rate parameters, which overrides + * sta_rec_ra and firmware rate control algorithm. + */ + return mt7915_mcu_add_rate_ctrl_fixed(dev, vif, sta); } static int @@ -2334,7 +2312,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, { #define MT_STA_BSS_GROUP 1 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_sta *msta; struct { __le32 action; u8 wlan_idx_lo; @@ -2345,75 +2323,24 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, u8 rsv1[8]; } __packed req = { .action = cpu_to_le32(MT_STA_BSS_GROUP), - .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), - .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), .val = cpu_to_le32(mvif->idx % 16), }; + msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; + req.wlan_idx_lo = to_wcid_lo(msta->wcid.idx); + req.wlan_idx_hi = to_wcid_hi(msta->wcid.idx); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req, sizeof(req), true); } -static int -mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct sk_buff *skb; - int ret; - - if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he) - return 0; - - ret = mt7915_mcu_add_group(dev, vif, sta); - if (ret) - return ret; - - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - /* wait until TxBF and MU ready to update stare vht */ - - /* starec muru */ - mt7915_mcu_sta_muru_tlv(skb, sta); - /* starec vht */ - mt7915_mcu_sta_vht_tlv(skb, sta); - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD(STA_REC_UPDATE), true); -} - -int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) -{ - int ret; - - if (!sta) - return 0; - - /* must keep the order */ - ret = mt7915_mcu_add_txbf(dev, vif, sta, enable); - if (ret || !enable) - return ret; - - ret = mt7915_mcu_add_mu(dev, vif, sta); - if (ret) - return ret; - - return mt7915_mcu_add_rate_ctrl(dev, vif, sta); -} - int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct wtbl_req_hdr *wtbl_hdr; struct mt7915_sta *msta; - struct tlv *sta_wtbl; struct sk_buff *skb; + int ret; msta = sta ? 
(struct mt7915_sta *)sta->drv_priv : &mvif->sta; @@ -2422,24 +2349,42 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, if (IS_ERR(skb)) return PTR_ERR(skb); + /* starec basic */ mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable); - if (enable && sta) - mt7915_mcu_sta_tlv(dev, skb, sta, vif); + if (!enable) + goto out; - sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + /* tag order is in accordance with firmware dependency. */ + if (sta && sta->ht_cap.ht_supported) { + /* starec bfer */ + mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta); + /* starec ht */ + mt7915_mcu_sta_ht_tlv(skb, sta); + /* starec vht */ + mt7915_mcu_sta_vht_tlv(skb, sta); + /* starec uapsd */ + mt7915_mcu_sta_uapsd_tlv(skb, sta, vif); + } - wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, - sta_wtbl, &skb); - if (IS_ERR(wtbl_hdr)) - return PTR_ERR(wtbl_hdr); + ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta); + if (ret) + return ret; - if (enable) { - mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); - mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); - if (sta) - mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr); + if (sta && sta->ht_cap.ht_supported) { + /* starec amsdu */ + mt7915_mcu_sta_amsdu_tlv(skb, vif, sta); + /* starec he */ + mt7915_mcu_sta_he_tlv(skb, sta, vif); + /* starec muru */ + mt7915_mcu_sta_muru_tlv(skb, sta, vif); + /* starec bfee */ + mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta); } + ret = mt7915_mcu_add_group(dev, vif, sta); + if (ret) + return ret; +out: return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(STA_REC_UPDATE), true); } @@ -2464,10 +2409,9 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, if (!rate) { ra->field = cpu_to_le32(RATE_PARAM_AUTO); goto out; - } else { - ra->field = cpu_to_le32(RATE_PARAM_FIXED); } + ra->field = cpu_to_le32(RATE_PARAM_FIXED); ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate); ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate); ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate); @@ -2480,10 +2424,12 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7; /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */ - if (ra->phy.type > MT_PHY_TYPE_VHT) - ra->phy.sgi = ra->phy.mcs * 85; - else - ra->phy.sgi = ra->phy.mcs * 15; + if (ra->phy.type > MT_PHY_TYPE_VHT) { + ra->phy.he_ltf = FIELD_GET(RATE_CFG_HE_LTF, rate) * 85; + ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 85; + } else { + ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 15; + } out: return mt76_mcu_skb_send_msg(&dev->mt76, skb, @@ -2534,25 +2480,28 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, } static void -mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb, - struct bss_info_bcn *bcn, - struct ieee80211_mutable_offsets *offs) +mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb, + struct sk_buff *skb, struct bss_info_bcn *bcn, + struct ieee80211_mutable_offsets *offs) { - if (offs->cntdwn_counter_offs[0]) { - struct tlv *tlv; - struct bss_info_bcn_csa *csa; + struct bss_info_bcn_cntdwn *info; + struct tlv *tlv; + int sub_tag; - tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CSA, - sizeof(*csa), &bcn->sub_ntlv, - &bcn->len); - csa = (struct bss_info_bcn_csa *)tlv; - csa->cnt = skb->data[offs->cntdwn_counter_offs[0]]; - } + if (!offs->cntdwn_counter_offs[0]) + return; + + sub_tag = vif->csa_active ? 
BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC; + tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info), + &bcn->sub_ntlv, &bcn->len); + info = (struct bss_info_bcn_cntdwn *)tlv; + info->cnt = skb->data[offs->cntdwn_counter_offs[0]]; } static void -mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb, - struct sk_buff *skb, struct bss_info_bcn *bcn, +mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct sk_buff *rskb, struct sk_buff *skb, + struct bss_info_bcn *bcn, struct ieee80211_mutable_offsets *offs) { struct mt76_wcid *wcid = &dev->mt76.global_wcid; @@ -2568,8 +2517,14 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb, cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); cont->tim_ofs = cpu_to_le16(offs->tim_offset); - if (offs->cntdwn_counter_offs[0]) - cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4); + if (offs->cntdwn_counter_offs[0]) { + u16 offset = offs->cntdwn_counter_offs[0]; + + if (vif->csa_active) + cont->csa_ofs = cpu_to_le16(offset - 4); + if (vif->color_change_active) + cont->bcc_ofs = cpu_to_le16(offset - 3); + } buf = (u8 *)tlv + sizeof(*cont); mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL, @@ -2577,6 +2532,82 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb, memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); } +static void +mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif, + struct sk_buff *skb) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_vif_cap *vc = &mvif->cap; + const struct ieee80211_he_cap_elem *he; + const struct ieee80211_vht_cap *vht; + const struct ieee80211_ht_cap *ht; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + const u8 *ie; + u32 len, bc; + + /* Check missing configuration options to allow AP mode in mac80211 + * to remain in sync with hostapd settings, and get a subset of + * beacon and hardware capabilities. 
+	 */
+	if (WARN_ON_ONCE(skb->len <= (mgmt->u.beacon.variable - skb->data)))
+		return;
+	memset(vc, 0, sizeof(*vc));
+	len = skb->len - (mgmt->u.beacon.variable - skb->data);
+	ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, mgmt->u.beacon.variable,
+			      len);
+	if (ie && ie[1] >= sizeof(*ht)) {
+		ht = (void *)(ie + 2);
+		vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
+			       IEEE80211_HT_CAP_LDPC_CODING);
+	}
+	ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
+			      len);
+	if (ie && ie[1] >= sizeof(*vht)) {
+		u32 pc = phy->mt76->sband_5g.sband.vht_cap.cap;
+		vht = (void *)(ie + 2);
+		bc = le32_to_cpu(vht->vht_cap_info);
+		vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+		vc->vht_su_ebfer =
+			(bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+			(pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+		vc->vht_su_ebfee =
+			(bc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+			(pc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+		vc->vht_mu_ebfer =
+			(bc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+			(pc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+		vc->vht_mu_ebfee =
+			(bc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+			(pc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+	}
+	ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY,
+				  mgmt->u.beacon.variable, len);
+	if (ie && ie[1] >= sizeof(*he) + 1) {
+		const struct ieee80211_sta_he_cap *pc =
+			mt7915_get_he_phy_cap(phy, vif);
+		const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+		he = (void *)(ie + 3);
+		vc->he_su_ebfer =
+			HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
+			HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+		vc->he_su_ebfee =
+			HE_PHY(CAP4_SU_BEAMFORMEE, he->phy_cap_info[4]) &&
+			HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+		vc->he_mu_ebfer =
+			HE_PHY(CAP4_MU_BEAMFORMER, he->phy_cap_info[4]) &&
+			HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4]);
+	}
+}
+
 int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
 			  struct ieee80211_vif *vif, int en)
 {
@@ -2617,9 +2648,11 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
 		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
 	}

-	/* TODO: subtag - bss color count & 11v MBSSID */
-	mt7915_mcu_beacon_csa(rskb, skb, bcn, &offs);
-	mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
+	mt7915_mcu_beacon_check_caps(phy, vif, skb);
+
+	/* TODO: subtag - 11v MBSSID */
+	mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+	mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
 	dev_kfree_skb(skb);

 out:
@@ -2770,8 +2803,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
 			goto out;
 		}
-		ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
-					     dl, len);
+		ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+					       dl, len, 4096);
 		if (ret) {
 			dev_err(dev->mt76.dev, "Failed to send patch\n");
 			goto out;
 		}
@@ -2790,7 +2823,7 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
 	default:
 		ret = -EAGAIN;
 		dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
-		goto out;
+		break;
 	}

 	release_firmware(fw);
@@ -2839,8 +2872,8 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
 			return err;
 		}

-		err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
-					     data + offset, len);
+		err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+					       data + offset, len, 4096);
 		if (err) {
 			dev_err(dev->mt76.dev, "Failed to send firmware.\n");
 			return err;
@@ -2946,7 +2979,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
 	return 0;
 }

-int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl)
 {
struct { u8 ctrl_val; @@ -2955,6 +2988,10 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl) .ctrl_val = ctrl }; + if (type == MCU_FW_LOG_WA) + return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(FW_LOG_2_HOST), + &data, sizeof(data), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data, sizeof(data), true); } @@ -2990,6 +3027,62 @@ static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled) sizeof(req), false); } +int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val) +{ + struct { + __le32 cmd; + u8 val[4]; + } __packed req = { + .cmd = cpu_to_le32(cmd), + }; + + put_unaligned_le32(val, req.val); + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req, + sizeof(req), false); +} + +static int +mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev) +{ +#define RX_AIRTIME_FEATURE_CTRL 1 +#define RX_AIRTIME_BITWISE_CTRL 2 +#define RX_AIRTIME_CLEAR_EN 1 + struct { + __le16 field; + __le16 sub_field; + __le32 set_status; + __le32 get_status; + u8 _rsv[12]; + + bool airtime_en; + bool mibtime_en; + bool earlyend_en; + u8 _rsv1[9]; + + bool airtime_clear; + bool mibtime_clear; + u8 _rsv2[98]; + } __packed req = { + .field = cpu_to_le16(RX_AIRTIME_BITWISE_CTRL), + .sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN), + .airtime_clear = true, + }; + int ret; + + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req, + sizeof(req), true); + if (ret) + return ret; + + req.field = cpu_to_le16(RX_AIRTIME_FEATURE_CTRL); + req.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN); + req.airtime_en = true; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req, + sizeof(req), true); +} + int mt7915_mcu_init(struct mt7915_dev *dev) { static const struct mt76_mcu_ops mt7915_mcu_ops = { @@ -3011,11 +3104,29 @@ int mt7915_mcu_init(struct mt7915_dev *dev) return ret; set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - mt7915_mcu_fw_log_2_host(dev, 0); - mt7915_mcu_set_mwds(dev, 1); - mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_RED, 0, 0); + ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0); + if (ret) + return ret; - return 0; + ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0); + if (ret) + return ret; + + ret = mt7915_mcu_set_mwds(dev, 1); + if (ret) + return ret; + + ret = mt7915_mcu_set_muru_ctrl(dev, MURU_SET_PLATFORM_TYPE, + MURU_PLATFORM_TYPE_PERF_LEVEL_2); + if (ret) + return ret; + + ret = mt7915_mcu_init_rx_airtime(dev); + if (ret) + return ret; + + return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), + MCU_WA_PARAM_RED, 0, 0); } void mt7915_mcu_exit(struct mt7915_dev *dev) @@ -3391,20 +3502,20 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev) { -#define TOTAL_PAGE_MASK GENMASK(7, 5) +#define MAX_PAGE_IDX_MASK GENMASK(7, 5) #define PAGE_IDX_MASK GENMASK(4, 2) #define PER_PAGE_SIZE 0x400 struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER }; - u8 total = MT7915_EEPROM_SIZE / PER_PAGE_SIZE; + u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE); u8 *eep = (u8 *)dev->mt76.eeprom.data; int eep_len; int i; - for (i = 0; i <= total; i++, eep += eep_len) { + for (i = 0; i < total; i++, eep += eep_len) { struct sk_buff *skb; int ret; - if (i == total) + if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE)) eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE; else eep_len = PER_PAGE_SIZE; @@ -3414,7 +3525,7 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev) if (!skb) return 
-ENOMEM; - req.format = FIELD_PREP(TOTAL_PAGE_MASK, total) | + req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) | FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE; req.len = cpu_to_le16(eep_len); @@ -3481,7 +3592,7 @@ static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, u8 idx; u8 rsv[4]; __le32 len; - } req; + } req = {}; struct sk_buff *skb; skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len); @@ -3691,10 +3802,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state) }; int level; -#define TRIGGER_TEMPERATURE 122 -#define RESTORE_TEMPERATURE 116 -#define SUSTAIN_PERIOD 10 - if (!state) { req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE; goto out; @@ -3707,7 +3814,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state) req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG; req.ctrl.duty.duty_level = level; req.ctrl.duty.duty_cycle = state; - state = state * 4 / 5; + state /= 2; ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT), &req, sizeof(req.ctrl), false); @@ -3715,15 +3822,12 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state) return ret; } - /* currently use fixed values for throttling, and would be better - * to implement thermal zone for dynamic trip in the long run. - */ - /* set high-temperature trigger threshold */ req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE; - req.trigger_temp = cpu_to_le32(TRIGGER_TEMPERATURE); - req.restore_temp = cpu_to_le32(RESTORE_TEMPERATURE); - req.sustain_time = cpu_to_le16(SUSTAIN_PERIOD); + /* add a safety margin ~10 */ + req.restore_temp = cpu_to_le32(phy->throttle_temp[0] - 10); + req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]); + req.sustain_time = cpu_to_le16(10); out: req.ctrl.type.protect_type = 1; @@ -3733,24 +3837,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state) &req, sizeof(req), false); } -int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) -{ - struct { - __le32 cmd; - __le16 wlan_idx; - __le16 ru_idx; - __le16 direction; - __le16 dump_group; - } req = { - .cmd = cpu_to_le32(cmd), - .wlan_idx = cpu_to_le16(wlan_idx), - .dump_group = cpu_to_le16(1), - }; - - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RATE_CTRL), &req, - sizeof(req), false); -} - int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; @@ -4069,3 +4155,75 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, return ret; } + +int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct cfg80211_he_bss_color *he_bss_color) +{ + int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_color); + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct bss_info_color *bss_color; + struct sk_buff *skb; + struct tlv *tlv; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color)); + bss_color = (struct bss_info_color *)tlv; + bss_color->disable = !he_bss_color->enabled; + bss_color->color = he_bss_color->color; + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(BSS_INFO_UPDATE), true); +} + +#define TWT_AGRT_TRIGGER BIT(0) +#define TWT_AGRT_ANNOUNCE BIT(1) +#define TWT_AGRT_PROTECT BIT(2) + +int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, + struct mt7915_vif *mvif, + struct mt7915_twt_flow *flow, + int cmd) +{ + struct { + u8 tbl_idx; + u8 cmd; + u8 own_mac_idx; + u8 flowid; /* 0xff for group 
id */ + __le16 peer_id; /* specify the peer_id (msb=0) + * or group_id (msb=1) + */ + u8 duration; /* 256 us */ + u8 bss_idx; + __le64 start_tsf; + __le16 mantissa; + u8 exponent; + u8 is_ap; + u8 agrt_params; + u8 rsv[23]; + } __packed req = { + .tbl_idx = flow->table_id, + .cmd = cmd, + .own_mac_idx = mvif->omac_idx, + .flowid = flow->id, + .peer_id = cpu_to_le16(flow->wcid), + .duration = flow->duration, + .bss_idx = mvif->idx, + .start_tsf = cpu_to_le64(flow->tsf), + .mantissa = flow->mantissa, + .exponent = flow->exp, + .is_ap = true, + }; + + if (flow->protection) + req.agrt_params |= TWT_AGRT_PROTECT; + if (!flow->flowtype) + req.agrt_params |= TWT_AGRT_ANNOUNCE; + if (flow->trigger) + req.agrt_params |= TWT_AGRT_TRIGGER; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE), + &req, sizeof(req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index e68a562cc5b4f6..1f5a64ba9b59d9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -43,7 +43,7 @@ enum { MCU_EXT_EVENT_ASSERT_DUMP = 0x23, MCU_EXT_EVENT_RDD_REPORT = 0x3a, MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, - MCU_EXT_EVENT_RATE_REPORT = 0x87, + MCU_EXT_EVENT_BCC_NOTIFY = 0x75, }; enum { @@ -164,41 +164,6 @@ struct mt7915_mcu_eeprom_info { u8 data[16]; } __packed; -struct mt7915_mcu_ra_info { - struct mt7915_mcu_rxd rxd; - - __le32 event_id; - __le16 wlan_idx; - __le16 ru_idx; - __le16 direction; - __le16 dump_group; - - __le32 suggest_rate; - __le32 min_rate; /* for dynamic sounding */ - __le32 max_rate; /* for dynamic sounding */ - __le32 init_rate_down_rate; - - __le16 curr_rate; - __le16 init_rate_down_total; - __le16 init_rate_down_succ; - __le16 success; - __le16 attempts; - - __le16 prev_rate; - __le16 prob_up_rate; - u8 no_rate_up_cnt; - u8 ppdu_cnt; - u8 gi; - - u8 try_up_fail; - u8 try_up_total; - u8 suggest_wf; - u8 try_up_check; - u8 prob_up_period; - u8 prob_down_pending; -} __packed; - - struct mt7915_mcu_phy_rx_info { u8 category; u8 rate; @@ -210,12 +175,6 @@ struct mt7915_mcu_phy_rx_info { u8 bw; }; -#define MT_RA_RATE_NSS GENMASK(8, 6) -#define MT_RA_RATE_MCS GENMASK(3, 0) -#define MT_RA_RATE_TX_MODE GENMASK(12, 9) -#define MT_RA_RATE_DCM_EN BIT(4) -#define MT_RA_RATE_BW GENMASK(14, 13) - struct mt7915_mcu_mib { __le32 band; __le32 offs; @@ -270,6 +229,11 @@ enum { MCU_S2D_H2CN }; +enum { + MCU_FW_LOG_WM, + MCU_FW_LOG_WA, + MCU_FW_LOG_TO_HOST, +}; #define __MCU_CMD_FIELD_ID GENMASK(7, 0) #define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8) @@ -312,21 +276,31 @@ enum { MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, MCU_EXT_CMD_RX_HDR_TRANS = 0x47, MCU_EXT_CMD_MUAR_UPDATE = 0x48, + MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a, MCU_EXT_CMD_SET_RX_PATH = 0x4e, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, MCU_EXT_CMD_GET_MIB_INFO = 0x5a, MCU_EXT_CMD_MWDS_SUPPORT = 0x80, MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, MCU_EXT_CMD_SCS_CTRL = 0x82, - MCU_EXT_CMD_RATE_CTRL = 0x87, + MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94, MCU_EXT_CMD_FW_DBG_CTRL = 0x95, MCU_EXT_CMD_SET_RDD_TH = 0x9d, + MCU_EXT_CMD_MURU_CTRL = 0x9f, MCU_EXT_CMD_SET_SPR = 0xa8, MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab, MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac, MCU_EXT_CMD_PHY_STAT_INFO = 0xad, }; +enum { + MCU_TWT_AGRT_ADD, + MCU_TWT_AGRT_MODIFY, + MCU_TWT_AGRT_DELETE, + MCU_TWT_AGRT_TEARDOWN, + MCU_TWT_AGRT_GET_TSF, +}; + enum { MCU_WA_PARAM_CMD_QUERY, MCU_WA_PARAM_CMD_SET, @@ -335,6 +309,8 @@ enum { }; enum { + MCU_WA_PARAM_PDMA_RX = 0x04, + MCU_WA_PARAM_CPU_UTIL = 
0x0b, MCU_WA_PARAM_RED = 0x0e, }; @@ -545,6 +521,14 @@ struct bss_info_hw_amsdu { u8 rsv; } __packed; +struct bss_info_color { + __le16 tag; + __le16 len; + u8 disable; + u8 color; + u8 rsv[2]; +} __packed; + struct bss_info_he { __le16 tag; __le16 len; @@ -563,14 +547,7 @@ struct bss_info_bcn { __le16 sub_ntlv; } __packed __aligned(4); -struct bss_info_bcn_csa { - __le16 tag; - __le16 len; - u8 cnt; - u8 rsv[3]; -} __packed __aligned(4); - -struct bss_info_bcn_bcc { +struct bss_info_bcn_cntdwn { __le16 tag; __le16 len; u8 cnt; @@ -716,6 +693,7 @@ struct wtbl_ba { __le16 sn; u8 ba_en; u8 ba_winsize_idx; + /* originator & recipient */ __le16 ba_winsize; /* recipient only */ u8 peer_addr[ETH_ALEN]; @@ -915,7 +893,7 @@ struct sta_rec_sec { struct sec_key key[2]; } __packed; -struct ra_phy { +struct sta_phy { u8 type; u8 flag; u8 stbc; @@ -959,7 +937,7 @@ struct sta_rec_ra { __le32 sta_cap; - struct ra_phy phy; + struct sta_phy phy; } __packed; struct sta_rec_ra_fixed { @@ -972,7 +950,7 @@ struct sta_rec_ra_fixed { u8 op_vht_rx_nss; u8 op_vht_rx_nss_type; - struct ra_phy phy; + struct sta_phy phy; u8 spe_en; u8 short_preamble; @@ -980,8 +958,14 @@ struct sta_rec_ra_fixed { u8 mmps_mode; } __packed; -#define RATE_PARAM_FIXED 3 -#define RATE_PARAM_AUTO 20 +enum { + RATE_PARAM_FIXED = 3, + RATE_PARAM_FIXED_HE_LTF = 7, + RATE_PARAM_FIXED_MCS, + RATE_PARAM_FIXED_GI = 11, + RATE_PARAM_AUTO = 20, +}; + #define RATE_CFG_MCS GENMASK(3, 0) #define RATE_CFG_NSS GENMASK(7, 4) #define RATE_CFG_GI GENMASK(11, 8) @@ -989,6 +973,7 @@ struct sta_rec_ra_fixed { #define RATE_CFG_STBC GENMASK(19, 16) #define RATE_CFG_LDPC GENMASK(23, 20) #define RATE_CFG_PHY_TYPE GENMASK(27, 24) +#define RATE_CFG_HE_LTF GENMASK(31, 28) struct sta_rec_bf { __le16 tag; @@ -1002,8 +987,8 @@ struct sta_rec_bf { u8 ndp_rate; u8 rept_poll_rate; u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... 
*/ - u8 nc; - u8 nr; + u8 ncol; + u8 nrow; u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */ u8 mem_total; @@ -1023,8 +1008,8 @@ struct sta_rec_bf { u8 ibf_dbw; u8 ibf_ncol; u8 ibf_nrow; - u8 nr_bw160; - u8 nc_bw160; + u8 nrow_bw160; + u8 ncol_bw160; u8 ru_start_idx; u8 ru_end_idx; @@ -1036,7 +1021,7 @@ struct sta_rec_bf { bool codebook75_mu; u8 he_ltf; - u8 rsv[2]; + u8 rsv[3]; } __packed; struct sta_rec_bfee { @@ -1115,17 +1100,22 @@ enum { THERMAL_PROTECT_STATE_ACT, }; -enum { - MT_EBF = BIT(0), /* explicit beamforming */ - MT_IBF = BIT(1) /* implicit beamforming */ -}; - enum { MT_BF_SOUNDING_ON = 1, MT_BF_TYPE_UPDATE = 20, MT_BF_MODULE_UPDATE = 25 }; +enum { + MURU_SET_ARB_OP_MODE = 14, + MURU_SET_PLATFORM_TYPE = 25, +}; + +enum { + MURU_PLATFORM_TYPE_PERF_LEVEL_1 = 1, + MURU_PLATFORM_TYPE_PERF_LEVEL_2, +}; + #define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ sizeof(struct wtbl_generic) + \ sizeof(struct wtbl_rx) + \ @@ -1137,12 +1127,15 @@ enum { #define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_bf) + \ sizeof(struct sta_rec_ht) + \ sizeof(struct sta_rec_he) + \ sizeof(struct sta_rec_ba) + \ sizeof(struct sta_rec_vht) + \ sizeof(struct sta_rec_uapsd) + \ sizeof(struct sta_rec_amsdu) + \ + sizeof(struct sta_rec_muru) + \ + sizeof(struct sta_rec_bfee) + \ sizeof(struct tlv) + \ MT7915_WTBL_UPDATE_MAX_SIZE) @@ -1157,8 +1150,7 @@ enum { sizeof(struct bss_info_ext_bss)) #define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct bss_info_bcn_csa) + \ - sizeof(struct bss_info_bcn_bcc) + \ + sizeof(struct bss_info_bcn_cntdwn) + \ sizeof(struct bss_info_bcn_mbss) + \ sizeof(struct bss_info_bcn_cont)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index af712a936ef683..1f6ba306c85098 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -34,6 +34,9 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) u32 mapped; u32 size; } fixed_map[] = { + { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */ + { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */ + { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ @@ -92,8 +95,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) } if ((addr >= 0x18000000 && addr < 0x18c00000) || - (addr >= 0x70000000 && addr < 0x78000000) || - (addr >= 0x7c000000 && addr < 0x7c400000)) + (addr >= 0x70000000 && addr < 0x78000000)) return mt7915_reg_map_l1(dev, addr); return mt7915_reg_map_l2(dev, addr); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 3f613fae6218e1..e69b4c8974ee74 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -36,13 +36,14 @@ #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ -#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */ -#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */ #define MT7915_THERMAL_THROTTLE_MAX 100 #define MT7915_SKU_RATE_NUM 161 +#define MT7915_MAX_TWT_AGRT 16 +#define MT7915_MAX_STA_TWT_AGRT 8 + struct mt7915_vif; struct mt7915_sta; struct mt7915_dfs_pulse; @@ 
-64,35 +65,59 @@ enum mt7915_rxq_id { MT7915_RXQ_MCU_WA_EXT, }; -struct mt7915_sta_stats { - struct rate_info prob_rate; - struct rate_info tx_rate; - - unsigned long per; - unsigned long changed; - unsigned long jiffies; -}; - struct mt7915_sta_key_conf { s8 keyidx; u8 key[16]; }; +struct mt7915_twt_flow { + struct list_head list; + u64 start_tsf; + u64 tsf; + u32 duration; + u16 wcid; + __le16 mantissa; + u8 exp; + u8 table_id; + u8 id; + u8 protection:1; + u8 flowtype:1; + u8 trigger:1; + u8 sched:1; +}; + struct mt7915_sta { struct mt76_wcid wcid; /* must be first */ struct mt7915_vif *vif; - struct list_head stats_list; struct list_head poll_list; struct list_head rc_list; u32 airtime_ac[8]; - struct mt7915_sta_stats stats; - + unsigned long changed; + unsigned long jiffies; unsigned long ampdu_state; + struct mt76_sta_stats stats; + struct mt7915_sta_key_conf bip; + + struct { + u8 flowid_mask; + struct mt7915_twt_flow flow[MT7915_MAX_STA_TWT_AGRT]; + } twt; +}; + +struct mt7915_vif_cap { + bool ldpc:1; + bool vht_su_ebfer:1; + bool vht_su_ebfee:1; + bool vht_mu_ebfer:1; + bool vht_mu_ebfee:1; + bool he_su_ebfer:1; + bool he_su_ebfee:1; + bool he_mu_ebfer:1; }; struct mt7915_vif { @@ -101,6 +126,7 @@ struct mt7915_vif { u8 band_idx; u8 wmm_idx; + struct mt7915_vif_cap cap; struct mt7915_sta sta; struct mt7915_phy *phy; @@ -108,12 +134,58 @@ struct mt7915_vif { struct cfg80211_bitrate_mask bitrate_mask; }; +/* per-phy stats. */ struct mib_stats { u32 ack_fail_cnt; u32 fcs_err_cnt; u32 rts_cnt; u32 rts_retries_cnt; u32 ba_miss_cnt; + u32 tx_bf_cnt; + u32 tx_mu_mpdu_cnt; + u32 tx_mu_acked_mpdu_cnt; + u32 tx_su_acked_mpdu_cnt; + u32 tx_bf_ibf_ppdu_cnt; + u32 tx_bf_ebf_ppdu_cnt; + + u32 tx_bf_rx_fb_all_cnt; + u32 tx_bf_rx_fb_he_cnt; + u32 tx_bf_rx_fb_vht_cnt; + u32 tx_bf_rx_fb_ht_cnt; + + u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */ + u32 tx_bf_rx_fb_nc_cnt; + u32 tx_bf_rx_fb_nr_cnt; + u32 tx_bf_fb_cpl_cnt; + u32 tx_bf_fb_trig_cnt; + + u32 tx_ampdu_cnt; + u32 tx_stop_q_empty_cnt; + u32 tx_mpdu_attempts_cnt; + u32 tx_mpdu_success_cnt; + u32 tx_pkt_ebf_cnt; + u32 tx_pkt_ibf_cnt; + + u32 tx_rwp_fail_cnt; + u32 tx_rwp_need_cnt; + + /* rx stats */ + u32 rx_fifo_full_cnt; + u32 channel_idle_cnt; + u32 rx_vector_mismatch_cnt; + u32 rx_delimiter_fail_cnt; + u32 rx_len_mismatch_cnt; + u32 rx_mpdu_cnt; + u32 rx_ampdu_cnt; + u32 rx_ampdu_bytes_cnt; + u32 rx_ampdu_valid_subframe_cnt; + u32 rx_ampdu_valid_subframe_bytes_cnt; + u32 rx_pfdrop_cnt; + u32 rx_vec_queue_overflow_drop_cnt; + u32 rx_ba_cnt; + + u32 tx_amsdu[8]; + u32 tx_amsdu_cnt; }; struct mt7915_hif { @@ -134,6 +206,7 @@ struct mt7915_phy { struct thermal_cooling_device *cdev; u8 throttle_state; + u32 throttle_temp[2]; /* 0: critical high, 1: maximum */ u32 rxfilter; u64 omac_mask; @@ -151,9 +224,6 @@ struct mt7915_phy { struct mib_stats mib; struct mt76_channel_state state_ts; - struct list_head stats_list; - - u8 sta_work_count; #ifdef CONFIG_NL80211_TESTMODE struct { @@ -193,16 +263,23 @@ struct mt7915_dev { struct list_head sta_rc_list; struct list_head sta_poll_list; + struct list_head twt_list; spinlock_t sta_poll_lock; u32 hw_pattern; bool dbdc_support; bool flash_mode; - bool fw_debug; bool ibf; + u8 fw_debug_wm; + u8 fw_debug_wa; void *cal; + + struct { + u8 table_mask; + u8 n_agrt; + } twt; }; enum { @@ -220,13 +297,17 @@ enum { }; enum { - MT_LMAC_AC00, + MT_CTX0, + MT_HIF0 = 0x0, + + MT_LMAC_AC00 = 0x0, MT_LMAC_AC01, MT_LMAC_AC02, MT_LMAC_AC03, MT_LMAC_ALTX0 = 0x10, MT_LMAC_BMC0, MT_LMAC_BCN0, + MT_LMAC_PSMP0, 
}; enum { @@ -250,13 +331,6 @@ enum mt7915_rdd_cmd { RDD_IRQ_OFF, }; -enum { - RATE_CTRL_RU_INFO, - RATE_CTRL_FIXED_RATE_INFO, - RATE_CTRL_DUMP_INFO, - RATE_CTRL_MU_INFO, -}; - static inline struct mt7915_phy * mt7915_hw_phy(struct ieee80211_hw *hw) { @@ -294,7 +368,7 @@ extern const struct ieee80211_ops mt7915_ops; extern const struct mt76_testmode_ops mt7915_testmode_ops; u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr); - +u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif); int mt7915_register_device(struct mt7915_dev *dev); void mt7915_unregister_device(struct mt7915_dev *dev); int mt7915_eeprom_init(struct mt7915_dev *dev); @@ -307,14 +381,16 @@ int mt7915_dma_init(struct mt7915_dev *dev); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); int mt7915_mcu_init(struct mt7915_dev *dev); +int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, + struct mt7915_vif *mvif, + struct mt7915_twt_flow *flow, + int cmd); int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, bool enable); int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, int enable); int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable); -int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable); int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -327,22 +403,24 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct mt7915_sta *msta, struct ieee80211_key_conf *key, enum set_key_cmd cmd); +int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct cfg80211_he_bss_color *he_bss_color); int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int enable); int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta, bool changed); int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int mt7915_set_channel(struct mt7915_phy *phy); int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd); int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif); int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req); -int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, - struct ieee80211_sta *sta, u32 rate); +int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + void *data, u32 field); int mt7915_mcu_set_eeprom(struct mt7915_dev *dev); int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset); int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, @@ -362,17 +440,18 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, const struct mt7915_dfs_pulse *pulse); int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, const struct mt7915_dfs_pattern *pattern); +int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val); int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev); int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy); int mt7915_mcu_get_chan_mib_info(struct 
mt7915_phy *phy, bool chan_switch); int mt7915_mcu_get_temperature(struct mt7915_phy *phy); int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state); -int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx); int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd, u8 index, u8 rx_sel, u8 val); -int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl); +int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3); +int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl); int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level); void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb); void mt7915_mcu_exit(struct mt7915_dev *dev); @@ -403,6 +482,7 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask) mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } +u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw); bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); void mt7915_mac_reset_counters(struct mt7915_phy *phy); void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); @@ -418,7 +498,14 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7915_mac_work(struct work_struct *work); void mt7915_mac_reset_work(struct work_struct *work); void mt7915_mac_sta_rc_work(struct work_struct *work); +void mt7915_mac_update_stats(struct mt7915_phy *phy); int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq); +void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev, + struct mt7915_sta *msta, + u8 flowid); +void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt); int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -435,7 +522,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy); void mt7915_set_stream_he_caps(struct mt7915_phy *phy); void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy); void mt7915_update_channel(struct mt76_phy *mphy); -int mt7915_init_debugfs(struct mt7915_dev *dev); +int mt7915_init_debugfs(struct mt7915_phy *phy); #ifdef CONFIG_MAC80211_DEBUGFS void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 340b364da5f0db..0af4cdb897b71c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -222,8 +222,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp), - .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ | - MT_DRV_AMSDU_OFFLOAD, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, @@ -251,7 +250,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h 
b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index a213b5cb82f813..59693535b098c6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -22,15 +22,22 @@ #define MT_PLE_BASE 0x8000 #define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) -#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) -#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) -#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) -#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc) - -#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \ +#define MT_FL_Q_EMPTY 0x0b0 +#define MT_FL_Q0_CTRL 0x1b0 +#define MT_FL_Q2_CTRL 0x1b8 +#define MT_FL_Q3_CTRL 0x1bc + +#define MT_PLE_FREEPG_CNT MT_PLE(0x100) +#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104) +#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110) +#define MT_PLE_HIF_PG_INFO MT_PLE(0x114) +#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \ ((n) << 2)) #define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) +#define MT_PSE_BASE 0xc000 +#define MT_PSE(ofs) (MT_PSE_BASE + (ofs)) + #define MT_MDP_BASE 0xf000 #define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) @@ -57,6 +64,7 @@ #define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) #define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0) +#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6) #define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25) #define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090) @@ -72,11 +80,14 @@ #define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16) #define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4) -#define MT_IFS_EIFS GENMASK(8, 0) +#define MT_IFS_EIFS_OFDM GENMASK(8, 0) #define MT_IFS_RIFS GENMASK(14, 10) #define MT_IFS_SIFS GENMASK(22, 16) #define MT_IFS_SLOT GENMASK(30, 24) +#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4) +#define MT_IFS_EIFS_CCK GENMASK(8, 0) + #define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4) #define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) #define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) @@ -128,15 +139,120 @@ #define MT_LPON_TCR_SW_READ GENMASK(1, 0) /* MIB: band 0(0x24800), band 1(0xa4800) */ +/* These counters are (mostly?) clear-on-read. So, some should not + * be read at all in case firmware is already reading them. These + * are commented with 'DNR' below. The DNR stats will be read by querying + * the firmware API for the appropriate message. For counters the driver + * does read, the driver should accumulate the counters. + */ #define MT_WF_MIB_BASE(_band) ((_band) ? 
0xa4800 : 0x24800) #define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) +#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010) +#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0) + #define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014) #define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0) +#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018) +#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0) + +/* rx mpdu counter, full 32 bits */ +#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c) + +#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020) +#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024) +#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028) +#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0) + +/* aka CCA_NAV_TX_TIME */ +#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c) +#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0) + +#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030) +#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0) + +#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034) +#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0) + +/* tx ampdu cnt, full 32 bits */ +#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038) + +#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c) +#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0) + +/* counts all mpdus in ampdu, regardless of success */ +#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040) +#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0) + +/* counts all successfully tx'd mpdus in ampdu */ +#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044) +#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0) + +/* in units of 'us' */ +#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048) +#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0) + +#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c) +#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0) + +#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050) +#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0) + +/* units are us */ +#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054) +#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0) + +#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058) +#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0) + +#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c) +#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0) + +/* rx ampdu count, 32-bit */ +#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060) + +/* rx ampdu bytes count, 32-bit */ +#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064) + +/* rx ampdu valid subframe count */ +#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068) +#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0) + +/* rx ampdu valid subframe bytes count, 32bits */ +#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c) + +/* remaining windows protected stats */ +#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074) +#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078) +#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c) +#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0) + +#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080) +#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0) + +/* rx blockack count, 32 bits */ +#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084) + 
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088) +#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c) +#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0) + #define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) #define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) +/* 36, 37 both DNR */ + #define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) #define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4) #define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) @@ -248,7 +364,6 @@ #define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380) #define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) -#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) /* WFDMA0 */ #define MT_WFDMA0_BASE 0xd4000 @@ -413,6 +528,18 @@ #define MT_HIF_REMAP_L2_BASE GENMASK(31, 12) #define MT_HIF_REMAP_BASE_L2 0x00000 +#define MT_DIC_CMD_REG_BASE 0x41f000 +#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs)) +#define MT_DIC_CMD_REG_CMD MT_DIC_CMD_REG(0x10) + +#define MT_CPU_UTIL_BASE 0x41f030 +#define MT_CPU_UTIL(ofs) (MT_CPU_UTIL_BASE + (ofs)) +#define MT_CPU_UTIL_BUSY_PCT MT_CPU_UTIL(0x00) +#define MT_CPU_UTIL_PEAK_BUSY_PCT MT_CPU_UTIL(0x04) +#define MT_CPU_UTIL_IDLE_CNT MT_CPU_UTIL(0x08) +#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c) +#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c) + #define MT_SWDEF_BASE 0x41f200 #define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) #define MT_SWDEF_MODE MT_SWDEF(0x3c) @@ -420,6 +547,20 @@ #define MT_SWDEF_ICAP_MODE 1 #define MT_SWDEF_SPECTRUM_MODE 2 +#define MT_LED_TOP_BASE 0x18013000 +#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n)) + +#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4)) +#define MT_LED_CTRL_KICK BIT(7) +#define MT_LED_CTRL_BLINK_MODE BIT(2) +#define MT_LED_CTRL_POLARITY BIT(1) + +#define MT_LED_TX_BLINK(_n) MT_LED_PHYS(0x10 + ((_n) * 4)) +#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0) +#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8) + +#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4)) + #define MT_TOP_BASE 0x18060000 #define MT_TOP(ofs) (MT_TOP_BASE + (ofs)) @@ -430,6 +571,10 @@ #define MT_TOP_MISC MT_TOP(0xf0) #define MT_TOP_MISC_FW_STATE GENMASK(2, 0) +#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */ +#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */ +#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8) + #define MT_HW_BOUND 0x70010020 #define MT_HW_CHIPID 0x70010200 #define MT_HW_REV 0x70010204 @@ -457,4 +602,9 @@ #define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18) #define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29) +#define MT_MCU_WM_CIRQ_BASE 0x89010000 +#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs)) +#define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80) +#define MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR MT_MCU_WM_CIRQ(0xc0) + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c index b220b334906bcf..89aae323d29e24 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -165,6 +165,22 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs) sizeof(req), false); } +static int +mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu) +{ + struct mt7915_dev *dev = phy->dev; + u32 op_mode; + + if (!enable) + op_mode = TAM_ARB_OP_MODE_NORMAL; + else if (mu) + op_mode = TAM_ARB_OP_MODE_TEST; + else + op_mode = TAM_ARB_OP_MODE_FORCE_SU; + + return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode); +} + static int mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 
cw_min, u16 cw_max, u16 txop) @@ -397,6 +413,10 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en) mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en); mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en); + mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en); + + if (!en) + mt7915_tm_set_tam_arb(phy, en, 0); } static void @@ -438,6 +458,9 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en) } } + mt7915_tm_set_tam_arb(phy, en, + td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU); + /* if all three params are set, duty_cycle will be ignored */ if (duty_cycle && tx_time && !ipg) { ipg = tx_time * 100 / duty_cycle - tx_time; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h index 397a6b5532bcc7..5573ac309363d4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h @@ -96,4 +96,10 @@ enum { RF_OPER_WIFI_SPECTRUM, }; +enum { + TAM_ARB_OP_MODE_NORMAL = 1, + TAM_ARB_OP_MODE_TEST, + TAM_ARB_OP_MODE_FORCE_SU = 5, +}; + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig index 001f2b9cec2636..71154fc2a87ca3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig @@ -1,11 +1,26 @@ # SPDX-License-Identifier: ISC -config MT7921E - tristate "MediaTek MT7921E (PCIe) support" +config MT7921_COMMON + tristate select MT76_CONNAC_LIB select WANT_DEV_COREDUMP + +config MT7921E + tristate "MediaTek MT7921E (PCIe) support" + select MT7921_COMMON depends on MAC80211 depends on PCI help This adds support for MT7921E 802.11ax 2x2:2SS wireless devices. To compile this driver as a module, choose M here. + +config MT7921S + tristate "MediaTek MT7921S (SDIO) support" + select MT76_SDIO + select MT7921_COMMON + depends on MAC80211 + depends on MMC + help + This adds support for MT7921S 802.11ax 2x2:2SS wireless devices. + + To compile this driver as a module, choose M here. 
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile index 0ebb59966a0836..1187acedfedafc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile @@ -1,7 +1,12 @@ # SPDX-License-Identifier: ISC +obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o obj-$(CONFIG_MT7921E) += mt7921e.o +obj-$(CONFIG_MT7921S) += mt7921s.o CFLAGS_trace.o := -I$(src) -mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o trace.o +mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o +mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o +mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o +mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index 77468bdae460be..7cdfdf83529f6a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -4,6 +4,32 @@ #include "mt7921.h" #include "eeprom.h" +static int +mt7921_reg_set(void *data, u64 val) +{ + struct mt7921_dev *dev = data; + + mt7921_mutex_acquire(dev); + mt76_wr(dev, dev->mt76.debugfs_reg, val); + mt7921_mutex_release(dev); + + return 0; +} + +static int +mt7921_reg_get(void *data, u64 *val) +{ + struct mt7921_dev *dev = data; + + mt7921_mutex_acquire(dev); + *val = mt76_rr(dev, dev->mt76.debugfs_reg); + mt7921_mutex_release(dev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7921_reg_get, mt7921_reg_set, + "0x%08llx\n"); static int mt7921_fw_debug_set(void *data, u64 val) { @@ -42,6 +68,8 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, if (!phy) return; + mt7921_mac_update_mib_stats(phy); + /* Tx ampdu stat */ for (i = 0; i < ARRAY_SIZE(range); i++) range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i)); @@ -67,26 +95,27 @@ static int mt7921_tx_stats_show(struct seq_file *file, void *data) { struct mt7921_dev *dev = file->private; - int stat[8], i, n; + struct mt7921_phy *phy = &dev->phy; + struct mib_stats *mib = &phy->mib; + int i; - mt7921_ampdu_stat_read_phy(&dev->phy, file); + mt7921_mutex_acquire(dev); - /* Tx amsdu info */ - seq_puts(file, "Tx MSDU stat:\n"); - for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) { - stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); - n += stat[i]; - } + mt7921_ampdu_stat_read_phy(phy, file); - for (i = 0; i < ARRAY_SIZE(stat); i++) { - seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ", - i + 1, stat[i]); - if (n != 0) - seq_printf(file, "(%d%%)\n", stat[i] * 100 / n); + seq_puts(file, "Tx MSDU stat:\n"); + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ", + i + 1, mib->tx_amsdu[i]); + if (mib->tx_amsdu_cnt) + seq_printf(file, "(%3d%%)\n", + mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt); else seq_puts(file, "\n"); } + mt7921_mutex_release(dev); + return 0; } @@ -98,6 +127,8 @@ mt7921_queues_acq(struct seq_file *s, void *data) struct mt7921_dev *dev = dev_get_drvdata(s->private); int i; + mt7921_mutex_acquire(dev); + for (i = 0; i < 16; i++) { int j, acs = i / 4, index = i % 4; u32 ctrl, val, qlen = 0; @@ -117,6 +148,8 @@ mt7921_queues_acq(struct seq_file *s, void *data) seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); } + mt7921_mutex_release(dev); + return 0; } @@ -229,30 +262,38 @@ mt7921_txpwr(struct seq_file *s, void *data) return 0; } +static void +mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ 
+ struct mt7921_dev *dev = priv; + + mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable); +} + static int mt7921_pm_set(void *data, u64 val) { struct mt7921_dev *dev = data; struct mt76_connac_pm *pm = &dev->pm; - struct mt76_phy *mphy = dev->phy.mt76; - - if (val == pm->enable) - return 0; mt7921_mutex_acquire(dev); + if (val == pm->enable) + goto out; + if (!pm->enable) { pm->stats.last_wake_event = jiffies; pm->stats.last_doze_event = jiffies; } pm->enable = val; - ieee80211_iterate_active_interfaces(mphy->hw, + ieee80211_iterate_active_interfaces(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, - mt7921_pm_interface_iter, mphy->priv); + mt7921_pm_interface_iter, dev); mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable); +out: mt7921_mutex_release(dev); return 0; @@ -369,11 +410,25 @@ static int mt7921_chip_reset(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n"); +static int +mt7921s_sched_quota_read(struct seq_file *s, void *data) +{ + struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct mt76_sdio *sdio = &dev->mt76.sdio; + + seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota); + seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota); + seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota); + seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit); + + return 0; +} + int mt7921_init_debugfs(struct mt7921_dev *dev) { struct dentry *dir; - dir = mt76_register_debugfs(&dev->mt76); + dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval); if (!dir) return -ENOMEM; @@ -392,6 +447,8 @@ int mt7921_init_debugfs(struct mt7921_dev *dev) debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, mt7921_pm_stats); debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds); - + if (mt76_is_sdio(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir, + mt7921s_sched_quota_read); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c index 7d7d43a5422f80..cdff1fd52d93a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -19,46 +19,6 @@ int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc) return 0; } -void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) -{ - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - __le32 *rxd = (__le32 *)skb->data; - enum rx_pkt_type type; - u16 flag; - - type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); - flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); - - if (type == PKT_TYPE_RX_EVENT && flag == 0x1) - type = PKT_TYPE_NORMAL_MCU; - - switch (type) { - case PKT_TYPE_TXRX_NOTIFY: - mt7921_mac_tx_free(dev, skb); - break; - case PKT_TYPE_RX_EVENT: - mt7921_mcu_rx_event(dev, skb); - break; - case PKT_TYPE_NORMAL_MCU: - case PKT_TYPE_NORMAL: - if (!mt7921_mac_fill_rx(dev, skb)) { - mt76_rx(&dev->mt76, q, skb); - return; - } - fallthrough; - default: - dev_kfree_skb(skb); - break; - } -} - -void mt7921_tx_cleanup(struct mt7921_dev *dev) -{ - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false); -} - static int mt7921_poll_tx(struct napi_struct *napi, int budget) { struct mt7921_dev *dev; @@ -71,7 +31,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget) return 0; } - mt7921_tx_cleanup(dev); + mt7921_mcu_tx_cleanup(dev); if 
(napi_complete(napi)) mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL); mt76_connac_pm_unref(&dev->mphy, &dev->pm); @@ -125,36 +85,37 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr) u32 mapped; u32 size; } fixed_map[] = { - { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */ - { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */ - { 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */ + { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ + { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ + { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ + { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ + { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ + { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ + { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */ + { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */ + { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ - { 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */ + { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ - { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ + { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */ + { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ - { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ - { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ - { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ - { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ - { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ - { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ - { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ - { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ @@ -306,7 +267,7 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force) mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_reset(dev, &dev->mt76.q_rx[i]); - mt76_tx_status_check(&dev->mt76, NULL, true); + mt76_tx_status_check(&dev->mt76, true); return mt7921_dma_enable(dev); } @@ -383,6 +344,9 @@ int mt7921_dma_init(struct mt7921_dev *dev) 
struct mt76_bus_ops *bus_ops; int ret; + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; dev->bus_ops = dev->mt76.bus; bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), GFP_KERNEL); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c deleted file mode 100644 index 691d14a1a7bf74..00000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: ISC -/* Copyright (C) 2020 MediaTek Inc. */ - -#include "mt7921.h" -#include "eeprom.h" - -static u32 mt7921_eeprom_read(struct mt7921_dev *dev, u32 offset) -{ - u8 *data = dev->mt76.eeprom.data; - - if (data[offset] == 0xff) - mt7921_mcu_get_eeprom(dev, offset); - - return data[offset]; -} - -static int mt7921_eeprom_load(struct mt7921_dev *dev) -{ - int ret; - - ret = mt76_eeprom_init(&dev->mt76, MT7921_EEPROM_SIZE); - if (ret < 0) - return ret; - - memset(dev->mt76.eeprom.data, -1, MT7921_EEPROM_SIZE); - - return 0; -} - -static int mt7921_check_eeprom(struct mt7921_dev *dev) -{ - u8 *eeprom = dev->mt76.eeprom.data; - u16 val; - - mt7921_eeprom_read(dev, MT_EE_CHIP_ID); - val = get_unaligned_le16(eeprom); - - switch (val) { - case 0x7961: - return 0; - default: - return -EINVAL; - } -} - -void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy) -{ - struct mt7921_dev *dev = phy->dev; - u32 val; - - val = mt7921_eeprom_read(dev, MT_EE_WIFI_CONF); - val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val); - - switch (val) { - case MT_EE_5GHZ: - phy->mt76->cap.has_5ghz = true; - break; - case MT_EE_2GHZ: - phy->mt76->cap.has_2ghz = true; - break; - default: - phy->mt76->cap.has_2ghz = true; - phy->mt76->cap.has_5ghz = true; - break; - } -} - -static void mt7921_eeprom_parse_hw_cap(struct mt7921_dev *dev) -{ - u8 tx_mask; - - mt7921_eeprom_parse_band_config(&dev->phy); - - /* TODO: read NSS with MCU_CMD_NIC_CAPV2 */ - tx_mask = 2; - dev->chainmask = BIT(tx_mask) - 1; - dev->mphy.antenna_mask = dev->chainmask; - dev->mphy.chainmask = dev->mphy.antenna_mask; -} - -int mt7921_eeprom_init(struct mt7921_dev *dev) -{ - int ret; - - ret = mt7921_eeprom_load(dev); - if (ret < 0) - return ret; - - ret = mt7921_check_eeprom(dev); - if (ret) - return ret; - - mt7921_eeprom_parse_hw_cap(dev); - memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, - ETH_ALEN); - - mt76_eeprom_override(&dev->mphy); - - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index a9ce10b9882731..210998f086ab3d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -41,7 +41,7 @@ mt7921_regd_notifier(struct wiphy *wiphy, mt7921_mutex_release(dev); } -static void +static int mt7921_init_wiphy(struct ieee80211_hw *hw) { struct mt7921_phy *phy = mt7921_hw_phy(hw); @@ -62,7 +62,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) hw->vif_data_size = sizeof(struct mt7921_vif); wiphy->iface_combinations = if_comb; - wiphy->flags &= ~WIPHY_FLAG_IBSS_RSN; + wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | + WIPHY_FLAG_4ADDR_STATION); wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; @@ -75,6 +76,14 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->max_sched_scan_reqs = 1; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->reg_notifier = 
mt7921_regd_notifier; + wiphy->sar_capa = &mt76_sar_capa; + + phy->mt76->frp = devm_kcalloc(dev->mt76.dev, + wiphy->sar_capa->num_freq_ranges, + sizeof(struct mt76_freq_range_power), + GFP_KERNEL); + if (!phy->mt76->frp) + return -ENOMEM; wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; @@ -92,6 +101,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, CONNECTION_MONITOR); hw->max_tx_fragments = 4; + + return 0; } static void @@ -106,6 +117,10 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); + /* enable MIB tx-rx time reporting */ + mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN); + mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN); + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536); /* disable rx rate report by default due to hw issues */ mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); @@ -127,35 +142,53 @@ int mt7921_mac_init(struct mt7921_dev *dev) for (i = 0; i < 2; i++) mt7921_mac_init_band(dev, i); + dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0)); + return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); } +EXPORT_SYMBOL_GPL(mt7921_mac_init); -static int mt7921_init_hardware(struct mt7921_dev *dev) +static int __mt7921_init_hardware(struct mt7921_dev *dev) { - int ret, idx; - - ret = mt7921_dma_init(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + int ret; /* force firmware operation mode into normal state, * which should be set before firmware download stage. */ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); - ret = mt7921_mcu_init(dev); if (ret) - return ret; + goto out; - ret = mt7921_eeprom_init(dev); - if (ret < 0) - return ret; + mt76_eeprom_override(&dev->mphy); ret = mt7921_mcu_set_eeprom(dev); if (ret) + goto out; + + ret = mt7921_mac_init(dev); +out: + return ret; +} + +static int mt7921_init_hardware(struct mt7921_dev *dev) +{ + int ret, idx, i; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + for (i = 0; i < MT7921_MCU_INIT_RETRY_COUNT; i++) { + ret = __mt7921_init_hardware(dev); + if (!ret) + break; + + mt7921_init_reset(dev); + } + + if (i == MT7921_MCU_INIT_RETRY_COUNT) { + dev_err(dev->mt76.dev, "hardware init failed\n"); return ret; + } /* Beacon and mgmt frames should occupy wcid 0 */ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); @@ -167,7 +200,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev) dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); - return mt7921_mac_init(dev); + return 0; } int mt7921_register_device(struct mt7921_dev *dev) @@ -185,8 +218,9 @@ int mt7921_register_device(struct mt7921_dev *dev) spin_lock_init(&dev->pm.wake.lock); mutex_init(&dev->pm.mutex); init_waitqueue_head(&dev->pm.wait); + if (mt76_is_sdio(&dev->mt76)) + init_waitqueue_head(&dev->mt76.sdio.wait); spin_lock_init(&dev->pm.txq_lock); - INIT_LIST_HEAD(&dev->phy.stats_list); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work); INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work); @@ -200,14 +234,24 @@ int mt7921_register_device(struct mt7921_dev *dev) dev->pm.idle_timeout = MT7921_PM_TIMEOUT; dev->pm.stats.last_wake_event = jiffies; dev->pm.stats.last_doze_event = jiffies; - dev->pm.enable = true; - dev->pm.ds_enable 
= true; + + /* TODO: mt7921s run sleep mode on default */ + if (mt76_is_mmio(&dev->mt76)) { + dev->pm.enable = true; + dev->pm.ds_enable = true; + } + + if (mt76_is_sdio(&dev->mt76)) + hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; ret = mt7921_init_hardware(dev); if (ret) return ret; - mt7921_init_wiphy(hw); + ret = mt7921_init_wiphy(hw); + if (ret) + return ret; + dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; @@ -244,14 +288,4 @@ int mt7921_register_device(struct mt7921_dev *dev) return 0; } - -void mt7921_unregister_device(struct mt7921_dev *dev) -{ - mt76_unregister_device(&dev->mt76); - mt7921_tx_token_put(dev); - mt7921_dma_cleanup(dev); - mt7921_mcu_exit(dev); - - tasklet_disable(&dev->irq_tasklet); - mt76_free_device(&dev->mt76); -} +EXPORT_SYMBOL_GPL(mt7921_register_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 7fe2e3a50428fe..db3302b1576a04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -39,6 +39,7 @@ static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev, void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { } +EXPORT_SYMBOL_GPL(mt7921_sta_ps); bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) { @@ -49,7 +50,7 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) 0, 5000); } -static void mt7921_mac_sta_poll(struct mt7921_dev *dev) +void mt7921_mac_sta_poll(struct mt7921_dev *dev) { static const u8 ac_to_tid[] = { [IEEE80211_AC_BE] = 0, @@ -61,18 +62,18 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev) struct mt7921_sta *msta; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); + struct rate_info *rate; int i; spin_lock_bh(&dev->sta_poll_lock); list_splice_init(&dev->sta_poll_list, &sta_poll_list); spin_unlock_bh(&dev->sta_poll_lock); - rcu_read_lock(); - while (true) { bool clear = false; - u32 addr; + u32 addr, val; u16 idx; + u8 bw; spin_lock_bh(&dev->sta_poll_lock); if (list_empty(&sta_poll_list)) { @@ -85,7 +86,7 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev) spin_unlock_bh(&dev->sta_poll_lock); idx = msta->wcid.idx; - addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4; + addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET); for (i = 0; i < IEEE80211_NUM_ACS; i++) { u32 tx_last = msta->airtime_ac[i]; @@ -126,10 +127,46 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev) ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur); } - } - rcu_read_unlock(); + /* We don't support reading GI info from txs packets. + * For accurate tx status reporting and AQL improvement, + * we need to make sure that flags match so polling GI + * from per-sta counters directly. 
+ */ + rate = &msta->wcid.rate; + addr = mt7921_mac_wtbl_lmac_addr(idx, + MT_WTBL_TXRX_CAP_RATE_OFFSET); + val = mt76_rr(dev, addr); + + switch (rate->bw) { + case RATE_INFO_BW_160: + bw = IEEE80211_STA_RX_BW_160; + break; + case RATE_INFO_BW_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case RATE_INFO_BW_40: + bw = IEEE80211_STA_RX_BW_40; + break; + default: + bw = IEEE80211_STA_RX_BW_20; + break; + } + + if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { + u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw; + + rate->he_gi = (val & (0x3 << offs)) >> offs; + } else if (rate->flags & + (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { + if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + else + rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; + } + } } +EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll); static void mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, @@ -180,12 +217,56 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); } +static void +mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, + struct mt76_rx_status *status, + __le32 *rxv) +{ + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | + HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN), + .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN) | + HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN), + }; + struct ieee80211_radiotap_he_mu *he_mu; + + he_mu = skb_push(skb, sizeof(mu_known)); + memcpy(he_mu, &mu_known, sizeof(mu_known)); + +#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) + + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); + if (status->he_dcm) + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); + + he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | + MU_PREP(FLAGS2_SIG_B_SYMS_USERS, + le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER)); + + he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3])); + + if (status->bw >= RATE_INFO_BW_40) { + he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); + he_mu->ru_ch2[0] = + FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3])); + } + + if (status->bw >= RATE_INFO_BW_80) { + he_mu->ru_ch1[1] = + FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3])); + he_mu->ru_ch2[1] = + FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3])); + } +} + static void mt7921_mac_decode_he_radiotap(struct sk_buff *skb, struct mt76_rx_status *status, __le32 *rxv, u32 phy) { - /* TODO: struct ieee80211_radiotap_he_mu */ static const struct ieee80211_radiotap_he known = { .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | HE_BITS(DATA1_DATA_DCM_KNOWN) | @@ -193,6 +274,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, HE_BITS(DATA1_CODING_KNOWN) | HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | HE_BITS(DATA1_DOPPLER_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN) | HE_BITS(DATA1_BSS_COLOR_KNOWN), .data2 = HE_BITS(DATA2_GI_KNOWN) | HE_BITS(DATA2_TXBF_KNOWN) | @@ -207,9 +289,12 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) | HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]); + he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) | le16_encode_bits(ltf_size, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) + he->data5 |= HE_BITS(DATA5_TXBF); he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | 
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); @@ -217,8 +302,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, case MT_PHY_TYPE_HE_SU: he->data1 |= HE_BITS(DATA1_FORMAT_SU) | HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN); + HE_BITS(DATA1_BEAM_CHANGE_KNOWN); he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) | HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); @@ -232,17 +316,15 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, break; case MT_PHY_TYPE_HE_MU: he->data1 |= HE_BITS(DATA1_FORMAT_MU) | - HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN); + HE_BITS(DATA1_UL_DL_KNOWN); he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); + he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]); mt7921_mac_decode_he_radiotap_ru(status, he, rxv); break; case MT_PHY_TYPE_HE_TB: he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN) | HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | HE_BITS(DATA1_SPTL_REUSE4_KNOWN); @@ -271,7 +353,14 @@ mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy, return; } - status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + if (chfreq > 180) { + status->band = NL80211_BAND_6GHZ; + chfreq = (chfreq - 181) * 4 + 1; + } else if (chfreq > 14) { + status->band = NL80211_BAND_5GHZ; + } else { + status->band = NL80211_BAND_2GHZ; + } status->freq = ieee80211_channel_to_frequency(chfreq, status->band); } @@ -306,7 +395,8 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb) mt7921_mac_rssi_iter, skb); } -int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) +static int +mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) { u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -356,10 +446,17 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) mt7921_get_status_freq_info(dev, mphy, status, chfreq); - if (status->band == NL80211_BAND_5GHZ) + switch (status->band) { + case NL80211_BAND_5GHZ: sband = &mphy->sband_5g.sband; - else + break; + case NL80211_BAND_6GHZ: + sband = &mphy->sband_6g.sband; + break; + default: sband = &mphy->sband_2g.sband; + break; + } if (!sband->channels) return -EINVAL; @@ -606,9 +703,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) mt7921_mac_assoc_rssi(dev, skb); - if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) { mt7921_mac_decode_he_radiotap(skb, status, rxv, mode); + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + mt7921_mac_decode_he_mu_radiotap(skb, status, rxv); + } + if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; @@ -702,7 +803,8 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi, txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); } - if (!ieee80211_is_data(fc) || multicast) + if (!ieee80211_is_data(fc) || multicast || + info->flags & IEEE80211_TX_CTL_USE_MINRATE) val |= MT_TXD2_FIX_RATE; txwi[2] |= cpu_to_le32(val); @@ -732,31 +834,17 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi, txwi[7] |= cpu_to_le32(val); } -static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi) -{ - struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid); - u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, txwi[2]); - - if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2))) - return; - - 
if (time_is_after_eq_jiffies(msta->next_txs_ts)) - return; - - msta->next_txs_ts = jiffies + msecs_to_jiffies(250); - pid = mt76_get_next_pkt_id(wcid); - txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU | - FIELD_PREP(MT_TXD5_PID, pid)); -} - void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_key_conf *key, bool beacon) + struct ieee80211_key_conf *key, int pid, + bool beacon) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct mt76_phy *mphy = &dev->mphy; u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; + bool is_mmio = mt76_is_mmio(&dev->mt76); + u32 sz_txd = is_mmio ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE; bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; u16 tx_count = 15; u32 val; @@ -772,15 +860,15 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, p_fmt = MT_TX_TYPE_FW; q_idx = MT_LMAC_BCN0; } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { - p_fmt = MT_TX_TYPE_CT; + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; q_idx = MT_LMAC_ALTX0; } else { - p_fmt = MT_TX_TYPE_CT; + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; q_idx = wmm_idx * MT7921_MAX_WMM_SETS + mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb)); } - val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | FIELD_PREP(MT_TXD0_Q_IDX, q_idx); txwi[0] = cpu_to_le32(val); @@ -800,7 +888,12 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, txwi[3] = cpu_to_le32(val); txwi[4] = 0; - txwi[5] = 0; + + val = FIELD_PREP(MT_TXD5_PID, pid); + if (pid >= MT_PACKET_ID_FIRST) + val |= MT_TXD5_TX_STATUS_HOST; + txwi[5] = cpu_to_le32(val); + txwi[6] = 0; txwi[7] = wcid->amsdu ? 
cpu_to_le32(MT_TXD7_HW_AMSDU) : 0; @@ -810,105 +903,32 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, mt7921_mac_write_txwi_80211(dev, txwi, skb, key); if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) { - u16 rate; + int rateidx = ffs(vif->bss_conf.basic_rates) - 1; + u16 rate, mode; /* hardware won't add HTC for mgmt/ctrl frame */ txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD); - if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) - rate = MT7921_5G_RATE_DEFAULT; - else - rate = MT7921_2G_RATE_DEFAULT; + rate = mt76_calculate_default_rate(mphy, rateidx); + mode = rate >> 8; + rate &= GENMASK(7, 0); + rate |= FIELD_PREP(MT_TX_RATE_MODE, mode); val = MT_TXD6_FIXED_BW | FIELD_PREP(MT_TXD6_TX_RATE, rate); txwi[6] |= cpu_to_le32(val); txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); } - - mt7921_update_txs(wcid, txwi); -} - -static void -mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info, - void *txp_ptr, u32 id) -{ - struct mt7921_hw_txp *txp = txp_ptr; - struct mt7921_txp_ptr *ptr = &txp->ptr[0]; - int i, nbuf = tx_info->nbuf - 1; - - tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); - tx_info->nbuf = 1; - - txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); - - for (i = 0; i < nbuf; i++) { - u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK; - u32 addr = tx_info->buf[i + 1].addr; - - if (i == nbuf - 1) - len |= MT_TXD_LEN_LAST; - - if (i & 1) { - ptr->buf1 = cpu_to_le32(addr); - ptr->len1 = cpu_to_le16(len); - ptr++; - } else { - ptr->buf0 = cpu_to_le32(addr); - ptr->len0 = cpu_to_le16(len); - } - } } +EXPORT_SYMBOL_GPL(mt7921_mac_write_txwi); -int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, - enum mt76_txq_id qid, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, - struct mt76_tx_info *tx_info) -{ - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); - struct ieee80211_key_conf *key = info->control.hw_key; - struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb); - struct mt76_txwi_cache *t; - struct mt7921_txp_common *txp; - int id; - u8 *txwi = (u8 *)txwi_ptr; - - if (unlikely(tx_info->skb->len <= ETH_HLEN)) - return -EINVAL; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - cb->wcid = wcid->idx; - - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); - t->skb = tx_info->skb; - - id = mt76_token_consume(mdev, &t); - if (id < 0) - return id; - - mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, - false); - - txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE); - memset(txp, 0, sizeof(struct mt7921_txp_common)); - mt7921_write_hw_txp(dev, tx_info, txp, id); - - tx_info->skb = DMA_DUMMY_DATA; - - return 0; -} - -static void -mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) { struct mt7921_sta *msta; u16 fc, tid; u32 val; - if (!sta || !sta->ht_cap.ht_supported) + if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he)) return; tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1])); @@ -925,203 +945,209 @@ mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) if (!test_and_set_bit(tid, &msta->ampdu_state)) ieee80211_start_tx_ba_session(sta, tid, 0); } +EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr); -static void -mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, - struct ieee80211_sta *sta, u8 stat, - struct list_head *free_list) +static bool +mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid, + __le32 
*txs_data) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_tx_status status = { - .sta = sta, - .info = info, - .skb = skb, - .free_list = free_list, - }; - struct ieee80211_hw *hw; + struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid); + struct mt76_sta_stats *stats = &msta->stats; + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct ieee80211_tx_info *info; + struct rate_info rate = {}; + struct sk_buff_head list; + u32 txrate, txs, mode; + struct sk_buff *skb; + bool cck = false; + + mt76_tx_status_lock(mdev, &list); + skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); + if (!skb) + goto out; - if (sta) { - struct mt7921_sta *msta; + info = IEEE80211_SKB_CB(skb); + txs = le32_to_cpu(txs_data[0]); + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) + info->flags |= IEEE80211_TX_STAT_ACK; - msta = (struct mt7921_sta *)sta->drv_priv; - status.rate = &msta->stats.tx_rate; - } + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = !!(info->flags & + IEEE80211_TX_STAT_ACK); - hw = mt76_tx_status_get_hw(mdev, skb); + info->status.rates[0].idx = -1; - if (info->flags & IEEE80211_TX_CTL_AMPDU) - info->flags |= IEEE80211_TX_STAT_AMPDU; + if (!wcid->sta) + goto out; - if (stat) - ieee80211_tx_info_clear_status(info); + txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - info->flags |= IEEE80211_TX_STAT_ACK; + rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate); + rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1; - info->status.tx_time = 0; - ieee80211_tx_status_ext(hw, &status); -} + if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss)) + stats->tx_nss[rate.nss - 1]++; + if (rate.mcs < ARRAY_SIZE(stats->tx_mcs)) + stats->tx_mcs[rate.mcs]++; -void mt7921_txp_skb_unmap(struct mt76_dev *dev, - struct mt76_txwi_cache *t) -{ - struct mt7921_txp_common *txp; - int i; + mode = FIELD_GET(MT_TX_RATE_MODE, txrate); + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ) + sband = &dev->mphy.sband_5g.sband; + else + sband = &dev->mphy.sband_2g.sband; - txp = mt7921_txwi_to_txp(dev, t); + rate.mcs = mt76_get_rate(dev->mphy.dev, sband, rate.mcs, cck); + rate.legacy = sband->bitrates[rate.mcs].bitrate; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + rate.mcs += (rate.nss - 1) * 8; + if (rate.mcs > 31) + goto out; + + rate.flags = RATE_INFO_FLAGS_MCS; + if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) + rate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + if (rate.mcs > 9) + goto out; - for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) { - struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i]; - bool last; - u16 len; + rate.flags = RATE_INFO_FLAGS_VHT_MCS; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + if (rate.mcs > 11) + goto out; - len = le16_to_cpu(ptr->len0); - last = len & MT_TXD_LEN_LAST; - len &= MT_TXD_LEN_MASK; - dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, - DMA_TO_DEVICE); - if (last) - break; + rate.he_gi = wcid->rate.he_gi; + rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate); + rate.flags = RATE_INFO_FLAGS_HE_MCS; + break; + default: + goto out; + } + stats->tx_mode[mode]++; - len = le16_to_cpu(ptr->len1); - last = len & MT_TXD_LEN_LAST; - len &= MT_TXD_LEN_MASK; - dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, - DMA_TO_DEVICE); - if (last) - break; + switch (FIELD_GET(MT_TXS0_BW, txs)) { + 
case IEEE80211_STA_RX_BW_160: + rate.bw = RATE_INFO_BW_160; + stats->tx_bw[3]++; + break; + case IEEE80211_STA_RX_BW_80: + rate.bw = RATE_INFO_BW_80; + stats->tx_bw[2]++; + break; + case IEEE80211_STA_RX_BW_40: + rate.bw = RATE_INFO_BW_40; + stats->tx_bw[1]++; + break; + default: + rate.bw = RATE_INFO_BW_20; + stats->tx_bw[0]++; + break; } -} + wcid->rate = rate; -void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) -{ - struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data; - struct mt76_dev *mdev = &dev->mt76; - struct mt76_txwi_cache *txwi; - struct ieee80211_sta *sta = NULL; - LIST_HEAD(free_list); - struct sk_buff *tmp; - bool wake = false; - u8 i, count; - - /* clean DMA queues and unmap buffers first */ - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); - - /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE, - * to the time ack is received or dropped by hw (air + hw queue time). - * Should avoid accessing WTBL to get Tx airtime, and use it instead. - */ - count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); - for (i = 0; i < count; i++) { - u32 msdu, info = le32_to_cpu(free->info[i]); - u8 stat; - - /* 1'b1: new wcid pair. - * 1'b0: msdu_id with the same 'wcid pair' as above. - */ - if (info & MT_TX_FREE_PAIR) { - struct mt7921_sta *msta; - struct mt7921_phy *phy; - struct mt76_wcid *wcid; - u16 idx; - - count++; - idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); - wcid = rcu_dereference(dev->mt76.wcid[idx]); - sta = wcid_to_sta(wcid); - if (!sta) - continue; +out: + if (skb) + mt76_tx_status_skb_done(mdev, skb, &list); + mt76_tx_status_unlock(mdev, &list); - msta = container_of(wcid, struct mt7921_sta, wcid); - phy = msta->vif->phy; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->stats_list)) - list_add_tail(&msta->stats_list, &phy->stats_list); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); - continue; - } + return !!skb; +} - msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); - stat = FIELD_GET(MT_TX_FREE_STATUS, info); +static void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) +{ + struct mt7921_sta *msta = NULL; + struct mt76_wcid *wcid; + __le32 *txs_data = data; + u16 wcidx; + u32 txs; + u8 pid; - txwi = mt76_token_release(mdev, msdu, &wake); - if (!txwi) - continue; + txs = le32_to_cpu(txs_data[0]); + if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) + return; - mt7921_txp_skb_unmap(mdev, txwi); - if (txwi->skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb); - void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi); + txs = le32_to_cpu(txs_data[2]); + wcidx = FIELD_GET(MT_TXS2_WCID, txs); - if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7921_tx_check_aggr(sta, txwi_ptr); + txs = le32_to_cpu(txs_data[3]); + pid = FIELD_GET(MT_TXS3_PID, txs); - if (sta && !info->tx_time_est) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - int pending; + if (pid < MT_PACKET_ID_FIRST) + return; - pending = atomic_dec_return(&wcid->non_aql_packets); - if (pending < 0) - atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); - } + if (wcidx >= MT7921_WTBL_SIZE) + return; - mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list); - txwi->skb = NULL; - } + rcu_read_lock(); - mt76_put_txwi(mdev, txwi); - } + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + if (!wcid) + goto out; - if (wake) - mt76_set_tx_blocked(&dev->mt76, 
false); + mt7921_mac_add_txs_skb(dev, wcid, pid, txs_data); - napi_consume_skb(skb, 1); + if (!wcid->sta) + goto out; - list_for_each_entry_safe(skb, tmp, &free_list, list) { - skb_list_del_init(skb); - napi_consume_skb(skb, 1); - } + msta = container_of(wcid, struct mt7921_sta, wcid); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); - mt7921_mac_sta_poll(dev); - mt76_worker_schedule(&dev->mt76.tx_worker); +out: + rcu_read_unlock(); } -void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) +void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) { - struct mt7921_dev *dev; - - if (!e->txwi) { - dev_kfree_skb_any(e->skb); - return; - } - - dev = container_of(mdev, struct mt7921_dev, mt76); - - /* error path */ - if (e->skb == DMA_DUMMY_DATA) { - struct mt76_txwi_cache *t; - struct mt7921_txp_common *txp; - u16 token; - - txp = mt7921_txwi_to_txp(mdev, e->txwi); - token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID; - t = mt76_token_put(mdev, token); - e->skb = t ? t->skb : NULL; - } + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + u16 flag; - if (e->skb) { - struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb); - struct mt76_wcid *wcid; + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); - wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]); + if (type == PKT_TYPE_RX_EVENT && flag == 0x1) + type = PKT_TYPE_NORMAL_MCU; - mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0, - NULL); + switch (type) { + case PKT_TYPE_RX_EVENT: + mt7921_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_TXS: + for (rxd += 2; rxd + 8 <= end; rxd += 8) + mt7921_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_NORMAL_MCU: + case PKT_TYPE_NORMAL: + if (!mt7921_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + fallthrough; + default: + dev_kfree_skb(skb); + break; } } +EXPORT_SYMBOL_GPL(mt7921_queue_rx_skb); void mt7921_mac_reset_counters(struct mt7921_phy *phy) { @@ -1154,17 +1180,12 @@ void mt7921_mac_set_timing(struct mt7921_phy *phy) FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); - int sifs, offset; - bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; + bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ; + int sifs = is_2ghz ? 
10 : 16, offset; if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) return; - if (is_5ghz) - sifs = 16; - else - sifs = 10; - mt76_set(dev, MT_ARB_SCR(0), MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); udelay(1); @@ -1181,7 +1202,7 @@ void mt7921_mac_set_timing(struct mt7921_phy *phy) FIELD_PREP(MT_IFS_SIFS, sifs) | FIELD_PREP(MT_IFS_SLOT, phy->slottime)); - if (phy->slottime < 20 || is_5ghz) + if (phy->slottime < 20 || !is_2ghz) val = MT7921_CFEND_RATE_DEFAULT; else val = MT7921_CFEND_RATE_11B; @@ -1242,27 +1263,7 @@ void mt7921_update_channel(struct mt76_phy *mphy) mt76_connac_power_save_sched(mphy, &dev->pm); } - -void mt7921_tx_token_put(struct mt7921_dev *dev) -{ - struct mt76_txwi_cache *txwi; - int id; - - spin_lock_bh(&dev->mt76.token_lock); - idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7921_txp_skb_unmap(&dev->mt76, txwi); - if (txwi->skb) { - struct ieee80211_hw *hw; - - hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); - ieee80211_free_txskb(hw, txwi->skb); - } - mt76_put_txwi(&dev->mt76, txwi); - dev->mt76.token_count--; - } - spin_unlock_bh(&dev->mt76.token_lock); - idr_destroy(&dev->mt76.token); -} +EXPORT_SYMBOL_GPL(mt7921_update_channel); static void mt7921_vif_connect_iter(void *priv, u8 *mac, @@ -1278,69 +1279,6 @@ mt7921_vif_connect_iter(void *priv, u8 *mac, mt7921_mcu_set_tx(dev, vif); } -static int -mt7921_mac_reset(struct mt7921_dev *dev) -{ - int i, err; - - mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); - - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - - set_bit(MT76_RESET, &dev->mphy.state); - set_bit(MT76_MCU_RESET, &dev->mphy.state); - wake_up(&dev->mt76.mcu.wait); - skb_queue_purge(&dev->mt76.mcu.res_q); - - mt76_txq_schedule_all(&dev->mphy); - - mt76_worker_disable(&dev->mt76.tx_worker); - napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); - napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); - napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); - napi_disable(&dev->mt76.tx_napi); - - mt7921_tx_token_put(dev); - idr_init(&dev->mt76.token); - - mt7921_wpdma_reset(dev, true); - - mt76_for_each_q_rx(&dev->mt76, i) { - napi_enable(&dev->mt76.napi[i]); - napi_schedule(&dev->mt76.napi[i]); - } - - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_MCU_CMD); - mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); - - err = mt7921_run_firmware(dev); - if (err) - goto out; - - err = mt7921_mcu_set_eeprom(dev); - if (err) - goto out; - - err = mt7921_mac_init(dev); - if (err) - goto out; - - err = __mt7921_start(&dev->phy); -out: - clear_bit(MT76_RESET, &dev->mphy.state); - - napi_enable(&dev->mt76.tx_napi); - napi_schedule(&dev->mt76.tx_napi); - mt76_worker_enable(&dev->mt76.tx_worker); - - return err; -} - /* system error recovery */ void mt7921_mac_reset_work(struct work_struct *work) { @@ -1359,12 +1297,9 @@ void mt7921_mac_reset_work(struct work_struct *work) cancel_work_sync(&pm->wake_work); mutex_lock(&dev->mt76.mutex); - for (i = 0; i < 10; i++) { - __mt7921_mcu_drv_pmctrl(dev); - - if (!mt7921_mac_reset(dev)) + for (i = 0; i < 10; i++) + if (!mt7921_dev_reset(dev)) break; - } mutex_unlock(&dev->mt76.mutex); if (i == 10) @@ -1399,12 +1334,12 @@ void mt7921_reset(struct mt76_dev *mdev) queue_work(dev->mt76.wq, &dev->reset_work); } -static void -mt7921_mac_update_mib_stats(struct mt7921_phy *phy) +void mt7921_mac_update_mib_stats(struct mt7921_phy *phy) { struct mt7921_dev *dev = phy->dev; struct mib_stats *mib = &phy->mib; int i, aggr0 = 0, aggr1; + 
u32 val; mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0), MT_MIB_SDR3_FCS_ERR_MASK); @@ -1417,8 +1352,37 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy) mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), MT_MIB_RTS_FAIL_COUNT_MASK); + mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0)); + mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0)); + mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0)); + + val = mt76_rr(dev, MT_MIB_SDR32(0)); + mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val); + mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val); + + val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0)); + mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val); + mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val); + + val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0)); + mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val); + mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val); + mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val); + mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val); + + mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0)); + mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0)); + mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0)); + mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0)); + + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); + mib->tx_amsdu[i] += val; + mib->tx_amsdu_cnt += val; + } + for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { - u32 val, val2; + u32 val2; val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); @@ -1449,6 +1413,8 @@ void mt7921_mac_work(struct work_struct *work) } mt7921_mutex_release(phy->dev); + + mt76_tx_status_check(mphy->dev, false); ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, MT7921_WATCHDOG_TIME); } @@ -1463,12 +1429,18 @@ void mt7921_pm_wake_work(struct work_struct *work) mphy = dev->phy.mt76; if (!mt7921_mcu_drv_pmctrl(dev)) { + struct mt76_dev *mdev = &dev->mt76; int i; - mt76_for_each_q_rx(&dev->mt76, i) - napi_schedule(&dev->mt76.napi[i]); - mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); - mt7921_tx_cleanup(dev); + if (mt76_is_sdio(mdev)) { + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt76_worker_schedule(&mdev->sdio.txrx_worker); + } else { + mt76_for_each_q_rx(mdev, i) + napi_schedule(&mdev->napi[i]); + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt7921_mcu_tx_cleanup(dev); + } if (test_bit(MT76_STATE_RUNNING, &mphy->state)) ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT7921_WATCHDOG_TIME); @@ -1506,34 +1478,6 @@ void mt7921_pm_power_save_work(struct work_struct *work) queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); } -int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - bool enable) -{ - struct mt7921_dev *dev = phy->dev; - bool ext_phy = phy != &dev->phy; - int err; - - if (!dev->pm.enable) - return -EOPNOTSUPP; - - err = mt7921_mcu_set_bss_pm(dev, vif, enable); - if (err) - return err; - - if (enable) { - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; - mt76_set(dev, MT_WF_RFCR(ext_phy), - MT_WF_RFCR_DROP_OTHER_BEACON); - } else { - vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; - mt76_clear(dev, MT_WF_RFCR(ext_phy), - MT_WF_RFCR_DROP_OTHER_BEACON); - } - - return 0; -} - void mt7921_coredump_work(struct work_struct *work) { struct mt7921_dev *dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h index 
3af67fac213df1..544a1c33126a45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h @@ -116,6 +116,7 @@ enum rx_pkt_type { #define MT_PRXV_TX_DCM BIT(4) #define MT_PRXV_TX_ER_SU_106T BIT(5) #define MT_PRXV_NSTS GENMASK(9, 7) +#define MT_PRXV_TXBF BIT(10) #define MT_PRXV_HT_AD_CODE BIT(11) #define MT_PRXV_FRAME_MODE GENMASK(14, 12) #define MT_PRXV_SGI GENMASK(16, 15) @@ -138,8 +139,15 @@ enum rx_pkt_type { #define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17) #define MT_CRXV_HE_LDPC_EXT_SYM BIT(20) #define MT_CRXV_HE_PE_DISAMBIG BIT(23) +#define MT_CRXV_HE_NUM_USER GENMASK(30, 24) #define MT_CRXV_HE_UPLINK BIT(31) +#define MT_CRXV_HE_RU0 GENMASK(7, 0) +#define MT_CRXV_HE_RU1 GENMASK(15, 8) +#define MT_CRXV_HE_RU2 GENMASK(23, 16) +#define MT_CRXV_HE_RU3 GENMASK(31, 24) +#define MT_CRXV_HE_MU_AID GENMASK(30, 20) + #define MT_CRXV_HE_SR_MASK GENMASK(11, 8) #define MT_CRXV_HE_SR1_MASK GENMASK(16, 12) #define MT_CRXV_HE_SR2_MASK GENMASK(20, 17) @@ -191,6 +199,10 @@ enum tx_mcu_port_q_idx { #define MT_TXD_SIZE (8 * 4) +#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4) +#define MT_SDIO_TAIL_SIZE 8 +#define MT_SDIO_HDR_SIZE 4 + #define MT_TXD0_Q_IDX GENMASK(31, 25) #define MT_TXD0_PKT_FMT GENMASK(24, 23) #define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) @@ -309,6 +321,15 @@ struct mt7921_tx_free { /* will support this field in further revision */ #define MT_TX_FREE_RATE GENMASK(13, 0) +#define MT_TXS0_BW GENMASK(30, 29) +#define MT_TXS0_TXS_FORMAT GENMASK(24, 23) +#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) +#define MT_TXS0_TX_RATE GENMASK(13, 0) + +#define MT_TXS2_WCID GENMASK(25, 16) + +#define MT_TXS3_PID GENMASK(31, 24) + static inline struct mt7921_txp_common * mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) { @@ -350,4 +371,15 @@ struct mt7921_txp_common { }; }; +#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7 +#define MT_WTBL_TXRX_RATE_G2_HE 24 +#define MT_WTBL_TXRX_RATE_G2 12 + +#define MT_WTBL_AC0_CTT_OFFSET 20 + +static inline u32 mt7921_mac_wtbl_lmac_addr(int idx, u8 offset) +{ + return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4; +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 63ec140c9c372f..633c6d2a57acd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -72,7 +72,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, if (band == NL80211_BAND_2GHZ) he_cap_elem->phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; - else if (band == NL80211_BAND_5GHZ) + else he_cap_elem->phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; @@ -93,7 +93,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, if (band == NL80211_BAND_2GHZ) he_cap_elem->phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G; - else if (band == NL80211_BAND_5GHZ) + else he_cap_elem->phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; @@ -142,6 +142,32 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; } + + if (band == NL80211_BAND_6GHZ) { + struct ieee80211_supported_band *sband = + &phy->mt76->sband_5g.sband; + struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; + u32 exp; + u16 cap; + + cap = u16_encode_bits(ht_cap->ampdu_density, + 
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + exp = u32_get_bits(vht_cap->cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + cap |= u16_encode_bits(exp, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + exp = u32_get_bits(vht_cap->cap, + IEEE80211_VHT_CAP_MAX_MPDU_MASK); + cap |= u16_encode_bits(exp, + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); + if (vht_cap->cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) + cap |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; + if (vht_cap->cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) + cap |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + + data->he_6ghz_capa.capa = cpu_to_le16(cap); + } idx++; } @@ -170,6 +196,15 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy) band = &phy->mt76->sband_5g.sband; band->iftype_data = data; band->n_iftype_data = n; + + if (phy->mt76->cap.has_6ghz) { + data = phy->iftype[NL80211_BAND_6GHZ]; + n = mt7921_init_he_caps(phy, NL80211_BAND_6GHZ, data); + + band = &phy->mt76->sband_6g.sband; + band->iftype_data = data; + band->n_iftype_data = n; + } } } @@ -202,6 +237,7 @@ int __mt7921_start(struct mt7921_phy *phy) return 0; } +EXPORT_SYMBOL_GPL(__mt7921_start); static int mt7921_start(struct ieee80211_hw *hw) { @@ -243,10 +279,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, mt7921_mutex_acquire(dev); - if (vif->type == NL80211_IFTYPE_MONITOR && - is_zero_ether_addr(vif->addr)) - phy->monitor_vif = vif; - mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) { ret = -ENOSPC; @@ -268,12 +300,13 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, idx = MT7921_WTBL_RESERVED - mvif->mt76.idx; - INIT_LIST_HEAD(&mvif->sta.stats_list); INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.ext_phy = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_packet_id_init(&mvif->sta.wcid); + mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -306,9 +339,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, struct mt7921_phy *phy = mt7921_hw_phy(hw); int idx = msta->wcid.idx; - if (vif == phy->monitor_vif) - phy->monitor_vif = NULL; - mt7921_mutex_acquire(dev); mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); @@ -323,6 +353,8 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); spin_unlock_bh(&dev->sta_poll_lock); + + mt76_packet_id_flush(&dev->mt76, &msta->wcid); } static int mt7921_set_channel(struct mt7921_phy *phy) @@ -533,36 +565,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, mt7921_mutex_release(dev); } -static int -mt7921_bss_bcnft_apply(struct mt7921_dev *dev, struct ieee80211_vif *vif, - bool assoc) -{ - int ret; - - if (!dev->pm.enable) - return 0; - - if (assoc) { - ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); - if (ret) - return ret; - - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; - mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); - - return 0; - } - - ret = mt7921_mcu_set_bss_pm(dev, vif, false); - if (ret) - return ret; - - vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; - mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); - - return 0; -} - static void mt7921_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -592,7 +594,8 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ASSOC) { 
mt7921_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_ASSOC); - mt7921_bss_bcnft_apply(dev, vif, info->assoc); + if (dev->pm.enable) + mt7921_mcu_set_beacon_filter(dev, vif, info->assoc); } if (changed & BSS_CHANGED_ARP_FILTER) { @@ -617,14 +620,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->stats_list); INIT_LIST_HEAD(&msta->poll_list); msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; msta->wcid.ext_phy = mvif->mt76.band_idx; msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->stats.jiffies = jiffies; + msta->last_txs = jiffies; ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) @@ -645,6 +647,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, return 0; } +EXPORT_SYMBOL_GPL(mt7921_mac_sta_add); void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -666,6 +669,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7921_mutex_release(dev); } +EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc); void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -693,12 +697,11 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, spin_lock_bh(&dev->sta_poll_lock); if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); - if (!list_empty(&msta->stats_list)) - list_del_init(&msta->stats_list); spin_unlock_bh(&dev->sta_poll_lock); mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } +EXPORT_SYMBOL_GPL(mt7921_mac_sta_remove); void mt7921_tx_worker(struct mt76_worker *w) { @@ -853,13 +856,175 @@ mt7921_get_stats(struct ieee80211_hw *hw, stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; - memset(mib, 0, sizeof(*mib)); - mt7921_mutex_release(phy->dev); return 0; } +static const char mt7921_gstrings_stats[][ETH_GSTRING_LEN] = { + /* tx counters */ + "tx_ampdu_cnt", + "tx_mpdu_attempts", + "tx_mpdu_success", + "tx_pkt_ebf_cnt", + "tx_pkt_ibf_cnt", + "tx_ampdu_len:0-1", + "tx_ampdu_len:2-10", + "tx_ampdu_len:11-19", + "tx_ampdu_len:20-28", + "tx_ampdu_len:29-37", + "tx_ampdu_len:38-46", + "tx_ampdu_len:47-55", + "tx_ampdu_len:56-79", + "tx_ampdu_len:80-103", + "tx_ampdu_len:104-127", + "tx_ampdu_len:128-151", + "tx_ampdu_len:152-175", + "tx_ampdu_len:176-199", + "tx_ampdu_len:200-223", + "tx_ampdu_len:224-247", + "ba_miss_count", + "tx_beamformer_ppdu_iBF", + "tx_beamformer_ppdu_eBF", + "tx_beamformer_rx_feedback_all", + "tx_beamformer_rx_feedback_he", + "tx_beamformer_rx_feedback_vht", + "tx_beamformer_rx_feedback_ht", + "tx_msdu_pack_1", + "tx_msdu_pack_2", + "tx_msdu_pack_3", + "tx_msdu_pack_4", + "tx_msdu_pack_5", + "tx_msdu_pack_6", + "tx_msdu_pack_7", + "tx_msdu_pack_8", + /* rx counters */ + "rx_mpdu_cnt", + "rx_ampdu_cnt", + "rx_ampdu_bytes_cnt", + "rx_ba_cnt", + /* per vif counters */ + "v_tx_mode_cck", + "v_tx_mode_ofdm", + "v_tx_mode_ht", + "v_tx_mode_ht_gf", + "v_tx_mode_vht", + "v_tx_mode_he_su", + "v_tx_mode_he_ext_su", + "v_tx_mode_he_tb", + "v_tx_mode_he_mu", + "v_tx_bw_20", + "v_tx_bw_40", + "v_tx_bw_80", + "v_tx_bw_160", + "v_tx_mcs_0", + "v_tx_mcs_1", + "v_tx_mcs_2", + "v_tx_mcs_3", + "v_tx_mcs_4", + "v_tx_mcs_5", + "v_tx_mcs_6", + "v_tx_mcs_7", + "v_tx_mcs_8", + "v_tx_mcs_9", + "v_tx_mcs_10", + "v_tx_mcs_11", +}; + +static void +mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset != ETH_SS_STATS) + 
return; + + memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats)); +} + +static int +mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int sset) +{ + return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0; +} + +static void +mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) +{ + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt76_ethtool_worker_info *wi = wi_data; + + if (msta->vif->mt76.idx != wi->idx) + return; + + mt76_ethtool_worker(wi, &msta->stats); +} + +static +void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt7921_dev *dev = phy->dev; + struct mib_stats *mib = &phy->mib; + struct mt76_ethtool_worker_info wi = { + .data = data, + .idx = mvif->mt76.idx, + }; + int i, ei = 0; + + mt7921_mutex_acquire(dev); + + mt7921_mac_update_mib_stats(phy); + + data[ei++] = mib->tx_ampdu_cnt; + data[ei++] = mib->tx_mpdu_attempts_cnt; + data[ei++] = mib->tx_mpdu_success_cnt; + data[ei++] = mib->tx_pkt_ebf_cnt; + data[ei++] = mib->tx_pkt_ibf_cnt; + + /* Tx ampdu stat */ + for (i = 0; i < 15; i++) + data[ei++] = dev->mt76.aggr_stats[i]; + + data[ei++] = phy->mib.ba_miss_cnt; + + /* Tx Beamformer monitor */ + data[ei++] = mib->tx_bf_ibf_ppdu_cnt; + data[ei++] = mib->tx_bf_ebf_ppdu_cnt; + + /* Tx Beamformer Rx feedback monitor */ + data[ei++] = mib->tx_bf_rx_fb_all_cnt; + data[ei++] = mib->tx_bf_rx_fb_he_cnt; + data[ei++] = mib->tx_bf_rx_fb_vht_cnt; + data[ei++] = mib->tx_bf_rx_fb_ht_cnt; + + /* Tx amsdu info (pack-count histogram) */ + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) + data[ei++] = mib->tx_amsdu[i]; + + /* rx counters */ + data[ei++] = mib->rx_mpdu_cnt; + data[ei++] = mib->rx_ampdu_cnt; + data[ei++] = mib->rx_ampdu_bytes_cnt; + data[ei++] = mib->rx_ba_cnt; + + /* Add values for all stations owned by this vif */ + wi.initial_stat_idx = ei; + ieee80211_iterate_stations_atomic(hw, mt7921_ethtool_worker, &wi); + + mt7921_mutex_release(dev); + + if (!wi.sta_count) + return; + + ei += wi.worker_stat_count; + if (ei != ARRAY_SIZE(mt7921_gstrings_stats)) + dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu", + ei, ARRAY_SIZE(mt7921_gstrings_stats)); +} + static u64 mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1048,22 +1213,22 @@ static void mt7921_sta_statistics(struct ieee80211_hw *hw, struct station_info *sinfo) { struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct mt7921_sta_stats *stats = &msta->stats; + struct rate_info *txrate = &msta->wcid.rate; - if (!stats->tx_rate.legacy && !stats->tx_rate.flags) + if (!txrate->legacy && !txrate->flags) return; - if (stats->tx_rate.legacy) { - sinfo->txrate.legacy = stats->tx_rate.legacy; + if (txrate->legacy) { + sinfo->txrate.legacy = txrate->legacy; } else { - sinfo->txrate.mcs = stats->tx_rate.mcs; - sinfo->txrate.nss = stats->tx_rate.nss; - sinfo->txrate.bw = stats->tx_rate.bw; - sinfo->txrate.he_gi = stats->tx_rate.he_gi; - sinfo->txrate.he_dcm = stats->tx_rate.he_dcm; - sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc; + sinfo->txrate.mcs = txrate->mcs; + sinfo->txrate.nss = txrate->nss; + sinfo->txrate.bw = txrate->bw; + sinfo->txrate.he_gi = txrate->he_gi; + sinfo->txrate.he_dcm = txrate->he_dcm; + sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; } - sinfo->txrate.flags = stats->tx_rate.flags; + 
sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } @@ -1172,6 +1337,43 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, MCU_UNI_CMD_STA_REC_UPDATE); } +static int mt7921_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_freq_range_power *data, *frp; + struct mt76_phy *mphy = hw->priv; + int err; + u32 i; + + if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs) + return -EINVAL; + + mt7921_mutex_acquire(dev); + + data = mphy->frp; + + for (i = 0; i < sar->num_sub_specs; i++) { + u32 index = sar->sub_specs[i].freq_range_index; + /* SAR specifies power limitaton in 0.25dbm */ + s32 power = sar->sub_specs[i].power >> 1; + + if (power > 127 || power < -127) + power = 127; + + frp = &data[index]; + frp->range = &capa->freq_ranges[index]; + frp->power = power; + } + + err = mt76_connac_mcu_set_rate_txpower(mphy); + + mt7921_mutex_release(dev); + + return err; +} + const struct ieee80211_ops mt7921_ops = { .tx = mt7921_tx, .start = mt7921_start, @@ -1192,6 +1394,9 @@ const struct ieee80211_ops mt7921_ops = { .release_buffered_frames = mt76_release_buffered_frames, .get_txpower = mt76_get_txpower, .get_stats = mt7921_get_stats, + .get_et_sset_count = mt7921_get_et_sset_count, + .get_et_strings = mt7921_get_et_strings, + .get_et_stats = mt7921_get_et_stats, .get_tsf = mt7921_get_tsf, .set_tsf = mt7921_set_tsf, .get_survey = mt76_get_survey, @@ -1203,6 +1408,8 @@ const struct ieee80211_ops mt7921_ops = { .sta_statistics = mt7921_sta_statistics, .sched_scan_start = mt7921_start_sched_scan, .sched_scan_stop = mt7921_stop_sched_scan, + CFG80211_TESTMODE_CMD(mt7921_testmode_cmd) + CFG80211_TESTMODE_DUMP(mt7921_testmode_dump) #ifdef CONFIG_PM .suspend = mt7921_suspend, .resume = mt7921_resume, @@ -1210,4 +1417,9 @@ const struct ieee80211_ops mt7921_ops = { .set_rekey_data = mt7921_set_rekey_data, #endif /* CONFIG_PM */ .flush = mt7921_flush, + .set_sar_specs = mt7921_set_sar_specs, }; +EXPORT_SYMBOL_GPL(mt7921_ops); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Sean Wang "); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 9fbaacc67cfad0..6ada1ebe7d68bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -82,9 +82,17 @@ struct mt7921_fw_region { #define FW_START_OVERRIDE BIT(0) #define FW_START_WORKING_PDA_CR4 BIT(2) +#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0) #define PATCH_SEC_TYPE_MASK GENMASK(15, 0) #define PATCH_SEC_TYPE_INFO 0x2 +#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24) +#define PATCH_SEC_ENC_TYPE_PLAIN 0x00 +#define PATCH_SEC_ENC_TYPE_AES 0x01 +#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02 +#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0) +#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0) + #define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) #define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) @@ -152,11 +160,11 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb) return 0; } -static int -mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, - struct sk_buff *skb, int seq) +int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) { struct mt7921_mcu_rxd *rxd; + int mcu_cmd = cmd & MCU_CMD_MASK; int ret = 0; if (!skb) { @@ -194,6 +202,9 @@ 
mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, skb_pull(skb, sizeof(*rxd)); event = (struct mt7921_mcu_uni_event *)skb->data; ret = le32_to_cpu(event->status); + /* skip invalid event */ + if (mcu_cmd != event->cid) + ret = -EAGAIN; break; } case MCU_CMD_REG_READ: { @@ -211,14 +222,13 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, return ret; } +EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response); -static int -mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, int *wait_seq) +int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); int txd_len, mcu_cmd = cmd & MCU_CMD_MASK; - enum mt76_mcuq_id txq = MT_MCUQ_WM; struct mt7921_uni_txd *uni_txd; struct mt7921_mcu_txd *mcu_txd; __le32 *txd; @@ -240,10 +250,8 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (!seq) seq = ++dev->mt76.mcu.msg_seq & 0xf; - if (cmd == MCU_CMD_FW_SCATTER) { - txq = MT_MCUQ_FWDL; + if (cmd == MCU_CMD_FW_SCATTER) goto exit; - } txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd); txd = (__le32 *)skb_push(skb, txd_len); @@ -307,96 +315,9 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (wait_seq) *wait_seq = seq; - return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); -} - -static void -mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy, - struct mt7921_mcu_peer_cap *peer, - struct rate_info *rate, u16 r) -{ - struct ieee80211_supported_band *sband; - u16 flags = 0; - u8 txmode = FIELD_GET(MT_WTBL_RATE_TX_MODE, r); - u8 gi = 0; - u8 bw = 0; - - rate->mcs = FIELD_GET(MT_WTBL_RATE_MCS, r); - rate->nss = FIELD_GET(MT_WTBL_RATE_NSS, r) + 1; - - switch (peer->bw) { - case IEEE80211_STA_RX_BW_160: - gi = peer->g16; - break; - case IEEE80211_STA_RX_BW_80: - gi = peer->g8; - break; - case IEEE80211_STA_RX_BW_40: - gi = peer->g4; - break; - default: - gi = peer->g2; - break; - } - - gi = txmode >= MT_PHY_TYPE_HE_SU ? 
- FIELD_GET(MT_WTBL_RATE_HE_GI, gi) : - FIELD_GET(MT_WTBL_RATE_GI, gi); - - switch (txmode) { - case MT_PHY_TYPE_CCK: - case MT_PHY_TYPE_OFDM: - if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) - sband = &mphy->sband_5g.sband; - else - sband = &mphy->sband_2g.sband; - - rate->legacy = sband->bitrates[rate->mcs].bitrate; - break; - case MT_PHY_TYPE_HT: - case MT_PHY_TYPE_HT_GF: - flags |= RATE_INFO_FLAGS_MCS; - - if (gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; - break; - case MT_PHY_TYPE_VHT: - flags |= RATE_INFO_FLAGS_VHT_MCS; - - if (gi) - flags |= RATE_INFO_FLAGS_SHORT_GI; - break; - case MT_PHY_TYPE_HE_SU: - case MT_PHY_TYPE_HE_EXT_SU: - case MT_PHY_TYPE_HE_TB: - case MT_PHY_TYPE_HE_MU: - rate->he_gi = gi; - rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r); - - flags |= RATE_INFO_FLAGS_HE_MCS; - break; - default: - break; - } - rate->flags = flags; - - bw = mt7921_mcu_chan_bw(&mphy->chandef) - FIELD_GET(MT_RA_RATE_BW, r); - - switch (bw) { - case IEEE80211_STA_RX_BW_160: - rate->bw = RATE_INFO_BW_160; - break; - case IEEE80211_STA_RX_BW_80: - rate->bw = RATE_INFO_BW_80; - break; - case IEEE80211_STA_RX_BW_40: - rate->bw = RATE_INFO_BW_40; - break; - default: - rate->bw = RATE_INFO_BW_20; - break; - } + return 0; } +EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message); static void mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb) @@ -497,49 +418,6 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) trace_lp_event(dev, event->state); } -static void -mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb) -{ - struct mt7921_mcu_tx_done_event *event; - struct mt7921_sta *msta; - struct mt7921_phy *mphy = &dev->phy; - struct mt7921_mcu_peer_cap peer; - struct ieee80211_sta *sta; - LIST_HEAD(list); - - skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); - event = (struct mt7921_mcu_tx_done_event *)skb->data; - - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&mphy->stats_list, &list); - - while (!list_empty(&list)) { - msta = list_first_entry(&list, struct mt7921_sta, stats_list); - list_del_init(&msta->stats_list); - - if (msta->wcid.idx != event->wlan_idx) - continue; - - spin_unlock_bh(&dev->sta_poll_lock); - - sta = wcid_to_sta(&msta->wcid); - - /* peer config based on IEEE SPEC */ - memset(&peer, 0x0, sizeof(peer)); - peer.bw = event->bw; - peer.g2 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); - peer.g4 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); - peer.g8 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); - peer.g16 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160); - mt7921_mcu_tx_rate_parse(mphy->mt76, &peer, - &msta->stats.tx_rate, event->tx_rate); - - spin_lock_bh(&dev->sta_poll_lock); - break; - } - spin_unlock_bh(&dev->sta_poll_lock); -} - static void mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) { @@ -560,15 +438,13 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) mt7921_mcu_debug_msg_event(dev, skb); break; case MCU_EVENT_COREDUMP: + dev->fw_assert = true; mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump); return; case MCU_EVENT_LP_INFO: mt7921_mcu_low_power_event(dev, skb); break; - case MCU_EVENT_TX_DONE: - mt7921_mcu_tx_done_event(dev, skb); - break; default: break; } @@ -577,7 +453,12 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) { - struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; + struct mt7921_mcu_rxd *rxd; + + if 
(skb_linearize(skb)) + return; + + rxd = (struct mt7921_mcu_rxd *)skb->data; if (rxd->eid == 0x6) { mt76_mcu_rx_event(&dev->mt76, skb); @@ -619,7 +500,7 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb, u8 cipher; cipher = mt7921_mcu_get_cipher(key->cipher); - if (cipher == MT_CIPHER_NONE) + if (cipher == MCU_CIPHER_NONE) return -EOPNOTSUPP; sec_key = &sec->key[0]; @@ -712,7 +593,7 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, enable, false); } -static int mt7921_mcu_restart(struct mt76_dev *dev) +int mt7921_mcu_restart(struct mt76_dev *dev) { struct { u8 power_mode; @@ -724,26 +605,55 @@ static int mt7921_mcu_restart(struct mt76_dev *dev) return mt76_mcu_send_msg(dev, MCU_CMD_NIC_POWER_CTRL, &req, sizeof(req), false); } +EXPORT_SYMBOL_GPL(mt7921_mcu_restart); -static int mt7921_driver_own(struct mt7921_dev *dev) +static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info) { - u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); + u32 mode = DL_MODE_NEED_RSP; - mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN); - if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN, - 0, 500)) { - dev_err(dev->mt76.dev, "Timeout for driver own\n"); - return -EIO; + if (info == PATCH_SEC_NOT_SUPPORT) + return mode; + + switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) { + case PATCH_SEC_ENC_TYPE_PLAIN: + break; + case PATCH_SEC_ENC_TYPE_AES: + mode |= DL_MODE_ENCRYPT; + mode |= FIELD_PREP(DL_MODE_KEY_IDX, + (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX; + mode |= DL_MODE_RESET_SEC_IV; + break; + case PATCH_SEC_ENC_TYPE_SCRAMBLE: + mode |= DL_MODE_ENCRYPT; + mode |= DL_CONFIG_ENCRY_MODE_SEL; + mode |= DL_MODE_RESET_SEC_IV; + break; + default: + dev_err(dev->mt76.dev, "Encryption type not support!\n"); } - return 0; + return mode; +} + +static char *mt7921_patch_name(struct mt7921_dev *dev) +{ + char *ret; + + if (is_mt7922(&dev->mt76)) + ret = MT7922_ROM_PATCH; + else + ret = MT7921_ROM_PATCH; + + return ret; } static int mt7921_load_patch(struct mt7921_dev *dev) { const struct mt7921_patch_hdr *hdr; const struct firmware *fw = NULL; - int i, ret, sem; + int i, ret, sem, max_len; + + max_len = mt76_is_sdio(&dev->mt76) ? 
2048 : 4096; sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); switch (sem) { @@ -756,7 +666,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev) return -EAGAIN; } - ret = request_firmware(&fw, MT7921_ROM_PATCH, dev->mt76.dev); + ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev); if (ret) goto out; @@ -774,7 +684,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev) for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) { struct mt7921_patch_sec *sec; const u8 *dl; - u32 len, addr; + u32 len, addr, mode; + u32 sec_info = 0; sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) + i * sizeof(*sec)); @@ -787,16 +698,18 @@ static int mt7921_load_patch(struct mt7921_dev *dev) addr = be32_to_cpu(sec->info.addr); len = be32_to_cpu(sec->info.len); dl = fw->data + be32_to_cpu(sec->offs); + sec_info = be32_to_cpu(sec->info.sec_key_idx); + mode = mt7921_get_data_mode(dev, sec_info); ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len, - DL_MODE_NEED_RSP); + mode); if (ret) { dev_err(dev->mt76.dev, "Download request failed\n"); goto out; } - ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, - dl, len); + ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + dl, len, max_len); if (ret) { dev_err(dev->mt76.dev, "Failed to send patch\n"); goto out; @@ -815,7 +728,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev) default: ret = -EAGAIN; dev_err(dev->mt76.dev, "Failed to release patch semaphore\n"); - goto out; + break; } release_firmware(fw); @@ -843,9 +756,11 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev, const struct mt7921_fw_trailer *hdr, const u8 *data, bool is_wa) { - int i, offset = 0; + int i, offset = 0, max_len; u32 override = 0, option = 0; + max_len = mt76_is_sdio(&dev->mt76) ? 
2048 : 4096; + for (i = 0; i < hdr->n_region; i++) { const struct mt7921_fw_region *region; int err; @@ -867,8 +782,8 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev, return err; } - err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, - data + offset, len); + err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + data + offset, len, max_len); if (err) { dev_err(dev->mt76.dev, "Failed to send firmware.\n"); return err; @@ -886,13 +801,25 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev, return mt76_connac_mcu_start_firmware(&dev->mt76, override, option); } +static char *mt7921_ram_name(struct mt7921_dev *dev) +{ + char *ret; + + if (is_mt7922(&dev->mt76)) + ret = MT7922_FIRMWARE_WM; + else + ret = MT7921_FIRMWARE_WM; + + return ret; +} + static int mt7921_load_ram(struct mt7921_dev *dev) { const struct mt7921_fw_trailer *hdr; const struct firmware *fw; int ret; - ret = request_firmware(&fw, MT7921_FIRMWARE_WM, dev->mt76.dev); + ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev); if (ret) return ret; @@ -929,7 +856,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) int ret; ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); - if (ret) { + if (ret && mt76_is_mmio(&dev->mt76)) { dev_dbg(dev->mt76.dev, "Firmware is already download\n"); goto fw_loaded; } @@ -950,7 +877,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) } fw_loaded: - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); #ifdef CONFIG_PM dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; @@ -978,39 +904,24 @@ int mt7921_run_firmware(struct mt7921_dev *dev) { int err; - err = mt7921_driver_own(dev); + err = mt7921_load_firmware(dev); if (err) return err; - err = mt7921_load_firmware(dev); + err = mt76_connac_mcu_get_nic_capability(&dev->mphy); if (err) return err; set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - mt7921_mcu_fw_log_2_host(dev, 1); - - return mt76_connac_mcu_get_nic_capability(&dev->mphy); -} - -int mt7921_mcu_init(struct mt7921_dev *dev) -{ - static const struct mt76_mcu_ops mt7921_mcu_ops = { - .headroom = sizeof(struct mt7921_mcu_txd), - .mcu_skb_send_msg = mt7921_mcu_send_message, - .mcu_parse_response = mt7921_mcu_parse_response, - .mcu_restart = mt7921_mcu_restart, - }; - - dev->mt76.mcu_ops = &mt7921_mcu_ops; - - return mt7921_run_firmware(dev); + return mt7921_mcu_fw_log_2_host(dev, 1); } +EXPORT_SYMBOL_GPL(mt7921_run_firmware); void mt7921_mcu_exit(struct mt7921_dev *dev) { - mt7921_wfsys_reset(dev); skb_queue_purge(&dev->mt76.mcu.res_q); } +EXPORT_SYMBOL_GPL(mt7921_mcu_exit); int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) { @@ -1041,7 +952,30 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) .total = IEEE80211_NUM_ACS, }; struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - int ac; + struct mu_edca { + u8 cw_min; + u8 cw_max; + u8 aifsn; + u8 acm; + u8 timer; + u8 padding[3]; + }; + struct mt7921_mcu_mu_tx { + u8 ver; + u8 pad0; + __le16 len; + u8 bss_idx; + u8 qos; + u8 wmm_idx; + u8 pad1; + struct mu_edca edca[IEEE80211_NUM_ACS]; + u8 pad3[32]; + } __packed req_mu = { + .bss_idx = mvif->mt76.idx, + .qos = vif->bss_conf.qos, + .wmm_idx = mvif->mt76.wmm_idx, + }; + int ac, ret; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; @@ -1062,8 +996,34 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) else e->cw_max = cpu_to_le16(10); } - return 
mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, - sizeof(req), true); + + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, + sizeof(req), true); + if (ret) + return ret; + + if (!vif->bss_conf.he_support) + return 0; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_he_mu_edca_param_ac_rec *q; + struct mu_edca *e; + int to_aci[] = {1, 0, 2, 3}; + + if (!mvif->queue_params[ac].mu_edca) + break; + + q = &mvif->queue_params[ac].mu_edca_param_rec; + e = &(req_mu.edca[to_aci[ac]]); + + e->cw_min = q->ecw_min_max & 0xf; + e->cw_max = (q->ecw_min_max & 0xf0) >> 4; + e->aifsn = q->aifsn; + e->timer = q->mu_edca_timer; + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MU_EDCA_PARMS, &req_mu, + sizeof(req_mu), false); } int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) @@ -1095,9 +1055,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) .tx_streams_num = hweight8(phy->mt76->antenna_mask), .rx_streams = phy->mt76->antenna_mask, .band_idx = phy != &dev->phy, - .channel_band = chandef->chan->band, }; + if (chandef->chan->band == NL80211_BAND_6GHZ) + req.channel_band = 2; + else + req.channel_band = chandef->chan->band; + if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && @@ -1132,6 +1096,7 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev) return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom); int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset) { @@ -1193,8 +1158,9 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif) &ps_req, sizeof(ps_req), true); } -int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, - bool enable) +static int +mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct { @@ -1228,8 +1194,9 @@ int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, &bcnft_req, sizeof(bcnft_req), true); } -int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, - bool enable) +static int +mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable) { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct { @@ -1292,35 +1259,6 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, return mt76_connac_mcu_sta_cmd(&dev->mphy, &info); } -int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) -{ - struct mt76_phy *mphy = &dev->mt76.phy; - struct mt76_connac_pm *pm = &dev->pm; - int i, err = 0; - - for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { - mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); - if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL, - PCIE_LPCR_HOST_OWN_SYNC, 0, 50)) - break; - } - - if (i == MT7921_DRV_OWN_RETRY_COUNT) { - dev_err(dev->mt76.dev, "driver own failed\n"); - err = -EIO; - goto out; - } - - mt7921_wpdma_reinit_cond(dev); - clear_bit(MT76_STATE_PM, &mphy->state); - - pm->stats.last_wake_event = jiffies; - pm->stats.doze_time += pm->stats.last_wake_event - - pm->stats.last_doze_event; -out: - return err; -} - int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -1341,34 +1279,20 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) return err; } +EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl); int 
mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; - int i, err = 0; + int err = 0; mutex_lock(&pm->mutex); if (mt76_connac_skip_fw_pmctrl(mphy, pm)) goto out; - for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { - mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); - if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL, - PCIE_LPCR_HOST_OWN_SYNC, 4, 50)) - break; - } - - if (i == MT7921_DRV_OWN_RETRY_COUNT) { - dev_err(dev->mt76.dev, "firmware own failed\n"); - clear_bit(MT76_STATE_PM, &mphy->state); - err = -EIO; - } - - pm->stats.last_doze_event = jiffies; - pm->stats.awake_time += pm->stats.last_doze_event - - pm->stats.last_wake_event; + err = __mt7921_mcu_fw_pmctrl(dev); out: mutex_unlock(&pm->mutex); @@ -1377,32 +1301,36 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) return err; } +EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl); -void -mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev, + struct ieee80211_vif *vif, + bool enable) { - struct mt7921_phy *phy = priv; - struct mt7921_dev *dev = phy->dev; struct ieee80211_hw *hw = mt76_hw(dev); - int ret; - - if (dev->pm.enable) - ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true); - else - ret = mt7921_mcu_set_bss_pm(dev, vif, false); + int err; - if (ret) - return; + if (enable) { + err = mt7921_mcu_uni_bss_bcnft(dev, vif, true); + if (err) + return err; - if (dev->pm.enable) { vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; ieee80211_hw_set(hw, CONNECTION_MONITOR); mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); - } else { - vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; - __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags); - mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; } + + err = mt7921_mcu_set_bss_pm(dev, vif, false); + if (err) + return err; + + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags); + mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON); + + return 0; } int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index de3c091f673685..edc0c73f8c01c1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -259,25 +259,6 @@ struct mt7921_mcu_ant_id_config { u8 ant_id[4]; } __packed; -struct mt7921_mcu_peer_cap { - struct mt7921_mcu_ant_id_config ant_id_config; - - u8 power_offset; - u8 bw_selector; - u8 change_bw_rate_n; - u8 bw; - u8 spe_idx; - - u8 g2; - u8 g4; - u8 g8; - u8 g16; - - u8 mmss; - u8 ampdu_factor; - u8 rsv[1]; -} __packed; - struct mt7921_txpwr_req { u8 ver; u8 action; @@ -293,31 +274,29 @@ struct mt7921_txpwr_event { struct mt7921_txpwr txpwr; } __packed; -struct mt7921_mcu_tx_done_event { - u8 pid; - u8 status; - u16 seq; - - u8 wlan_idx; - u8 tx_cnt; - u16 tx_rate; - - u8 flag; - u8 tid; - u8 rsp_rate; - u8 mcs; - - u8 bw; - u8 tx_pwr; - u8 reason; - u8 rsv0[1]; +enum { + TM_SWITCH_MODE, + TM_SET_AT_CMD, + TM_QUERY_AT_CMD, +}; - u32 delay; - u32 timestamp; - u32 applied_flag; +enum { + MT7921_TM_NORMAL, + MT7921_TM_TESTMODE, + MT7921_TM_ICAP, + MT7921_TM_ICAP_OVERLAP, + MT7921_TM_WIFISPECTRUM, +}; - u8 txs[28]; +struct mt7921_rftest_cmd { + u8 action; + u8 rsv[3]; + __le32 param0; + __le32 param1; +} __packed; - u8 rsv1[32]; +struct mt7921_rftest_evt { + 
__le32 param0; + __le32 param1; } __packed; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 2d8bd6bfc820ac..e9c7c3a1950769 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -29,22 +29,47 @@ #define MT7921_RX_MCU_RING_SIZE 512 #define MT7921_DRV_OWN_RETRY_COUNT 10 +#define MT7921_MCU_INIT_RETRY_COUNT 10 #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" +#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" +#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" + #define MT7921_EEPROM_SIZE 3584 #define MT7921_TOKEN_SIZE 8192 #define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ -#define MT7921_5G_RATE_DEFAULT 0x4b /* OFDM 6M */ -#define MT7921_2G_RATE_DEFAULT 0x0 /* CCK 1M */ #define MT7921_SKU_RATE_NUM 161 #define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM #define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1) +#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0) +#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16) + +enum mt7921_sdio_pkt_type { + MT7921_SDIO_TXD, + MT7921_SDIO_DATA, + MT7921_SDIO_CMD, + MT7921_SDIO_FWDL, +}; + +struct mt7921_sdio_intr { + u32 isr; + struct { + u32 wtqcr[16]; + } tx; + struct { + u16 num[2]; + u16 len0[16]; + u16 len1[128]; + } rx; + u32 rec_mb[2]; +} __packed; + #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) #define to_rcpi(rssi) (2 * (rssi) + 220) @@ -64,15 +89,6 @@ enum mt7921_rxq_id { MT7921_RXQ_MCU_WM = 0, }; -struct mt7921_sta_stats { - struct rate_info prob_rate; - struct rate_info tx_rate; - - unsigned long per; - unsigned long changed; - unsigned long jiffies; -}; - struct mt7921_sta_key_conf { s8 keyidx; u8 key[16]; @@ -83,17 +99,14 @@ struct mt7921_sta { struct mt7921_vif *vif; - struct list_head stats_list; struct list_head poll_list; u32 airtime_ac[8]; - struct mt7921_sta_stats stats; - + unsigned long last_txs; unsigned long ampdu_state; + struct mt76_sta_stats stats; struct mt7921_sta_key_conf bip; - - unsigned long next_txs_ts; }; DECLARE_EWMA(rssi, 10, 8); @@ -117,15 +130,34 @@ struct mib_stats { u32 rts_cnt; u32 rts_retries_cnt; u32 ba_miss_cnt; + + u32 tx_bf_ibf_ppdu_cnt; + u32 tx_bf_ebf_ppdu_cnt; + u32 tx_bf_rx_fb_all_cnt; + u32 tx_bf_rx_fb_he_cnt; + u32 tx_bf_rx_fb_vht_cnt; + u32 tx_bf_rx_fb_ht_cnt; + + u32 tx_ampdu_cnt; + u32 tx_mpdu_attempts_cnt; + u32 tx_mpdu_success_cnt; + u32 tx_pkt_ebf_cnt; + u32 tx_pkt_ibf_cnt; + + u32 rx_mpdu_cnt; + u32 rx_ampdu_cnt; + u32 rx_ampdu_bytes_cnt; + u32 rx_ba_cnt; + + u32 tx_amsdu[8]; + u32 tx_amsdu_cnt; }; struct mt7921_phy { struct mt76_phy *mt76; struct mt7921_dev *dev; - struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES]; - - struct ieee80211_vif *monitor_vif; + struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; u32 rxfilter; u64 omac_mask; @@ -139,7 +171,6 @@ struct mt7921_phy { u32 ampdu_ref; struct mib_stats mib; - struct list_head stats_list; u8 sta_work_count; @@ -147,6 +178,19 @@ struct mt7921_phy { struct delayed_work scan_work; }; +#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev)) +#define mt7921_dev_reset(dev) ((dev)->hif_ops->reset(dev)) +#define mt7921_mcu_init(dev) ((dev)->hif_ops->mcu_init(dev)) +#define __mt7921_mcu_drv_pmctrl(dev) ((dev)->hif_ops->drv_own(dev)) +#define 
__mt7921_mcu_fw_pmctrl(dev) ((dev)->hif_ops->fw_own(dev)) +struct mt7921_hif_ops { + int (*init_reset)(struct mt7921_dev *dev); + int (*reset)(struct mt7921_dev *dev); + int (*mcu_init)(struct mt7921_dev *dev); + int (*drv_own)(struct mt7921_dev *dev); + int (*fw_own)(struct mt7921_dev *dev); +}; + struct mt7921_dev { union { /* must be first */ struct mt76_dev mt76; @@ -157,11 +201,10 @@ struct mt7921_dev { struct mt7921_phy phy; struct tasklet_struct irq_tasklet; - u16 chainmask; - struct work_struct reset_work; bool hw_full_reset:1; bool hw_init_done:1; + bool fw_assert:1; struct list_head sta_poll_list; spinlock_t sta_poll_lock; @@ -170,6 +213,7 @@ struct mt7921_dev { struct mt76_connac_pm pm; struct mt76_connac_coredump coredump; + const struct mt7921_hif_ops *hif_ops; }; enum { @@ -247,18 +291,11 @@ u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr); int __mt7921_start(struct mt7921_phy *phy); int mt7921_register_device(struct mt7921_dev *dev); void mt7921_unregister_device(struct mt7921_dev *dev); -int mt7921_eeprom_init(struct mt7921_dev *dev); -void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy); -int mt7921_eeprom_get_target_power(struct mt7921_dev *dev, - struct ieee80211_channel *chan, - u8 chain_idx); -void mt7921_eeprom_init_sku(struct mt7921_dev *dev); int mt7921_dma_init(struct mt7921_dev *dev); int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force); int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev); void mt7921_dma_cleanup(struct mt7921_dev *dev); int mt7921_run_firmware(struct mt7921_dev *dev); -int mt7921_mcu_init(struct mt7921_dev *dev); int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, struct mt7921_sta *msta, struct ieee80211_key_conf *key, enum set_key_cmd cmd); @@ -324,16 +361,27 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev) return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); } +static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev) +{ + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false); +} + +static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb, + enum mt7921_sdio_pkt_type type) +{ + u32 hdr; + + hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) | + FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type); + + put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr))); +} + int mt7921_mac_init(struct mt7921_dev *dev); bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask); void mt7921_mac_reset_counters(struct mt7921_phy *phy); -void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_key_conf *key, bool beacon); void mt7921_mac_set_timing(struct mt7921_phy *phy); -int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb); -void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb); -void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb); int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -342,27 +390,28 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_work(struct work_struct *work); void mt7921_mac_reset_work(struct work_struct *work); +void mt7921_mac_update_mib_stats(struct mt7921_phy *phy); void mt7921_reset(struct mt76_dev *mdev); -void 
mt7921_tx_cleanup(struct mt7921_dev *dev); -int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, - enum mt76_txq_id qid, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, - struct mt76_tx_info *tx_info); +int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); void mt7921_tx_worker(struct mt76_worker *w); -void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); +void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc); void mt7921_tx_token_put(struct mt7921_dev *dev); void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); void mt7921_stats_work(struct work_struct *work); -void mt7921_txp_skb_unmap(struct mt76_dev *dev, - struct mt76_txwi_cache *txwi); void mt7921_set_stream_he_caps(struct mt7921_phy *phy); void mt7921_update_channel(struct mt76_phy *mphy); int mt7921_init_debugfs(struct mt7921_dev *dev); +int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev, + struct ieee80211_vif *vif, + bool enable); int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, struct ieee80211_ampdu_params *params, bool enable); @@ -371,21 +420,47 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, bool enable); void mt7921_scan_work(struct work_struct *work); int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif); -int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, - bool enable); -int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, - bool enable); -int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); void mt7921_pm_wake_work(struct work_struct *work); void mt7921_pm_power_save_work(struct work_struct *work); bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev); -int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - bool enable); -void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt7921_coredump_work(struct work_struct *work); int mt7921_wfsys_reset(struct mt7921_dev *dev); int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr); +int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); +int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, + struct netlink_callback *cb, void *data, int len); +void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, int pid, + bool beacon); +void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi); +void mt7921_mac_sta_poll(struct mt7921_dev *dev); +int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq); +int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); +int mt7921_mcu_restart(struct mt76_dev *dev); + +void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +int mt7921e_mac_reset(struct mt7921_dev *dev); +int mt7921e_mcu_init(struct mt7921_dev *dev); +int mt7921s_wfsys_reset(struct mt7921_dev *dev); +int mt7921s_mac_reset(struct mt7921_dev *dev); +int 
mt7921s_init_reset(struct mt7921_dev *dev); +int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev); +int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev); + +int mt7921s_mcu_init(struct mt7921_dev *dev); +int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev); +int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev); +int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); +bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index c3905bcab36047..305b63fa1a8a9d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -14,9 +14,14 @@ static const struct pci_device_id mt7921_pci_device_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) }, { }, }; +static bool mt7921_disable_aspm; +module_param_named(disable_aspm, mt7921_disable_aspm, bool, 0644); +MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support"); + static void mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) { @@ -88,21 +93,46 @@ static void mt7921_irq_tasklet(unsigned long data) napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]); } +static int mt7921e_init_reset(struct mt7921_dev *dev) +{ + return mt7921_wpdma_reset(dev, true); +} + +static void mt7921e_unregister_device(struct mt7921_dev *dev) +{ + int i; + struct mt76_connac_pm *pm = &dev->pm; + + mt76_unregister_device(&dev->mt76); + mt76_for_each_q_rx(&dev->mt76, i) + napi_disable(&dev->mt76.napi[i]); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + mt7921_tx_token_put(dev); + mt7921_mcu_drv_pmctrl(dev); + mt7921_dma_cleanup(dev); + mt7921_wfsys_reset(dev); + mt7921_mcu_exit(dev); + + tasklet_disable(&dev->irq_tasklet); + mt76_free_device(&dev->mt76); +} + static int mt7921_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7921_txp_common), - .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ | - MT_DRV_AMSDU_OFFLOAD, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, .token_size = MT7921_TOKEN_SIZE, - .tx_prepare_skb = mt7921_tx_prepare_skb, - .tx_complete_skb = mt7921_tx_complete_skb, - .rx_skb = mt7921_queue_rx_skb, + .tx_prepare_skb = mt7921e_tx_prepare_skb, + .tx_complete_skb = mt7921e_tx_complete_skb, + .rx_skb = mt7921e_queue_rx_skb, .rx_poll_complete = mt7921_rx_poll_complete, .sta_ps = mt7921_sta_ps, .sta_add = mt7921_mac_sta_add, @@ -110,6 +140,15 @@ static int mt7921_pci_probe(struct pci_dev *pdev, .sta_remove = mt7921_mac_sta_remove, .update_survey = mt7921_update_channel, }; + + static const struct mt7921_hif_ops mt7921_pcie_ops = { + .init_reset = mt7921e_init_reset, + .reset = mt7921e_mac_reset, + .mcu_init = mt7921e_mcu_init, + .drv_own = mt7921e_mcu_drv_pmctrl, + .fw_own = mt7921e_mcu_fw_pmctrl, + }; + struct mt7921_dev *dev; struct mt76_dev *mdev; int ret; @@ -128,11 +167,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev, if (ret < 0) return ret; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if 
(ret) goto err_free_pci_vec; - mt76_pci_disable_aspm(pdev); + if (mt7921_disable_aspm) + mt76_pci_disable_aspm(pdev); mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops, &drv_ops); @@ -142,6 +182,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, } dev = container_of(mdev, struct mt7921_dev, mt76); + dev->hif_ops = &mt7921_pcie_ops; mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev); @@ -158,6 +199,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev, if (ret) goto err_free_dev; + ret = mt7921_dma_init(dev); + if (ret) + goto err_free_irq; + ret = mt7921_register_device(dev); if (ret) goto err_free_irq; @@ -179,7 +224,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev) struct mt76_dev *mdev = pci_get_drvdata(pdev); struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - mt7921_unregister_device(dev); + mt7921e_unregister_device(dev); devm_free_irq(&pdev->dev, pdev->irq, dev); pci_free_irq_vectors(pdev); } @@ -297,12 +342,15 @@ static int mt7921_pci_resume(struct pci_dev *pdev) MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); mt76_worker_enable(&mdev->tx_worker); + + local_bh_disable(); mt76_for_each_q_rx(mdev, i) { napi_enable(&mdev->napi[i]); napi_schedule(&mdev->napi[i]); } napi_enable(&mdev->tx_napi); napi_schedule(&mdev->tx_napi); + local_bh_enable(); /* restore previous ds setting */ if (!pm->ds_enable) @@ -331,6 +379,8 @@ module_pci_driver(mt7921_pci_driver); MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table); MODULE_FIRMWARE(MT7921_FIRMWARE_WM); MODULE_FIRMWARE(MT7921_ROM_PATCH); +MODULE_FIRMWARE(MT7922_FIRMWARE_WM); +MODULE_FIRMWARE(MT7922_ROM_PATCH); MODULE_AUTHOR("Sean Wang "); MODULE_AUTHOR("Lorenzo Bianconi "); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c new file mode 100644 index 00000000000000..f9547d27356e1b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. 
*/ + +#include "mt7921.h" +#include "../dma.h" +#include "mac.h" + +static void +mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info, + void *txp_ptr, u32 id) +{ + struct mt7921_hw_txp *txp = txp_ptr; + struct mt7921_txp_ptr *ptr = &txp->ptr[0]; + int i, nbuf = tx_info->nbuf - 1; + + tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); + tx_info->nbuf = 1; + + txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); + + for (i = 0; i < nbuf; i++) { + u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK; + u32 addr = tx_info->buf[i + 1].addr; + + if (i == nbuf - 1) + len |= MT_TXD_LEN_LAST; + + if (i & 1) { + ptr->buf1 = cpu_to_le32(addr); + ptr->len1 = cpu_to_le16(len); + ptr++; + } else { + ptr->buf0 = cpu_to_le32(addr); + ptr->len0 = cpu_to_le16(len); + } + } +} + +int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct mt76_txwi_cache *t; + struct mt7921_txp_common *txp; + int id, pid; + u8 *txwi = (u8 *)txwi_ptr; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + + if (sta) { + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, + pid, false); + + txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE); + memset(txp, 0, sizeof(struct mt7921_txp_common)); + mt7921_write_hw_txp(dev, tx_info, txp, id); + + tx_info->skb = DMA_DUMMY_DATA; + + return 0; +} + +static void +mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t) +{ + struct mt7921_txp_common *txp; + int i; + + txp = mt7921_txwi_to_txp(dev, t); + + for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) { + struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i]; + bool last; + u16 len; + + len = le16_to_cpu(ptr->len0); + last = len & MT_TXD_LEN_LAST; + len &= MT_TXD_LEN_MASK; + dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, + DMA_TO_DEVICE); + if (last) + break; + + len = le16_to_cpu(ptr->len1); + last = len & MT_TXD_LEN_LAST; + len &= MT_TXD_LEN_MASK; + dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, + DMA_TO_DEVICE); + if (last) + break; + } +} + +static void +mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, bool clear_status, + struct list_head *free_list) +{ + struct mt76_dev *mdev = &dev->mt76; + __le32 *txwi; + u16 wcid_idx; + + mt7921_txp_skb_unmap(mdev, t); + if (!t->skb) + goto out; + + txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); + if (sta) { + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + + if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7921_tx_check_aggr(sta, txwi); + + wcid_idx = wcid->idx; + } else { + wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1])); + } + + __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); + +out: + t->skb = NULL; + mt76_put_txwi(mdev, t); +} + +static void 
+mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + struct ieee80211_sta *sta = NULL; + LIST_HEAD(free_list); + struct sk_buff *tmp; + bool wake = false; + u8 i, count; + + /* clean DMA queues and unmap buffers first */ + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + + /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE, + * to the time ack is received or dropped by hw (air + hw queue time). + * Should avoid accessing WTBL to get Tx airtime, and use it instead. + */ + count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); + for (i = 0; i < count; i++) { + u32 msdu, info = le32_to_cpu(free->info[i]); + u8 stat; + + /* 1'b1: new wcid pair. + * 1'b0: msdu_id with the same 'wcid pair' as above. + */ + if (info & MT_TX_FREE_PAIR) { + struct mt7921_sta *msta; + struct mt76_wcid *wcid; + u16 idx; + + count++; + idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); + wcid = rcu_dereference(dev->mt76.wcid[idx]); + sta = wcid_to_sta(wcid); + if (!sta) + continue; + + msta = container_of(wcid, struct mt7921_sta, wcid); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + continue; + } + + msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); + stat = FIELD_GET(MT_TX_FREE_STATUS, info); + + txwi = mt76_token_release(mdev, msdu, &wake); + if (!txwi) + continue; + + mt7921_txwi_free(dev, txwi, sta, stat, &free_list); + } + + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); + + napi_consume_skb(skb, 1); + + list_for_each_entry_safe(skb, tmp, &free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } + + rcu_read_lock(); + mt7921_mac_sta_poll(dev); + rcu_read_unlock(); + + mt76_worker_schedule(&dev->mt76.tx_worker); +} + +void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + enum rx_pkt_type type; + + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + mt7921_mac_tx_free(dev, skb); + break; + default: + mt7921_queue_rx_skb(mdev, q, skb); + break; + } +} + +void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) +{ + if (!e->txwi) { + dev_kfree_skb_any(e->skb); + return; + } + + /* error path */ + if (e->skb == DMA_DUMMY_DATA) { + struct mt76_txwi_cache *t; + struct mt7921_txp_common *txp; + u16 token; + + txp = mt7921_txwi_to_txp(mdev, e->txwi); + token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID; + t = mt76_token_put(mdev, token); + e->skb = t ? 
t->skb : NULL; + } + + if (e->skb) + mt76_tx_complete_skb(mdev, e->wcid, e->skb); +} + +void mt7921_tx_token_put(struct mt7921_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { + mt7921_txwi_free(dev, txwi, NULL, false, NULL); + dev->mt76.token_count--; + } + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); +} + +int mt7921e_mac_reset(struct mt7921_dev *dev) +{ + int i, err; + + mt7921e_mcu_drv_pmctrl(dev); + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76_txq_schedule_all(&dev->mphy); + + mt76_worker_disable(&dev->mt76.tx_worker); + napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); + napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); + napi_disable(&dev->mt76.tx_napi); + + mt7921_tx_token_put(dev); + idr_init(&dev->mt76.token); + + mt7921_wpdma_reset(dev, true); + + local_bh_disable(); + mt76_for_each_q_rx(&dev->mt76, i) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + local_bh_enable(); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_MCU_CMD); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + err = mt7921_run_firmware(dev); + if (err) + goto out; + + err = mt7921_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7921_mac_init(dev); + if (err) + goto out; + + err = __mt7921_start(&dev->phy); +out: + clear_bit(MT76_RESET, &dev->mphy.state); + + local_bh_disable(); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + local_bh_enable(); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c new file mode 100644 index 00000000000000..583a89a34734a8 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. 
*/ + +#include "mt7921.h" +#include "mcu.h" + +static int mt7921e_driver_own(struct mt7921_dev *dev) +{ + u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); + + mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN); + if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN, + 0, 500)) { + dev_err(dev->mt76.dev, "Timeout for driver own\n"); + return -EIO; + } + + return 0; +} + +static int +mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + enum mt76_mcuq_id txq = MT_MCUQ_WM; + int ret; + + ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + if (cmd == MCU_CMD_FW_SCATTER) + txq = MT_MCUQ_FWDL; + + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); +} + +int mt7921e_mcu_init(struct mt7921_dev *dev) +{ + static const struct mt76_mcu_ops mt7921_mcu_ops = { + .headroom = sizeof(struct mt7921_mcu_txd), + .mcu_skb_send_msg = mt7921_mcu_send_message, + .mcu_parse_response = mt7921_mcu_parse_response, + .mcu_restart = mt7921_mcu_restart, + }; + int err; + + dev->mt76.mcu_ops = &mt7921_mcu_ops; + + err = mt7921e_driver_own(dev); + if (err) + return err; + + err = mt7921_run_firmware(dev); + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); + + return err; +} + +int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; + + for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { + mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); + if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL, + PCIE_LPCR_HOST_OWN_SYNC, 0, 50)) + break; + } + + if (i == MT7921_DRV_OWN_RETRY_COUNT) { + dev_err(dev->mt76.dev, "driver own failed\n"); + err = -EIO; + goto out; + } + + mt7921_wpdma_reinit_cond(dev); + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; +out: + return err; +} + +int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int i, err = 0; + + for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { + mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); + if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL, + PCIE_LPCR_HOST_OWN_SYNC, 4, 50)) + break; + } + + if (i == MT7921_DRV_OWN_RETRY_COUNT) { + dev_err(dev->mt76.dev, "firmware own failed\n"); + clear_bit(MT76_STATE_PM, &mphy->state); + err = -EIO; + } + + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index b6944c867a5734..cbd38122c510f8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -14,7 +14,7 @@ #define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2) #define MT_MCU_INT_EVENT_RESET_DONE BIT(3) -#define MT_PLE_BASE 0x8000 +#define MT_PLE_BASE 0x820c0000 #define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) #define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) @@ -26,7 +26,7 @@ ((n) << 2)) #define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) -#define MT_MDP_BASE 0xf000 +#define MT_MDP_BASE 0x820cd000 #define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) #define MT_MDP_DCR0 MT_MDP(0x000) @@ -49,7 +49,7 @@ #define MT_MDP_TO_WM 1 /* TMAC: band 0(0x21000), band 1(0xa1000) */ -#define 
MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000) +#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000) #define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) #define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0) @@ -74,7 +74,7 @@ #define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c) #define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) -#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00) +#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000) #define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs)) #define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000) @@ -82,7 +82,7 @@ #define MT_DMA_DCR0_RXD_G5_EN BIT(23) /* LPON: band 0(0x24200), band 1(0xa4200) */ -#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200) +#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000) #define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs)) #define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080) @@ -92,25 +92,57 @@ #define MT_LPON_TCR_SW_MODE GENMASK(1, 0) #define MT_LPON_TCR_SW_WRITE BIT(0) +/* ETBF: band 0(0x24000), band 1(0xa4000) */ +#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000) +#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs)) + +#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x150) +#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16) +#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0) + +#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x158) +#define MT_ETBF_RX_FB_ALL GENMASK(31, 24) +#define MT_ETBF_RX_FB_HE GENMASK(23, 16) +#define MT_ETBF_RX_FB_VHT GENMASK(15, 8) +#define MT_ETBF_RX_FB_HT GENMASK(7, 0) + /* MIB: band 0(0x24800), band 1(0xa4800) */ -#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800) +#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000) #define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) +#define MT_MIB_SCR1(_band) MT_WF_MIB(_band, 0x004) +#define MT_MIB_TXDUR_EN BIT(8) +#define MT_MIB_RXDUR_EN BIT(9) + #define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698) #define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16) +#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x780) + #define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) #define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) +#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x558) +#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x564) +#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x568) + #define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) #define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) +#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x770) +#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x774) +#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x55c) + +#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x7a8) +#define MT_MIB_SDR9_IBF_CNT_MASK GENMASK(31, 16) +#define MT_MIB_SDR9_EBF_CNT_MASK GENMASK(15, 0) + #define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) #define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) -#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) +#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x054) #define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) -#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) +#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x058) #define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) #define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) @@ -138,7 +170,7 @@ #define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2)) #define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) -#define MT_WTBLON_TOP_BASE 0x34000 +#define MT_WTBLON_TOP_BASE 0x820d4000 #define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) #define 
MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200) #define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0) @@ -148,7 +180,7 @@ #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) #define MT_WTBL_UPDATE_BUSY BIT(31) -#define MT_WTBL_BASE 0x38000 +#define MT_WTBL_BASE 0x820d8000 #define MT_WTBL_LMAC_ID GENMASK(14, 8) #define MT_WTBL_LMAC_DW GENMASK(7, 2) #define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \ @@ -156,7 +188,7 @@ FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) /* AGG: band 0(0x20800), band 1(0xa0800) */ -#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800) +#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000) #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4) @@ -187,7 +219,7 @@ #define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4) /* ARB: band 0(0x20c00), band 1(0xa0c00) */ -#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00) +#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000) #define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) #define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080) @@ -197,7 +229,7 @@ #define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4) /* RMAC: band 0(0x21400), band 1(0xa1400) */ -#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400) +#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000) #define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs)) #define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c new file mode 100644 index 00000000000000..ddf0eeb8b68891 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. 
+ * + */ + +#include +#include +#include + +#include +#include +#include + +#include "mt7921.h" +#include "../sdio.h" +#include "mac.h" +#include "mcu.h" + +static const struct sdio_device_id mt7921s_table[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901) }, + { } /* Terminating entry */ +}; + +static void mt7921s_txrx_worker(struct mt76_worker *w) +{ + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + txrx_worker); + struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio); + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + queue_work(mdev->wq, &dev->pm.wake_work); + return; + } + + mt76s_txrx_worker(sdio); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); +} + +static void mt7921s_unregister_device(struct mt7921_dev *dev) +{ + struct mt76_connac_pm *pm = &dev->pm; + + mt76_unregister_device(&dev->mt76); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + mt76s_deinit(&dev->mt76); + mt7921s_wfsys_reset(dev); + mt7921_mcu_exit(dev); + + mt76_free_device(&dev->mt76); +} + +static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr) +{ + struct mt76_sdio *sdio = &dev->sdio; + struct mt7921_sdio_intr *irq_data = sdio->intr_data; + int i, err; + + err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data)); + if (err < 0) + return err; + + intr->isr = irq_data->isr; + intr->rec_mb = irq_data->rec_mb; + intr->tx.wtqcr = irq_data->tx.wtqcr; + intr->rx.num = irq_data->rx.num; + for (i = 0; i < 2 ; i++) { + if (!i) + intr->rx.len[0] = irq_data->rx.len0; + else + intr->rx.len[1] = irq_data->rx.len1; + } + + return 0; +} + +static int mt7921s_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = MT_SDIO_TXD_SIZE, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .tx_prepare_skb = mt7921s_tx_prepare_skb, + .tx_complete_skb = mt7921s_tx_complete_skb, + .tx_status_data = mt7921s_tx_status_data, + .rx_skb = mt7921_queue_rx_skb, + .sta_ps = mt7921_sta_ps, + .sta_add = mt7921_mac_sta_add, + .sta_assoc = mt7921_mac_sta_assoc, + .sta_remove = mt7921_mac_sta_remove, + .update_survey = mt7921_update_channel, + }; + static const struct mt76_bus_ops mt7921s_ops = { + .rr = mt76s_rr, + .rmw = mt76s_rmw, + .wr = mt76s_wr, + .write_copy = mt76s_write_copy, + .read_copy = mt76s_read_copy, + .wr_rp = mt76s_wr_rp, + .rd_rp = mt76s_rd_rp, + .type = MT76_BUS_SDIO, + }; + static const struct mt7921_hif_ops mt7921_sdio_ops = { + .init_reset = mt7921s_init_reset, + .reset = mt7921s_mac_reset, + .mcu_init = mt7921s_mcu_init, + .drv_own = mt7921s_mcu_drv_pmctrl, + .fw_own = mt7921s_mcu_fw_pmctrl, + }; + + struct mt7921_dev *dev; + struct mt76_dev *mdev; + int ret, i; + + mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops, + &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7921_dev, mt76); + dev->hif_ops = &mt7921_sdio_ops; + + sdio_set_drvdata(func, dev); + + ret = mt76s_init(mdev, func, &mt7921s_ops); + if (ret < 0) + goto error; + + ret = mt76s_hw_init(mdev, func, MT76_CONNAC2_SDIO); + if (ret) + goto error; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + mdev->sdio.parse_irq = mt7921s_parse_intr; + mdev->sdio.intr_data = devm_kmalloc(mdev->dev, + sizeof(struct mt7921_sdio_intr), + GFP_KERNEL); + if 
(!mdev->sdio.intr_data) { + ret = -ENOMEM; + goto error; + } + + for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) { + mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev, + MT76S_XMIT_BUF_SZ, + GFP_KERNEL); + if (!mdev->sdio.xmit_buf[i]) { + ret = -ENOMEM; + goto error; + } + } + + ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN); + if (ret) + goto error; + + ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MCU); + if (ret) + goto error; + + ret = mt76s_alloc_tx(mdev); + if (ret) + goto error; + + ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker, + mt7921s_txrx_worker, "sdio-txrx"); + if (ret) + goto error; + + sched_set_fifo_low(mdev->sdio.txrx_worker.task); + + ret = mt7921_register_device(dev); + if (ret) + goto error; + + return 0; + +error: + mt76s_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); + + return ret; +} + +static void mt7921s_remove(struct sdio_func *func) +{ + struct mt7921_dev *dev = sdio_get_drvdata(func); + + mt7921s_unregister_device(dev); +} + +#ifdef CONFIG_PM +static int mt7921s_suspend(struct device *__dev) +{ + struct sdio_func *func = dev_to_sdio_func(__dev); + struct mt7921_dev *dev = sdio_get_drvdata(func); + struct mt76_connac_pm *pm = &dev->pm; + struct mt76_dev *mdev = &dev->mt76; + bool hif_suspend; + int err; + + pm->suspended = true; + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + err = mt7921_mcu_drv_pmctrl(dev); + if (err < 0) + goto restore_suspend; + + hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state); + if (hif_suspend) { + err = mt76_connac_mcu_set_hif_suspend(mdev, true); + if (err) + goto restore_suspend; + } + + /* always enable deep sleep during suspend to reduce + * power consumption + */ + mt76_connac_mcu_set_deep_sleep(mdev, true); + + mt76_txq_schedule_all(&dev->mphy); + mt76_worker_disable(&mdev->tx_worker); + mt76_worker_disable(&mdev->sdio.txrx_worker); + mt76_worker_disable(&mdev->sdio.status_worker); + mt76_worker_disable(&mdev->sdio.net_worker); + cancel_work_sync(&mdev->sdio.stat_work); + clear_bit(MT76_READING_STATS, &dev->mphy.state); + + mt76_tx_status_check(mdev, true); + + err = mt7921_mcu_fw_pmctrl(dev); + if (err) + goto restore_worker; + + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + + return 0; + +restore_worker: + mt76_worker_enable(&mdev->tx_worker); + mt76_worker_enable(&mdev->sdio.txrx_worker); + mt76_worker_enable(&mdev->sdio.status_worker); + mt76_worker_enable(&mdev->sdio.net_worker); + + if (!pm->ds_enable) + mt76_connac_mcu_set_deep_sleep(mdev, false); + + if (hif_suspend) + mt76_connac_mcu_set_hif_suspend(mdev, false); + +restore_suspend: + pm->suspended = false; + + return err; +} + +static int mt7921s_resume(struct device *__dev) +{ + struct sdio_func *func = dev_to_sdio_func(__dev); + struct mt7921_dev *dev = sdio_get_drvdata(func); + struct mt76_connac_pm *pm = &dev->pm; + struct mt76_dev *mdev = &dev->mt76; + int err; + + pm->suspended = false; + + err = mt7921_mcu_drv_pmctrl(dev); + if (err < 0) + return err; + + mt76_worker_enable(&mdev->tx_worker); + mt76_worker_enable(&mdev->sdio.txrx_worker); + mt76_worker_enable(&mdev->sdio.status_worker); + mt76_worker_enable(&mdev->sdio.net_worker); + + /* restore previous ds setting */ + if (!pm->ds_enable) + mt76_connac_mcu_set_deep_sleep(mdev, false); + + if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state)) + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + + return err; +} + +static const struct dev_pm_ops mt7921s_pm_ops = { + .suspend = mt7921s_suspend, + .resume = mt7921s_resume, +}; +#endif + 
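/*
 * Editor's note -- illustrative sketch only, not part of this patch.
 *
 * Both bus back-ends hook into the common mt7921 core through
 * struct mt7921_hif_ops: pci.c installs mt7921_pcie_ops and this file
 * installs mt7921_sdio_ops at probe time.  Bus-agnostic code then reaches
 * the right implementation through the dispatch macros added to mt7921.h
 * (mt7921_init_reset, mt7921_dev_reset, mt7921_mcu_init,
 * __mt7921_mcu_drv_pmctrl, __mt7921_mcu_fw_pmctrl), roughly along these
 * lines.  The caller below is a hypothetical example written for this
 * note, not a function from the driver, and the call ordering is only an
 * assumption for illustration:
 *
 *	static int example_full_reset(struct mt7921_dev *dev)
 *	{
 *		int err;
 *
 *		err = __mt7921_mcu_drv_pmctrl(dev);	 // hif_ops->drv_own()
 *		if (err)
 *			return err;
 *
 *		err = mt7921_dev_reset(dev);		 // hif_ops->reset()
 *		if (err)
 *			err = mt7921_init_reset(dev);	 // hif_ops->init_reset()
 *
 *		return err;
 *	}
 */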
+MODULE_DEVICE_TABLE(sdio, mt7921s_table); +MODULE_FIRMWARE(MT7921_FIRMWARE_WM); +MODULE_FIRMWARE(MT7921_ROM_PATCH); + +static struct sdio_driver mt7921s_driver = { + .name = KBUILD_MODNAME, + .probe = mt7921s_probe, + .remove = mt7921s_remove, + .id_table = mt7921s_table, +#ifdef CONFIG_PM + .drv = { + .pm = &mt7921s_pm_ops, + } +#endif +}; +module_sdio_driver(mt7921s_driver); +MODULE_AUTHOR("Sean Wang "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c new file mode 100644 index 00000000000000..137f86a6dbf875 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. */ + +#include +#include +#include "mt7921.h" +#include "mac.h" +#include "../sdio.h" + +static void mt7921s_enable_irq(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + + sdio_claim_host(sdio->func); + sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL); + sdio_release_host(sdio->func); +} + +static void mt7921s_disable_irq(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + + sdio_claim_host(sdio->func); + sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL); + sdio_release_host(sdio->func); +} + +static u32 mt7921s_read_whcr(struct mt76_dev *dev) +{ + return sdio_readl(dev->sdio.func, MCR_WHCR, NULL); +} + +int mt7921s_wfsys_reset(struct mt7921_dev *dev) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + u32 val, status; + + mt7921s_mcu_drv_pmctrl(dev); + + sdio_claim_host(sdio->func); + + val = sdio_readl(sdio->func, MCR_WHCR, NULL); + val &= ~WF_WHOLE_PATH_RSTB; + sdio_writel(sdio->func, val, MCR_WHCR, NULL); + + msleep(50); + + val = sdio_readl(sdio->func, MCR_WHCR, NULL); + val &= ~WF_SDIO_WF_PATH_RSTB; + sdio_writel(sdio->func, val, MCR_WHCR, NULL); + + usleep_range(1000, 2000); + + val = sdio_readl(sdio->func, MCR_WHCR, NULL); + val |= WF_WHOLE_PATH_RSTB; + sdio_writel(sdio->func, val, MCR_WHCR, NULL); + + readx_poll_timeout(mt7921s_read_whcr, &dev->mt76, status, + status & WF_RST_DONE, 50000, 2000000); + + sdio_release_host(sdio->func); + + /* activate mt7921s again */ + mt7921s_mcu_fw_pmctrl(dev); + mt7921s_mcu_drv_pmctrl(dev); + + return 0; +} + +int mt7921s_init_reset(struct mt7921_dev *dev) +{ + set_bit(MT76_MCU_RESET, &dev->mphy.state); + + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + wait_event_timeout(dev->mt76.sdio.wait, + mt76s_txqs_empty(&dev->mt76), 5 * HZ); + mt76_worker_disable(&dev->mt76.sdio.txrx_worker); + + mt7921s_disable_irq(&dev->mt76); + mt7921s_wfsys_reset(dev); + + mt76_worker_enable(&dev->mt76.sdio.txrx_worker); + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + mt7921s_enable_irq(&dev->mt76); + + return 0; +} + +int mt7921s_mac_reset(struct mt7921_dev *dev) +{ + int err; + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + mt76_txq_schedule_all(&dev->mphy); + mt76_worker_disable(&dev->mt76.tx_worker); + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + wait_event_timeout(dev->mt76.sdio.wait, + mt76s_txqs_empty(&dev->mt76), 5 * HZ); + mt76_worker_disable(&dev->mt76.sdio.txrx_worker); + mt76_worker_disable(&dev->mt76.sdio.status_worker); + mt76_worker_disable(&dev->mt76.sdio.net_worker); + cancel_work_sync(&dev->mt76.sdio.stat_work); + + 
mt7921s_disable_irq(&dev->mt76); + mt7921s_wfsys_reset(dev); + + mt76_worker_enable(&dev->mt76.sdio.txrx_worker); + mt76_worker_enable(&dev->mt76.sdio.status_worker); + mt76_worker_enable(&dev->mt76.sdio.net_worker); + + dev->fw_assert = false; + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + mt7921s_enable_irq(&dev->mt76); + + err = mt7921_run_firmware(dev); + if (err) + goto out; + + err = mt7921_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7921_mac_init(dev); + if (err) + goto out; + + err = __mt7921_start(&dev->phy); +out: + clear_bit(MT76_RESET, &dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} + +static void +mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; + __le32 *txwi; + int pid; + + pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); + memset(txwi, 0, MT_SDIO_TXD_SIZE); + mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false); + skb_push(skb, MT_SDIO_TXD_SIZE); +} + +int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct sk_buff *skb = tx_info->skb; + int pad; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + if (sta) { + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + mt7921s_write_txwi(dev, wcid, qid, sta, skb); + + mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA); + pad = round_up(skb->len, 4) - skb->len; + + return mt76_skb_adjust_pad(skb, pad); +} + +void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) +{ + __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE); + unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + u16 idx; + + idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1])); + wcid = rcu_dereference(mdev->wcid[idx]); + sta = wcid_to_sta(wcid); + + if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7921_tx_check_aggr(sta, txwi); + + skb_pull(e->skb, headroom); + mt76_tx_complete_skb(mdev, e->wcid, e->skb); +} + +bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + mt7921_mutex_acquire(dev); + mt7921_mac_sta_poll(dev); + mt7921_mutex_release(dev); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c new file mode 100644 index 00000000000000..437cddad9a90c3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. 
*/ + +#include +#include +#include +#include + +#include "mt7921.h" +#include "../sdio.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" + +static int +mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + enum mt7921_sdio_pkt_type type = MT7921_SDIO_CMD; + enum mt76_mcuq_id txq = MT_MCUQ_WM; + int ret, pad; + + /* We just return in case firmware assertion to avoid blocking the + * common workqueue to run, for example, the coredump work might be + * blocked by mt7921_mac_work that is excuting register access via sdio + * bus. + */ + if (dev->fw_assert) + return -EBUSY; + + ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + if (cmd == MCU_CMD_FW_SCATTER) + type = MT7921_SDIO_FWDL; + + mt7921_skb_add_sdio_hdr(skb, type); + pad = round_up(skb->len, 4) - skb->len; + __skb_put_zero(skb, pad); + + ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); + if (ret) + return ret; + + mt76_queue_kick(dev, mdev->q_mcu[txq]); + + return ret; +} + +int mt7921s_mcu_init(struct mt7921_dev *dev) +{ + static const struct mt76_mcu_ops mt7921s_mcu_ops = { + .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd), + .tailroom = MT_SDIO_TAIL_SIZE, + .mcu_skb_send_msg = mt7921s_mcu_send_message, + .mcu_parse_response = mt7921_mcu_parse_response, + .mcu_rr = mt76_connac_mcu_reg_rr, + .mcu_wr = mt76_connac_mcu_reg_wr, + }; + int ret; + + mt7921s_mcu_drv_pmctrl(dev); + + dev->mt76.mcu_ops = &mt7921s_mcu_ops; + + ret = mt7921_run_firmware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + + return 0; +} + +int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err = 0; + u32 status; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL); + + err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); + sdio_release_host(func); + + if (err < 0) { + dev_err(dev->mt76.dev, "driver own failed\n"); + err = -EIO; + goto out; + } + + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; +out: + return err; +} + +int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err = 0; + u32 status; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL); + + err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status, + !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); + sdio_release_host(func); + + if (err < 0) { + dev_err(dev->mt76.dev, "firmware own failed\n"); + clear_bit(MT76_STATE_PM, &mphy->state); + err = -EIO; + } + + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c new file mode 100644 index 00000000000000..8bd43879dd6fec --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: ISC + +#include "mt7921.h" +#include "mcu.h" + +enum mt7921_testmode_attr { + 
MT7921_TM_ATTR_UNSPEC, + MT7921_TM_ATTR_SET, + MT7921_TM_ATTR_QUERY, + MT7921_TM_ATTR_RSP, + + /* keep last */ + NUM_MT7921_TM_ATTRS, + MT7921_TM_ATTR_MAX = NUM_MT7921_TM_ATTRS - 1, +}; + +struct mt7921_tm_cmd { + u8 action; + u32 param0; + u32 param1; +}; + +struct mt7921_tm_evt { + u32 param0; + u32 param1; +}; + +static const struct nla_policy mt7921_tm_policy[NUM_MT7921_TM_ATTRS] = { + [MT7921_TM_ATTR_SET] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7921_tm_cmd)), + [MT7921_TM_ATTR_QUERY] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7921_tm_cmd)), +}; + +static int +mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req) +{ + struct mt7921_rftest_cmd cmd = { + .action = req->action, + .param0 = cpu_to_le32(req->param0), + .param1 = cpu_to_le32(req->param1), + }; + bool testmode = false, normal = false; + struct mt76_connac_pm *pm = &dev->pm; + struct mt76_phy *phy = &dev->mphy; + int ret = -ENOTCONN; + + mutex_lock(&dev->mt76.mutex); + + if (req->action == TM_SWITCH_MODE) { + if (req->param0 == MT7921_TM_NORMAL) + normal = true; + else + testmode = true; + } + + if (testmode) { + /* Make sure testmode running on full power mode */ + pm->enable = false; + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + __mt7921_mcu_drv_pmctrl(dev); + + mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter); + phy->test.state = MT76_TM_STATE_ON; + } + + if (!mt76_testmode_enabled(phy)) + goto out; + + ret = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TEST_CTRL, &cmd, + sizeof(cmd), false); + if (ret) + goto out; + + if (normal) { + /* Switch back to the normal world */ + phy->test.state = MT76_TM_STATE_OFF; + pm->enable = true; + } +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static int +mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req, + struct mt7921_tm_evt *evt_resp) +{ + struct mt7921_rftest_cmd cmd = { + .action = req->action, + .param0 = cpu_to_le32(req->param0), + .param1 = cpu_to_le32(req->param1), + }; + struct mt7921_rftest_evt *evt; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_TEST_CTRL, + &cmd, sizeof(cmd), true, &skb); + if (ret) + goto out; + + evt = (struct mt7921_rftest_evt *)skb->data; + evt_resp->param0 = le32_to_cpu(evt->param0); + evt_resp->param1 = le32_to_cpu(evt->param1); +out: + dev_kfree_skb(skb); + + return ret; +} + +int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct nlattr *tb[NUM_MT76_TM_ATTRS]; + struct mt76_phy *mphy = hw->priv; + struct mt7921_phy *phy = mphy->priv; + int err; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || + !(hw->conf.flags & IEEE80211_CONF_MONITOR)) + return -ENOTCONN; + + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + + if (tb[MT76_TM_ATTR_DRV_DATA]) { + struct nlattr *drv_tb[NUM_MT7921_TM_ATTRS], *data; + int ret; + + data = tb[MT76_TM_ATTR_DRV_DATA]; + ret = nla_parse_nested_deprecated(drv_tb, + MT7921_TM_ATTR_MAX, + data, mt7921_tm_policy, + NULL); + if (ret) + return ret; + + data = drv_tb[MT7921_TM_ATTR_SET]; + if (data) + return mt7921_tm_set(phy->dev, nla_data(data)); + } + + return -EINVAL; +} + +int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, + struct netlink_callback *cb, void *data, int len) +{ + struct nlattr *tb[NUM_MT76_TM_ATTRS]; + struct mt76_phy *mphy = hw->priv; + struct mt7921_phy *phy = mphy->priv; + int err; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || + !(hw->conf.flags & 
IEEE80211_CONF_MONITOR) || + !mt76_testmode_enabled(mphy)) + return -ENOTCONN; + + if (cb->args[2]++ > 0) + return -ENOENT; + + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + + if (tb[MT76_TM_ATTR_DRV_DATA]) { + struct nlattr *drv_tb[NUM_MT7921_TM_ATTRS], *data; + int ret; + + data = tb[MT76_TM_ATTR_DRV_DATA]; + ret = nla_parse_nested_deprecated(drv_tb, + MT7921_TM_ATTR_MAX, + data, mt7921_tm_policy, + NULL); + if (ret) + return ret; + + data = drv_tb[MT7921_TM_ATTR_QUERY]; + if (data) { + struct mt7921_tm_evt evt_resp; + + err = mt7921_tm_query(phy->dev, nla_data(data), + &evt_resp); + if (err) + return err; + + return nla_put(msg, MT7921_TM_ATTR_RSP, + sizeof(evt_resp), &evt_resp); + } + } + + return -EINVAL; +} diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 783a15635ec528..c99acc21225e1f 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -16,9 +16,290 @@ #include #include "mt76.h" +#include "sdio.h" -static int -mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) +static u32 mt76s_read_whisr(struct mt76_dev *dev) +{ + return sdio_readl(dev->sdio.func, MCR_WHISR, NULL); +} + +u32 mt76s_read_pcr(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + + return sdio_readl(sdio->func, MCR_WHLPCR, NULL); +} +EXPORT_SYMBOL_GPL(mt76s_read_pcr); + +static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset) +{ + struct sdio_func *func = dev->sdio.func; + u32 val = ~0, status; + int err; + + sdio_claim_host(func); + + sdio_writel(func, offset, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting address [err=%d]\n", err); + goto out; + } + + sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); + goto out; + } + + err = readx_poll_timeout(mt76s_read_whisr, dev, status, + status & H2D_SW_INT_READ, 0, 1000000); + if (err < 0) { + dev_err(dev->dev, "query whisr timeout\n"); + goto out; + } + + sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); + goto out; + } + + val = sdio_readl(func, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); + goto out; + } + + if (val != offset) { + dev_err(dev->dev, "register mismatch\n"); + val = ~0; + goto out; + } + + val = sdio_readl(func, MCR_D2HRM1R, &err); + if (err < 0) + dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err); + +out: + sdio_release_host(func); + + return val; +} + +static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val) +{ + struct sdio_func *func = dev->sdio.func; + u32 status; + int err; + + sdio_claim_host(func); + + sdio_writel(func, offset, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting address [err=%d]\n", err); + goto out; + } + + sdio_writel(func, val, MCR_H2DSM1R, &err); + if (err < 0) { + dev_err(dev->dev, + "failed setting write value [err=%d]\n", err); + goto out; + } + + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); + goto out; + } + + err = readx_poll_timeout(mt76s_read_whisr, dev, status, + status & H2D_SW_INT_WRITE, 0, 1000000); + if (err < 0) { + dev_err(dev->dev, "query whisr timeout\n"); + goto out; + } + + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, 
&err); + if (err < 0) { + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); + goto out; + } + + val = sdio_readl(func, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); + goto out; + } + + if (val != offset) + dev_err(dev->dev, "register mismatch\n"); + +out: + sdio_release_host(func); +} + +u32 mt76s_rr(struct mt76_dev *dev, u32 offset) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) + return dev->mcu_ops->mcu_rr(dev, offset); + else + return mt76s_read_mailbox(dev, offset); +} +EXPORT_SYMBOL_GPL(mt76s_rr); + +void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) + dev->mcu_ops->mcu_wr(dev, offset, val); + else + mt76s_write_mailbox(dev, offset, val); +} +EXPORT_SYMBOL_GPL(mt76s_wr); + +u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) +{ + val |= mt76s_rr(dev, offset) & ~mask; + mt76s_wr(dev, offset, val); + + return val; +} +EXPORT_SYMBOL_GPL(mt76s_rmw); + +void mt76s_write_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) +{ + const u32 *val = data; + int i; + + for (i = 0; i < len / sizeof(u32); i++) { + mt76s_wr(dev, offset, val[i]); + offset += sizeof(u32); + } +} +EXPORT_SYMBOL_GPL(mt76s_write_copy); + +void mt76s_read_copy(struct mt76_dev *dev, u32 offset, + void *data, int len) +{ + u32 *val = data; + int i; + + for (i = 0; i < len / sizeof(u32); i++) { + val[i] = mt76s_rr(dev, offset); + offset += sizeof(u32); + } +} +EXPORT_SYMBOL_GPL(mt76s_read_copy); + +int mt76s_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + mt76s_wr(dev, data->reg, data->value); + data++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76s_wr_rp); + +int mt76s_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, int len) +{ + int i; + + for (i = 0; i < len; i++) { + data->value = mt76s_rr(dev, data->reg); + data++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76s_rd_rp); + +int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver) +{ + u32 status, ctrl; + int ret; + + dev->sdio.hw_ver = hw_ver; + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret < 0) + goto release; + + /* Get ownership from the device */ + sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR, + MCR_WHLPCR, &ret); + if (ret < 0) + goto disable_func; + + ret = readx_poll_timeout(mt76s_read_pcr, dev, status, + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); + if (ret < 0) { + dev_err(dev->dev, "Cannot get ownership from device"); + goto disable_func; + } + + ret = sdio_set_block_size(func, 512); + if (ret < 0) + goto disable_func; + + /* Enable interrupt */ + sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret); + if (ret < 0) + goto disable_func; + + ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN; + if (hw_ver == MT76_CONNAC2_SDIO) + ctrl |= WHIER_RX1_DONE_INT_EN; + sdio_writel(func, ctrl, MCR_WHIER, &ret); + if (ret < 0) + goto disable_func; + + switch (hw_ver) { + case MT76_CONNAC_SDIO: + /* set WHISR as read clear and Rx aggregation number as 16 */ + ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16); + break; + default: + ctrl = sdio_readl(func, MCR_WHCR, &ret); + if (ret < 0) + goto disable_func; + ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2; + ctrl &= ~W_INT_CLR_CTRL; /* read clear */ + ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0); + break; + } + + sdio_writel(func, ctrl, MCR_WHCR, &ret); + if (ret < 0) + goto disable_func; + + ret = 
sdio_claim_irq(func, mt76s_sdio_irq); + if (ret < 0) + goto disable_func; + + sdio_release_host(func); + + return 0; + +disable_func: + sdio_disable_func(func); +release: + sdio_release_host(func); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76s_hw_init); + +int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) { struct mt76_queue *q = &dev->q_rx[qid]; @@ -35,6 +316,7 @@ mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) return 0; } +EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue); static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev) { @@ -56,7 +338,7 @@ static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev) return q; } -static int mt76s_alloc_tx(struct mt76_dev *dev) +int mt76s_alloc_tx(struct mt76_dev *dev) { struct mt76_queue *q; int i; @@ -79,18 +361,7 @@ static int mt76s_alloc_tx(struct mt76_dev *dev) return 0; } - -int mt76s_alloc_queues(struct mt76_dev *dev) -{ - int err; - - err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN); - if (err < 0) - return err; - - return mt76s_alloc_tx(dev); -} -EXPORT_SYMBOL_GPL(mt76s_alloc_queues); +EXPORT_SYMBOL_GPL(mt76s_alloc_tx); static struct mt76_queue_entry * mt76s_get_next_rx_entry(struct mt76_queue *q) @@ -328,7 +599,7 @@ void mt76s_deinit(struct mt76_dev *dev) cancel_work_sync(&sdio->stat_work); clear_bit(MT76_READING_STATS, &dev->phy.state); - mt76_tx_status_check(dev, NULL, true); + mt76_tx_status_check(dev, true); sdio_claim_host(sdio->func); sdio_release_irq(sdio->func); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h similarity index 72% rename from drivers/net/wireless/mediatek/mt76/mt7615/sdio.h rename to drivers/net/wireless/mediatek/mt76/sdio.h index 03877d89e15205..99db4ad93b7c71 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h +++ b/drivers/net/wireless/mediatek/mt76/sdio.h @@ -21,7 +21,12 @@ #define MCR_WHCR 0x000C #define W_INT_CLR_CTRL BIT(1) #define RECV_MAILBOX_RD_CLR_EN BIT(2) +#define WF_SYS_RSTB BIT(4) /* supported in CONNAC2 */ +#define WF_WHOLE_PATH_RSTB BIT(5) /* supported in CONNAC2 */ +#define WF_SDIO_WF_PATH_RSTB BIT(6) /* supported in CONNAC2 */ #define MAX_HIF_RX_LEN_NUM GENMASK(13, 8) +#define MAX_HIF_RX_LEN_NUM_CONNAC2 GENMASK(14, 8) /* supported in CONNAC2 */ +#define WF_RST_DONE BIT(15) /* supported in CONNAC2 */ #define RX_ENHANCE_MODE BIT(16) #define MCR_WHISR 0x0010 @@ -29,6 +34,7 @@ #define WHIER_D2H_SW_INT GENMASK(31, 8) #define WHIER_FW_OWN_BACK_INT_EN BIT(7) #define WHIER_ABNORMAL_INT_EN BIT(6) +#define WHIER_WDT_INT_EN BIT(5) /* supported in CONNAC2 */ #define WHIER_RX1_DONE_INT_EN BIT(2) #define WHIER_RX0_DONE_INT_EN BIT(1) #define WHIER_TX_DONE_INT_EN BIT(0) @@ -100,16 +106,33 @@ #define MCR_SWPCDBGR 0x0154 +#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */ +#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */ +#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */ +#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */ +#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */ +#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */ +#define MCR_WTQCR11 0x019C /* supported in CONNAC2 */ +#define MCR_WTQCR12 0x01A0 /* supported in CONNAC2 */ +#define MCR_WTQCR13 0x01A4 /* supported in CONNAC2 */ +#define MCR_WTQCR14 0x01A8 /* supported in CONNAC2 */ +#define MCR_WTQCR15 0x01AC /* supported in CONNAC2 */ + +enum mt76_connac_sdio_ver { + MT76_CONNAC_SDIO, + MT76_CONNAC2_SDIO, +}; + struct mt76s_intr { u32 isr; + u32 *rec_mb; struct { - u32 wtqcr[8]; + u32 *wtqcr; } tx; struct { - u16 num[2]; - u16 
len[2][16]; + u16 *len[2]; + u16 *num; } rx; - u32 rec_mb[2]; -} __packed; +}; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c similarity index 67% rename from drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c rename to drivers/net/wireless/mediatek/mt76/sdio_txrx.c index 04f4c89b74995f..649a56790b89df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c @@ -14,12 +14,11 @@ #include #include -#include "../trace.h" -#include "mt7615.h" +#include "trace.h" #include "sdio.h" -#include "mac.h" +#include "mt76.h" -static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data) +static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data) { u32 ple_ac_data_quota[] = { FIELD_GET(TXQ_CNT_L, data[4]), /* VO */ @@ -53,8 +52,8 @@ static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data) return pse_data_quota + ple_data_quota + pse_mcu_quota; } -static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len, - int buf_len) +static struct sk_buff * +mt76s_build_rx_skb(void *data, int data_len, int buf_len) { int len = min_t(int, data_len, MT_SKB_HEAD_LEN); struct sk_buff *skb; @@ -78,8 +77,9 @@ static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len, return skb; } -static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, - struct mt76s_intr *intr) +static int +mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, + struct mt76s_intr *intr) { struct mt76_queue *q = &dev->q_rx[qid]; struct mt76_sdio *sdio = &dev->sdio; @@ -112,9 +112,11 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, for (i = 0; i < intr->rx.num[qid]; i++) { int index = (q->head + i) % q->ndesc; struct mt76_queue_entry *e = &q->entry[index]; + __le32 *rxd = (__le32 *)buf; - len = intr->rx.len[qid][i]; - e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4)); + /* parse rxd to get the actual packet length */ + len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0])); + e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4)); if (!e->skb) break; @@ -132,45 +134,50 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, return i; } -static int mt7663s_rx_handler(struct mt76_dev *dev) +static int mt76s_rx_handler(struct mt76_dev *dev) { struct mt76_sdio *sdio = &dev->sdio; - struct mt76s_intr *intr = sdio->intr_data; + struct mt76s_intr intr; int nframes = 0, ret; - ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr)); - if (ret < 0) + ret = sdio->parse_irq(dev, &intr); + if (ret) return ret; - trace_dev_irq(dev, intr->isr, 0); + trace_dev_irq(dev, intr.isr, 0); - if (intr->isr & WHIER_RX0_DONE_INT_EN) { - ret = mt7663s_rx_run_queue(dev, 0, intr); + if (intr.isr & WHIER_RX0_DONE_INT_EN) { + ret = mt76s_rx_run_queue(dev, 0, &intr); if (ret > 0) { mt76_worker_schedule(&sdio->net_worker); nframes += ret; } } - if (intr->isr & WHIER_RX1_DONE_INT_EN) { - ret = mt7663s_rx_run_queue(dev, 1, intr); + if (intr.isr & WHIER_RX1_DONE_INT_EN) { + ret = mt76s_rx_run_queue(dev, 1, &intr); if (ret > 0) { mt76_worker_schedule(&sdio->net_worker); nframes += ret; } } - nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr); + nframes += !!mt76s_refill_sched_quota(dev, intr.tx.wtqcr); return nframes; } -static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz, - int *pse_size, int *ple_size) +static int +mt76s_tx_pick_quota(struct mt76_sdio *sdio, 
bool mcu, int buf_sz, + int *pse_size, int *ple_size) { int pse_sz; - pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); + pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, + sdio->sched.pse_page_size); + + if (mcu && sdio->hw_ver == MT76_CONNAC2_SDIO) + pse_sz = 1; if (mcu) { if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) @@ -187,8 +194,9 @@ static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz, return 0; } -static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, - int pse_size, int ple_size) +static void +mt76s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size, + int ple_size) { if (mcu) { sdio->sched.pse_mcu_quota -= pse_size; @@ -198,7 +206,7 @@ static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, } } -static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) +static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) { struct mt76_sdio *sdio = &dev->sdio; int err; @@ -213,7 +221,7 @@ static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) return err; } -static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) +static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) { int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; bool mcu = q == dev->q_mcu[MT_MCUQ_WM]; @@ -227,10 +235,13 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) smp_rmb(); + if (test_bit(MT76_MCU_RESET, &dev->phy.state)) + goto next; + if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) { __skb_put_zero(e->skb, 4); - err = __mt7663s_xmit_queue(dev, e->skb->data, - e->skb->len); + err = __mt76s_xmit_queue(dev, e->skb->data, + e->skb->len); if (err) return err; @@ -241,8 +252,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ) break; - if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz, - &ple_sz)) + if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz, + &ple_sz)) break; memcpy(sdio->xmit_buf[qid] + len, e->skb->data, @@ -268,30 +279,22 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) if (nframes) { memset(sdio->xmit_buf[qid] + len, 0, 4); - err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4); + err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4); if (err) return err; } - mt7663s_tx_update_quota(sdio, mcu, pse_sz, ple_sz); + mt76s_tx_update_quota(sdio, mcu, pse_sz, ple_sz); mt76_worker_schedule(&sdio->status_worker); return nframes; } -void mt7663s_txrx_worker(struct mt76_worker *w) +void mt76s_txrx_worker(struct mt76_sdio *sdio) { - struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, - txrx_worker); - struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio); - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); int i, nframes, ret; - if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { - queue_work(mdev->wq, &dev->pm.wake_work); - return; - } - /* disable interrupt */ sdio_claim_host(sdio->func); sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL); @@ -301,34 +304,61 @@ void mt7663s_txrx_worker(struct mt76_worker *w) /* tx */ for (i = 0; i <= MT_TXQ_PSD; i++) { - ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]); + ret = mt76s_tx_run_queue(dev, dev->phy.q_tx[i]); if (ret > 0) nframes += ret; } - ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]); + ret = 
mt76s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]); if (ret > 0) nframes += ret; /* rx */ - ret = mt7663s_rx_handler(mdev); + ret = mt76s_rx_handler(dev); if (ret > 0) nframes += ret; + + if (test_bit(MT76_MCU_RESET, &dev->phy.state)) { + if (!mt76s_txqs_empty(dev)) + continue; + else + wake_up(&sdio->wait); + } } while (nframes > 0); /* enable interrupt */ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL); sdio_release_host(sdio->func); - - mt76_connac_pm_unref(&dev->mphy, &dev->pm); } +EXPORT_SYMBOL_GPL(mt76s_txrx_worker); -void mt7663s_sdio_irq(struct sdio_func *func) +void mt76s_sdio_irq(struct sdio_func *func) { - struct mt7615_dev *dev = sdio_get_drvdata(func); - struct mt76_sdio *sdio = &dev->mt76.sdio; + struct mt76_dev *dev = sdio_get_drvdata(func); + struct mt76_sdio *sdio = &dev->sdio; - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) + if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state) || + test_bit(MT76_MCU_RESET, &dev->phy.state)) return; mt76_worker_schedule(&sdio->txrx_worker); } +EXPORT_SYMBOL_GPL(mt76s_sdio_irq); + +bool mt76s_txqs_empty(struct mt76_dev *dev) +{ + struct mt76_queue *q; + int i; + + for (i = 0; i <= MT_TXQ_PSD + 1; i++) { + if (i <= MT_TXQ_PSD) + q = dev->phy.q_tx[i]; + else + q = dev->q_mcu[MT_MCUQ_WM]; + + if (q->first != q->head) + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(mt76s_txqs_empty); diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index f73ffbd6e622d8..66afc2b0a93592 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -2,7 +2,7 @@ /* Copyright (C) 2020 Felix Fietkau */ #include "mt76.h" -static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { +const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG }, [MT76_TM_ATTR_STATE] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 }, @@ -21,7 +21,9 @@ static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 }, [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 }, [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 }, + [MT76_TM_ATTR_DRV_DATA] = { .type = NLA_NESTED }, }; +EXPORT_SYMBOL_GPL(mt76_tm_policy); void mt76_testmode_tx_pending(struct mt76_phy *phy) { diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h index d32a7654c47e00..d1f9c036dd1fcf 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.h +++ b/drivers/net/wireless/mediatek/mt76/testmode.h @@ -44,6 +44,7 @@ * @MT76_TM_ATTR_TX_IPG: tx inter-packet gap, in unit of us (u32) * @MT76_TM_ATTR_TX_TIME: packet transmission time, in unit of us (u32) * + * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested) */ enum mt76_testmode_attr { MT76_TM_ATTR_UNSPEC, @@ -78,6 +79,8 @@ enum mt76_testmode_attr { MT76_TM_ATTR_TX_IPG, MT76_TM_ATTR_TX_TIME, + MT76_TM_ATTR_DRV_DATA, + /* keep last */ NUM_MT76_TM_ATTRS, MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1, @@ -144,6 +147,7 @@ enum mt76_testmode_rx_attr { * @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames * @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics * @MT76_TM_STATE_TX_CONT: waveform tx without time gap + * @MT76_TM_STATE_ON: test mode enabled used in offload firmware */ enum mt76_testmode_state { MT76_TM_STATE_OFF, @@ -151,6 +155,7 @@ enum mt76_testmode_state { MT76_TM_STATE_TX_FRAMES, MT76_TM_STATE_RX_FRAMES, MT76_TM_STATE_TX_CONT, + 
MT76_TM_STATE_ON, /* keep last */ NUM_MT76_TM_STATES, @@ -184,4 +189,6 @@ enum mt76_testmode_tx_mode { MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1, }; +extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS]; + #endif diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index f0f7a913eaabfe..11719ef034d888 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -38,21 +38,21 @@ EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn); void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) - __acquires(&dev->status_list.lock) + __acquires(&dev->status_lock) { __skb_queue_head_init(list); - spin_lock_bh(&dev->status_list.lock); + spin_lock_bh(&dev->status_lock); } EXPORT_SYMBOL_GPL(mt76_tx_status_lock); void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) - __releases(&dev->status_list.lock) + __releases(&dev->status_lock) { struct ieee80211_hw *hw; struct sk_buff *skb; - spin_unlock_bh(&dev->status_list.lock); + spin_unlock_bh(&dev->status_lock); rcu_read_lock(); while ((skb = __skb_dequeue(list)) != NULL) { @@ -64,9 +64,13 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) struct mt76_wcid *wcid; wcid = rcu_dereference(dev->wcid[cb->wcid]); - if (wcid) + if (wcid) { status.sta = wcid_to_sta(wcid); + if (status.sta) + status.rate = &wcid->rate; + } + hw = mt76_tx_status_get_hw(dev, skb); ieee80211_tx_status_ext(hw, &status); } @@ -88,8 +92,6 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, if ((flags & done) != done) return; - __skb_unlink(skb, &dev->status_list); - /* Tx status can be unreliable. if it fails, mark the frame as ACKed */ if (flags & MT_TX_CB_TXS_FAILED) { info->status.rates[0].count = 0; @@ -116,6 +118,8 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); int pid; + memset(cb, 0, sizeof(*cb)); + if (!wcid) return MT_PACKET_ID_NO_ACK; @@ -126,16 +130,23 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, IEEE80211_TX_CTL_RATE_CTRL_PROBE))) return MT_PACKET_ID_NO_SKB; - spin_lock_bh(&dev->status_list.lock); + spin_lock_bh(&dev->status_lock); + + pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST, + MT_PACKET_ID_MASK, GFP_ATOMIC); + if (pid < 0) { + pid = MT_PACKET_ID_NO_SKB; + goto out; + } - memset(cb, 0, sizeof(*cb)); - pid = mt76_get_next_pkt_id(wcid); cb->wcid = wcid->idx; cb->pktid = pid; - cb->jiffies = jiffies; - __skb_queue_tail(&dev->status_list, skb); - spin_unlock_bh(&dev->status_list.lock); + if (list_empty(&wcid->list)) + list_add_tail(&wcid->list, &dev->wcid_list); + +out: + spin_unlock_bh(&dev->status_lock); return pid; } @@ -145,36 +156,53 @@ struct sk_buff * mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, struct sk_buff_head *list) { - struct sk_buff *skb, *tmp; + struct sk_buff *skb; + int id; - skb_queue_walk_safe(&dev->status_list, skb, tmp) { - struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); + lockdep_assert_held(&dev->status_lock); - if (wcid && cb->wcid != wcid->idx) - continue; + skb = idr_remove(&wcid->pktid, pktid); + if (skb) + goto out; - if (cb->pktid == pktid) - return skb; + /* look for stale entries in the wcid idr queue */ + idr_for_each_entry(&wcid->pktid, skb, id) { + struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); - if (pktid >= 0 && !time_after(jiffies, cb->jiffies + - MT_TX_STATUS_SKB_TIMEOUT)) - continue; + if (pktid >= 0) { + if (!(cb->flags & 
MT_TX_CB_DMA_DONE)) + continue; + + if (!time_is_after_jiffies(cb->jiffies + + MT_TX_STATUS_SKB_TIMEOUT)) + continue; + } + /* It has been too long since DMA_DONE, time out this packet + * and stop waiting for TXS callback. + */ + idr_remove(&wcid->pktid, cb->pktid); __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED | MT_TX_CB_TXS_DONE, list); } - return NULL; +out: + if (idr_is_empty(&wcid->pktid)) + list_del_init(&wcid->list); + + return skb; } EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get); void -mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush) +mt76_tx_status_check(struct mt76_dev *dev, bool flush) { + struct mt76_wcid *wcid, *tmp; struct sk_buff_head list; mt76_tx_status_lock(dev, &list); - mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list); + list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list) + mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list); mt76_tx_status_unlock(dev, &list); } EXPORT_SYMBOL_GPL(mt76_tx_status_check); @@ -197,6 +225,7 @@ mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid, void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb, struct list_head *free_list) { + struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); struct ieee80211_tx_status status = { .skb = skb, .free_list = free_list, @@ -226,7 +255,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff * } #endif - if (!skb->prev) { + if (cb->pktid < MT_PACKET_ID_FIRST) { hw = mt76_tx_status_get_hw(dev, skb); status.sta = wcid_to_sta(wcid); ieee80211_tx_status_ext(hw, &status); @@ -234,6 +263,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff * } mt76_tx_status_lock(dev, &list); + cb->jiffies = jiffies; __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list); mt76_tx_status_unlock(dev, &list); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 1e9f60bb811ad1..0a7006c8959b16 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1081,7 +1081,7 @@ void mt76u_stop_tx(struct mt76_dev *dev) mt76_worker_enable(&dev->usb.status_worker); - mt76_tx_status_check(dev, NULL, true); + mt76_tx_status_check(dev, true); } EXPORT_SYMBOL_GPL(mt76u_stop_tx); diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h index 1c363ea9ab9cd2..49c52d781f40fa 100644 --- a/drivers/net/wireless/mediatek/mt76/util.h +++ b/drivers/net/wireless/mediatek/mt76/util.h @@ -70,17 +70,15 @@ mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w, if (fn) w->fn = fn; - w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s", - name, dev_name); + w->task = kthread_run(__mt76_worker_fn, w, + "mt76-%s %s", name, dev_name); - ret = PTR_ERR_OR_ZERO(w->task); - if (ret) { + if (IS_ERR(w->task)) { + ret = PTR_ERR(w->task); w->task = NULL; return ret; } - wake_up_process(w->task); - return 0; } diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index ed78d2cb35e3ce..457147394edc4b 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -515,7 +515,7 @@ static int mt7601u_alloc_tx(struct mt7601u_dev *dev) int mt7601u_dma_init(struct mt7601u_dev *dev) { - int ret = -ENOMEM; + int ret; tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet); tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet); diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c 
b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 96973ec7bd9ac2..dc4bfe7be378d8 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -129,8 +129,7 @@ static void cfg_scan_result(enum scan_event scan_event, info->frame_len, (s32)info->rssi * 100, GFP_KERNEL); - if (!bss) - cfg80211_put_bss(wiphy, bss); + cfg80211_put_bss(wiphy, bss); } else if (scan_event == SCAN_EVENT_DONE) { mutex_lock(&priv->scan_req_lock); @@ -729,6 +728,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, { struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; + struct wilc *wilc = vif->wilc; u32 i = 0; u32 associatedsta = ~0; u32 inactive_time = 0; @@ -755,6 +755,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, } else if (vif->iftype == WILC_STATION_MODE) { struct rf_info stats; + if (!wilc->initialized) + return -EBUSY; + wilc_get_statistics(vif, &stats); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) | @@ -1581,6 +1584,7 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) } netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); + wilc_set_wowlan_trigger(vif, enabled); srcu_read_unlock(&wl->srcu, srcu_idx); } @@ -1683,6 +1687,7 @@ static void wlan_init_locks(struct wilc *wl) mutex_init(&wl->rxq_cs); mutex_init(&wl->cfg_cmd_lock); mutex_init(&wl->vif_mutex); + mutex_init(&wl->deinit_lock); spin_lock_init(&wl->txq_spinlock); mutex_init(&wl->txq_add_to_head_cs); @@ -1701,6 +1706,7 @@ void wlan_deinit_locks(struct wilc *wilc) mutex_destroy(&wilc->cfg_cmd_lock); mutex_destroy(&wilc->txq_add_to_head_cs); mutex_destroy(&wilc->vif_mutex); + mutex_destroy(&wilc->deinit_lock); cleanup_srcu_struct(&wilc->srcu); } @@ -1724,7 +1730,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, *wilc = wl; wl->io_type = io_type; wl->hif_func = ops; - wl->chip_ps_state = WILC_CHIP_WAKEDUP; for (i = 0; i < NQUEUES; i++) INIT_LIST_HEAD(&wl->txq[i].txq_head.list); diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index a133736a782158..e69b9c7f3d31ae 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -23,6 +23,10 @@ struct wilc_set_multicast { u8 *mc_list; }; +struct host_if_wowlan_trigger { + u8 wowlan_trigger; +}; + struct wilc_del_all_sta { u8 assoc_sta; u8 mac[WILC_MAX_NUM_STA][ETH_ALEN]; @@ -34,6 +38,7 @@ union wilc_message_body { struct wilc_set_multicast mc_info; struct wilc_remain_ch remain_on_ch; char *data; + struct host_if_wowlan_trigger wow_trigger; }; struct host_if_msg { @@ -962,6 +967,25 @@ static void handle_set_mcast_filter(struct work_struct *work) kfree(msg); } +void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled) +{ + int ret; + struct wid wid; + u8 wowlan_trigger = 0; + + if (enabled) + wowlan_trigger = 1; + + wid.id = WID_WOWLAN_TRIGGER; + wid.type = WID_CHAR; + wid.val = &wowlan_trigger; + wid.size = sizeof(char); + + ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); + if (ret) + pr_err("Failed to send wowlan trigger config packet\n"); +} + static void handle_scan_timer(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); @@ -1494,7 +1518,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) { struct host_if_drv *hif_drv; struct wilc_vif *vif = netdev_priv(dev); - struct wilc *wilc = vif->wilc; hif_drv = 
kzalloc(sizeof(*hif_drv), GFP_KERNEL); if (!hif_drv) @@ -1504,9 +1527,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) vif->hif_drv = hif_drv; - if (wilc->clients_count == 0) - mutex_init(&wilc->deinit_lock); - timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0); mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000)); @@ -1518,8 +1538,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) hif_drv->p2p_timeout = 0; - wilc->clients_count++; - return 0; } @@ -1550,7 +1568,6 @@ int wilc_deinit(struct wilc_vif *vif) kfree(hif_drv); vif->hif_drv = NULL; - vif->wilc->clients_count--; mutex_unlock(&vif->wilc->deinit_lock); return result; } diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h index 58811911213bf1..cccd54ed051828 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.h +++ b/drivers/net/wireless/microchip/wilc1000/hif.h @@ -207,6 +207,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats); int wilc_get_vif_idx(struct wilc_vif *vif); int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power); int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power); +void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled); void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length); void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length); void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 7e4d9235251cb3..690572e01a2a72 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -111,7 +111,8 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header) return ndev; } -void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode) +void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, + u8 mode) { struct wilc_vif *vif = netdev_priv(wilc_netdev); @@ -594,10 +595,14 @@ static int wilc_mac_open(struct net_device *ndev) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype, vif->idx); - if (is_valid_ether_addr(ndev->dev_addr)) + if (is_valid_ether_addr(ndev->dev_addr)) { wilc_set_mac_address(vif, ndev->dev_addr); - else - wilc_get_mac_address(vif, ndev->dev_addr); + } else { + u8 addr[ETH_ALEN]; + + wilc_get_mac_address(vif, addr); + eth_hw_addr_set(ndev, addr); + } netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { @@ -880,7 +885,6 @@ void wilc_netdev_cleanup(struct wilc *wilc) srcu_read_unlock(&wilc->srcu, srcu_idx); wilc_wfi_deinit_mon_interface(wilc, false); - flush_workqueue(wilc->hif_workqueue); destroy_workqueue(wilc->hif_workqueue); while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) { diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index 86209b391a3d6f..b9a88b3e322f13 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -264,9 +264,7 @@ struct wilc { struct device *dev; bool suspend_event; - int clients_count; struct workqueue_struct *hif_workqueue; - enum chip_ps_states chip_ps_state; struct wilc_cfg cfg; void *bus_data; struct net_device *monitor_dev; @@ -289,7 +287,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset); void wilc_mac_indicate(struct 
wilc *wilc); void wilc_netdev_cleanup(struct wilc *wilc); void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size); -void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode); +void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, + u8 mode); struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, int vif_type, enum nl80211_iftype type, bool rtnl_locked); diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 42e03a701ae165..26ebf666434259 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -978,6 +978,7 @@ static const struct wilc_hif_func wilc_hif_sdio = { .hif_sync_ext = wilc_sdio_sync_ext, .enable_interrupt = wilc_sdio_enable_interrupt, .disable_interrupt = wilc_sdio_disable_interrupt, + .hif_reset = wilc_sdio_reset, }; static int wilc_sdio_resume(struct device *dev) diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index dd481dc0b5ce01..640850f989dd95 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -47,6 +47,8 @@ struct wilc_spi { static const struct wilc_hif_func wilc_hif_spi; +static int wilc_spi_reset(struct wilc *wilc); + /******************************************** * * Spi protocol Function @@ -144,6 +146,12 @@ struct wilc_spi_rsp_data { u8 data[]; } __packed; +struct wilc_spi_special_cmd_rsp { + u8 skip_byte; + u8 rsp_cmd_type; + u8 status; +} __packed; + static int wilc_bus_probe(struct spi_device *spi) { int ret; @@ -466,7 +474,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, } r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; - if (r->rsp_cmd_type != cmd) { + if (r->rsp_cmd_type != cmd && !clockless) { if (!spi_priv->probing_crc) dev_err(&spi->dev, "Failed cmd, cmd (%02x), resp (%02x)\n", @@ -474,7 +482,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, return -EINVAL; } - if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; @@ -563,14 +571,18 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, } r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; - if (r->rsp_cmd_type != cmd) { + /* + * Clockless register operations might return unexpected responses, + * even if successful.
+ */ + if (r->rsp_cmd_type != cmd && !clockless) { dev_err(&spi->dev, "Failed cmd response, cmd (%02x), resp (%02x)\n", cmd, r->rsp_cmd_type); return -EINVAL; } - if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; @@ -709,6 +721,61 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) return 0; } +static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + u8 wb[32], rb[32]; + int cmd_len, resp_len = 0; + struct wilc_spi_cmd *c; + struct wilc_spi_special_cmd_rsp *r; + + if (cmd != CMD_TERMINATE && cmd != CMD_REPEAT && cmd != CMD_RESET) + return -EINVAL; + + memset(wb, 0x0, sizeof(wb)); + memset(rb, 0x0, sizeof(rb)); + c = (struct wilc_spi_cmd *)wb; + c->cmd_type = cmd; + + if (cmd == CMD_RESET) + memset(c->u.simple_cmd.addr, 0xFF, 3); + + cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); + resp_len = sizeof(*r); + + if (spi_priv->crc7_enabled) { + c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); + cmd_len += 1; + } + if (cmd_len + resp_len > ARRAY_SIZE(wb)) { + dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n", + cmd_len, resp_len, ARRAY_SIZE(wb)); + return -EINVAL; + } + + if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { + dev_err(&spi->dev, "Failed cmd write, bus error...\n"); + return -EINVAL; + } + + r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len]; + if (r->rsp_cmd_type != cmd) { + if (!spi_priv->probing_crc) + dev_err(&spi->dev, + "Failed cmd response, cmd (%02x), resp (%02x)\n", + cmd, r->rsp_cmd_type); + return -EINVAL; + } + + if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { + dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", + r->status); + return -EINVAL; + } + return 0; +} + static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); @@ -895,6 +962,19 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) * ********************************************/ +static int wilc_spi_reset(struct wilc *wilc) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + int result; + + result = wilc_spi_special_cmd(wilc, CMD_RESET); + if (result && !spi_priv->probing_crc) + dev_err(&spi->dev, "Failed cmd reset\n"); + + return result; +} + static int wilc_spi_deinit(struct wilc *wilc) { /* @@ -1087,7 +1167,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint) for (i = 0; (i < 3) && (nint > 0); i++, nint--) reg |= BIT(i); - ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg); + ret = wilc_spi_write_reg(wilc, WILC_INTR2_ENABLE, reg); if (ret) { dev_err(&spi->dev, "Failed write reg (%08x)...\n", WILC_INTR2_ENABLE); @@ -1112,4 +1192,5 @@ static const struct wilc_hif_func wilc_hif_spi = { .hif_block_tx_ext = wilc_spi_write, .hif_block_rx_ext = wilc_spi_read, .hif_sync_ext = wilc_spi_sync_ext, + .hif_reset = wilc_spi_reset, }; diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 200a103a0a858f..ea81ef120fd13b 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -10,6 +10,8 @@ #include "cfg80211.h" #include "wlan_cfg.h" +#define WAKE_UP_TRIAL_RETRY 10000 + static inline bool is_wilc1000(u32 id) { return (id &
(~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID; @@ -425,6 +427,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, return 0; } + if (!wilc->initialized) { + tx_complete_fn(tx_data, 0); + return 0; + } + tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); if (!tqe) { @@ -474,6 +481,10 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, return 0; } + if (!wilc->initialized) { + tx_complete_fn(priv, 0); + return 0; + } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); if (!tqe) { @@ -611,60 +622,67 @@ EXPORT_SYMBOL_GPL(chip_allow_sleep); void chip_wakeup(struct wilc *wilc) { - u32 reg, clk_status_reg; - const struct wilc_hif_func *h = wilc->hif_func; - - if (wilc->io_type == WILC_HIF_SPI) { - do { - h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg); - h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG, - reg | WILC_SPI_WAKEUP_BIT); - h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG, - reg & ~WILC_SPI_WAKEUP_BIT); - - do { - usleep_range(2000, 2500); - wilc_get_chipid(wilc, true); - } while (wilc_get_chipid(wilc, true) == 0); - } while (wilc_get_chipid(wilc, true) == 0); - } else if (wilc->io_type == WILC_HIF_SDIO) { - h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, - WILC_SDIO_HOST_TO_FW_BIT); - usleep_range(200, 400); - h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg); - do { - h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, - reg | WILC_SDIO_WAKEUP_BIT); - h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG, - &clk_status_reg); - - while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) { - usleep_range(2000, 2500); + u32 ret = 0; + u32 clk_status_val = 0, trials = 0; + u32 wakeup_reg, wakeup_bit; + u32 clk_status_reg, clk_status_bit; + u32 to_host_from_fw_reg, to_host_from_fw_bit; + u32 from_host_to_fw_reg, from_host_to_fw_bit; + const struct wilc_hif_func *hif_func = wilc->hif_func; - h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG, - &clk_status_reg); - } - if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) { - h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG, - reg & ~WILC_SDIO_WAKEUP_BIT); - } - } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)); + if (wilc->io_type == WILC_HIF_SDIO) { + wakeup_reg = WILC_SDIO_WAKEUP_REG; + wakeup_bit = WILC_SDIO_WAKEUP_BIT; + clk_status_reg = WILC_SDIO_CLK_STATUS_REG; + clk_status_bit = WILC_SDIO_CLK_STATUS_BIT; + from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; + } else { + wakeup_reg = WILC_SPI_WAKEUP_REG; + wakeup_bit = WILC_SPI_WAKEUP_BIT; + clk_status_reg = WILC_SPI_CLK_STATUS_REG; + clk_status_bit = WILC_SPI_CLK_STATUS_BIT; + from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; + from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; + to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; + to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; } - if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) { - if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) { - u32 val32; + /* indicate host wakeup */ + ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, + from_host_to_fw_bit); + if (ret) + return; - h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32); - val32 |= BIT(6); - h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32); + /* Set wake-up bit */ + ret = hif_func->hif_write_reg(wilc, wakeup_reg, + wakeup_bit); + if (ret) + return; - h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32); - val32 |= BIT(6); - h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32); + while (trials < WAKE_UP_TRIAL_RETRY) { + ret =
hif_func->hif_read_reg(wilc, clk_status_reg, + &clk_status_val); + if (ret) { + pr_err("Bus error %d %x\n", ret, clk_status_val); + return; } + if (clk_status_val & clk_status_bit) + break; + + trials++; } - wilc->chip_ps_state = WILC_CHIP_WAKEDUP; + if (trials >= WAKE_UP_TRIAL_RETRY) { + pr_err("Failed to wake-up the chip\n"); + return; + } + /* Sometimes the SPI bus fails to read clock registers right after + * reading/writing clockless registers + */ + if (wilc->io_type == WILC_HIF_SPI) + wilc->hif_func->hif_reset(wilc); } EXPORT_SYMBOL_GPL(chip_wakeup); @@ -1071,6 +1089,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 addr, size, size2, blksz; u8 *dma_buffer; int ret = 0; + u32 reg = 0; blksz = BIT(12); @@ -1079,10 +1098,22 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, return -EIO; offset = 0; + pr_debug("%s: Downloading firmware size = %d\n", __func__, buffer_size); + + acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); + + wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); + reg &= ~BIT(10); + ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); + wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); + if (reg & BIT(10)) + pr_err("%s: Failed to reset\n", __func__); + + release_bus(wilc, WILC_BUS_RELEASE_ONLY); do { addr = get_unaligned_le32(&buffer[offset]); size = get_unaligned_le32(&buffer[offset + 4]); - acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); + acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); offset += 8; while (((int)size) && (offset < buffer_size)) { if (size <= blksz) @@ -1100,10 +1131,13 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, offset += size2; size -= size2; } - release_bus(wilc, WILC_BUS_RELEASE_ONLY); + release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - if (ret) + if (ret) { + pr_err("%s Bus error\n", __func__); goto fail; + } + pr_debug("%s Offset = %d\n", __func__, offset); } while (offset < buffer_size); fail: diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h index 771c25fa849b95..13fde636aa0e6c 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -97,6 +97,8 @@ #define WILC_SPI_WAKEUP_REG 0x1 #define WILC_SPI_WAKEUP_BIT BIT(1) +#define WILC_SPI_CLK_STATUS_REG 0x0f +#define WILC_SPI_CLK_STATUS_BIT BIT(2) #define WILC_SPI_HOST_TO_FW_REG 0x0b #define WILC_SPI_HOST_TO_FW_BIT BIT(0) @@ -300,7 +302,7 @@ #define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM) #define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM) /* time for expiring the completion of cfg packets */ -#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000) +#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(3000) #define IS_MANAGMEMENT 0x100 #define IS_MANAGMEMENT_CALLBACK 0x080 @@ -371,6 +373,7 @@ struct wilc_hif_func { int (*hif_sync_ext)(struct wilc *wilc, int nint); int (*enable_interrupt)(struct wilc *nic); void (*disable_interrupt)(struct wilc *nic); + int (*hif_reset)(struct wilc *wilc); }; #define WILC_MAX_CFG_FRAME_SIZE 1468 diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c index fe2a7ed8e5cd2a..dba301378b7fac 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c @@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = { {WID_STATUS, 0}, {WID_RSSI, 0}, {WID_LINKSPEED, 0}, + {WID_WOWLAN_TRIGGER, 0}, {WID_NIL, 0} }; diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
b/drivers/net/wireless/microchip/wilc1000/wlan_if.h index f85fd575136dc8..6eb7eb4ac29440 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h @@ -48,12 +48,6 @@ enum { WILC_FW_MAX_PSPOLL_PS = 4 }; -enum chip_ps_states { - WILC_CHIP_WAKEDUP = 0, - WILC_CHIP_SLEEPING_AUTO = 1, - WILC_CHIP_SLEEPING_MANUAL = 2 -}; - enum bus_acquire { WILC_BUS_ACQUIRE_ONLY = 0, WILC_BUS_ACQUIRE_AND_WAKEUP = 1, @@ -662,6 +656,7 @@ enum { WID_LOG_TERMINAL_SWITCH = 0x00CD, WID_TX_POWER = 0x00CE, + WID_WOWLAN_TRIGGER = 0X00CF, /* EMAC Short WID list */ /* RTS Threshold */ /* diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index b4dd60b2ebc907..2a63ffdc4b2c10 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -179,7 +179,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr) sa->sa_data); if (ret) - memcpy(ndev->dev_addr, old_addr, ETH_ALEN); + eth_hw_addr_set(ndev, old_addr); return ret; } @@ -478,7 +478,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, dev->needs_free_netdev = true; dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = &vif->wdev; - ether_addr_copy(dev->dev_addr, vif->mac_addr); + eth_hw_addr_set(dev, vif->mac_addr); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT; dev->tx_queue_len = 100; @@ -811,13 +811,11 @@ void qtnf_core_detach(struct qtnf_bus *bus) bus->fw_state = QTNF_FW_STATE_DETACHED; if (bus->workqueue) { - flush_workqueue(bus->workqueue); destroy_workqueue(bus->workqueue); bus->workqueue = NULL; } if (bus->hprio_workqueue) { - flush_workqueue(bus->hprio_workqueue); destroy_workqueue(bus->hprio_workqueue); bus->hprio_workqueue = NULL; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 5d93c874d66698..9ad4c120fa2876 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -387,7 +387,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error: - flush_workqueue(pcie_priv->workqueue); destroy_workqueue(pcie_priv->workqueue); pci_set_drvdata(pdev, NULL); return ret; @@ -416,7 +415,6 @@ static void qtnf_pcie_remove(struct pci_dev *dev) qtnf_core_detach(bus); netif_napi_del(&bus->mux_napi); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); tasklet_kill(&priv->reclaim_tq); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index b5c67f656cfd5b..a3ffd1b0c9bc6c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -1101,7 +1101,6 @@ static const struct usb_device_id rt2800usb_device_table[] = { #ifdef CONFIG_RT2800USB_RT53XX /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a12) }, - { USB_DEVICE(0x043e, 0x7a32) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x17e8) }, /* Azurewave */ diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0f5009c47cd0ac..e3a3dc3e45b43a 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -791,7 +791,7 @@ static int ray_dev_init(struct net_device *dev) #endif /* RAY_IMMEDIATE_INIT */ /* copy mac and broadcast addresses to linux device */ - memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); + eth_hw_addr_set(dev, 
local->sparm.b4.a_mac_addr); eth_broadcast_addr(dev->broadcast); dev_dbg(&link->dev, "ray_dev_init ending\n"); diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig index 474843277fa14c..4a1f0e64df0399 100644 --- a/drivers/net/wireless/realtek/Kconfig +++ b/drivers/net/wireless/realtek/Kconfig @@ -16,5 +16,6 @@ source "drivers/net/wireless/realtek/rtl818x/Kconfig" source "drivers/net/wireless/realtek/rtlwifi/Kconfig" source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig" source "drivers/net/wireless/realtek/rtw88/Kconfig" +source "drivers/net/wireless/realtek/rtw89/Kconfig" endif # WLAN_VENDOR_REALTEK diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile index 888b5d594e7976..ab25419f56c682 100644 --- a/drivers/net/wireless/realtek/Makefile +++ b/drivers/net/wireless/realtek/Makefile @@ -8,4 +8,5 @@ obj-$(CONFIG_RTL8187) += rtl818x/ obj-$(CONFIG_RTLWIFI) += rtlwifi/ obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/ obj-$(CONFIG_RTW88) += rtw88/ +obj-$(CONFIG_RTW89) += rtw89/ diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c index 585784258c6654..4efab907a3ac6d 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c @@ -28,7 +28,7 @@ u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits8, sizeof(val), 500); val = priv->io_dmabuf->bits8; mutex_unlock(&priv->io_mutex); @@ -45,7 +45,7 @@ u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits16, sizeof(val), 500); val = priv->io_dmabuf->bits16; mutex_unlock(&priv->io_mutex); @@ -62,7 +62,7 @@ u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits32, sizeof(val), 500); val = priv->io_dmabuf->bits32; mutex_unlock(&priv->io_mutex); @@ -79,7 +79,7 @@ void rtl818x_iowrite8_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits8, sizeof(val), 500); mutex_unlock(&priv->io_mutex); } @@ -93,7 +93,7 @@ void rtl818x_iowrite16_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits16, sizeof(val), 500); mutex_unlock(&priv->io_mutex); } @@ -107,7 +107,7 @@ void rtl818x_iowrite32_idx(struct rtl8187_priv *priv, usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, (unsigned long)addr, idx & 0x03, - &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + &priv->io_dmabuf->bits32, sizeof(val), 500); mutex_unlock(&priv->io_mutex); } @@ -183,7 +183,7 @@ static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) usb_control_msg(priv->udev, 
usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data), - HZ / 2); + 500); mutex_unlock(&priv->io_mutex); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 774341b0005a3c..a42e2081b75f3b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4460,13 +4460,17 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) { + struct ieee80211_hw *hw = priv->hw; u32 val32; u8 rate_idx = 0; rate_cfg &= RESPONSE_RATE_BITMAP_ALL; val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); - val32 &= ~RESPONSE_RATE_BITMAP_ALL; + if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) + val32 &= RESPONSE_RATE_RRSR_INIT_5G; + else + val32 &= RESPONSE_RATE_RRSR_INIT_2G; val32 |= rate_cfg; rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a2a31f374a8202..438b65ba96405c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -516,6 +516,8 @@ #define REG_RESPONSE_RATE_SET 0x0440 #define RESPONSE_RATE_BITMAP_ALL 0xfffff #define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1 +#define RESPONSE_RATE_RRSR_INIT_2G 0x15f +#define RESPONSE_RATE_RRSR_INIT_5G 0x150 #define RSR_1M BIT(0) #define RSR_2M BIT(1) #define RSR_5_5M BIT(2) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 3776495fd9d03f..ad327bae754be9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1743,7 +1743,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) tasklet_kill(&rtlpriv->works.irq_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); - flush_workqueue(rtlpriv->works.rtl_wq); destroy_workqueue(rtlpriv->works.rtl_wq); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 88fa2e593fef8d..76189283104c94 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1430,7 +1430,7 @@ static enum version_8192e _rtl92ee_read_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; - enum version_8192e version = VERSION_UNKNOWN; + enum version_8192e version; u32 value32; rtlphy->rf_type = RF_2T2R; diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index dfd52cff5d02f8..682b23502e6ea6 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -12,6 +12,7 @@ #include "phy.h" #include "reg.h" #include "ps.h" +#include "regd.h" #ifdef CONFIG_RTW88_DEBUGFS @@ -587,7 +588,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) struct rtw_power_params pwr_param = {0}; u8 bw = hal->current_band_width; u8 ch = hal->current_channel; - u8 regd = rtwdev->regd.txpwr_regd; + u8 regd = rtw_regd_get(rtwdev); seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd)); seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n", @@ -828,6 +829,38 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) return 0; } 
+static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + bool input; + int err; + + err = kstrtobool_from_user(buffer, count, &input); + if (err) + return err; + + rtw_edcca_enabled = input; + rtw_phy_adaptivity_set_mode(rtwdev); + + return count; +} + +static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + seq_printf(m, "EDCCA %s: EDCCA mode %d\n", + rtw_edcca_enabled ? "enabled" : "disabled", + dm_info->edcca_mode); + return 0; +} + static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) @@ -853,6 +886,7 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); + set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); rtw_write8(rtwdev, REG_HRCV_MSG, 1); mutex_unlock(&rtwdev->mutex); @@ -864,7 +898,9 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; - seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); + seq_printf(m, "%d\n", + test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) || + test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); return 0; } @@ -1048,6 +1084,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { .cb_read = rtw_debugfs_get_coex_info, }; +static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = { + .cb_write = rtw_debugfs_set_edcca_enable, + .cb_read = rtw_debugfs_get_edcca_enable, +}; + static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { .cb_write = rtw_debugfs_set_fw_crash, .cb_read = rtw_debugfs_get_fw_crash, @@ -1131,6 +1172,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) } rtw_debugfs_add_r(rf_dump); rtw_debugfs_add_r(tx_pwr_tbl); + rtw_debugfs_add_rw(edcca_enable); rtw_debugfs_add_rw(fw_crash); rtw_debugfs_add_rw(dm_cap); } diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index 0dd3f9a88c8d97..47c57f395f52f3 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -21,6 +21,7 @@ enum rtw_debug_mask { RTW_DBG_WOW = 0x00001000, RTW_DBG_CFO = 0x00002000, RTW_DBG_PATH_DIV = 0x00004000, + RTW_DBG_ADAPTIVITY = 0x00008000, RTW_DBG_ALL = 0xffffffff }; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index e6399519584bd9..0c4f2a2f2d7fb4 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -183,6 +183,28 @@ static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload, dm_info->scan_density); } +static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload, + u8 length) +{ + struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th; + struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload; + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, + "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n", + result->density, result->igi, result->l2h_th_init, result->l2h, + result->h2l, result->option); + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L 
%x\n", + rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask), + rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask)); + + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n", + rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ? + "Set" : "Unset"); +} + void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb) { struct rtw_c2h_cmd *c2h; @@ -252,6 +274,10 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, rtw_fw_scan_result(rtwdev, c2h->payload, len); dev_kfree_skb_any(skb); break; + case C2H_ADAPTIVITY: + rtw_fw_adaptivity_result(rtwdev, c2h->payload, len); + dev_kfree_skb_any(skb); + break; default: /* pass offset for further operation */ *((u32 *)skb->cb) = pkt_offset; @@ -1556,12 +1582,10 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size, u32 i; u16 idx = 0; u16 ctl; - u8 rcr; - rcr = rtw_read8(rtwdev, REG_RCR + 2); ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000; /* disable rx clock gate */ - rtw_write8(rtwdev, REG_RCR, rcr | BIT(3)); + rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK); do { rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl); @@ -1580,7 +1604,8 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size, out: rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl); - rtw_write8(rtwdev, REG_RCR + 2, rcr); + /* restore rx clock gate */ + rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK); } static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel, @@ -1722,6 +1747,27 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable) rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } +void rtw_fw_adaptivity(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + if (!rtw_edcca_enabled) { + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, + "EDCCA disabled by debugfs\n"); + } + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY); + SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode); + SET_ADAPTIVITY_OPTION(h2c_pkt, 2); + SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]); + SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini); + SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 64dcde35a021a0..09c7afb99e6318 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -41,6 +41,7 @@ enum rtw_c2h_cmd_id { C2H_WLAN_INFO = 0x27, C2H_WLAN_RFON = 0x32, C2H_BCN_FILTER_NOTIFY = 0x36, + C2H_ADAPTIVITY = 0x37, C2H_SCAN_RESULT = 0x38, C2H_HW_FEATURE_DUMP = 0xfd, C2H_HALMAC = 0xff, @@ -56,6 +57,15 @@ struct rtw_c2h_cmd { u8 payload[]; } __packed; +struct rtw_c2h_adaptivity { + u8 density; + u8 igi; + u8 l2h_th_init; + u8 l2h; + u8 h2l; + u8 option; +} __packed; + enum rtw_rsvd_packet_type { RSVD_BEACON, RSVD_DUMMY, @@ -90,6 +100,7 @@ enum rtw_fw_feature { FW_FEATURE_PG = BIT(3), FW_FEATURE_BCN_FILTER = BIT(5), FW_FEATURE_NOTIFY_SCAN = BIT(6), + FW_FEATURE_ADAPTIVITY = BIT(7), FW_FEATURE_MAX = BIT(31), }; @@ -375,6 +386,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57 #define H2C_CMD_WL_PHY_INFO 0x58 #define H2C_CMD_SCAN 0x59 +#define 
H2C_CMD_ADAPTIVITY 0x5A #define H2C_CMD_COEX_TDMA_TYPE 0x60 #define H2C_CMD_QUERY_BT_INFO 0x61 @@ -428,6 +440,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define SET_SCAN_START(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_ADAPTIVITY_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(11, 8)) +#define SET_ADAPTIVITY_OPTION(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12)) +#define SET_ADAPTIVITY_IGI(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_ADAPTIVITY_L2H(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_ADAPTIVITY_DENSITY(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) + #define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8)) #define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \ @@ -662,4 +685,5 @@ void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev); int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, u32 *buffer); void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start); +void rtw_fw_adaptivity(struct rtw_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 6bb55e663fc36b..a0d4d6e31fb49e 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -23,6 +23,14 @@ EXPORT_SYMBOL(rtw_disable_lps_deep_mode); bool rtw_bf_support = true; unsigned int rtw_debug_mask; EXPORT_SYMBOL(rtw_debug_mask); +/* EDCCA is enabled during normal operation. For debugging in a noisy + * environment, it can be disabled via the edcca debugfs entry. Because a + * noisy environment will likely affect all rtw88 devices, rtw_edcca_enabled + * is declared once by the driver rather than per device. Turning it off + * therefore takes effect for all rtw88 devices, until there is a compelling + * reason to track rtw_edcca_enabled per device.
+ */ +bool rtw_edcca_enabled = true; module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); module_param_named(support_bf, rtw_bf_support, bool, 0644); @@ -556,6 +564,7 @@ static void __fw_recovery_work(struct rtw_dev *rtwdev) int ret = 0; set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); ret = rtw_fwcd_prep(rtwdev); if (ret) @@ -1964,7 +1973,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) rtw_set_supported_band(hw, rtwdev->chip); SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr); - rtw_regd_init(rtwdev, rtw_regd_notifier); + ret = rtw_regd_init(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to init regd\n"); + return ret; + } ret = ieee80211_register_hw(hw); if (ret) { @@ -1972,8 +1985,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) return ret; } - if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2)) - rtw_err(rtwdev, "regulatory_hint fail\n"); + ret = rtw_regd_hint(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to hint regd\n"); + return ret; + } rtw_debugfs_init(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 56812127a0539d..bbdd535b64e771 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -41,6 +41,7 @@ extern bool rtw_bf_support; extern bool rtw_disable_lps_deep_mode; extern unsigned int rtw_debug_mask; +extern bool rtw_edcca_enabled; extern const struct ieee80211_ops rtw_ops; #define RTW_MAX_CHANNEL_NUM_2G 14 @@ -362,6 +363,7 @@ enum rtw_flags { RTW_FLAG_BUSY_TRAFFIC, RTW_FLAG_WOWLAN, RTW_FLAG_RESTARTING, + RTW_FLAG_RESTART_TRIGGERING, NUM_OF_RTW_FLAGS, }; @@ -545,6 +547,11 @@ struct rtw_rf_sipi_addr { u32 lssi_read_pi; }; +struct rtw_hw_reg_offset { + struct rtw_hw_reg hw_reg; + u8 offset; +}; + struct rtw_backup_info { u8 len; u32 reg; @@ -800,8 +807,22 @@ struct rtw_vif { struct rtw_regulatory { char alpha2[2]; - u8 chplan; - u8 txpwr_regd; + u8 txpwr_regd_2g; + u8 txpwr_regd_5g; +}; + +enum rtw_regd_state { + RTW_REGD_STATE_WORLDWIDE, + RTW_REGD_STATE_PROGRAMMED, + RTW_REGD_STATE_SETTING, + + RTW_REGD_STATE_NR, +}; + +struct rtw_regd { + enum rtw_regd_state state; + const struct rtw_regulatory *regulatory; + enum nl80211_dfs_regions dfs_region; }; struct rtw_chip_ops { @@ -839,6 +860,8 @@ struct rtw_chip_ops { struct ieee80211_bss_conf *conf); void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate); + void (*adaptivity_init)(struct rtw_dev *rtwdev); + void (*adaptivity)(struct rtw_dev *rtwdev); void (*cfo_init)(struct rtw_dev *rtwdev); void (*cfo_track)(struct rtw_dev *rtwdev); void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path, @@ -1194,6 +1217,10 @@ struct rtw_chip_info { u8 bfer_su_max_num; u8 bfer_mu_max_num; + struct rtw_hw_reg_offset *edcca_th; + s8 l2h_th_ini_cs; + s8 l2h_th_ini_ad; + const char *wow_fw_name; const struct wiphy_wowlan_support *wowlan_stub; const u8 max_sched_scan_ssids; @@ -1542,6 +1569,20 @@ struct rtw_gapk_info { u8 channel; }; +#define EDCCA_TH_L2H_IDX 0 +#define EDCCA_TH_H2L_IDX 1 +#define EDCCA_TH_L2H_LB 48 +#define EDCCA_ADC_BACKOFF 12 +#define EDCCA_IGI_BASE 50 +#define EDCCA_IGI_L2H_DIFF 8 +#define EDCCA_L2H_H2L_DIFF 7 +#define EDCCA_L2H_H2L_DIFF_NORMAL 8 + +enum rtw_edcca_mode { + RTW_EDCCA_NORMAL = 0, + RTW_EDCCA_ADAPTIVITY = 1, +}; + struct rtw_cfo_track { bool is_adjust; u8 crystal_cap; @@ -1633,6 +1674,8 @@ struct rtw_dm_info { struct rtw_gapk_info 
gapk; bool is_bt_iqk_timeout; + s8 l2h_th_ini; + enum rtw_edcca_mode edcca_mode; u8 scan_density; }; @@ -1833,7 +1876,7 @@ struct rtw_dev { struct rtw_efuse efuse; struct rtw_sec_desc sec; struct rtw_traffic_stats stats; - struct rtw_regulatory regd; + struct rtw_regd regd; struct rtw_bf_info bf_info; struct rtw_dm_info dm_info; diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 569dd3cfde353b..bfddfcbe63f559 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -9,6 +9,7 @@ #include "fw.h" #include "phy.h" #include "debug.h" +#include "regd.h" struct phy_cfg_pair { u32 addr; @@ -119,6 +120,63 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) dm_info->cck_fa_avg = CCK_FA_AVG_RESET; } +void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l) +{ + struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th; + + rtw_write32_mask(rtwdev, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask, + l2h + edcca_th[EDCCA_TH_L2H_IDX].offset); + rtw_write32_mask(rtwdev, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr, + edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask, + h2l + edcca_th[EDCCA_TH_H2L_IDX].offset); +} +EXPORT_SYMBOL(rtw_phy_set_edcca_th); + +void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + /* turn off in debugfs for debug usage */ + if (!rtw_edcca_enabled) { + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n"); + return; + } + + switch (rtwdev->regd.dfs_region) { + case NL80211_DFS_ETSI: + dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY; + dm_info->l2h_th_ini = chip->l2h_th_ini_ad; + break; + case NL80211_DFS_JP: + dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY; + dm_info->l2h_th_ini = chip->l2h_th_ini_cs; + break; + default: + dm_info->edcca_mode = RTW_EDCCA_NORMAL; + break; + } +} + +static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + rtw_phy_adaptivity_set_mode(rtwdev); + if (chip->ops->adaptivity_init) + chip->ops->adaptivity_init(rtwdev); +} + +static void rtw_phy_adaptivity(struct rtw_dev *rtwdev) +{ + if (rtwdev->chip->ops->adaptivity) + rtwdev->chip->ops->adaptivity(rtwdev); +} + static void rtw_phy_cfo_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -159,6 +217,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev) rtw_phy_cck_pd_init(rtwdev); dm_info->iqk.done = false; + rtw_phy_adaptivity_init(rtwdev); rtw_phy_cfo_init(rtwdev); rtw_phy_tx_path_div_init(rtwdev); } @@ -711,6 +770,11 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) rtw_phy_cfo_track(rtwdev); rtw_phy_dpk_track(rtwdev); rtw_phy_pwr_track(rtwdev); + + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY)) + rtw_fw_adaptivity(rtwdev); + else + rtw_phy_adaptivity(rtwdev); } #define FRAC_BITS 3 @@ -1564,17 +1628,70 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev) rtw_xref_txpwr_lmt_by_bw(rtwdev, regd); } +static void +__cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs) +{ + u8 ch; + + for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) + hal->tx_pwr_limit_2g[regd][bw][rs][ch] = + hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch]; + + for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) + hal->tx_pwr_limit_5g[regd][bw][rs][ch] = + hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch]; +} + +static void 
+rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt) +{ + u8 bw, rs; + + for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) + for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) + __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt, + bw, rs); +} + void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl) { const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data; const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size; + u32 regd_cfg_flag = 0; + u8 regd_alt; + u8 i; for (; p < end; p++) { + regd_cfg_flag |= BIT(p->regd); rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band, p->bw, p->rs, p->ch, p->txpwr_lmt); } + for (i = 0; i < RTW_REGD_MAX; i++) { + if (i == RTW_REGD_WW) + continue; + + if (regd_cfg_flag & BIT(i)) + continue; + + rtw_dbg(rtwdev, RTW_DBG_REGD, + "txpwr regd %d is not configured\n", i); + + if (rtw_regd_has_alt(i, &regd_alt) && + regd_cfg_flag & BIT(regd_alt)) { + rtw_dbg(rtwdev, RTW_DBG_REGD, + "cfg txpwr regd %d by regd %d as alternative\n", + i, regd_alt); + + rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt); + continue; + } + + rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i); + rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW); + } + rtw_xref_txpwr_lmt(rtwdev); } EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt); @@ -2014,7 +2131,7 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev, u8 ch, u8 path, u8 rs) { struct rtw_hal *hal = &rtwdev->hal; - u8 regd = rtwdev->regd.txpwr_regd; + u8 regd = rtw_regd_get(rtwdev); u8 *rates; u8 size; u8 rate; diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index 112ed125970a3d..02d1ec47ffb194 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -59,6 +59,8 @@ bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev); bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev); void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table); +void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l); +void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev); void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev, struct rtw_rx_pkt_stat *pkt_stat); void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index f5ce75095e904b..84ba9ec489c370 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -361,10 +361,12 @@ #define REG_AGGR_BREAK_TIME 0x051A #define REG_SLOT 0x051B #define REG_TX_PTCL_CTRL 0x0520 +#define BIT_DIS_EDCCA BIT(15) #define BIT_SIFS_BK_EN BIT(12) #define REG_TXPAUSE 0x0522 #define BIT_AC_QUEUE GENMASK(7, 0) #define REG_RD_CTRL 0x0524 +#define BIT_EDCCA_MSK_CNTDOWN_EN BIT(11) #define BIT_DIS_TXOP_CFE BIT(10) #define BIT_DIS_LSIG_CFE BIT(9) #define BIT_DIS_STBC_CFE BIT(8) @@ -406,6 +408,7 @@ #define BIT_MFBEN BIT(22) #define BIT_DISCHKPPDLLEN BIT(21) #define BIT_PKTCTL_DLEN BIT(20) +#define BIT_DISGCLK BIT(19) #define BIT_TIM_PARSER_EN BIT(18) #define BIT_BC_MD_EN BIT(17) #define BIT_UC_MD_EN BIT(16) @@ -640,6 +643,9 @@ #define REG_HRCV_MSG 0x1cf +#define REG_EDCCA_REPORT 0x2d38 +#define BIT_EDCCA_FLAG BIT(24) + #define REG_IGN_GNTBT4 0x4160 #define RF_MODE 0x00 diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c index 69744dd65968cb..315c2b193e92cb 100644 --- a/drivers/net/wireless/realtek/rtw88/regd.c +++ b/drivers/net/wireless/realtek/rtw88/regd.c @@ -7,288 +7,274 
@@ #include "debug.h" #include "phy.h" -#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \ +#define COUNTRY_REGD_ENT(_alpha2, _regd_2g, _regd_5g) \ {.alpha2 = (_alpha2), \ - .chplan = (_chplan), \ - .txpwr_regd = (_txpwr_regd) \ + .txpwr_regd_2g = (_regd_2g), \ + .txpwr_regd_5g = (_regd_5g), \ } +#define rtw_dbg_regd_dump(_dev, _msg, _args...) \ +do { \ + struct rtw_dev *__d = (_dev); \ + const struct rtw_regd *__r = &__d->regd; \ + rtw_dbg(__d, RTW_DBG_REGD, _msg \ + "apply alpha2 %c%c, regd {%d, %d}, dfs_region %d\n",\ + ##_args, \ + __r->regulatory->alpha2[0], \ + __r->regulatory->alpha2[1], \ + __r->regulatory->txpwr_regd_2g, \ + __r->regulatory->txpwr_regd_5g, \ + __r->dfs_region); \ +} while (0) + /* If country code is not correctly defined in efuse, * use worldwide country code and txpwr regd. */ -static const struct rtw_regulatory rtw_defined_chplan = - COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW); - -static const struct rtw_regulatory all_chplan_map[] = { - COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("BT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_IC), - COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CF", 
RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_CHILE), - COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, 
RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK), - COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC3, RTW_REGD_KCC), - COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - 
COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI15, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - 
COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA), - COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("TV", RTW_CHPLAN_ETSI1_NULL, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("VG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC), - COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), - COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI), +static const struct rtw_regulatory rtw_reg_ww = + COUNTRY_REGD_ENT("00", RTW_REGD_WW, RTW_REGD_WW); + +static const struct rtw_regulatory rtw_reg_map[] = { + COUNTRY_REGD_ENT("AD", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AG", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("AI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AN", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("AO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AQ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AR", RTW_REGD_MEXICO, RTW_REGD_MEXICO), + COUNTRY_REGD_ENT("AS", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("AT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("AU", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("AW", 
RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("AZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BB", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("BD", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BJ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BM", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("BN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BO", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("BR", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("BS", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("BT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BV", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BW", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC), + COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CX", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("CY", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("DE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("DJ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("DK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("DM", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("DO", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("DZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("EC", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("EE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("EG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("EH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ER", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ES", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ET", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("FI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("FJ", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("FK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("FM", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GI", 
RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GP", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GQ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GT", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("GU", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("GW", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GY", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("HK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("HM", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("HN", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("HR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("HT", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("HU", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ID", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IQ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("IT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("JE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("JM", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("JO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("JP", RTW_REGD_MKK, RTW_REGD_MKK), + COUNTRY_REGD_ENT("KE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KN", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("KR", RTW_REGD_KCC, RTW_REGD_KCC), + COUNTRY_REGD_ENT("KW", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("KY", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("KZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LB", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LC", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("LI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LU", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LV", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("LY", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MD", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ME", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MF", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("MG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MH", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("MK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ML", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MN", RTW_REGD_ETSI, RTW_REGD_ETSI), + 
COUNTRY_REGD_ENT("MO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MP", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("MQ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MU", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MV", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MW", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MX", RTW_REGD_MEXICO, RTW_REGD_MEXICO), + COUNTRY_REGD_ENT("MY", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("MZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NF", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("NG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NI", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("NL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NP", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("NU", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("NZ", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("OM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PA", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("PE", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("PF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PR", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("PS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("RU", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("RW", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SB", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SC", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("SE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SI", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SJ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SL", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TD", RTW_REGD_ETSI, 
RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TH", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TJ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TK", RTW_REGD_ACMA, RTW_REGD_ACMA), + COUNTRY_REGD_ENT("TM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TO", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TR", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("TT", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW), + COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("UZ", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("VA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("VC", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("VE", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("VG", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("VI", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("VN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("VU", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("WF", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("WS", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("XK", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("YE", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("YT", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ZA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ZM", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("ZW", RTW_REGD_ETSI, RTW_REGD_ETSI), }; -static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy, - enum nl80211_reg_initiator initiator) -{ - enum nl80211_band band; - struct ieee80211_supported_band *sband; - const struct ieee80211_reg_rule *reg_rule; - struct ieee80211_channel *ch; - unsigned int i; - - for (band = 0; band < NUM_NL80211_BANDS; band++) { - if (!wiphy->bands[band]) - continue; - - sband = wiphy->bands[band]; - for (i = 0; i < sband->n_channels; i++) { - ch = &sband->channels[i]; - - reg_rule = freq_reg_info(wiphy, - MHZ_TO_KHZ(ch->center_freq)); - if (IS_ERR(reg_rule)) - continue; - - ch->flags &= ~IEEE80211_CHAN_DISABLED; - - if (!(reg_rule->flags & NL80211_RRF_NO_IR)) - ch->flags &= ~IEEE80211_CHAN_NO_IR; - } - } -} - static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); @@ -321,78 +307,223 @@ static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy) } } -static void rtw_regd_apply_world_flags(struct wiphy *wiphy, - enum nl80211_reg_initiator initiator) +static bool rtw_reg_is_ww(const struct rtw_regulatory *reg) { - rtw_regd_apply_beaconing_flags(wiphy, initiator); + return reg == &rtw_reg_ww; } -static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2) +static bool rtw_reg_match(const struct rtw_regulatory *reg, const char *alpha2) +{ + return memcmp(reg->alpha2, alpha2, 2) == 0; +} + +static const struct rtw_regulatory *rtw_reg_find_by_name(const char *alpha2) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) { - if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2)) - return all_chplan_map[i]; + for (i = 0; i < ARRAY_SIZE(rtw_reg_map); i++) { + if (rtw_reg_match(&rtw_reg_map[i], alpha2)) + return 
&rtw_reg_map[i]; } - return rtw_defined_chplan; + return &rtw_reg_ww; } -static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev, - struct wiphy *wiphy, - struct regulatory_request *request) +static +void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request); + +/* call this before ieee80211_register_hw() */ +int rtw_regd_init(struct rtw_dev *rtwdev) { - if (request->initiator == NL80211_REGDOM_SET_BY_USER) - return 0; - rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2); - rtw_regd_apply_world_flags(wiphy, request->initiator); + struct wiphy *wiphy = rtwdev->hw->wiphy; + const struct rtw_regulatory *chip_reg; - return 0; -} + if (!wiphy) + return -EINVAL; -static int -rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy, - void (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) -{ - wiphy->reg_notifier = reg_notifier; + wiphy->reg_notifier = rtw_regd_notifier; - wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG; - wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG; - wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS; + chip_reg = rtw_reg_find_by_name(rtwdev->efuse.country_code); + if (!rtw_reg_is_ww(chip_reg)) { + rtwdev->regd.state = RTW_REGD_STATE_PROGRAMMED; - rtw_regd_apply_hw_cap_flags(wiphy); + /* Set REGULATORY_STRICT_REG before ieee80211_register_hw(), + * stack will wait for regulatory_hint() and consider it + * as the superset for our regulatory rule. + */ + wiphy->regulatory_flags |= REGULATORY_STRICT_REG; + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + } else { + rtwdev->regd.state = RTW_REGD_STATE_WORLDWIDE; + } + rtwdev->regd.regulatory = &rtw_reg_ww; + rtwdev->regd.dfs_region = NL80211_DFS_UNSET; + rtw_dbg_regd_dump(rtwdev, "regd init state %d: ", rtwdev->regd.state); + + rtw_regd_apply_hw_cap_flags(wiphy); return 0; } -int rtw_regd_init(struct rtw_dev *rtwdev, - void (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) +/* call this after ieee80211_register_hw() */ +int rtw_regd_hint(struct rtw_dev *rtwdev) { struct wiphy *wiphy = rtwdev->hw->wiphy; + int ret; if (!wiphy) return -EINVAL; - rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code); - rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier); + if (rtwdev->regd.state == RTW_REGD_STATE_PROGRAMMED) { + rtw_dbg(rtwdev, RTW_DBG_REGD, + "country domain %c%c is PGed on efuse", + rtwdev->efuse.country_code[0], + rtwdev->efuse.country_code[1]); + + ret = regulatory_hint(wiphy, rtwdev->efuse.country_code); + if (ret) { + rtw_warn(rtwdev, + "failed to hint regulatory: %d\n", ret); + return ret; + } + } return 0; } +static bool rtw_regd_mgmt_worldwide(struct rtw_dev *rtwdev, + struct rtw_regd *next_regd, + struct regulatory_request *request) +{ + struct wiphy *wiphy = rtwdev->hw->wiphy; + + next_regd->state = RTW_REGD_STATE_WORLDWIDE; + + if (request->initiator == NL80211_REGDOM_SET_BY_USER && + !rtw_reg_is_ww(next_regd->regulatory)) { + next_regd->state = RTW_REGD_STATE_SETTING; + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + } + + return true; +} + +static bool rtw_regd_mgmt_programmed(struct rtw_dev *rtwdev, + struct rtw_regd *next_regd, + struct regulatory_request *request) +{ + if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER && + rtw_reg_match(next_regd->regulatory, rtwdev->efuse.country_code)) { + next_regd->state = RTW_REGD_STATE_PROGRAMMED; + return true; + } + + return false; +} + +static bool rtw_regd_mgmt_setting(struct rtw_dev *rtwdev, + struct rtw_regd *next_regd, 
+ struct regulatory_request *request) +{ + struct wiphy *wiphy = rtwdev->hw->wiphy; + + if (request->initiator != NL80211_REGDOM_SET_BY_USER) + return false; + + next_regd->state = RTW_REGD_STATE_SETTING; + + if (rtw_reg_is_ww(next_regd->regulatory)) { + next_regd->state = RTW_REGD_STATE_WORLDWIDE; + wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE; + } + + return true; +} + +static bool (*const rtw_regd_handler[RTW_REGD_STATE_NR]) + (struct rtw_dev *, struct rtw_regd *, struct regulatory_request *) = { + [RTW_REGD_STATE_WORLDWIDE] = rtw_regd_mgmt_worldwide, + [RTW_REGD_STATE_PROGRAMMED] = rtw_regd_mgmt_programmed, + [RTW_REGD_STATE_SETTING] = rtw_regd_mgmt_setting, +}; + +static bool rtw_regd_state_hdl(struct rtw_dev *rtwdev, + struct rtw_regd *next_regd, + struct regulatory_request *request) +{ + next_regd->regulatory = rtw_reg_find_by_name(request->alpha2); + next_regd->dfs_region = request->dfs_region; + return rtw_regd_handler[rtwdev->regd.state](rtwdev, next_regd, request); +} + +static void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtw_dev *rtwdev = hw->priv; struct rtw_hal *hal = &rtwdev->hal; + struct rtw_regd next_regd = {0}; + bool hdl; + + hdl = rtw_regd_state_hdl(rtwdev, &next_regd, request); + if (!hdl) { + rtw_dbg(rtwdev, RTW_DBG_REGD, + "regd state %d: ignore request %c%c of initiator %d\n", + rtwdev->regd.state, + request->alpha2[0], + request->alpha2[1], + request->initiator); + return; + } + + rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n", + rtwdev->regd.state, next_regd.state); - rtw_regd_notifier_apply(rtwdev, wiphy, request); - rtw_dbg(rtwdev, RTW_DBG_REGD, - "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n", - request->alpha2[0], request->alpha2[1], request->initiator, - rtwdev->regd.chplan, rtwdev->regd.txpwr_regd); + rtwdev->regd = next_regd; + rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ", + request->alpha2[0], + request->alpha2[1], + request->initiator); + rtw_phy_adaptivity_set_mode(rtwdev); rtw_phy_set_tx_power_level(rtwdev, hal->current_channel); } + +u8 rtw_regd_get(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + u8 band = hal->current_band_type; + + return band == RTW_BAND_2G ? 
+ rtwdev->regd.regulatory->txpwr_regd_2g : + rtwdev->regd.regulatory->txpwr_regd_5g; +} +EXPORT_SYMBOL(rtw_regd_get); + +struct rtw_regd_alternative_t { + bool set; + u8 alt; +}; + +#define DECL_REGD_ALT(_regd, _regd_alt) \ + [(_regd)] = {.set = true, .alt = (_regd_alt)} + +static const struct rtw_regd_alternative_t +rtw_regd_alt[RTW_REGD_MAX] = { + DECL_REGD_ALT(RTW_REGD_IC, RTW_REGD_FCC), + DECL_REGD_ALT(RTW_REGD_KCC, RTW_REGD_ETSI), + DECL_REGD_ALT(RTW_REGD_ACMA, RTW_REGD_ETSI), + DECL_REGD_ALT(RTW_REGD_CHILE, RTW_REGD_FCC), + DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI), + DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC), + DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI), +}; + +bool rtw_regd_has_alt(u8 regd, u8 *regd_alt) +{ + if (!rtw_regd_alt[regd].set) + return false; + + *regd_alt = rtw_regd_alt[regd].alt; + return true; +} diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h index 5d457833178865..34cb13d0cd9ebc 100644 --- a/drivers/net/wireless/realtek/rtw88/regd.h +++ b/drivers/net/wireless/realtek/rtw88/regd.h @@ -64,8 +64,8 @@ enum country_code_type { COUNTRY_CODE_MAX }; -int rtw_regd_init(struct rtw_dev *rtwdev, - void (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)); -void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request); +int rtw_regd_init(struct rtw_dev *rtwdev); +int rtw_regd_hint(struct rtw_dev *rtwdev); +u8 rtw_regd_get(struct rtw_dev *rtwdev); +bool rtw_regd_has_alt(u8 regd, u8 *regd_alt); #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 785b8181513f13..80a6f4da6acd99 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -14,6 +14,7 @@ #include "reg.h" #include "debug.h" #include "bf.h" +#include "regd.h" static const s8 lna_gain_table_0[8] = {22, 8, -6, -22, -31, -40, -46, -52}; static const s8 lna_gain_table_1[16] = {10, 6, 2, -2, -6, -10, -14, -17, @@ -60,6 +61,9 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) for (i = 0; i < 4; i++) efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; + if (rtwdev->efuse.rfe_option == 2 || rtwdev->efuse.rfe_option == 4) + efuse->txpwr_idx_table[0].pwr_idx_2g = map->txpwr_idx_table[1].pwr_idx_2g; + switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: rtw8821ce_efuse_parsing(efuse, map); @@ -304,7 +308,8 @@ static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw) if (channel <= 14) { if (rtwdev->efuse.rfe_option == 0) rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLG); - else if (rtwdev->efuse.rfe_option == 2) + else if (rtwdev->efuse.rfe_option == 2 || + rtwdev->efuse.rfe_option == 4) rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_BTG); rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1); rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf); @@ -773,6 +778,15 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, if (switch_status == coex_dm->cur_switch_status) return; + if (coex_rfe->wlg_at_btg) { + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + + if (coex_rfe->ant_switch_polarity) + pos_type = COEX_SWITCH_TO_WLA; + else + pos_type = COEX_SWITCH_TO_WLG_BT; + } + coex_dm->cur_switch_status = switch_status; if (coex_rfe->ant_switch_diversity && @@ -993,7 +1007,7 @@ static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev) s8 pwr_idx_offset_lower; u8 channel = rtwdev->hal.current_channel; u8 band_width = rtwdev->hal.current_band_width; - u8 regd = 
rtwdev->regd.txpwr_regd; + u8 regd = rtw_regd_get(rtwdev); u8 tx_rate = dm_info->tx_rate; u8 max_pwr_idx = rtwdev->chip->max_power_index; @@ -1498,6 +1512,7 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = { static const struct rtw_rfe_def rtw8821c_rfe_defs[] = { [0] = RTW_DEF_RFE(8821c, 0, 0), [2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2), + [4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2), }; static struct rtw_hw_reg rtw8821c_dig[] = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index f1789155e90160..c409c8c29ec8b8 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -15,6 +15,7 @@ #include "reg.h" #include "debug.h" #include "bf.h" +#include "regd.h" static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path, u8 rx_path, bool is_tx2_path); @@ -1436,7 +1437,7 @@ static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path) u8 pwr_idx_offset, tx_pwr_idx; u8 channel = rtwdev->hal.current_channel; u8 band_width = rtwdev->hal.current_band_width; - u8 regd = rtwdev->regd.txpwr_regd; + u8 regd = rtw_regd_get(rtwdev); u8 tx_rate = dm_info->tx_rate; u8 max_pwr_idx = rtwdev->chip->max_power_index; @@ -1552,6 +1553,39 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif, rtw_warn(rtwdev, "wrong bfee role\n"); } +static void rtw8822b_adaptivity_init(struct rtw_dev *rtwdev) +{ + rtw_phy_set_edcca_th(rtwdev, RTW8822B_EDCCA_MAX, RTW8822B_EDCCA_MAX); + + /* mac edcca state setting */ + rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA); + rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN); + rtw_write32_mask(rtwdev, REG_EDCCA_SOURCE, BIT_SOURCE_OPTION, + RTW8822B_EDCCA_SRC_DEF); + rtw_write32_mask(rtwdev, REG_EDCCA_POW_MA, BIT_MA_LEVEL, 0); + + /* edcca decision opt */ + rtw_write32_set(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION); +} + +static void rtw8822b_adaptivity(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s8 l2h, h2l; + u8 igi; + + igi = dm_info->igi_history[0]; + if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) { + l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB); + h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL; + } else { + l2h = min_t(s8, igi, dm_info->l2h_th_ini); + h2l = l2h - EDCCA_L2H_H2L_DIFF; + } + + rtw_phy_set_edcca_th(rtwdev, l2h, h2l); +} + static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -2125,6 +2159,8 @@ static struct rtw_chip_ops rtw8822b_ops = { .config_bfee = rtw8822b_bf_config_bfee, .set_gid_table = rtw_bf_set_gid_table, .cfg_csi_rate = rtw_bf_cfg_csi_rate, + .adaptivity_init = rtw8822b_adaptivity_init, + .adaptivity = rtw8822b_adaptivity, .coex_set_init = rtw8822b_coex_cfg_init, .coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch, @@ -2454,6 +2490,11 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = { {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, }; +static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = { + [EDCCA_TH_L2H_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE0}, .offset = 0}, + [EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0}, +}; + struct rtw_chip_info rtw8822b_hw_spec = { .ops = &rtw8822b_ops, .id = RTW_CHIP_TYPE_8822B, @@ -2502,6 +2543,9 @@ struct rtw_chip_info rtw8822b_hw_spec = { .bfer_su_max_num = 2, .bfer_mu_max_num = 1, .rx_ldpc = true, + .edcca_th = rtw8822b_edcca_th, + .l2h_th_ini_cs = 10 + EDCCA_IGI_BASE, + .l2h_th_ini_ad = -14 + EDCCA_IGI_BASE, 
.coex_para_ver = 0x20070206, .bt_desired_ver = 0x6, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h index 6211f4b547b95f..3fff8b881854b6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h @@ -140,6 +140,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8)) +#define RTW8822B_EDCCA_MAX 0x7f +#define RTW8822B_EDCCA_SRC_DEF 1 #define REG_HTSTFWT 0x800 #define REG_RXPSEL 0x808 #define BIT_RX_PSEL_RST (BIT(28) | BIT(29)) @@ -152,11 +154,17 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_L1PKWT 0x840 #define REG_MRC 0x850 #define REG_CLKTRK 0x860 +#define REG_EDCCA_POW_MA 0x8a0 +#define BIT_MA_LEVEL GENMASK(1, 0) #define REG_ADCCLK 0x8ac #define REG_ADC160 0x8c4 #define REG_ADC40 0x8c8 +#define REG_EDCCA_DECISION 0x8dc +#define BIT_EDCCA_OPTION BIT(5) #define REG_CDDTXP 0x93c #define REG_TXPSEL1 0x940 +#define REG_EDCCA_SOURCE 0x944 +#define BIT_SOURCE_OPTION GENMASK(29, 28) #define REG_ACBB0 0x948 #define REG_ACBBRXFIR 0x94c #define REG_ACGG2TBL 0x958 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index f3ad079967a68b..46b881e8e4feb1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -4497,6 +4497,39 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev) dm_info->pwr_trk_triggered = false; } +static void rtw8822c_adaptivity_init(struct rtw_dev *rtwdev) +{ + rtw_phy_set_edcca_th(rtwdev, RTW8822C_EDCCA_MAX, RTW8822C_EDCCA_MAX); + + /* mac edcca state setting */ + rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA); + rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN); + + /* edcca decision opt */ + rtw_write32_clr(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION); +} + +static void rtw8822c_adaptivity(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s8 l2h, h2l; + u8 igi; + + igi = dm_info->igi_history[0]; + if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) { + l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB); + h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL; + } else { + if (igi < dm_info->l2h_th_ini - EDCCA_ADC_BACKOFF) + l2h = igi + EDCCA_ADC_BACKOFF; + else + l2h = dm_info->l2h_th_ini; + h2l = l2h - EDCCA_L2H_H2L_DIFF; + } + + rtw_phy_set_edcca_th(rtwdev, l2h, h2l); +} + static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -4912,6 +4945,8 @@ static struct rtw_chip_ops rtw8822c_ops = { .config_bfee = rtw8822c_bf_config_bfee, .set_gid_table = rtw_bf_set_gid_table, .cfg_csi_rate = rtw_bf_cfg_csi_rate, + .adaptivity_init = rtw8822c_adaptivity_init, + .adaptivity = rtw8822c_adaptivity, .cfo_init = rtw8822c_cfo_init, .cfo_track = rtw8822c_cfo_track, .config_tx_path = rtw8822c_config_tx_path, @@ -5197,6 +5232,15 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = { .pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p, }; +static struct rtw_hw_reg_offset rtw8822c_edcca_th[] = { + [EDCCA_TH_L2H_IDX] = { + {.addr = 0x84c, .mask = MASKBYTE2}, .offset = 0x80 + }, + [EDCCA_TH_H2L_IDX] = { + {.addr = 0x84c, .mask = MASKBYTE3}, .offset = 0x80 + }, +}; + #ifdef CONFIG_PM static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = { .flags = WIPHY_WOWLAN_MAGIC_PKT | 
WIPHY_WOWLAN_GTK_REKEY_FAILURE | @@ -5289,6 +5333,9 @@ struct rtw_chip_info rtw8822c_hw_spec = { .bfer_mu_max_num = 1, .rx_ldpc = true, .tx_stbc = true, + .edcca_th = rtw8822c_edcca_th, + .l2h_th_ini_cs = 60, + .l2h_th_ini_ad = 45, #ifdef CONFIG_PM .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin", diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index 364afc6d851bf9..3df627419d81b0 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -162,6 +162,7 @@ const struct rtw_table name ## _tbl = { \ #define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8)) +#define RTW8822C_EDCCA_MAX 0x7f #define REG_ANAPARLDO_POW_MAC 0x0029 #define BIT_LDOE25_PON BIT(0) #define XCAP_MASK GENMASK(6, 0) @@ -174,6 +175,8 @@ const struct rtw_table name ## _tbl = { \ #define REG_ANTMAP0 0x820 #define BIT_ANT_PATH GENMASK(1, 0) #define REG_ANTMAP 0x824 +#define REG_EDCCA_DECISION 0x844 +#define BIT_EDCCA_OPTION GENMASK(30, 29) #define REG_DYMPRITH 0x86c #define REG_DYMENTH0 0x870 #define REG_DYMENTH 0x874 diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig new file mode 100644 index 00000000000000..37e5def24d9fdf --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +menuconfig RTW89 + tristate "Realtek 802.11ax wireless chips support" + depends on MAC80211 + help + This module adds support for mac80211-based wireless drivers that + enable Realtek IEEE 802.11ax wireless chipsets. + + If you choose to build a module, it'll be called rtw89. + +if RTW89 + +config RTW89_CORE + tristate + +config RTW89_PCI + tristate + +config RTW89_8852AE + tristate "Realtek 8852AE PCI wireless network adapter" + depends on PCI + select RTW89_CORE + select RTW89_PCI + help + Select this option to enable support for the 8852AE chipset + + 802.11ax PCIe wireless network adapter + +config RTW89_DEBUG + bool + +config RTW89_DEBUGMSG + bool "Realtek rtw89 debug message support" + depends on RTW89_CORE + select RTW89_DEBUG + help + Enable debug message support + + If unsure, say Y to simplify debugging problems + +config RTW89_DEBUGFS + bool "Realtek rtw89 debugfs support" + depends on RTW89_CORE + select RTW89_DEBUG + help + Enable debugfs support + + If unsure, say Y to simplify debugging problems + +endif diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile new file mode 100644 index 00000000000000..077e8fe23f6041 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +obj-$(CONFIG_RTW89_CORE) += rtw89_core.o +rtw89_core-y += core.o \ + mac80211.o \ + mac.o \ + phy.o \ + fw.o \ + rtw8852a.o \ + rtw8852a_table.o \ + rtw8852a_rfk.o \ + rtw8852a_rfk_table.o \ + cam.o \ + efuse.o \ + regd.o \ + sar.o \ + coex.o \ + ps.o \ + ser.o + +rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o + +obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o +rtw89_pci-y := pci.o + diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c new file mode 100644 index 00000000000000..ad7a8155dbed36 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -0,0 +1,695 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "cam.h" +#include "debug.h" +#include "fw.h" +#include 
"mac.h" + +static struct sk_buff * +rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev, + struct rtw89_sec_cam_entry *sec_cam, + bool ext_key) +{ + struct sk_buff *skb; + u32 cmd_len = H2C_SEC_CAM_LEN; + u32 key32[4]; + u8 *cmd; + int i, j; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(cmd_len); + if (!skb) + return NULL; + + skb_put_zero(skb, cmd_len); + + for (i = 0; i < 4; i++) { + j = i * 4; + j += ext_key ? 16 : 0; + key32[i] = FIELD_PREP(GENMASK(7, 0), sec_cam->key[j + 0]) | + FIELD_PREP(GENMASK(15, 8), sec_cam->key[j + 1]) | + FIELD_PREP(GENMASK(23, 16), sec_cam->key[j + 2]) | + FIELD_PREP(GENMASK(31, 24), sec_cam->key[j + 3]); + } + + cmd = skb->data; + RTW89_SET_FWCMD_SEC_IDX(cmd, sec_cam->sec_cam_idx + (ext_key ? 1 : 0)); + RTW89_SET_FWCMD_SEC_OFFSET(cmd, sec_cam->offset); + RTW89_SET_FWCMD_SEC_LEN(cmd, sec_cam->len); + RTW89_SET_FWCMD_SEC_TYPE(cmd, sec_cam->type); + RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, ext_key); + RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, sec_cam->spp_mode); + RTW89_SET_FWCMD_SEC_KEY0(cmd, key32[0]); + RTW89_SET_FWCMD_SEC_KEY1(cmd, key32[1]); + RTW89_SET_FWCMD_SEC_KEY2(cmd, key32[2]); + RTW89_SET_FWCMD_SEC_KEY3(cmd, key32[3]); + + return skb; +} + +static int rtw89_cam_send_sec_key_cmd(struct rtw89_dev *rtwdev, + struct rtw89_sec_cam_entry *sec_cam) +{ + struct sk_buff *skb, *ext_skb; + int ret; + + skb = rtw89_cam_get_sec_key_cmd(rtwdev, sec_cam, false); + if (!skb) { + rtw89_err(rtwdev, "failed to get sec key command\n"); + return -ENOMEM; + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, + FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_SEC_CAM, + H2C_FUNC_MAC_SEC_UPD, 1, 0, + H2C_SEC_CAM_LEN); + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send sec key h2c: %d\n", ret); + dev_kfree_skb(skb); + return ret; + } + + if (!sec_cam->ext_key) + return 0; + + ext_skb = rtw89_cam_get_sec_key_cmd(rtwdev, sec_cam, true); + if (!ext_skb) { + rtw89_err(rtwdev, "failed to get ext sec key command\n"); + return -ENOMEM; + } + + rtw89_h2c_pkt_set_hdr(rtwdev, ext_skb, + FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_SEC_CAM, + H2C_FUNC_MAC_SEC_UPD, + 1, 0, H2C_SEC_CAM_LEN); + ret = rtw89_h2c_tx(rtwdev, ext_skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send ext sec key h2c: %d\n", ret); + dev_kfree_skb(ext_skb); + return ret; + } + + return 0; +} + +static int rtw89_cam_get_avail_sec_cam(struct rtw89_dev *rtwdev, + u8 *sec_cam_idx, bool ext_key) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + u8 sec_cam_num = chip->scam_num; + u8 idx = 0; + + if (!ext_key) { + idx = find_first_zero_bit(cam_info->sec_cam_map, sec_cam_num); + if (idx >= sec_cam_num) + return -EBUSY; + + set_bit(idx, cam_info->sec_cam_map); + *sec_cam_idx = idx; + + return 0; + } + +again: + idx = find_next_zero_bit(cam_info->sec_cam_map, sec_cam_num, idx); + if (idx >= sec_cam_num - 1) + return -EBUSY; + /* ext keys need two cam entries for 256-bit key */ + if (test_bit(idx + 1, cam_info->sec_cam_map)) { + idx++; + goto again; + } + + set_bit(idx, cam_info->sec_cam_map); + set_bit(idx + 1, cam_info->sec_cam_map); + *sec_cam_idx = idx; + + return 0; +} + +static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam, + struct rtw89_sec_cam_entry *sec_cam, + struct ieee80211_key_conf *key, + u8 *key_idx) +{ + u8 idx; + + /* RTW89_ADDR_CAM_SEC_NONE : not enabled + * RTW89_ADDR_CAM_SEC_ALL_UNI : 0 - 6 unicast + * RTW89_ADDR_CAM_SEC_NORMAL : 0 - 1 unicast, 2 - 4 group, 5 - 6 BIP + * RTW89_ADDR_CAM_SEC_4GROUP : 0 - 1 
unicast, 2 - 5 group, 6 BIP + */ + switch (addr_cam->sec_ent_mode) { + case RTW89_ADDR_CAM_SEC_NONE: + return -EINVAL; + case RTW89_ADDR_CAM_SEC_ALL_UNI: + if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EINVAL; + idx = find_first_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM); + if (idx >= RTW89_SEC_CAM_IN_ADDR_CAM) + return -EBUSY; + *key_idx = idx; + break; + case RTW89_ADDR_CAM_SEC_NORMAL: + if (sec_cam->type == RTW89_SEC_KEY_TYPE_BIP_CCMP128) { + idx = find_next_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM, 5); + if (idx > 6) + return -EBUSY; + *key_idx = idx; + break; + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + idx = find_next_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM, 0); + if (idx > 1) + return -EBUSY; + *key_idx = idx; + break; + } + + /* Group keys */ + idx = find_next_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM, 2); + if (idx > 4) + return -EBUSY; + *key_idx = idx; + break; + case RTW89_ADDR_CAM_SEC_4GROUP: + if (sec_cam->type == RTW89_SEC_KEY_TYPE_BIP_CCMP128) { + if (test_bit(6, addr_cam->sec_cam_map)) + return -EINVAL; + *key_idx = 6; + break; + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + idx = find_next_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM, 0); + if (idx > 1) + return -EBUSY; + *key_idx = idx; + break; + } + + /* Group keys */ + idx = find_next_zero_bit(addr_cam->sec_cam_map, + RTW89_SEC_CAM_IN_ADDR_CAM, 2); + if (idx > 5) + return -EBUSY; + *key_idx = idx; + break; + } + + return 0; +} + +static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + struct rtw89_sec_cam_entry *sec_cam) +{ + struct rtw89_vif *rtwvif; + struct rtw89_addr_cam_entry *addr_cam; + u8 key_idx = 0; + int ret; + + if (!vif) { + rtw89_err(rtwdev, "No iface for adding sec cam\n"); + return -EINVAL; + } + + rtwvif = (struct rtw89_vif *)vif->drv_priv; + addr_cam = &rtwvif->addr_cam; + ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx); + if (ret) { + rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n", + addr_cam->sec_ent_mode, sec_cam->type); + return ret; + } + + key->hw_key_idx = key_idx; + addr_cam->sec_ent_keyid[key_idx] = key->keyidx; + addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx; + addr_cam->sec_entries[key_idx] = sec_cam; + set_bit(key_idx, addr_cam->sec_cam_map); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + if (ret) { + rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n", + ret); + clear_bit(key_idx, addr_cam->sec_cam_map); + addr_cam->sec_entries[key_idx] = NULL; + return ret; + } + + return 0; +} + +static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + u8 hw_key_type, bool ext_key) +{ + struct rtw89_sec_cam_entry *sec_cam = NULL; + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + u8 sec_cam_idx; + int ret; + + /* maximum key length 256-bit */ + if (key->keylen > 32) { + rtw89_err(rtwdev, "invalid sec key length %d\n", key->keylen); + return -EINVAL; + } + + ret = rtw89_cam_get_avail_sec_cam(rtwdev, &sec_cam_idx, ext_key); + if (ret) { + rtw89_warn(rtwdev, "no available sec cam: %d ext: %d\n", + ret, ext_key); + return ret; + } + + sec_cam = kzalloc(sizeof(*sec_cam), GFP_KERNEL); + if (!sec_cam) { + ret = -ENOMEM; + goto err_release_cam; + } + + sec_cam->sec_cam_idx = sec_cam_idx; + sec_cam->type = hw_key_type; + sec_cam->len = 
RTW89_SEC_CAM_LEN; + sec_cam->ext_key = ext_key; + memcpy(sec_cam->key, key->key, key->keylen); + ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam); + if (ret) { + rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret); + goto err_release_cam; + } + + /* associate with addr cam */ + ret = rtw89_cam_attach_sec_cam(rtwdev, vif, sta, key, sec_cam); + if (ret) { + rtw89_err(rtwdev, "failed to attach sec cam: %d\n", ret); + goto err_release_cam; + } + + return 0; + +err_release_cam: + kfree(sec_cam); + clear_bit(sec_cam_idx, cam_info->sec_cam_map); + if (ext_key) + clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map); + + return ret; +} + +int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + u8 hw_key_type; + bool ext_key = false; + int ret; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + hw_key_type = RTW89_SEC_KEY_TYPE_WEP40; + break; + case WLAN_CIPHER_SUITE_WEP104: + hw_key_type = RTW89_SEC_KEY_TYPE_WEP104; + break; + case WLAN_CIPHER_SUITE_CCMP: + hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + break; + case WLAN_CIPHER_SUITE_CCMP_256: + hw_key_type = RTW89_SEC_KEY_TYPE_CCMP256; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + ext_key = true; + break; + case WLAN_CIPHER_SUITE_GCMP: + hw_key_type = RTW89_SEC_KEY_TYPE_GCMP128; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + hw_key_type = RTW89_SEC_KEY_TYPE_GCMP256; + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + ext_key = true; + break; + default: + return -EOPNOTSUPP; + } + + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + ret = rtw89_cam_sec_key_install(rtwdev, vif, sta, key, hw_key_type, + ext_key); + if (ret) { + rtw89_err(rtwdev, "failed to install key type %d ext %d: %d\n", + hw_key_type, ext_key, ret); + return ret; + } + + return 0; +} + +int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + bool inform_fw) +{ + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + struct rtw89_vif *rtwvif; + struct rtw89_addr_cam_entry *addr_cam; + struct rtw89_sec_cam_entry *sec_cam; + u8 key_idx = key->hw_key_idx; + u8 sec_cam_idx; + int ret = 0; + + if (!vif) { + rtw89_err(rtwdev, "No iface for deleting sec cam\n"); + return -EINVAL; + } + + rtwvif = (struct rtw89_vif *)vif->drv_priv; + addr_cam = &rtwvif->addr_cam; + sec_cam = addr_cam->sec_entries[key_idx]; + if (!sec_cam) + return -EINVAL; + + /* detach sec cam from addr cam */ + clear_bit(key_idx, addr_cam->sec_cam_map); + addr_cam->sec_entries[key_idx] = NULL; + if (inform_fw) { + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + if (ret) + rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret); + } + + /* clear valid bit in addr cam will disable sec cam, + * so we don't need to send H2C command again + */ + sec_cam_idx = sec_cam->sec_cam_idx; + clear_bit(sec_cam_idx, cam_info->sec_cam_map); + if (sec_cam->ext_key) + clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map); + + kfree(sec_cam); + + return ret; +} + +static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + rtw89_cam_sec_key_del(rtwdev, vif, sta, key, false); + rtw89_cam_deinit(rtwdev, rtwvif); +} + +void 
rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; + + addr_cam->valid = false; + bssid_cam->valid = false; + clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map); + clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map); +} + +void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev) +{ + rcu_read_lock(); + ieee80211_iter_keys_rcu(rtwdev->hw, NULL, rtw89_cam_reset_key_iter, rtwdev); + rcu_read_unlock(); +} + +static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev, + u8 *addr_cam_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + u8 addr_cam_num = chip->acam_num; + u8 idx; + + idx = find_first_zero_bit(cam_info->addr_cam_map, addr_cam_num); + if (idx >= addr_cam_num) + return -EBUSY; + + set_bit(idx, cam_info->addr_cam_map); + *addr_cam_idx = idx; + + return 0; +} + +static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + u8 addr_cam_idx; + int i; + int ret; + + ret = rtw89_cam_get_avail_addr_cam(rtwdev, &addr_cam_idx); + if (ret) { + rtw89_err(rtwdev, "failed to get available addr cam\n"); + return ret; + } + + addr_cam->addr_cam_idx = addr_cam_idx; + addr_cam->len = ADDR_CAM_ENT_SIZE; + addr_cam->offset = 0; + addr_cam->valid = true; + addr_cam->addr_mask = 0; + addr_cam->mask_sel = RTW89_NO_MSK; + bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM); + ether_addr_copy(addr_cam->sma, rtwvif->mac_addr); + + for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) { + addr_cam->sec_ent_keyid[i] = 0; + addr_cam->sec_ent[i] = 0; + } + + return 0; +} + +static int rtw89_cam_get_avail_bssid_cam(struct rtw89_dev *rtwdev, + u8 *bssid_cam_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + u8 bssid_cam_num = chip->bcam_num; + u8 idx; + + idx = find_first_zero_bit(cam_info->bssid_cam_map, bssid_cam_num); + if (idx >= bssid_cam_num) + return -EBUSY; + + set_bit(idx, cam_info->bssid_cam_map); + *bssid_cam_idx = idx; + + return 0; +} + +static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; + u8 bssid_cam_idx; + int ret; + + ret = rtw89_cam_get_avail_bssid_cam(rtwdev, &bssid_cam_idx); + if (ret) { + rtw89_err(rtwdev, "failed to get available bssid cam\n"); + return ret; + } + + bssid_cam->bssid_cam_idx = bssid_cam_idx; + bssid_cam->phy_idx = rtwvif->phy_idx; + bssid_cam->len = BSSID_CAM_ENT_SIZE; + bssid_cam->offset = 0; + bssid_cam->valid = true; + ether_addr_copy(bssid_cam->bssid, rtwvif->bssid); + + return 0; +} + +void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; + + if (vif->type == NL80211_IFTYPE_STATION) + ether_addr_copy(addr_cam->tma, rtwvif->bssid); + ether_addr_copy(bssid_cam->bssid, rtwvif->bssid); +} + +int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; + int ret; + + ret = 
rtw89_cam_init_addr_cam(rtwdev, rtwvif); + if (ret) { + rtw89_err(rtwdev, "failed to init addr cam\n"); + return ret; + } + + ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif); + if (ret) { + rtw89_err(rtwdev, "failed to init bssid cam\n"); + return ret; + } + + /* associate addr cam with bssid cam */ + addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx; + + return 0; +} + +int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u8 *cmd) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; + u8 bss_color = vif->bss_conf.he_bss_color.color; + + FWCMD_SET_ADDR_BSSID_IDX(cmd, bssid_cam->bssid_cam_idx); + FWCMD_SET_ADDR_BSSID_OFFSET(cmd, bssid_cam->offset); + FWCMD_SET_ADDR_BSSID_LEN(cmd, bssid_cam->len); + FWCMD_SET_ADDR_BSSID_VALID(cmd, bssid_cam->valid); + FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, bssid_cam->phy_idx); + FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, bss_color); + + FWCMD_SET_ADDR_BSSID_BSSID0(cmd, bssid_cam->bssid[0]); + FWCMD_SET_ADDR_BSSID_BSSID1(cmd, bssid_cam->bssid[1]); + FWCMD_SET_ADDR_BSSID_BSSID2(cmd, bssid_cam->bssid[2]); + FWCMD_SET_ADDR_BSSID_BSSID3(cmd, bssid_cam->bssid[3]); + FWCMD_SET_ADDR_BSSID_BSSID4(cmd, bssid_cam->bssid[4]); + FWCMD_SET_ADDR_BSSID_BSSID5(cmd, bssid_cam->bssid[5]); + + return 0; +} + +static u8 rtw89_cam_addr_hash(u8 start, u8 *addr) +{ + u8 hash = 0; + u8 i; + + for (i = start; i < ETH_ALEN; i++) + hash ^= addr[i]; + + return hash; +} + +void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + u8 *cmd) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct ieee80211_sta *sta; + struct rtw89_sta *rtwsta; + struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + u8 sma_hash, tma_hash, addr_msk_start; + u8 sma_start = 0; + u8 tma_start = 0; + + if (addr_cam->addr_mask != 0) { + addr_msk_start = __ffs(addr_cam->addr_mask); + if (addr_cam->mask_sel == RTW89_SMA) + sma_start = addr_msk_start; + else if (addr_cam->mask_sel == RTW89_TMA) + tma_start = addr_msk_start; + } + sma_hash = rtw89_cam_addr_hash(sma_start, rtwvif->mac_addr); + tma_hash = rtw89_cam_addr_hash(tma_start, addr_cam->tma); + + FWCMD_SET_ADDR_IDX(cmd, addr_cam->addr_cam_idx); + FWCMD_SET_ADDR_OFFSET(cmd, addr_cam->offset); + FWCMD_SET_ADDR_LEN(cmd, addr_cam->len); + + FWCMD_SET_ADDR_VALID(cmd, addr_cam->valid); + FWCMD_SET_ADDR_NET_TYPE(cmd, rtwvif->net_type); + FWCMD_SET_ADDR_BCN_HIT_COND(cmd, rtwvif->bcn_hit_cond); + FWCMD_SET_ADDR_HIT_RULE(cmd, rtwvif->hit_rule); + FWCMD_SET_ADDR_BB_SEL(cmd, rtwvif->phy_idx); + FWCMD_SET_ADDR_ADDR_MASK(cmd, addr_cam->addr_mask); + FWCMD_SET_ADDR_MASK_SEL(cmd, addr_cam->mask_sel); + FWCMD_SET_ADDR_SMA_HASH(cmd, sma_hash); + FWCMD_SET_ADDR_TMA_HASH(cmd, tma_hash); + + FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, addr_cam->bssid_cam_idx); + + FWCMD_SET_ADDR_SMA0(cmd, rtwvif->mac_addr[0]); + FWCMD_SET_ADDR_SMA1(cmd, rtwvif->mac_addr[1]); + FWCMD_SET_ADDR_SMA2(cmd, rtwvif->mac_addr[2]); + FWCMD_SET_ADDR_SMA3(cmd, rtwvif->mac_addr[3]); + FWCMD_SET_ADDR_SMA4(cmd, rtwvif->mac_addr[4]); + FWCMD_SET_ADDR_SMA5(cmd, rtwvif->mac_addr[5]); + + FWCMD_SET_ADDR_TMA0(cmd, addr_cam->tma[0]); + FWCMD_SET_ADDR_TMA1(cmd, addr_cam->tma[1]); + FWCMD_SET_ADDR_TMA2(cmd, addr_cam->tma[2]); + FWCMD_SET_ADDR_TMA3(cmd, addr_cam->tma[3]); + FWCMD_SET_ADDR_TMA4(cmd, addr_cam->tma[4]); + FWCMD_SET_ADDR_TMA5(cmd, addr_cam->tma[5]); + + FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif->port); + FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif->port); + FWCMD_SET_ADDR_TF_TRS(cmd, 
rtwvif->trigger); + FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif->lsig_txop); + FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif->tgt_ind); + FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind); + + if (vif->type == NL80211_IFTYPE_STATION) { + sta = rtwvif->mgd.ap; + if (sta) { + rtwsta = (struct rtw89_sta *)sta->drv_priv; + FWCMD_SET_ADDR_MACID(cmd, rtwsta->mac_id); + FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff); + } + } + FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern); + FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif->wowlan_uc); + FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif->wowlan_magic); + FWCMD_SET_ADDR_WAPI(cmd, addr_cam->wapi); + FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, addr_cam->sec_ent_mode); + FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, addr_cam->sec_ent_keyid[0]); + FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, addr_cam->sec_ent_keyid[1]); + FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, addr_cam->sec_ent_keyid[2]); + FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, addr_cam->sec_ent_keyid[3]); + FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, addr_cam->sec_ent_keyid[4]); + FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, addr_cam->sec_ent_keyid[5]); + FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, addr_cam->sec_ent_keyid[6]); + + FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, addr_cam->sec_cam_map[0] & 0xff); + FWCMD_SET_ADDR_SEC_ENT0(cmd, addr_cam->sec_ent[0]); + FWCMD_SET_ADDR_SEC_ENT1(cmd, addr_cam->sec_ent[1]); + FWCMD_SET_ADDR_SEC_ENT2(cmd, addr_cam->sec_ent[2]); + FWCMD_SET_ADDR_SEC_ENT3(cmd, addr_cam->sec_ent[3]); + FWCMD_SET_ADDR_SEC_ENT4(cmd, addr_cam->sec_ent[4]); + FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]); + FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]); +} diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h new file mode 100644 index 00000000000000..90a20a5375c6b3 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/cam.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_CAM_H__ +#define __RTW89_CAM_H__ + +#include "core.h" + +#define RTW89_SEC_CAM_LEN 20 + +#define FWCMD_SET_ADDR_IDX(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_OFFSET(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_LEN(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_VALID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0)) +#define FWCMD_SET_ADDR_NET_TYPE(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1)) +#define FWCMD_SET_ADDR_BCN_HIT_COND(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3)) +#define FWCMD_SET_ADDR_HIT_RULE(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5)) +#define FWCMD_SET_ADDR_BB_SEL(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7)) +#define FWCMD_SET_ADDR_ADDR_MASK(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8)) +#define FWCMD_SET_ADDR_MASK_SEL(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14)) +#define FWCMD_SET_ADDR_SMA_HASH(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_TMA_HASH(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0)) +#define FWCMD_SET_ADDR_SMA0(cmd, value) \ + le32p_replace_bits((__le32 
*)(cmd) + 4, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_SMA1(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_SMA2(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_SMA3(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_SMA4(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_SMA5(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_TMA0(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_TMA1(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_TMA2(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_TMA3(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_TMA4(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_TMA5(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_MACID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_PORT_INT(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8)) +#define FWCMD_SET_ADDR_TSF_SYNC(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11)) +#define FWCMD_SET_ADDR_TF_TRS(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14)) +#define FWCMD_SET_ADDR_LSIG_TXOP(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15)) +#define FWCMD_SET_ADDR_TGT_IND(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24)) +#define FWCMD_SET_ADDR_FRM_TGT_IND(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27)) +#define FWCMD_SET_ADDR_AID12(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0)) +#define FWCMD_SET_ADDR_AID12_0(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_AID12_1(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8)) +#define FWCMD_SET_ADDR_WOL_PATTERN(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12)) +#define FWCMD_SET_ADDR_WOL_UC(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13)) +#define FWCMD_SET_ADDR_WOL_MAGIC(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14)) +#define FWCMD_SET_ADDR_WAPI(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15)) +#define FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16)) +#define FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18)) +#define FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20)) +#define FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22)) +#define FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24)) +#define FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26)) +#define FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, 
value, GENMASK(29, 28)) +#define FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30)) +#define FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_SEC_ENT0(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_SEC_ENT1(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_SEC_ENT2(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_SEC_ENT3(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_SEC_ENT4(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_SEC_ENT5(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_SEC_ENT6(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_BSSID_IDX(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_BSSID_OFFSET(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_BSSID_LEN(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_BSSID_VALID(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0)) +#define FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1)) +#define FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8)) +#define FWCMD_SET_ADDR_BSSID_BSSID0(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_BSSID_BSSID1(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24)) +#define FWCMD_SET_ADDR_BSSID_BSSID2(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0)) +#define FWCMD_SET_ADDR_BSSID_BSSID3(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8)) +#define FWCMD_SET_ADDR_BSSID_BSSID4(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16)) +#define FWCMD_SET_ADDR_BSSID_BSSID5(cmd, value) \ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24)) + +int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); +void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); +void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, + struct rtw89_vif *vif, u8 *cmd); +int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev, + struct rtw89_vif *vif, u8 *cmd); +int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + bool inform_fw); +void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif); +void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev); +#endif diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c new file mode 100644 index 00000000000000..abe4b6549ab228 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -0,0 +1,5716 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 
Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "ps.h" +#include "reg.h" + +#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/ + +enum btc_fbtc_tdma_template { + CXTD_OFF = 0x0, + CXTD_OFF_B2, + CXTD_OFF_EXT, + CXTD_FIX, + CXTD_PFIX, + CXTD_AUTO, + CXTD_PAUTO, + CXTD_AUTO2, + CXTD_PAUTO2, + CXTD_MAX, +}; + +enum btc_fbtc_tdma_type { + CXTDMA_OFF = 0x0, + CXTDMA_FIX = 0x1, + CXTDMA_AUTO = 0x2, + CXTDMA_AUTO2 = 0x3, + CXTDMA_MAX +}; + +enum btc_fbtc_tdma_rx_flow_ctrl { + CXFLC_OFF = 0x0, + CXFLC_NULLP = 0x1, + CXFLC_QOSNULL = 0x2, + CXFLC_CTS = 0x3, + CXFLC_MAX +}; + +enum btc_fbtc_tdma_wlan_tx_pause { + CXTPS_OFF = 0x0, /* no wl tx pause*/ + CXTPS_ON = 0x1, + CXTPS_MAX +}; + +enum btc_mlme_state { + MLME_NO_LINK, + MLME_LINKING, + MLME_LINKED, +}; + +#define FCXONESLOT_VER 1 +struct btc_fbtc_1slot { + u8 fver; + u8 sid; /* slot id */ + struct rtw89_btc_fbtc_slot slot; +} __packed; + +static const struct rtw89_btc_fbtc_tdma t_def[] = { + [CXTD_OFF] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0}, + [CXTD_OFF_B2] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 1, 0, 0}, + [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 3, 0, 0}, + [CXTD_FIX] = { CXTDMA_FIX, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0}, + [CXTD_PFIX] = { CXTDMA_FIX, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0}, + [CXTD_AUTO] = { CXTDMA_AUTO, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0}, + [CXTD_PAUTO] = { CXTDMA_AUTO, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0}, + [CXTD_AUTO2] = {CXTDMA_AUTO2, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0}, + [CXTD_PAUTO2] = {CXTDMA_AUTO2, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0} +}; + +#define __DEF_FBTC_SLOT(__dur, __cxtbl, __cxtype) \ + { .dur = cpu_to_le16(__dur), .cxtbl = cpu_to_le32(__cxtbl), \ + .cxtype = cpu_to_le16(__cxtype),} + +static const struct rtw89_btc_fbtc_slot s_def[] = { + [CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX), + [CXST_B2W] = __DEF_FBTC_SLOT(5, 0x5a5a5a5a, SLOT_ISO), + [CXST_W1] = __DEF_FBTC_SLOT(70, 0x5a5a5a5a, SLOT_ISO), + [CXST_W2] = __DEF_FBTC_SLOT(70, 0x5a5a5aaa, SLOT_ISO), + [CXST_W2B] = __DEF_FBTC_SLOT(15, 0x5a5a5a5a, SLOT_ISO), + [CXST_B1] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX), + [CXST_B2] = __DEF_FBTC_SLOT(7, 0x6a5a5a5a, SLOT_MIX), + [CXST_B3] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX), + [CXST_B4] = __DEF_FBTC_SLOT(50, 0x55555555, SLOT_MIX), + [CXST_LK] = __DEF_FBTC_SLOT(20, 0x5a5a5a5a, SLOT_ISO), + [CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX), + [CXST_E2G] = __DEF_FBTC_SLOT(20, 0x6a5a5a5a, SLOT_MIX), + [CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX), + [CXST_EBT] = __DEF_FBTC_SLOT(20, 0x55555555, SLOT_MIX), + [CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO), + [CXST_WLK] = __DEF_FBTC_SLOT(250, 0x6a5a6a5a, SLOT_MIX), + [CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO), + [CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX), +}; + +static const u32 cxtbl[] = { + 0xffffffff, /* 0 */ + 0xaaaaaaaa, /* 1 */ + 0x55555555, /* 2 */ + 0x66555555, /* 3 */ + 0x66556655, /* 4 */ + 0x5a5a5a5a, /* 5 */ + 0x5a5a5aaa, /* 6 */ + 0xaa5a5a5a, /* 7 */ + 0x6a5a5a5a, /* 8 */ + 0x6a5a5aaa, /* 9 */ + 0x6a5a6a5a, /* 10 */ + 0x6a5a6aaa, /* 11 */ + 0x6afa5afa, /* 12 */ + 0xaaaa5aaa, /* 13 */ + 0xaaffffaa, /* 14 */ + 0xaa5555aa, /* 15 */ + 0xfafafafa, /* 16 */ + 0xffffddff, /* 17 */ + 0xdaffdaff, /* 18 */ + 0xfafadafa /* 19 */ +}; + +struct rtw89_btc_btf_tlv { + u8 type; + u8 len; + u8 val[1]; +} __packed; + +enum btc_btf_set_report_en { + RPT_EN_TDMA = BIT(0), + RPT_EN_CYCLE = 
BIT(1), + RPT_EN_MREG = BIT(2), + RPT_EN_BT_VER_INFO = BIT(3), + RPT_EN_BT_SCAN_INFO = BIT(4), + RPT_EN_BT_AFH_MAP = BIT(5), + RPT_EN_BT_DEVICE_INFO = BIT(6), + RPT_EN_WL_ALL = GENMASK(2, 0), + RPT_EN_BT_ALL = GENMASK(6, 3), + RPT_EN_ALL = GENMASK(6, 0), +}; + +#define BTF_SET_REPORT_VER 1 +struct rtw89_btc_btf_set_report { + u8 fver; + __le32 enable; + __le32 para; +} __packed; + +#define BTF_SET_SLOT_TABLE_VER 1 +struct rtw89_btc_btf_set_slot_table { + u8 fver; + u8 tbl_num; + u8 buf[]; +} __packed; + +#define BTF_SET_MON_REG_VER 1 +struct rtw89_btc_btf_set_mon_reg { + u8 fver; + u8 reg_num; + u8 buf[]; +} __packed; + +enum btc_btf_set_cx_policy { + CXPOLICY_TDMA = 0x0, + CXPOLICY_SLOT = 0x1, + CXPOLICY_TYPE = 0x2, + CXPOLICY_MAX, +}; + +enum btc_b2w_scoreboard { + BTC_BSCB_ACT = BIT(0), + BTC_BSCB_ON = BIT(1), + BTC_BSCB_WHQL = BIT(2), + BTC_BSCB_BT_S1 = BIT(3), + BTC_BSCB_A2DP_ACT = BIT(4), + BTC_BSCB_RFK_RUN = BIT(5), + BTC_BSCB_RFK_REQ = BIT(6), + BTC_BSCB_LPS = BIT(7), + BTC_BSCB_WLRFK = BIT(11), + BTC_BSCB_BT_HILNA = BIT(13), + BTC_BSCB_BT_CONNECT = BIT(16), + BTC_BSCB_PATCH_CODE = BIT(30), + BTC_BSCB_ALL = GENMASK(30, 0), +}; + +enum btc_phymap { + BTC_PHY_0 = BIT(0), + BTC_PHY_1 = BIT(1), + BTC_PHY_ALL = BIT(0) | BIT(1), +}; + +enum btc_cx_state_map { + BTC_WIDLE = 0, + BTC_WBUSY_BNOSCAN, + BTC_WBUSY_BSCAN, + BTC_WSCAN_BNOSCAN, + BTC_WSCAN_BSCAN, + BTC_WLINKING +}; + +enum btc_ant_phase { + BTC_ANT_WPOWERON = 0, + BTC_ANT_WINIT, + BTC_ANT_WONLY, + BTC_ANT_WOFF, + BTC_ANT_W2G, + BTC_ANT_W5G, + BTC_ANT_W25G, + BTC_ANT_FREERUN, + BTC_ANT_WRFK, + BTC_ANT_BRFK, + BTC_ANT_MAX +}; + +enum btc_plt { + BTC_PLT_NONE = 0, + BTC_PLT_LTE_RX = BIT(0), + BTC_PLT_GNT_BT_TX = BIT(1), + BTC_PLT_GNT_BT_RX = BIT(2), + BTC_PLT_GNT_WL = BIT(3), + BTC_PLT_BT = BIT(1) | BIT(2), + BTC_PLT_ALL = 0xf +}; + +enum btc_cx_poicy_main_type { + BTC_CXP_OFF = 0, + BTC_CXP_OFFB, + BTC_CXP_OFFE, + BTC_CXP_FIX, + BTC_CXP_PFIX, + BTC_CXP_AUTO, + BTC_CXP_PAUTO, + BTC_CXP_AUTO2, + BTC_CXP_PAUTO2, + BTC_CXP_MANUAL, + BTC_CXP_USERDEF0, + BTC_CXP_MAIN_MAX +}; + +enum btc_cx_poicy_type { + /* TDMA off + pri: BT > WL */ + BTC_CXP_OFF_BT = (BTC_CXP_OFF << 8) | 0, + + /* TDMA off + pri: WL > BT */ + BTC_CXP_OFF_WL = (BTC_CXP_OFF << 8) | 1, + + /* TDMA off + pri: BT = WL */ + BTC_CXP_OFF_EQ0 = (BTC_CXP_OFF << 8) | 2, + + /* TDMA off + pri: BT = WL > BT_Lo */ + BTC_CXP_OFF_EQ1 = (BTC_CXP_OFF << 8) | 3, + + /* TDMA off + pri: WL = BT, BT_Rx > WL_Lo_Tx */ + BTC_CXP_OFF_EQ2 = (BTC_CXP_OFF << 8) | 4, + + /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */ + BTC_CXP_OFF_EQ3 = (BTC_CXP_OFF << 8) | 5, + + /* TDMA off + pri: BT_Hi > WL > BT_Lo */ + BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 6, + + /* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */ + BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7, + + /* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/ + BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0, + + /* TDMA off + Ext-Ctrl + pri: default */ + BTC_CXP_OFFE_DEF = (BTC_CXP_OFFE << 8) | 0, + + /* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */ + BTC_CXP_OFFE_DEF2 = (BTC_CXP_OFFE << 8) | 1, + + /* TDMA Fix slot-0: W1:B1 = 30:30 */ + BTC_CXP_FIX_TD3030 = (BTC_CXP_FIX << 8) | 0, + + /* TDMA Fix slot-1: W1:B1 = 50:50 */ + BTC_CXP_FIX_TD5050 = (BTC_CXP_FIX << 8) | 1, + + /* TDMA Fix slot-2: W1:B1 = 20:30 */ + BTC_CXP_FIX_TD2030 = (BTC_CXP_FIX << 8) | 2, + + /* TDMA Fix slot-3: W1:B1 = 40:10 */ + BTC_CXP_FIX_TD4010 = (BTC_CXP_FIX << 8) | 3, + + /* TDMA Fix slot-4: W1:B1 = 70:10 */ + BTC_CXP_FIX_TD7010 = 
(BTC_CXP_FIX << 8) | 4, + + /* TDMA Fix slot-5: W1:B1 = 20:60 */ + BTC_CXP_FIX_TD2060 = (BTC_CXP_FIX << 8) | 5, + + /* TDMA Fix slot-6: W1:B1 = 30:60 */ + BTC_CXP_FIX_TD3060 = (BTC_CXP_FIX << 8) | 6, + + /* TDMA Fix slot-7: W1:B1 = 20:80 */ + BTC_CXP_FIX_TD2080 = (BTC_CXP_FIX << 8) | 7, + + /* TDMA Fix slot-8: W1:B1 = user-define */ + BTC_CXP_FIX_TDW1B1 = (BTC_CXP_FIX << 8) | 8, + + /* TDMA Fix slot-9: W1:B1 = 40:20 */ + BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9, + + /* PS-TDMA Fix slot-0: W1:B1 = 30:30 */ + BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0, + + /* PS-TDMA Fix slot-1: W1:B1 = 50:50 */ + BTC_CXP_PFIX_TD5050 = (BTC_CXP_PFIX << 8) | 1, + + /* PS-TDMA Fix slot-2: W1:B1 = 20:30 */ + BTC_CXP_PFIX_TD2030 = (BTC_CXP_PFIX << 8) | 2, + + /* PS-TDMA Fix slot-3: W1:B1 = 20:60 */ + BTC_CXP_PFIX_TD2060 = (BTC_CXP_PFIX << 8) | 3, + + /* PS-TDMA Fix slot-4: W1:B1 = 30:70 */ + BTC_CXP_PFIX_TD3070 = (BTC_CXP_PFIX << 8) | 4, + + /* PS-TDMA Fix slot-5: W1:B1 = 20:80 */ + BTC_CXP_PFIX_TD2080 = (BTC_CXP_PFIX << 8) | 5, + + /* PS-TDMA Fix slot-6: W1:B1 = user-define */ + BTC_CXP_PFIX_TDW1B1 = (BTC_CXP_PFIX << 8) | 6, + + /* TDMA Auto slot-0: W1:B1 = 50:200 */ + BTC_CXP_AUTO_TD50200 = (BTC_CXP_AUTO << 8) | 0, + + /* TDMA Auto slot-1: W1:B1 = 60:200 */ + BTC_CXP_AUTO_TD60200 = (BTC_CXP_AUTO << 8) | 1, + + /* TDMA Auto slot-2: W1:B1 = 20:200 */ + BTC_CXP_AUTO_TD20200 = (BTC_CXP_AUTO << 8) | 2, + + /* TDMA Auto slot-3: W1:B1 = user-define */ + BTC_CXP_AUTO_TDW1B1 = (BTC_CXP_AUTO << 8) | 3, + + /* PS-TDMA Auto slot-0: W1:B1 = 50:200 */ + BTC_CXP_PAUTO_TD50200 = (BTC_CXP_PAUTO << 8) | 0, + + /* PS-TDMA Auto slot-1: W1:B1 = 60:200 */ + BTC_CXP_PAUTO_TD60200 = (BTC_CXP_PAUTO << 8) | 1, + + /* PS-TDMA Auto slot-2: W1:B1 = 20:200 */ + BTC_CXP_PAUTO_TD20200 = (BTC_CXP_PAUTO << 8) | 2, + + /* PS-TDMA Auto slot-3: W1:B1 = user-define */ + BTC_CXP_PAUTO_TDW1B1 = (BTC_CXP_PAUTO << 8) | 3, + + /* TDMA Auto slot2-0: W1:B4 = 30:50 */ + BTC_CXP_AUTO2_TD3050 = (BTC_CXP_AUTO2 << 8) | 0, + + /* TDMA Auto slot2-1: W1:B4 = 30:70 */ + BTC_CXP_AUTO2_TD3070 = (BTC_CXP_AUTO2 << 8) | 1, + + /* TDMA Auto slot2-2: W1:B4 = 50:50 */ + BTC_CXP_AUTO2_TD5050 = (BTC_CXP_AUTO2 << 8) | 2, + + /* TDMA Auto slot2-3: W1:B4 = 60:60 */ + BTC_CXP_AUTO2_TD6060 = (BTC_CXP_AUTO2 << 8) | 3, + + /* TDMA Auto slot2-4: W1:B4 = 20:80 */ + BTC_CXP_AUTO2_TD2080 = (BTC_CXP_AUTO2 << 8) | 4, + + /* TDMA Auto slot2-5: W1:B4 = user-define */ + BTC_CXP_AUTO2_TDW1B4 = (BTC_CXP_AUTO2 << 8) | 5, + + /* PS-TDMA Auto slot2-0: W1:B4 = 30:50 */ + BTC_CXP_PAUTO2_TD3050 = (BTC_CXP_PAUTO2 << 8) | 0, + + /* PS-TDMA Auto slot2-1: W1:B4 = 30:70 */ + BTC_CXP_PAUTO2_TD3070 = (BTC_CXP_PAUTO2 << 8) | 1, + + /* PS-TDMA Auto slot2-2: W1:B4 = 50:50 */ + BTC_CXP_PAUTO2_TD5050 = (BTC_CXP_PAUTO2 << 8) | 2, + + /* PS-TDMA Auto slot2-3: W1:B4 = 60:60 */ + BTC_CXP_PAUTO2_TD6060 = (BTC_CXP_PAUTO2 << 8) | 3, + + /* PS-TDMA Auto slot2-4: W1:B4 = 20:80 */ + BTC_CXP_PAUTO2_TD2080 = (BTC_CXP_PAUTO2 << 8) | 4, + + /* PS-TDMA Auto slot2-5: W1:B4 = user-define */ + BTC_CXP_PAUTO2_TDW1B4 = (BTC_CXP_PAUTO2 << 8) | 5, + + BTC_CXP_MAX = 0xffff +}; + +enum btc_wl_rfk_result { + BTC_WRFK_REJECT = 0, + BTC_WRFK_ALLOW = 1, +}; + +enum btc_coex_info_map_en { + BTC_COEX_INFO_CX = BIT(0), + BTC_COEX_INFO_WL = BIT(1), + BTC_COEX_INFO_BT = BIT(2), + BTC_COEX_INFO_DM = BIT(3), + BTC_COEX_INFO_MREG = BIT(4), + BTC_COEX_INFO_SUMMARY = BIT(5), + BTC_COEX_INFO_ALL = GENMASK(7, 0), +}; + +#define BTC_CXP_MASK GENMASK(15, 8) + +enum btc_w2b_scoreboard { + BTC_WSCB_ACTIVE = BIT(0), + BTC_WSCB_ON = BIT(1), + 
BTC_WSCB_SCAN = BIT(2), + BTC_WSCB_UNDERTEST = BIT(3), + BTC_WSCB_RXGAIN = BIT(4), + BTC_WSCB_WLBUSY = BIT(7), + BTC_WSCB_EXTFEM = BIT(8), + BTC_WSCB_TDMA = BIT(9), + BTC_WSCB_FIX2M = BIT(10), + BTC_WSCB_WLRFK = BIT(11), + BTC_WSCB_BTRFK_GNT = BIT(12), /* not used, use mailbox to inform BT */ + BTC_WSCB_BT_HILNA = BIT(13), + BTC_WSCB_BTLOG = BIT(14), + BTC_WSCB_ALL = GENMASK(23, 0), +}; + +enum btc_wl_link_mode { + BTC_WLINK_NOLINK = 0x0, + BTC_WLINK_2G_STA, + BTC_WLINK_2G_AP, + BTC_WLINK_2G_GO, + BTC_WLINK_2G_GC, + BTC_WLINK_2G_SCC, + BTC_WLINK_2G_MCC, + BTC_WLINK_25G_MCC, + BTC_WLINK_25G_DBCC, + BTC_WLINK_5G, + BTC_WLINK_2G_NAN, + BTC_WLINK_OTHER, + BTC_WLINK_MAX +}; + +enum btc_bt_hid_type { + BTC_HID_218 = BIT(0), + BTC_HID_418 = BIT(1), + BTC_HID_BLE = BIT(2), + BTC_HID_RCU = BIT(3), + BTC_HID_RCU_VOICE = BIT(4), + BTC_HID_OTHER_LEGACY = BIT(5) +}; + +enum btc_reset_module { + BTC_RESET_CX = BIT(0), + BTC_RESET_DM = BIT(1), + BTC_RESET_CTRL = BIT(2), + BTC_RESET_CXDM = BIT(0) | BIT(1), + BTC_RESET_BTINFO = BIT(3), + BTC_RESET_MDINFO = BIT(4), + BTC_RESET_ALL = GENMASK(7, 0), +}; + +enum btc_gnt_state { + BTC_GNT_HW = 0, + BTC_GNT_SW_LO, + BTC_GNT_SW_HI, + BTC_GNT_MAX +}; + +enum btc_wl_max_tx_time { + BTC_MAX_TX_TIME_L1 = 500, + BTC_MAX_TX_TIME_L2 = 1000, + BTC_MAX_TX_TIME_L3 = 2000, + BTC_MAX_TX_TIME_DEF = 5280 +}; + +enum btc_wl_max_tx_retry { + BTC_MAX_TX_RETRY_L1 = 7, + BTC_MAX_TX_RETRY_L2 = 15, + BTC_MAX_TX_RETRY_DEF = 31, +}; + +enum btc_reason_and_action { + BTC_RSN_NONE, + BTC_RSN_NTFY_INIT, + BTC_RSN_NTFY_SWBAND, + BTC_RSN_NTFY_WL_STA, + BTC_RSN_NTFY_RADIO_STATE, + BTC_RSN_UPDATE_BT_SCBD, + BTC_RSN_NTFY_WL_RFK, + BTC_RSN_UPDATE_BT_INFO, + BTC_RSN_NTFY_SCAN_START, + BTC_RSN_NTFY_SCAN_FINISH, + BTC_RSN_NTFY_SPECIFIC_PACKET, + BTC_RSN_NTFY_POWEROFF, + BTC_RSN_NTFY_ROLE_INFO, + BTC_RSN_CMD_SET_COEX, + BTC_RSN_ACT1_WORK, + BTC_RSN_BT_DEVINFO_WORK, + BTC_RSN_RFK_CHK_WORK, + BTC_RSN_NUM, + BTC_ACT_NONE = 100, + BTC_ACT_WL_ONLY, + BTC_ACT_WL_5G, + BTC_ACT_WL_OTHER, + BTC_ACT_WL_IDLE, + BTC_ACT_WL_NC, + BTC_ACT_WL_RFK, + BTC_ACT_WL_INIT, + BTC_ACT_WL_OFF, + BTC_ACT_FREERUN, + BTC_ACT_BT_WHQL, + BTC_ACT_BT_RFK, + BTC_ACT_BT_OFF, + BTC_ACT_BT_IDLE, + BTC_ACT_BT_HFP, + BTC_ACT_BT_HID, + BTC_ACT_BT_A2DP, + BTC_ACT_BT_A2DPSINK, + BTC_ACT_BT_PAN, + BTC_ACT_BT_A2DP_HID, + BTC_ACT_BT_A2DP_PAN, + BTC_ACT_BT_PAN_HID, + BTC_ACT_BT_A2DP_PAN_HID, + BTC_ACT_WL_25G_MCC, + BTC_ACT_WL_2G_MCC, + BTC_ACT_WL_2G_SCC, + BTC_ACT_WL_2G_AP, + BTC_ACT_WL_2G_GO, + BTC_ACT_WL_2G_GC, + BTC_ACT_WL_2G_NAN, + BTC_ACT_LAST, + BTC_ACT_NUM = BTC_ACT_LAST - BTC_ACT_NONE, + BTC_ACT_EXT_BIT = BIT(14), + BTC_POLICY_EXT_BIT = BIT(15), +}; + +#define BTC_FREERUN_ANTISO_MIN 30 +#define BTC_TDMA_BTHID_MAX 2 +#define BTC_BLINK_NOCONNECT 0 + +static void _run_coex(struct rtw89_dev *rtwdev, + enum btc_reason_and_action reason); +static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state); +static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update); + +static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func, + void *param, u16 len) +{ + rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len, + false, true); +} + +static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info; + u8 i; + + 
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__); + + if (type & BTC_RESET_CX) + memset(cx, 0, sizeof(*cx)); + else if (type & BTC_RESET_BTINFO) /* only for BT enable */ + memset(bt, 0, sizeof(*bt)); + + if (type & BTC_RESET_CTRL) { + memset(&btc->ctrl, 0, sizeof(btc->ctrl)); + btc->ctrl.trace_step = FCXDEF_STEP; + } + + /* Init Coex variables that are not zero */ + if (type & BTC_RESET_DM) { + memset(&btc->dm, 0, sizeof(btc->dm)); + memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state)); + + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) + memset(wl_linfo[i].rssi_state, 0, + sizeof(wl_linfo[i].rssi_state)); + + /* set the slot_now table to original */ + btc->dm.tdma_now = t_def[CXTD_OFF]; + btc->dm.tdma = t_def[CXTD_OFF]; + memcpy(&btc->dm.slot_now, s_def, sizeof(btc->dm.slot_now)); + memcpy(&btc->dm.slot, s_def, sizeof(btc->dm.slot)); + + btc->policy_len = 0; + btc->bt_req_len = 0; + + btc->dm.coex_info_map = BTC_COEX_INFO_ALL; + btc->dm.wl_tx_limit.tx_time = BTC_MAX_TX_TIME_DEF; + btc->dm.wl_tx_limit.tx_retry = BTC_MAX_TX_RETRY_DEF; + } + + if (type & BTC_RESET_MDINFO) + memset(&btc->mdinfo, 0, sizeof(btc->mdinfo)); +} + +#define BTC_FWINFO_BUF 1024 + +#define BTC_RPT_HDR_SIZE 3 +#define BTC_CHK_WLSLOT_DRIFT_MAX 15 +#define BTC_CHK_HANG_MAX 3 + +static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_bt_info *bt = &cx->bt; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): type:%d cnt:%d\n", + __func__, type, cnt); + + switch (type) { + case BTC_DCNT_RPT_FREEZE: + if (dm->cnt_dm[BTC_DCNT_RPT] == cnt && btc->fwinfo.rpt_en_map) + dm->cnt_dm[BTC_DCNT_RPT_FREEZE]++; + else + dm->cnt_dm[BTC_DCNT_RPT_FREEZE] = 0; + + if (dm->cnt_dm[BTC_DCNT_RPT_FREEZE] >= BTC_CHK_HANG_MAX) + dm->error.map.wl_fw_hang = true; + else + dm->error.map.wl_fw_hang = false; + + dm->cnt_dm[BTC_DCNT_RPT] = cnt; + break; + case BTC_DCNT_CYCLE_FREEZE: + if (dm->cnt_dm[BTC_DCNT_CYCLE] == cnt && + (dm->tdma_now.type != CXTDMA_OFF || + dm->tdma_now.ext_ctrl == CXECTL_EXT)) + dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE]++; + else + dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE] = 0; + + if (dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE] >= BTC_CHK_HANG_MAX) + dm->error.map.cycle_hang = true; + else + dm->error.map.cycle_hang = false; + + dm->cnt_dm[BTC_DCNT_CYCLE] = cnt; + break; + case BTC_DCNT_W1_FREEZE: + if (dm->cnt_dm[BTC_DCNT_W1] == cnt && + dm->tdma_now.type != CXTDMA_OFF) + dm->cnt_dm[BTC_DCNT_W1_FREEZE]++; + else + dm->cnt_dm[BTC_DCNT_W1_FREEZE] = 0; + + if (dm->cnt_dm[BTC_DCNT_W1_FREEZE] >= BTC_CHK_HANG_MAX) + dm->error.map.w1_hang = true; + else + dm->error.map.w1_hang = false; + + dm->cnt_dm[BTC_DCNT_W1] = cnt; + break; + case BTC_DCNT_B1_FREEZE: + if (dm->cnt_dm[BTC_DCNT_B1] == cnt && + dm->tdma_now.type != CXTDMA_OFF) + dm->cnt_dm[BTC_DCNT_B1_FREEZE]++; + else + dm->cnt_dm[BTC_DCNT_B1_FREEZE] = 0; + + if (dm->cnt_dm[BTC_DCNT_B1_FREEZE] >= BTC_CHK_HANG_MAX) + dm->error.map.b1_hang = true; + else + dm->error.map.b1_hang = false; + + dm->cnt_dm[BTC_DCNT_B1] = cnt; + break; + case BTC_DCNT_TDMA_NONSYNC: + if (cnt != 0) /* if tdma not sync between drv/fw */ + dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC]++; + else + dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC] = 0; + + if (dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC] >= BTC_CHK_HANG_MAX) + dm->error.map.tdma_no_sync = true; + else + dm->error.map.tdma_no_sync = false; + break; + case BTC_DCNT_SLOT_NONSYNC: + if (cnt != 0) /* if slot not sync between drv/fw */ + 
dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC]++; + else + dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC] = 0; + + if (dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC] >= BTC_CHK_HANG_MAX) + dm->error.map.tdma_no_sync = true; + else + dm->error.map.tdma_no_sync = false; + break; + case BTC_DCNT_BTCNT_FREEZE: + cnt = cx->cnt_bt[BTC_BCNT_HIPRI_RX] + + cx->cnt_bt[BTC_BCNT_HIPRI_TX] + + cx->cnt_bt[BTC_BCNT_LOPRI_RX] + + cx->cnt_bt[BTC_BCNT_LOPRI_TX]; + + if (cnt == 0) + dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE]++; + else + dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0; + + if ((dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] >= BTC_CHK_HANG_MAX && + bt->enable.now) || (!dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] && + !bt->enable.now)) + _update_bt_scbd(rtwdev, false); + break; + case BTC_DCNT_WL_SLOT_DRIFT: + if (cnt >= BTC_CHK_WLSLOT_DRIFT_MAX) + dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT]++; + else + dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT] = 0; + + if (dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT] >= BTC_CHK_HANG_MAX) + dm->error.map.wl_slot_drift = true; + else + dm->error.map.wl_slot_drift = false; + break; + } +} + +static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc; + struct rtw89_btc_fbtc_btver *pver = NULL; + struct rtw89_btc_fbtc_btscan *pscan = NULL; + struct rtw89_btc_fbtc_btafh *pafh = NULL; + struct rtw89_btc_fbtc_btdevinfo *pdev = NULL; + + pver = (struct rtw89_btc_fbtc_btver *)pfinfo; + pscan = (struct rtw89_btc_fbtc_btscan *)pfinfo; + pafh = (struct rtw89_btc_fbtc_btafh *)pfinfo; + pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): rpt_type:%d\n", + __func__, rpt_type); + + switch (rpt_type) { + case BTC_RPT_TYPE_BT_VER: + bt->ver_info.fw = le32_to_cpu(pver->fw_ver); + bt->ver_info.fw_coex = le32_get_bits(pver->coex_ver, GENMASK(7, 0)); + bt->feature = le32_to_cpu(pver->feature); + break; + case BTC_RPT_TYPE_BT_SCAN: + memcpy(bt->scan_info, pscan->scan, BTC_SCAN_MAX1); + break; + case BTC_RPT_TYPE_BT_AFH: + memcpy(&bt_linfo->afh_map[0], pafh->afh_l, 4); + memcpy(&bt_linfo->afh_map[4], pafh->afh_m, 4); + memcpy(&bt_linfo->afh_map[8], pafh->afh_h, 2); + break; + case BTC_RPT_TYPE_BT_DEVICE: + a2dp->device_name = le32_to_cpu(pdev->dev_name); + a2dp->vendor_id = le16_to_cpu(pdev->vendor_id); + a2dp->flush_time = le32_to_cpu(pdev->flush_time); + break; + default: + break; + } +} + +struct rtw89_btc_fbtc_cysta_cpu { + u8 fver; + u8 rsvd; + u16 cycles; + u16 cycles_a2dp[CXT_FLCTRL_MAX]; + u16 a2dpept; + u16 a2dpeptto; + u16 tavg_cycle[CXT_MAX]; + u16 tmax_cycle[CXT_MAX]; + u16 tmaxdiff_cycle[CXT_MAX]; + u16 tavg_a2dp[CXT_FLCTRL_MAX]; + u16 tmax_a2dp[CXT_FLCTRL_MAX]; + u16 tavg_a2dpept; + u16 tmax_a2dpept; + u16 tavg_lk; + u16 tmax_lk; + u32 slot_cnt[CXST_MAX]; + u32 bcn_cnt[CXBCN_MAX]; + u32 leakrx_cnt; + u32 collision_cnt; + u32 skip_cnt; + u32 exception; + u32 except_cnt; + u16 tslot_cycle[BTC_CYCLE_SLOT_MAX]; +}; + +static void rtw89_btc_fbtc_cysta_to_cpu(const struct rtw89_btc_fbtc_cysta *src, + struct rtw89_btc_fbtc_cysta_cpu *dst) +{ + static_assert(sizeof(*src) == sizeof(*dst)); + +#define __CPY_U8(_x) ({dst->_x = src->_x; }) +#define __CPY_LE16(_x) ({dst->_x = le16_to_cpu(src->_x); }) +#define __CPY_LE16S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \ + dst->_x[_i] = le16_to_cpu(src->_x[_i]); }) +#define __CPY_LE32(_x) ({dst->_x = le32_to_cpu(src->_x); }) +#define 
__CPY_LE32S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \ + dst->_x[_i] = le32_to_cpu(src->_x[_i]); }) + + __CPY_U8(fver); + __CPY_U8(rsvd); + __CPY_LE16(cycles); + __CPY_LE16S(cycles_a2dp); + __CPY_LE16(a2dpept); + __CPY_LE16(a2dpeptto); + __CPY_LE16S(tavg_cycle); + __CPY_LE16S(tmax_cycle); + __CPY_LE16S(tmaxdiff_cycle); + __CPY_LE16S(tavg_a2dp); + __CPY_LE16S(tmax_a2dp); + __CPY_LE16(tavg_a2dpept); + __CPY_LE16(tmax_a2dpept); + __CPY_LE16(tavg_lk); + __CPY_LE16(tmax_lk); + __CPY_LE32S(slot_cnt); + __CPY_LE32S(bcn_cnt); + __CPY_LE32(leakrx_cnt); + __CPY_LE32(collision_cnt); + __CPY_LE32(skip_cnt); + __CPY_LE32(exception); + __CPY_LE32(except_cnt); + __CPY_LE16S(tslot_cycle); + +#undef __CPY_U8 +#undef __CPY_LE16 +#undef __CPY_LE16S +#undef __CPY_LE32 +#undef __CPY_LE32S +} + +#define BTC_LEAK_AP_TH 10 +#define BTC_CYSTA_CHK_PERIOD 100 + +struct rtw89_btc_prpt { + u8 type; + __le16 len; + u8 content[]; +} __packed; + +static u32 _chk_btc_report(struct rtw89_dev *rtwdev, + struct rtw89_btc_btf_fwinfo *pfwinfo, + u8 *prptbuf, u32 index) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_fbtc_rpt_ctrl *prpt = NULL; + struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL; + struct rtw89_btc_fbtc_cysta_cpu pcysta[1]; + struct rtw89_btc_prpt *btc_prpt = NULL; + struct rtw89_btc_fbtc_slot *rtp_slot = NULL; + u8 rpt_type = 0, *rpt_content = NULL, *pfinfo = NULL; + u16 wl_slot_set = 0; + u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t; + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): index:%d\n", + __func__, index); + + if (!prptbuf) { + pfwinfo->err[BTFRE_INVALID_INPUT]++; + return 0; + } + + btc_prpt = (struct rtw89_btc_prpt *)&prptbuf[index]; + rpt_type = btc_prpt->type; + rpt_len = le16_to_cpu(btc_prpt->len); + rpt_content = btc_prpt->content; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): rpt_type:%d\n", + __func__, rpt_type); + + switch (rpt_type) { + case BTC_RPT_TYPE_CTRL: + pcinfo = &pfwinfo->rpt_ctrl.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_ctrl.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo); + pcinfo->req_fver = BTCRPT_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_TDMA: + pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_tdma.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo); + pcinfo->req_fver = FCXTDMA_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_SLOT: + pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_slots.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo); + pcinfo->req_fver = FCXSLOTS_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_CYSTA: + pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_cysta.finfo); + pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo; + rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo); + pcinfo->req_fver = FCXCYSTA_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_STEP: + pcinfo = &pfwinfo->rpt_fbtc_step.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_step.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) * + trace_step + 8; + pcinfo->req_fver = FCXSTEP_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_NULLSTA: + 
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_nullsta.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo); + pcinfo->req_fver = FCXNULLSTA_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_MREG: + pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_mregval.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo); + pcinfo->req_fver = FCXMREG_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_GPIO_DBG: + pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_gpio_dbg.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo); + pcinfo->req_fver = FCXGPIODBG_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_BT_VER: + pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btver.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo); + pcinfo->req_fver = FCX_BTVER_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_BT_SCAN: + pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btscan.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo); + pcinfo->req_fver = FCX_BTSCAN_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_BT_AFH: + pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btafh.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo); + pcinfo->req_fver = FCX_BTAFH_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + case BTC_RPT_TYPE_BT_DEVICE: + pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo; + pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btdev.finfo); + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo); + pcinfo->req_fver = FCX_BTDEVINFO_VER; + pcinfo->rx_len = rpt_len; + pcinfo->rx_cnt++; + break; + default: + pfwinfo->err[BTFRE_UNDEF_TYPE]++; + return 0; + } + + if (rpt_len != pcinfo->req_len) { + if (rpt_type < BTC_RPT_TYPE_MAX) + pfwinfo->len_mismch |= (0x1 << rpt_type); + else + pfwinfo->len_mismch |= BIT(31); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): %d rpt_len:%d!=req_len:%d\n", + __func__, rpt_type, rpt_len, pcinfo->req_len); + + pcinfo->valid = 0; + return 0; + } else if (!pfinfo || !rpt_content || !pcinfo->req_len) { + pfwinfo->err[BTFRE_EXCEPTION]++; + pcinfo->valid = 0; + return 0; + } + + memcpy(pfinfo, rpt_content, pcinfo->req_len); + pcinfo->valid = 1; + + if (rpt_type == BTC_RPT_TYPE_TDMA) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): check %d %zu\n", __func__, + BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now)); + + if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo, + sizeof(dm->tdma_now)) != 0) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n", + __func__, BTC_DCNT_TDMA_NONSYNC, + dm->tdma_now.type, dm->tdma_now.rxflctrl, + dm->tdma_now.txpause, dm->tdma_now.wtgle_n, + dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl, + dm->tdma_now.rsvd0, dm->tdma_now.rsvd1); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n", + __func__, BTC_DCNT_TDMA_NONSYNC, + pfwinfo->rpt_fbtc_tdma.finfo.type, + pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl, + pfwinfo->rpt_fbtc_tdma.finfo.txpause, + pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n, + pfwinfo->rpt_fbtc_tdma.finfo.leak_n, + pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl, + pfwinfo->rpt_fbtc_tdma.finfo.rsvd0, + pfwinfo->rpt_fbtc_tdma.finfo.rsvd1); + } + + 
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC, + memcmp(&dm->tdma_now, + &pfwinfo->rpt_fbtc_tdma.finfo, + sizeof(dm->tdma_now))); + } + + if (rpt_type == BTC_RPT_TYPE_SLOT) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): check %d %zu\n", + __func__, BTC_DCNT_SLOT_NONSYNC, + sizeof(dm->slot_now)); + + if (memcmp(dm->slot_now, pfwinfo->rpt_fbtc_slots.finfo.slot, + sizeof(dm->slot_now)) != 0) { + for (i = 0; i < CXST_MAX; i++) { + rtp_slot = + &pfwinfo->rpt_fbtc_slots.finfo.slot[i]; + if (memcmp(&dm->slot_now[i], rtp_slot, + sizeof(dm->slot_now[i])) != 0) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): %d slot_now[%d] dur=0x%04x tbl=%08x type=0x%04x\n", + __func__, + BTC_DCNT_SLOT_NONSYNC, i, + dm->slot_now[i].dur, + dm->slot_now[i].cxtbl, + dm->slot_now[i].cxtype); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): %d rpt_fbtc_slots[%d] dur=0x%04x tbl=%08x type=0x%04x\n", + __func__, + BTC_DCNT_SLOT_NONSYNC, i, + rtp_slot->dur, + rtp_slot->cxtbl, + rtp_slot->cxtype); + } + } + } + _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC, + memcmp(dm->slot_now, + pfwinfo->rpt_fbtc_slots.finfo.slot, + sizeof(dm->slot_now))); + } + + if (rpt_type == BTC_RPT_TYPE_CYSTA && + pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) { + /* Check Leak-AP */ + if (pcysta->slot_cnt[CXST_LK] != 0 && + pcysta->leakrx_cnt != 0 && dm->tdma_now.rxflctrl) { + if (pcysta->slot_cnt[CXST_LK] < + BTC_LEAK_AP_TH * pcysta->leakrx_cnt) + dm->leak_ap = 1; + } + + /* Check diff time between WL slot and W1/E2G slot */ + if (dm->tdma_now.type == CXTDMA_OFF && + dm->tdma_now.ext_ctrl == CXECTL_EXT) + wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur); + else + wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur); + + if (pcysta->tavg_cycle[CXT_WL] > wl_slot_set) { + diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set; + _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t); + } + } + + if (rpt_type == BTC_RPT_TYPE_CTRL) { + prpt = &pfwinfo->rpt_ctrl.finfo; + btc->fwinfo.rpt_en_map = prpt->rpt_enable; + wl->ver_info.fw_coex = prpt->wl_fw_coex_ver; + wl->ver_info.fw = prpt->wl_fw_ver; + dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload); + } + + if (rpt_type >= BTC_RPT_TYPE_BT_VER && + rpt_type <= BTC_RPT_TYPE_BT_DEVICE) + _update_bt_report(rtwdev, rpt_type, pfinfo); + + return (rpt_len + BTC_RPT_HDR_SIZE); +} + +static void _parse_btc_report(struct rtw89_dev *rtwdev, + struct rtw89_btc_btf_fwinfo *pfwinfo, + u8 *pbuf, u32 buf_len) +{ + struct rtw89_btc_prpt *btc_prpt = NULL; + u32 index = 0, rpt_len = 0; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): buf_len:%d\n", + __func__, buf_len); + + while (pbuf) { + btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index]; + if (index + 2 >= BTC_FWINFO_BUF) + break; + /* At least 3 bytes: type(1) & len(2) */ + rpt_len = le16_to_cpu(btc_prpt->len); + if ((index + rpt_len + BTC_RPT_HDR_SIZE) > buf_len) + break; + + rpt_len = _chk_btc_report(rtwdev, pfwinfo, pbuf, index); + if (!rpt_len) + break; + index += rpt_len; + } +} + +#define BTC_TLV_HDR_LEN 2 + +static void _append_tdma(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_btf_tlv *tlv = NULL; + struct rtw89_btc_fbtc_tdma *v = NULL; + u16 len = btc->policy_len; + + if (!btc->update_policy_force && + !memcmp(&dm->tdma, &dm->tdma_now, sizeof(dm->tdma))) { + rtw89_debug(rtwdev, + RTW89_DBG_BTC, "[BTC], %s(): tdma no change!\n", + __func__); + return; + } + + tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len]; + v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0]; + tlv->type 
= CXPOLICY_TDMA; + tlv->len = sizeof(*v); + + memcpy(v, &dm->tdma, sizeof(*v)); + btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): type:%d, rxflctrl=%d, txpause=%d, wtgle_n=%d, leak_n=%d, ext_ctrl=%d\n", + __func__, dm->tdma.type, dm->tdma.rxflctrl, + dm->tdma.txpause, dm->tdma.wtgle_n, dm->tdma.leak_n, + dm->tdma.ext_ctrl); +} + +static void _append_slot(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_btf_tlv *tlv = NULL; + struct btc_fbtc_1slot *v = NULL; + u16 len = 0; + u8 i, cnt = 0; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): A:btc->policy_len = %d\n", + __func__, btc->policy_len); + + for (i = 0; i < CXST_MAX; i++) { + if (!btc->update_policy_force && + !memcmp(&dm->slot[i], &dm->slot_now[i], + sizeof(dm->slot[i]))) + continue; + + len = btc->policy_len; + + tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len]; + v = (struct btc_fbtc_1slot *)&tlv->val[0]; + tlv->type = CXPOLICY_SLOT; + tlv->len = sizeof(*v); + + v->fver = FCXONESLOT_VER; + v->sid = i; + v->slot = dm->slot[i]; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): slot-%d: dur=%d, table=0x%08x, type=%d\n", + __func__, i, dm->slot[i].dur, dm->slot[i].cxtbl, + dm->slot[i].cxtype); + cnt++; + + btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v); + } + + if (cnt > 0) + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): slot update (cnt=%d)!!\n", + __func__, cnt); +} + +static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev, + u32 rpt_map, bool rpt_state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo; + struct rtw89_btc_btf_set_report r = {0}; + u32 val = 0; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): rpt_map=%x, rpt_state=%x\n", + __func__, rpt_map, rpt_state); + + if (rpt_state) + val = fwinfo->rpt_en_map | rpt_map; + else + val = fwinfo->rpt_en_map & ~rpt_map; + + if (val == fwinfo->rpt_en_map) + return; + + fwinfo->rpt_en_map = val; + + r.fver = BTF_SET_REPORT_VER; + r.enable = cpu_to_le32(val); + r.para = cpu_to_le32(rpt_state); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r)); +} + +static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num, + struct rtw89_btc_fbtc_slot *s) +{ + struct rtw89_btc_btf_set_slot_table *tbl = NULL; + u8 *ptr = NULL; + u16 n = 0; + + n = sizeof(*s) * num + sizeof(*tbl); + tbl = kmalloc(n, GFP_KERNEL); + if (!tbl) + return; + + tbl->fver = BTF_SET_SLOT_TABLE_VER; + tbl->tbl_num = num; + ptr = &tbl->buf[0]; + memcpy(ptr, s, num * sizeof(*s)); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n); + + kfree(tbl); +} + +static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc_btf_set_mon_reg *monreg = NULL; + u8 n, *ptr = NULL, ulen; + u16 sz = 0; + + n = chip->mon_reg_num; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): mon_reg_num=%d\n", __func__, n); + if (n > CXMREG_MAX) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): mon reg count %d > %d\n", + __func__, n, CXMREG_MAX); + return; + } + + ulen = sizeof(struct rtw89_btc_fbtc_mreg); + sz = (ulen * n) + sizeof(*monreg); + monreg = kmalloc(sz, GFP_KERNEL); + if (!monreg) + return; + + monreg->fver = BTF_SET_MON_REG_VER; + monreg->reg_num = n; + ptr = &monreg->buf[0]; + memcpy(ptr, chip->mon_reg, n * ulen); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): sz=%d ulen=%d n=%d\n", + __func__, sz, ulen, n); + + 
_send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, (u8 *)monreg, sz); + kfree(monreg); + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1); +} + +static void _update_dm_step(struct rtw89_dev *rtwdev, + enum btc_reason_and_action reason_or_action) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + + /* use ring-structure to store dm step */ + dm->dm_step.step[dm->dm_step.step_pos] = reason_or_action; + dm->dm_step.step_pos++; + + if (dm->dm_step.step_pos >= ARRAY_SIZE(dm->dm_step.step)) { + dm->dm_step.step_pos = 0; + dm->dm_step.step_ov = true; + } +} + +static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type, + enum btc_reason_and_action action) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + + dm->run_action = action; + + _update_dm_step(rtwdev, action | BTC_ACT_EXT_BIT); + _update_dm_step(rtwdev, policy_type | BTC_POLICY_EXT_BIT); + + btc->policy_len = 0; + btc->policy_type = policy_type; + + _append_tdma(rtwdev); + _append_slot(rtwdev); + + if (btc->policy_len == 0 || btc->policy_len > RTW89_BTC_POLICY_MAXLEN) + return; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): action = %d -> policy type/len: 0x%04x/%d\n", + __func__, action, policy_type, btc->policy_len); + + if (dm->tdma.rxflctrl == CXFLC_NULLP || + dm->tdma.rxflctrl == CXFLC_QOSNULL) + btc->lps = 1; + else + btc->lps = 0; + + if (btc->lps == 1) + rtw89_set_coex_ctrl_lps(rtwdev, btc->lps); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY, + btc->policy, btc->policy_len); + + memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now)); + memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now)); + + if (btc->update_policy_force) + btc->update_policy_force = false; + + if (btc->lps == 0) + rtw89_set_coex_ctrl_lps(rtwdev, btc->lps); +} + +static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type) +{ + switch (type) { + case CXDRVINFO_INIT: + rtw89_fw_h2c_cxdrv_init(rtwdev); + break; + case CXDRVINFO_ROLE: + rtw89_fw_h2c_cxdrv_role(rtwdev); + break; + case CXDRVINFO_CTRL: + rtw89_fw_h2c_cxdrv_ctrl(rtwdev); + break; + case CXDRVINFO_RFK: + rtw89_fw_h2c_cxdrv_rfk(rtwdev); + break; + default: + break; + } +} + +static +void btc_fw_event(struct rtw89_dev *rtwdev, u8 evt_id, void *data, u32 len) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): evt_id:%d len:%d\n", + __func__, evt_id, len); + + if (!len || !data) + return; + + switch (evt_id) { + case BTF_EVNT_RPT: + _parse_btc_report(rtwdev, pfwinfo, data, len); + break; + default: + break; + } +} + +static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_mac_ax_gnt *g = dm->gnt.band; + u8 i; + + if (phy_map > BTC_PHY_ALL) + return; + + for (i = 0; i < RTW89_PHY_MAX; i++) { + if (!(phy_map & BIT(i))) + continue; + + switch (state) { + case BTC_GNT_HW: + g[i].gnt_wl_sw_en = 0; + g[i].gnt_wl = 0; + break; + case BTC_GNT_SW_LO: + g[i].gnt_wl_sw_en = 1; + g[i].gnt_wl = 0; + break; + case BTC_GNT_SW_HI: + g[i].gnt_wl_sw_en = 1; + g[i].gnt_wl = 1; + break; + } + } + + rtw89_mac_cfg_gnt(rtwdev, &dm->gnt); +} + +#define BTC_TDMA_WLROLE_MAX 2 + +static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable) +{ + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): set bt %s wlan_act\n", __func__, + enable ? 
"ignore" : "do not ignore"); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_IGNORE_WLAN_ACT, &enable, 1); +} + +#define WL_TX_POWER_NO_BTC_CTRL GENMASK(31, 0) +#define WL_TX_POWER_ALL_TIME GENMASK(15, 0) +#define WL_TX_POWER_WITH_BT GENMASK(31, 16) +#define WL_TX_POWER_INT_PART GENMASK(8, 2) +#define WL_TX_POWER_FRA_PART GENMASK(1, 0) +#define B_BTC_WL_TX_POWER_SIGN BIT(7) +#define B_TSSI_WL_TX_POWER_SIGN BIT(8) + +static void _set_wl_tx_power(struct rtw89_dev *rtwdev, u32 level) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + u32 pwr_val; + + if (wl->rf_para.tx_pwr_freerun == level) + return; + + wl->rf_para.tx_pwr_freerun = level; + btc->dm.rf_trx_para.wl_tx_power = level; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): level = %d\n", + __func__, level); + + if (level == RTW89_BTC_WL_DEF_TX_PWR) { + pwr_val = WL_TX_POWER_NO_BTC_CTRL; + } else { /* only apply "force tx power" */ + pwr_val = FIELD_PREP(WL_TX_POWER_INT_PART, level); + if (pwr_val > RTW89_BTC_WL_DEF_TX_PWR) + pwr_val = RTW89_BTC_WL_DEF_TX_PWR; + + if (level & B_BTC_WL_TX_POWER_SIGN) + pwr_val |= B_TSSI_WL_TX_POWER_SIGN; + pwr_val |= WL_TX_POWER_WITH_BT; + } + + chip->ops->btc_set_wl_txpwr_ctrl(rtwdev, pwr_val); +} + +static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + if (wl->rf_para.rx_gain_freerun == level) + return; + + wl->rf_para.rx_gain_freerun = level; + btc->dm.rf_trx_para.wl_rx_gain = level; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): level = %d\n", + __func__, level); +} + +static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + u8 buf; + + if (bt->rf_para.tx_pwr_freerun == level) + return; + + bt->rf_para.tx_pwr_freerun = level; + btc->dm.rf_trx_para.bt_tx_power = level; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): level = %d\n", + __func__, level); + + buf = (s8)(-level); + _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1); +} + +#define BTC_BT_RX_NORMAL_LVL 7 + +static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + + if (bt->rf_para.rx_gain_freerun == level || + level > BTC_BT_RX_NORMAL_LVL) + return; + + bt->rf_para.rx_gain_freerun = level; + btc->dm.rf_trx_para.bt_rx_gain = level; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): level = %d\n", + __func__, level); + + if (level == BTC_BT_RX_NORMAL_LVL) + _write_scbd(rtwdev, BTC_WSCB_RXGAIN, false); + else + _write_scbd(rtwdev, BTC_WSCB_RXGAIN, true); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_LNA_CONSTRAIN, &level, 1); +} + +static void _set_rf_trx_para(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_rf_trx_para para; + u32 wl_stb_chg = 0; + u8 level_id = 0; + + if (!dm->freerun) { + dm->trx_para_level = 0; + chip->ops->btc_bt_aci_imp(rtwdev); + } + + level_id = (u8)dm->trx_para_level; + + if (level_id >= chip->rf_para_dlink_num || + level_id >= chip->rf_para_ulink_num) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): invalid level_id: %d\n", + __func__, level_id); + return; + } + + if 
(wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) + para = chip->rf_para_ulink[level_id]; + else + para = chip->rf_para_dlink[level_id]; + + if (para.wl_tx_power != RTW89_BTC_WL_DEF_TX_PWR) + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): wl_tx_power=%d\n", + __func__, para.wl_tx_power); + _set_wl_tx_power(rtwdev, para.wl_tx_power); + _set_wl_rx_gain(rtwdev, para.wl_rx_gain); + _set_bt_tx_power(rtwdev, para.bt_tx_power); + _set_bt_rx_gain(rtwdev, para.bt_rx_gain); + + if (bt->enable.now == 0 || wl->status.map.rf_off == 1 || + wl->status.map.lps == 1) + wl_stb_chg = 0; + else + wl_stb_chg = 1; + + if (wl_stb_chg != dm->wl_stb_chg) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): wl_stb_chg=%d\n", + __func__, wl_stb_chg); + dm->wl_stb_chg = wl_stb_chg; + chip->ops->btc_wl_s1_standby(rtwdev, dm->wl_stb_chg); + } +} + +static void _update_btc_state_map(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + + if (wl->status.map.connecting || wl->status.map._4way || + wl->status.map.roaming) { + cx->state_map = BTC_WLINKING; + } else if (wl->status.map.scan) { /* wl scan */ + if (bt_linfo->status.map.inq_pag) + cx->state_map = BTC_WSCAN_BSCAN; + else + cx->state_map = BTC_WSCAN_BNOSCAN; + } else if (wl->status.map.busy) { /* only busy */ + if (bt_linfo->status.map.inq_pag) + cx->state_map = BTC_WBUSY_BSCAN; + else + cx->state_map = BTC_WBUSY_BNOSCAN; + } else { /* wl idle */ + cx->state_map = BTC_WIDLE; + } +} + +static void _set_bt_afh_info(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *b = &bt->link_info; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + u8 en = 0, i, ch = 0, bw = 0; + + if (btc->ctrl.manual || wl->status.map.scan) + return; + + /* TODO if include module->ant.type == BTC_ANT_SHARED */ + if (wl->status.map.rf_off || bt->whql_test || + wl_rinfo->link_mode == BTC_WLINK_NOLINK || + wl_rinfo->link_mode == BTC_WLINK_5G || + wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) { + en = false; + } else if (wl_rinfo->link_mode == BTC_WLINK_2G_MCC || + wl_rinfo->link_mode == BTC_WLINK_2G_SCC) { + en = true; + /* get p2p channel */ + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) { + if (wl_rinfo->active_role[i].role == + RTW89_WIFI_ROLE_P2P_GO || + wl_rinfo->active_role[i].role == + RTW89_WIFI_ROLE_P2P_CLIENT) { + ch = wl_rinfo->active_role[i].ch; + bw = wl_rinfo->active_role[i].bw; + break; + } + } + } else { + en = true; + /* get 2g channel */ + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) { + if (wl_rinfo->active_role[i].connected && + wl_rinfo->active_role[i].band == RTW89_BAND_2G) { + ch = wl_rinfo->active_role[i].ch; + bw = wl_rinfo->active_role[i].bw; + break; + } + } + } + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + bw = 20 + chip->afh_guard_ch * 2; + break; + case RTW89_CHANNEL_WIDTH_40: + bw = 40 + chip->afh_guard_ch * 2; + break; + case RTW89_CHANNEL_WIDTH_5: + bw = 5 + chip->afh_guard_ch * 2; + break; + case RTW89_CHANNEL_WIDTH_10: + bw = 10 + chip->afh_guard_ch * 2; + break; + default: + bw = 0; + en = false; /* turn off AFH info if BW > 40 */ + break; + } + + if (wl->afh_info.en == en && + wl->afh_info.ch == ch && + wl->afh_info.bw == bw && + b->profile_cnt.last == 
b->profile_cnt.now) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return because no change!\n", + __func__); + return; + } + + wl->afh_info.en = en; + wl->afh_info.ch = ch; + wl->afh_info.bw = bw; + + _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_WL_CH_INFO, &wl->afh_info, 3); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): en=%d, ch=%d, bw=%d\n", + __func__, en, ch, bw); + btc->cx.cnt_wl[BTC_WCNT_CH_UPDATE]++; +} + +static bool _check_freerun(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc; + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { + btc->dm.trx_para_level = 0; + return false; + } + + /* The below is dedicated antenna case */ + if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) { + btc->dm.trx_para_level = 5; + return true; + } + + if (bt_linfo->profile_cnt.now == 0) { + btc->dm.trx_para_level = 5; + return true; + } + + if (hid->pair_cnt > BTC_TDMA_BTHID_MAX) { + btc->dm.trx_para_level = 5; + return true; + } + + /* TODO get isolation by BT psd */ + if (btc->mdinfo.ant.isolation >= BTC_FREERUN_ANTISO_MIN) { + btc->dm.trx_para_level = 5; + return true; + } + + if (!wl->status.map.busy) {/* wl idle -> freerun */ + btc->dm.trx_para_level = 5; + return true; + } else if (wl->rssi_level > 1) {/* WL rssi < 50% (-60dBm) */ + btc->dm.trx_para_level = 0; + return false; + } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) { + if (wl->rssi_level == 0 && bt_linfo->rssi > 31) { + btc->dm.trx_para_level = 6; + return true; + } else if (wl->rssi_level == 1 && bt_linfo->rssi > 36) { + btc->dm.trx_para_level = 7; + return true; + } + btc->dm.trx_para_level = 0; + return false; + } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL)) { + if (bt_linfo->rssi > 28) { + btc->dm.trx_para_level = 6; + return true; + } + } + + btc->dm.trx_para_level = 0; + return false; +} + +#define _tdma_set_flctrl(btc, flc) ({(btc)->dm.tdma.rxflctrl = flc; }) +#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; }) +#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; }) + +#define _slot_set(btc, sid, dura, tbl, type) \ + do { \ + typeof(sid) _sid = (sid); \ + typeof(btc) _btc = (btc); \ + _btc->dm.slot[_sid].dur = cpu_to_le16(dura);\ + _btc->dm.slot[_sid].cxtbl = cpu_to_le32(tbl); \ + _btc->dm.slot[_sid].cxtype = cpu_to_le16(type); \ + } while (0) + +#define _slot_set_dur(btc, sid, dura) (btc)->dm.slot[sid].dur = cpu_to_le16(dura) +#define _slot_set_tbl(btc, sid, tbl) (btc)->dm.slot[sid].cxtbl = cpu_to_le32(tbl) +#define _slot_set_type(btc, sid, type) (btc)->dm.slot[sid].cxtype = cpu_to_le16(type) + +struct btc_btinfo_lb2 { + u8 connect: 1; + u8 sco_busy: 1; + u8 inq_pag: 1; + u8 acl_busy: 1; + u8 hfp: 1; + u8 hid: 1; + u8 a2dp: 1; + u8 pan: 1; +}; + +struct btc_btinfo_lb3 { + u8 retry: 4; + u8 cqddr: 1; + u8 inq: 1; + u8 mesh_busy: 1; + u8 pag: 1; +}; + +struct btc_btinfo_hb0 { + s8 rssi; +}; + +struct btc_btinfo_hb1 { + u8 ble_connect: 1; + u8 reinit: 1; + u8 relink: 1; + u8 igno_wl: 1; + u8 voice: 1; + u8 ble_scan: 1; + u8 role_sw: 1; + u8 multi_link: 1; +}; + +struct btc_btinfo_hb2 { + u8 pan_active: 1; + u8 afh_update: 1; + u8 a2dp_active: 1; + u8 slave: 1; + u8 hid_slot: 2; + u8 hid_cnt: 2; +}; + +struct btc_btinfo_hb3 { + u8 a2dp_bitpool: 6; + u8 tx_3m: 1; + u8 a2dp_sink: 1; +}; + 
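+/* Bitfield views over single bytes of the BT info report: each struct above
+ * decodes one report byte (lbN presumably the Nth low info byte, hbN the Nth
+ * high info byte, per the field naming), and the union below overlays them on
+ * a single u8 so a received byte can be read through whichever layout applies.
+ */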
+union btc_btinfo { + u8 val; + struct btc_btinfo_lb2 lb2; + struct btc_btinfo_lb3 lb3; + struct btc_btinfo_hb0 hb0; + struct btc_btinfo_hb1 hb1; + struct btc_btinfo_hb2 hb2; + struct btc_btinfo_hb3 hb3; +}; + +static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type, + enum btc_reason_and_action action) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_fbtc_tdma *t = &dm->tdma; + struct rtw89_btc_fbtc_slot *s = dm->slot; + u8 type; + u32 tbl_w1, tbl_b1, tbl_b4; + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { + if (btc->cx.wl.status.map._4way) + tbl_w1 = cxtbl[1]; + else + tbl_w1 = cxtbl[8]; + tbl_b1 = cxtbl[3]; + tbl_b4 = cxtbl[3]; + } else { + tbl_w1 = cxtbl[16]; + tbl_b1 = cxtbl[17]; + tbl_b4 = cxtbl[17]; + } + + type = (u8)((policy_type & BTC_CXP_MASK) >> 8); + btc->bt_req_en = false; + + switch (type) { + case BTC_CXP_USERDEF0: + *t = t_def[CXTD_OFF]; + s[CXST_OFF] = s_def[CXST_OFF]; + _slot_set_tbl(btc, CXST_OFF, cxtbl[2]); + btc->update_policy_force = true; + break; + case BTC_CXP_OFF: /* TDMA off */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, false); + *t = t_def[CXTD_OFF]; + s[CXST_OFF] = s_def[CXST_OFF]; + + switch (policy_type) { + case BTC_CXP_OFF_BT: + _slot_set_tbl(btc, CXST_OFF, cxtbl[2]); + break; + case BTC_CXP_OFF_WL: + _slot_set_tbl(btc, CXST_OFF, cxtbl[1]); + break; + case BTC_CXP_OFF_EQ0: + _slot_set_tbl(btc, CXST_OFF, cxtbl[0]); + break; + case BTC_CXP_OFF_EQ1: + _slot_set_tbl(btc, CXST_OFF, cxtbl[16]); + break; + case BTC_CXP_OFF_EQ2: + _slot_set_tbl(btc, CXST_OFF, cxtbl[17]); + break; + case BTC_CXP_OFF_EQ3: + _slot_set_tbl(btc, CXST_OFF, cxtbl[18]); + break; + case BTC_CXP_OFF_BWB0: + _slot_set_tbl(btc, CXST_OFF, cxtbl[5]); + break; + case BTC_CXP_OFF_BWB1: + _slot_set_tbl(btc, CXST_OFF, cxtbl[8]); + break; + } + break; + case BTC_CXP_OFFB: /* TDMA off + beacon protect */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, false); + *t = t_def[CXTD_OFF_B2]; + s[CXST_OFF] = s_def[CXST_OFF]; + switch (policy_type) { + case BTC_CXP_OFFB_BWB0: + _slot_set_tbl(btc, CXST_OFF, cxtbl[8]); + break; + } + break; + case BTC_CXP_OFFE: /* TDMA off + beacon protect + Ext_control */ + btc->bt_req_en = true; + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_OFF_EXT]; + switch (policy_type) { + case BTC_CXP_OFFE_DEF: + s[CXST_E2G] = s_def[CXST_E2G]; + s[CXST_E5G] = s_def[CXST_E5G]; + s[CXST_EBT] = s_def[CXST_EBT]; + s[CXST_ENULL] = s_def[CXST_ENULL]; + break; + case BTC_CXP_OFFE_DEF2: + _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO); + s[CXST_E5G] = s_def[CXST_E5G]; + s[CXST_EBT] = s_def[CXST_EBT]; + s[CXST_ENULL] = s_def[CXST_ENULL]; + break; + } + break; + case BTC_CXP_FIX: /* TDMA Fix-Slot */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_FIX]; + switch (policy_type) { + case BTC_CXP_FIX_TD3030: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD5050: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD2030: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD4010: + _slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD4020: + _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX); + _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD7010: + _slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO); + 
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD2060: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD3060: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TD2080: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_FIX_TDW1B1: /* W1:B1 = user-define */ + _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1], + tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1], + tbl_b1, SLOT_MIX); + break; + } + break; + case BTC_CXP_PFIX: /* PS-TDMA Fix-Slot */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_PFIX]; + if (btc->cx.wl.role_info.role_map.role.ap) + _tdma_set_flctrl(btc, CXFLC_QOSNULL); + + switch (policy_type) { + case BTC_CXP_PFIX_TD3030: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PFIX_TD5050: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PFIX_TD2030: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PFIX_TD2060: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PFIX_TD3070: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PFIX_TD2080: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX); + break; + } + break; + case BTC_CXP_AUTO: /* TDMA Auto-Slot */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_AUTO]; + switch (policy_type) { + case BTC_CXP_AUTO_TD50200: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_AUTO_TD60200: + _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_AUTO_TD20200: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */ + _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1], + tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1], + tbl_b1, SLOT_MIX); + break; + } + break; + case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_PAUTO]; + switch (policy_type) { + case BTC_CXP_PAUTO_TD50200: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PAUTO_TD60200: + _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PAUTO_TD20200: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + break; + case BTC_CXP_PAUTO_TDW1B1: + _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1], + tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1], + tbl_b1, SLOT_MIX); + break; + } + break; + case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_AUTO2]; + switch (policy_type) { + case BTC_CXP_AUTO2_TD3050: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX); + break; 
+ case BTC_CXP_AUTO2_TD3070: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_AUTO2_TD5050: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_AUTO2_TD6060: + _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_AUTO2_TD2080: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_AUTO2_TDW1B4: /* W1:B1 = user-define */ + _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1], + tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4], + tbl_b4, SLOT_MIX); + break; + } + break; + case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */ + _write_scbd(rtwdev, BTC_WSCB_TDMA, true); + *t = t_def[CXTD_PAUTO2]; + switch (policy_type) { + case BTC_CXP_PAUTO2_TD3050: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_PAUTO2_TD3070: + _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_PAUTO2_TD5050: + _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_PAUTO2_TD6060: + _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_PAUTO2_TD2080: + _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX); + _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX); + break; + case BTC_CXP_PAUTO2_TDW1B4: /* W1:B1 = user-define */ + _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1], + tbl_w1, SLOT_ISO); + _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4], + tbl_b4, SLOT_MIX); + break; + } + break; + } + + _fw_set_policy(rtwdev, policy_type, action); +} + +static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_mac_ax_gnt *g = dm->gnt.band; + u8 i; + + if (phy_map > BTC_PHY_ALL) + return; + + for (i = 0; i < RTW89_PHY_MAX; i++) { + if (!(phy_map & BIT(i))) + continue; + + switch (state) { + case BTC_GNT_HW: + g[i].gnt_bt_sw_en = 0; + g[i].gnt_bt = 0; + break; + case BTC_GNT_SW_LO: + g[i].gnt_bt_sw_en = 1; + g[i].gnt_bt = 0; + break; + case BTC_GNT_SW_HI: + g[i].gnt_bt_sw_en = 1; + g[i].gnt_bt = 1; + break; + } + } + + rtw89_mac_cfg_gnt(rtwdev, &dm->gnt); +} + +static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map, + u8 tx_val, u8 rx_val) +{ + struct rtw89_mac_ax_plt plt; + + plt.band = RTW89_MAC_0; + plt.tx = tx_val; + plt.rx = rx_val; + + if (phy_map & BTC_PHY_0) + rtw89_mac_cfg_plt(rtwdev, &plt); + + if (!rtwdev->dbcc_en) + return; + + plt.band = RTW89_MAC_1; + if (phy_map & BTC_PHY_1) + rtw89_mac_cfg_plt(rtwdev, &plt); +} + +static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec, + u8 phy_map, u8 type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_cx *cx = &btc->cx; + struct 
rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + u8 gnt_wl_ctrl, gnt_bt_ctrl, plt_ctrl, i, b2g = 0; + u32 ant_path_type; + + ant_path_type = ((phy_map << 8) + type); + + if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF || + btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE || + btc->dm.run_reason == BTC_RSN_CMD_SET_COEX) + force_exec = FC_EXEC; + + if (!force_exec && ant_path_type == dm->set_ant_path) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by no change!!\n", + __func__); + return; + } else if (bt->rfk_info.map.run) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by bt rfk!!\n", __func__); + return; + } else if (btc->dm.run_reason != BTC_RSN_NTFY_WL_RFK && + wl->rfk_info.state != BTC_WRFK_STOP) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by wl rfk!!\n", __func__); + return; + } + + dm->set_ant_path = ant_path_type; + + rtw89_debug(rtwdev, + RTW89_DBG_BTC, + "[BTC], %s(): path=0x%x, set_type=0x%x\n", + __func__, phy_map, dm->set_ant_path & 0xff); + + switch (type) { + case BTC_ANT_WPOWERON: + rtw89_mac_cfg_ctrl_path(rtwdev, false); + break; + case BTC_ANT_WINIT: + if (bt->enable.now) { + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI); + } else { + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO); + } + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT); + break; + case BTC_ANT_WONLY: + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO); + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE); + break; + case BTC_ANT_WOFF: + rtw89_mac_cfg_ctrl_path(rtwdev, false); + _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE); + break; + case BTC_ANT_W2G: + rtw89_mac_cfg_ctrl_path(rtwdev, true); + if (rtwdev->dbcc_en) { + for (i = 0; i < RTW89_PHY_MAX; i++) { + b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G); + + gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI; + _set_gnt_wl(rtwdev, BIT(i), gnt_wl_ctrl); + + gnt_bt_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI; + /* BT should control by GNT_BT if WL_2G at S0 */ + if (i == 1 && + wl_dinfo->real_band[0] == RTW89_BAND_2G && + wl_dinfo->real_band[1] == RTW89_BAND_5G) + gnt_bt_ctrl = BTC_GNT_HW; + _set_gnt_bt(rtwdev, BIT(i), gnt_bt_ctrl); + + plt_ctrl = b2g ? 
BTC_PLT_BT : BTC_PLT_NONE; + _set_bt_plut(rtwdev, BIT(i), + plt_ctrl, plt_ctrl); + } + } else { + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW); + _set_bt_plut(rtwdev, BTC_PHY_ALL, + BTC_PLT_BT, BTC_PLT_BT); + } + break; + case BTC_ANT_W5G: + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW); + _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE); + break; + case BTC_ANT_W25G: + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW); + _set_bt_plut(rtwdev, BTC_PHY_ALL, + BTC_PLT_GNT_WL, BTC_PLT_GNT_WL); + break; + case BTC_ANT_FREERUN: + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE); + break; + case BTC_ANT_WRFK: + rtw89_mac_cfg_ctrl_path(rtwdev, true); + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO); + _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE); + break; + case BTC_ANT_BRFK: + rtw89_mac_cfg_ctrl_path(rtwdev, false); + _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO); + _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI); + _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE); + break; + default: + break; + } +} + +static void _action_wl_only(struct rtw89_dev *rtwdev) +{ + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_ONLY); +} + +static void _action_wl_init(struct rtw89_dev *rtwdev) +{ + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WINIT); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_INIT); +} + +static void _action_wl_off(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + + if (wl->status.map.rf_off || btc->dm.bt_only) + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF); + + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_OFF); +} + +static void _action_freerun(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_FREERUN); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_FREERUN); + + btc->dm.freerun = true; +} + +static void _action_bt_whql(struct rtw89_dev *rtwdev) +{ + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_WHQL); +} + +static void _action_bt_off(struct rtw89_dev *rtwdev) +{ + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_OFF); +} + +static void _action_bt_idle(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_link_info *b = &btc->cx.bt.link_info; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /*wl-busy + bt idle*/ + if (b->profile_cnt.now > 0) + _set_policy(rtwdev, BTC_CXP_FIX_TD4010, + BTC_ACT_BT_IDLE); + else + _set_policy(rtwdev, BTC_CXP_FIX_TD4020, + 
BTC_ACT_BT_IDLE); + break; + case BTC_WBUSY_BSCAN: /*wl-busy + bt-inq */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD5050, + BTC_ACT_BT_IDLE); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */ + if (b->profile_cnt.now > 0) + _set_policy(rtwdev, BTC_CXP_FIX_TD4010, + BTC_ACT_BT_IDLE); + else + _set_policy(rtwdev, BTC_CXP_FIX_TD4020, + BTC_ACT_BT_IDLE); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq */ + _set_policy(rtwdev, BTC_CXP_FIX_TD5050, + BTC_ACT_BT_IDLE); + break; + case BTC_WLINKING: /* wl-connecting + bt-inq or bt-idle */ + _set_policy(rtwdev, BTC_CXP_FIX_TD7010, + BTC_ACT_BT_IDLE); + break; + case BTC_WIDLE: /* wl-idle + bt-idle */ + _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_IDLE); + break; + } + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_BT_IDLE); + } +} + +static void _action_bt_hfp(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { + if (btc->cx.wl.status.map._4way) + _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP); + else + _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HFP); + } else { + _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP); + } +} + +static void _action_bt_hid(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) /* shared-antenna */ + if (btc->cx.wl.status.map._4way) + _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HID); + else + _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HID); + else /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ3, BTC_ACT_BT_HID); +} + +static void _action_bt_a2dp(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; + struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; + struct rtw89_btc_dm *dm = &btc->dm; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */ + if (a2dp.vendor_id == 0x4c || dm->leak_ap) { + dm->slot_dur[CXST_W1] = 40; + dm->slot_dur[CXST_B1] = 200; + _set_policy(rtwdev, + BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP); + } else { + _set_policy(rtwdev, + BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP); + } + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */ + case BTC_WLINKING: /* wl-connecting + bt-A2DP */ + if (a2dp.vendor_id == 0x4c || dm->leak_ap) { + dm->slot_dur[CXST_W1] = 40; + dm->slot_dur[CXST_B1] = 200; + _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, + BTC_ACT_BT_A2DP); + } else { + _set_policy(rtwdev, BTC_CXP_AUTO_TD50200, + BTC_ACT_BT_A2DP); + } + break; + case BTC_WIDLE: /* wl-idle + bt-A2DP */ + _set_policy(rtwdev, BTC_CXP_AUTO_TD20200, BTC_ACT_BT_A2DP); + break; + } +} + +static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2dp_Sink */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD2030, BTC_ACT_BT_A2DPSINK); + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2dp_Sink */ + _set_policy(rtwdev, 
BTC_CXP_PFIX_TD2060, BTC_ACT_BT_A2DPSINK); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2dp_Sink */ + _set_policy(rtwdev, BTC_CXP_FIX_TD2030, BTC_ACT_BT_A2DPSINK); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2dp_Sink */ + _set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK); + break; + case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */ + _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK); + break; + case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */ + _set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK); + break; + } +} + +static void _action_bt_pan(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-PAN */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD5050, BTC_ACT_BT_PAN); + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-PAN */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD3070, BTC_ACT_BT_PAN); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-PAN */ + _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_PAN); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-PAN */ + _set_policy(rtwdev, BTC_CXP_FIX_TD3060, BTC_ACT_BT_PAN); + break; + case BTC_WLINKING: /* wl-connecting + bt-PAN */ + _set_policy(rtwdev, BTC_CXP_FIX_TD4020, BTC_ACT_BT_PAN); + break; + case BTC_WIDLE: /* wl-idle + bt-pan */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD2080, BTC_ACT_BT_PAN); + break; + } +} + +static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; + struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; + struct rtw89_btc_dm *dm = &btc->dm; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */ + case BTC_WIDLE: /* wl-idle + bt-A2DP */ + if (a2dp.vendor_id == 0x4c || dm->leak_ap) { + dm->slot_dur[CXST_W1] = 40; + dm->slot_dur[CXST_B1] = 200; + _set_policy(rtwdev, + BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID); + } else { + _set_policy(rtwdev, + BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP_HID); + } + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID); + break; + + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP_HID); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID */ + case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */ + if (a2dp.vendor_id == 0x4c || dm->leak_ap) { + dm->slot_dur[CXST_W1] = 40; + dm->slot_dur[CXST_B1] = 200; + _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, + BTC_ACT_BT_A2DP_HID); + } else { + _set_policy(rtwdev, BTC_CXP_AUTO_TD50200, + BTC_ACT_BT_A2DP_HID); + } + break; + } +} + +static void _action_bt_a2dp_pan(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+PAN */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_PAN); + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+PAN */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_PAN); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+PAN */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD5050, BTC_ACT_BT_A2DP_PAN); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+PAN */ + _set_policy(rtwdev, 
BTC_CXP_AUTO2_TD3070, BTC_ACT_BT_A2DP_PAN); + break; + case BTC_WLINKING: /* wl-connecting + bt-A2DP+PAN */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP_PAN); + break; + case BTC_WIDLE: /* wl-idle + bt-A2DP+PAN */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD2080, BTC_ACT_BT_A2DP_PAN); + break; + } +} + +static void _action_bt_pan_hid(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD3030, BTC_ACT_BT_PAN_HID); + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD3070, BTC_ACT_BT_PAN_HID); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_PAN_HID); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_FIX_TD3060, BTC_ACT_BT_PAN_HID); + break; + case BTC_WLINKING: /* wl-connecting + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_FIX_TD4010, BTC_ACT_BT_PAN_HID); + break; + case BTC_WIDLE: /* wl-idle + bt-PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PFIX_TD2080, BTC_ACT_BT_PAN_HID); + break; + } +} + +static void _action_bt_a2dp_pan_hid(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + switch (btc->cx.state_map) { + case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, + BTC_ACT_BT_A2DP_PAN_HID); + break; + case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, + BTC_ACT_BT_A2DP_PAN_HID); + break; + case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+PAN+HID */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD3070, + BTC_ACT_BT_A2DP_PAN_HID); + break; + case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+PAN+HID */ + case BTC_WLINKING: /* wl-connecting + bt-A2DP+PAN+HID */ + _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, + BTC_ACT_BT_A2DP_PAN_HID); + break; + case BTC_WIDLE: /* wl-idle + bt-A2DP+PAN+HID */ + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD2080, + BTC_ACT_BT_A2DP_PAN_HID); + break; + } +} + +static void _action_wl_5g(struct rtw89_dev *rtwdev) +{ + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W5G); + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_5G); +} + +static void _action_wl_other(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) + _set_policy(rtwdev, BTC_CXP_OFFB_BWB0, BTC_ACT_WL_OTHER); + else + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OTHER); +} + +static void _action_wl_nc(struct rtw89_dev *rtwdev) +{ + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_NC); +} + +static void _action_wl_rfk(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_rfk_info rfk = btc->cx.wl.rfk_info; + + if (rfk.state != BTC_WRFK_START) + return; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): band = %d\n", + __func__, rfk.band); + + _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WRFK); + _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_WL_RFK); +} + +static void _set_btg_ctrl(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + struct 
rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + bool is_btg = false; + + if (btc->ctrl.manual) + return; + + /* notify halbb ignore GNT_BT or not for WL BB Rx-AGC control */ + if (wl_rinfo->link_mode == BTC_WLINK_5G) /* always 0 if 5G */ + is_btg = false; + else if (wl_rinfo->link_mode == BTC_WLINK_25G_DBCC && + wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G) + is_btg = false; + else + is_btg = true; + + if (btc->dm.run_reason != BTC_RSN_NTFY_INIT && + is_btg == btc->dm.wl_btg_rx) + return; + + btc->dm.wl_btg_rx = is_btg; + + if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC) + return; + + rtw89_ctrl_btg(rtwdev, is_btg); +} + +struct rtw89_txtime_data { + struct rtw89_dev *rtwdev; + int type; + u32 tx_time; + u8 tx_retry; + u16 enable; + bool reenable; +}; + +static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_txtime_data *iter_data = + (struct rtw89_txtime_data *)data; + struct rtw89_dev *rtwdev = iter_data->rtwdev; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_wl_link_info *plink = NULL; + u8 port = rtwvif->port; + u32 tx_time = iter_data->tx_time; + u8 tx_retry = iter_data->tx_retry; + u16 enable = iter_data->enable; + bool reenable = iter_data->reenable; + + plink = &wl->link_info[port]; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): port = %d\n", __func__, port); + + if (!plink->connected) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): connected = %d\n", + __func__, plink->connected); + return; + } + + /* backup the original tx time before tx-limit on */ + if (reenable) { + rtw89_mac_get_tx_time(rtwdev, rtwsta, &plink->tx_time); + rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta, &plink->tx_retry); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): reenable, tx_time=%d tx_retry= %d\n", + __func__, plink->tx_time, plink->tx_retry); + } + + /* restore the original tx time if no tx-limit */ + if (!enable) { + rtw89_mac_set_tx_time(rtwdev, rtwsta, true, plink->tx_time); + rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, true, + plink->tx_retry); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): restore, tx_time=%d tx_retry= %d\n", + __func__, plink->tx_time, plink->tx_retry); + + } else { + rtw89_mac_set_tx_time(rtwdev, rtwsta, false, tx_time); + rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, false, tx_retry); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): set, tx_time=%d tx_retry= %d\n", + __func__, tx_time, tx_retry); + } +} + +static void _set_wl_tx_limit(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_bt_link_info *b = &bt->link_info; + struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc; + struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + struct rtw89_txtime_data data = {.rtwdev = rtwdev}; + u8 mode = wl_rinfo->link_mode; + u8 tx_retry = 0; + u32 tx_time = 0; + u16 enable = 0; + bool reenable = false; + + if (btc->ctrl.manual) + return; + + if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 || + mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) { + enable = 0; + tx_time = BTC_MAX_TX_TIME_DEF; + tx_retry = BTC_MAX_TX_RETRY_DEF; + } else if ((hfp->exist && 
hid->exist) || hid->pair_cnt > 1) { + enable = 1; + tx_time = BTC_MAX_TX_TIME_L2; + tx_retry = BTC_MAX_TX_RETRY_L1; + } else if (hfp->exist || hid->exist) { + enable = 1; + tx_time = BTC_MAX_TX_TIME_L3; + tx_retry = BTC_MAX_TX_RETRY_L1; + } else { + enable = 0; + tx_time = BTC_MAX_TX_TIME_DEF; + tx_retry = BTC_MAX_TX_RETRY_DEF; + } + + if (dm->wl_tx_limit.enable == enable && + dm->wl_tx_limit.tx_time == tx_time && + dm->wl_tx_limit.tx_retry == tx_retry) + return; + + if (!dm->wl_tx_limit.enable && enable) + reenable = true; + + dm->wl_tx_limit.enable = enable; + dm->wl_tx_limit.tx_time = tx_time; + dm->wl_tx_limit.tx_retry = tx_retry; + + data.enable = enable; + data.tx_time = tx_time; + data.tx_retry = tx_retry; + data.reenable = reenable; + + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_tx_time_iter, + &data); +} + +static void _set_bt_rx_agc(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + bool bt_hi_lna_rx = false; + + if (wl_rinfo->link_mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx) + bt_hi_lna_rx = true; + + if (bt_hi_lna_rx == bt->hi_lna_rx) + return; + + _write_scbd(rtwdev, BTC_WSCB_BT_HILNA, bt_hi_lna_rx); +} + +/* TODO add these functions */ +static void _action_common(struct rtw89_dev *rtwdev) +{ + _set_btg_ctrl(rtwdev); + _set_wl_tx_limit(rtwdev); + _set_bt_afh_info(rtwdev); + _set_bt_rx_agc(rtwdev); + _set_rf_trx_para(rtwdev); +} + +static void _action_by_bt(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc; + struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; + struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc; + u8 profile_map = 0; + + if (bt_linfo->hfp_desc.exist) + profile_map |= BTC_BT_HFP; + + if (bt_linfo->hid_desc.exist) + profile_map |= BTC_BT_HID; + + if (bt_linfo->a2dp_desc.exist) + profile_map |= BTC_BT_A2DP; + + if (bt_linfo->pan_desc.exist) + profile_map |= BTC_BT_PAN; + + switch (profile_map) { + case BTC_BT_NOPROFILE: + if (_check_freerun(rtwdev)) + _action_freerun(rtwdev); + else if (a2dp.active || pan.active) + _action_bt_pan(rtwdev); + else + _action_bt_idle(rtwdev); + break; + case BTC_BT_HFP: + if (_check_freerun(rtwdev)) + _action_freerun(rtwdev); + else + _action_bt_hfp(rtwdev); + break; + case BTC_BT_HFP | BTC_BT_HID: + case BTC_BT_HID: + if (_check_freerun(rtwdev)) + _action_freerun(rtwdev); + else + _action_bt_hid(rtwdev); + break; + case BTC_BT_A2DP: + if (_check_freerun(rtwdev)) + _action_freerun(rtwdev); + else if (a2dp.sink) + _action_bt_a2dpsink(rtwdev); + else if (bt_linfo->multi_link.now && !hid.pair_cnt) + _action_bt_a2dp_pan(rtwdev); + else + _action_bt_a2dp(rtwdev); + break; + case BTC_BT_PAN: + _action_bt_pan(rtwdev); + break; + case BTC_BT_A2DP | BTC_BT_HFP: + case BTC_BT_A2DP | BTC_BT_HID: + case BTC_BT_A2DP | BTC_BT_HFP | BTC_BT_HID: + if (_check_freerun(rtwdev)) + _action_freerun(rtwdev); + else + _action_bt_a2dp_hid(rtwdev); + break; + case BTC_BT_A2DP | BTC_BT_PAN: + _action_bt_a2dp_pan(rtwdev); + break; + case BTC_BT_PAN | BTC_BT_HFP: + case BTC_BT_PAN | BTC_BT_HID: + case BTC_BT_PAN | BTC_BT_HFP | BTC_BT_HID: + _action_bt_pan_hid(rtwdev); + break; + case BTC_BT_A2DP | BTC_BT_PAN | BTC_BT_HID: + case BTC_BT_A2DP | BTC_BT_PAN | BTC_BT_HFP: + default: + 
_action_bt_a2dp_pan_hid(rtwdev); + break; + } +} + +static void _action_wl_2g_sta(struct rtw89_dev *rtwdev) +{ + _action_by_bt(rtwdev); +} + +static void _action_wl_scan(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + + if (rtwdev->dbcc_en) { + if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G && + wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G) + _action_wl_5g(rtwdev); + else + _action_by_bt(rtwdev); + } else { + if (wl->scan_info.band[RTW89_PHY_0] != RTW89_BAND_2G) + _action_wl_5g(rtwdev); + else + _action_by_bt(rtwdev); + } +} + +static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, BTC_CXP_OFFE_DEF2, + BTC_ACT_WL_25G_MCC); + else + _set_policy(rtwdev, BTC_CXP_OFFE_DEF, + BTC_ACT_WL_25G_MCC); + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_25G_MCC); + } +} + +static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev) +{ struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, BTC_CXP_OFFE_DEF2, + BTC_ACT_WL_2G_MCC); + else + _set_policy(rtwdev, BTC_CXP_OFFE_DEF, + BTC_ACT_WL_2G_MCC); + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_MCC); + } +} + +static void _action_wl_2g_scc(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_SCC); + else + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_SCC); + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_SCC); + } +} + +static void _action_wl_2g_ap(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, BTC_CXP_OFFE_DEF2, + BTC_ACT_WL_2G_AP); + else + _set_policy(rtwdev, BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_AP); + } else {/* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_AP); + } +} + +static void _action_wl_2g_go(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_GO); + else + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_GO); + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GO); + } +} + +static void _action_wl_2g_gc(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + _action_by_bt(rtwdev); + } else {/* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GC); + } +} + +static void 
_action_wl_2g_nan(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */ + if (btc->cx.bt.link_info.profile_cnt.now == 0) + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_NAN); + else + _set_policy(rtwdev, + BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_NAN); + } else { /* dedicated-antenna */ + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_NAN); + } +} + +static u32 _read_scbd(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + u32 scbd_val = 0; + + if (!chip->scbd) + return 0; + + scbd_val = rtw89_mac_get_sb(rtwdev); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], read scbd: 0x%08x\n", + scbd_val); + + btc->cx.cnt_bt[BTC_BCNT_SCBDREAD]++; + return scbd_val; +} + +static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + u32 scbd_val = 0; + + if (!chip->scbd) + return; + + scbd_val = state ? wl->scbd | val : wl->scbd & ~val; + + if (scbd_val == wl->scbd) + return; + rtw89_mac_cfg_sb(rtwdev, scbd_val); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n", + scbd_val); + wl->scbd = scbd_val; + + btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++; +} + +static u8 +_update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 next_state, tol = chip->rssi_tol; + + if (pre_state == BTC_RSSI_ST_LOW || + pre_state == BTC_RSSI_ST_STAY_LOW) { + if (rssi >= (thresh + tol)) + next_state = BTC_RSSI_ST_HIGH; + else + next_state = BTC_RSSI_ST_STAY_LOW; + } else { + if (rssi < thresh) + next_state = BTC_RSSI_ST_LOW; + else + next_state = BTC_RSSI_ST_STAY_HIGH; + } + + return next_state; +} + +static +void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + btc->cx.wl.dbcc_info.real_band[phy_idx] = + btc->cx.wl.scan_info.phy_map & BIT(phy_idx) ? + btc->cx.wl.dbcc_info.scan_band[phy_idx] : + btc->cx.wl.dbcc_info.op_band[phy_idx]; +} + +static void _update_wl_info(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + u8 i, cnt_connect = 0, cnt_connecting = 0, cnt_active = 0; + u8 cnt_2g = 0, cnt_5g = 0, phy; + u32 wl_2g_ch[2] = {0}, wl_5g_ch[2] = {0}; + bool b2g = false, b5g = false, client_joined = false; + + memset(wl_rinfo, 0, sizeof(*wl_rinfo)); + + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) { + /* check if role active? 
*/ + if (!wl_linfo[i].active) + continue; + + cnt_active++; + wl_rinfo->active_role[cnt_active - 1].role = wl_linfo[i].role; + wl_rinfo->active_role[cnt_active - 1].pid = wl_linfo[i].pid; + wl_rinfo->active_role[cnt_active - 1].phy = wl_linfo[i].phy; + wl_rinfo->active_role[cnt_active - 1].band = wl_linfo[i].band; + wl_rinfo->active_role[cnt_active - 1].noa = (u8)wl_linfo[i].noa; + wl_rinfo->active_role[cnt_active - 1].connected = 0; + + wl->port_id[wl_linfo[i].role] = wl_linfo[i].pid; + + phy = wl_linfo[i].phy; + + /* check dbcc role */ + if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) { + wl_dinfo->role[phy] = wl_linfo[i].role; + wl_dinfo->op_band[phy] = wl_linfo[i].band; + _update_dbcc_band(rtwdev, phy); + _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); + } + + if (wl_linfo[i].connected == MLME_NO_LINK) { + continue; + } else if (wl_linfo[i].connected == MLME_LINKING) { + cnt_connecting++; + } else { + cnt_connect++; + if ((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO || + wl_linfo[i].role == RTW89_WIFI_ROLE_AP) && + wl_linfo[i].client_cnt > 1) + client_joined = true; + } + + wl_rinfo->role_map.val |= BIT(wl_linfo[i].role); + wl_rinfo->active_role[cnt_active - 1].ch = wl_linfo[i].ch; + wl_rinfo->active_role[cnt_active - 1].bw = wl_linfo[i].bw; + wl_rinfo->active_role[cnt_active - 1].connected = 1; + + /* only care 2 roles + BT coex */ + if (wl_linfo[i].band != RTW89_BAND_2G) { + if (cnt_5g <= ARRAY_SIZE(wl_5g_ch) - 1) + wl_5g_ch[cnt_5g] = wl_linfo[i].ch; + cnt_5g++; + b5g = true; + } else { + if (cnt_2g <= ARRAY_SIZE(wl_2g_ch) - 1) + wl_2g_ch[cnt_2g] = wl_linfo[i].ch; + cnt_2g++; + b2g = true; + } + } + + wl_rinfo->connect_cnt = cnt_connect; + + /* Be careful to change the following sequence!! */ + if (cnt_connect == 0) { + wl_rinfo->link_mode = BTC_WLINK_NOLINK; + wl_rinfo->role_map.role.none = 1; + } else if (!b2g && b5g) { + wl_rinfo->link_mode = BTC_WLINK_5G; + } else if (wl_rinfo->role_map.role.nan) { + wl_rinfo->link_mode = BTC_WLINK_2G_NAN; + } else if (cnt_connect > BTC_TDMA_WLROLE_MAX) { + wl_rinfo->link_mode = BTC_WLINK_OTHER; + } else if (b2g && b5g && cnt_connect == 2) { + if (rtwdev->dbcc_en) { + switch (wl_dinfo->role[RTW89_PHY_0]) { + case RTW89_WIFI_ROLE_STATION: + wl_rinfo->link_mode = BTC_WLINK_2G_STA; + break; + case RTW89_WIFI_ROLE_P2P_GO: + wl_rinfo->link_mode = BTC_WLINK_2G_GO; + break; + case RTW89_WIFI_ROLE_P2P_CLIENT: + wl_rinfo->link_mode = BTC_WLINK_2G_GC; + break; + case RTW89_WIFI_ROLE_AP: + wl_rinfo->link_mode = BTC_WLINK_2G_AP; + break; + default: + wl_rinfo->link_mode = BTC_WLINK_OTHER; + break; + } + } else { + wl_rinfo->link_mode = BTC_WLINK_25G_MCC; + } + } else if (!b5g && cnt_connect == 2) { + if (wl_rinfo->role_map.role.station && + (wl_rinfo->role_map.role.p2p_go || + wl_rinfo->role_map.role.p2p_gc || + wl_rinfo->role_map.role.ap)) { + if (wl_2g_ch[0] == wl_2g_ch[1]) + wl_rinfo->link_mode = BTC_WLINK_2G_SCC; + else + wl_rinfo->link_mode = BTC_WLINK_2G_MCC; + } else { + wl_rinfo->link_mode = BTC_WLINK_2G_MCC; + } + } else if (!b5g && cnt_connect == 1) { + if (wl_rinfo->role_map.role.station) + wl_rinfo->link_mode = BTC_WLINK_2G_STA; + else if (wl_rinfo->role_map.role.ap) + wl_rinfo->link_mode = BTC_WLINK_2G_AP; + else if (wl_rinfo->role_map.role.p2p_go) + wl_rinfo->link_mode = BTC_WLINK_2G_GO; + else if (wl_rinfo->role_map.role.p2p_gc) + wl_rinfo->link_mode = BTC_WLINK_2G_GC; + else + wl_rinfo->link_mode = BTC_WLINK_OTHER; + } + + /* if no client_joined, don't care P2P-GO/AP role */ + if (wl_rinfo->role_map.role.p2p_go || wl_rinfo->role_map.role.ap) { + if 
(!client_joined) { + if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC || + wl_rinfo->link_mode == BTC_WLINK_2G_MCC) { + wl_rinfo->link_mode = BTC_WLINK_2G_STA; + wl_rinfo->connect_cnt = 1; + } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO || + wl_rinfo->link_mode == BTC_WLINK_2G_AP) { + wl_rinfo->link_mode = BTC_WLINK_NOLINK; + wl_rinfo->connect_cnt = 0; + } + } + } + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], cnt_connect = %d, link_mode = %d\n", + cnt_connect, wl_rinfo->link_mode); + + _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE); +} + +#define BTC_CHK_HANG_MAX 3 +#define BTC_SCB_INV_VALUE GENMASK(31, 0) + +void rtw89_coex_act1_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + coex_act1_work.work); + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + + mutex_lock(&rtwdev->mutex); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); + dm->cnt_notify[BTC_NCNT_TIMER]++; + if (wl->status.map._4way) + wl->status.map._4way = false; + if (wl->status.map.connecting) + wl->status.map.connecting = false; + + _run_coex(rtwdev, BTC_RSN_ACT1_WORK); + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_coex_bt_devinfo_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + coex_bt_devinfo_work.work); + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; + + mutex_lock(&rtwdev->mutex); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); + dm->cnt_notify[BTC_NCNT_TIMER]++; + a2dp->play_latency = 0; + _run_coex(rtwdev, BTC_RSN_BT_DEVINFO_WORK); + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_coex_rfk_chk_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + coex_rfk_chk_work.work); + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + + mutex_lock(&rtwdev->mutex); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); + dm->cnt_notify[BTC_NCNT_TIMER]++; + if (wl->rfk_info.state != BTC_WRFK_STOP) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): RFK timeout\n", __func__); + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]++; + dm->error.map.wl_rfk_timeout = true; + wl->rfk_info.state = BTC_WRFK_STOP; + _write_scbd(rtwdev, BTC_WSCB_WLRFK, false); + _run_coex(rtwdev, BTC_RSN_RFK_CHK_WORK); + } + mutex_unlock(&rtwdev->mutex); +} + +static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + u32 val; + bool status_change = false; + + if (!chip->scbd) + return; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__); + + val = _read_scbd(rtwdev); + if (val == BTC_SCB_INV_VALUE) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by invalid scbd value\n", + __func__); + return; + } + + if (!(val & BTC_BSCB_ON) || + btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] >= BTC_CHK_HANG_MAX) + bt->enable.now = 0; + else + bt->enable.now = 1; + + if (bt->enable.now != bt->enable.last) + status_change = true; + + /* reset bt info if bt re-enable */ + if (bt->enable.now && !bt->enable.last) { + _reset_btc_var(rtwdev, 
BTC_RESET_BTINFO); + cx->cnt_bt[BTC_BCNT_REENABLE]++; + bt->enable.now = 1; + } + + bt->enable.last = bt->enable.now; + bt->scbd = val; + bt->mbx_avl = !!(val & BTC_BSCB_ACT); + + if (bt->whql_test != !!(val & BTC_BSCB_WHQL)) + status_change = true; + + bt->whql_test = !!(val & BTC_BSCB_WHQL); + bt->btg_type = val & BTC_BSCB_BT_S1 ? BTC_BT_BTG : BTC_BT_ALONE; + bt->link_info.a2dp_desc.active = !!(val & BTC_BSCB_A2DP_ACT); + + /* if rfk run 1->0 */ + if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN)) + status_change = true; + + bt->rfk_info.map.run = !!(val & BTC_BSCB_RFK_RUN); + bt->rfk_info.map.req = !!(val & BTC_BSCB_RFK_REQ); + bt->hi_lna_rx = !!(val & BTC_BSCB_BT_HILNA); + bt->link_info.status.map.connect = !!(val & BTC_BSCB_BT_CONNECT); + bt->run_patch_code = !!(val & BTC_BSCB_PATCH_CODE); + + if (!only_update && status_change) + _run_coex(rtwdev, BTC_RSN_UPDATE_BT_SCBD); +} + +static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_bt_info *bt = &cx->bt; + + _update_bt_scbd(rtwdev, true); + + cx->cnt_wl[BTC_WCNT_RFK_REQ]++; + + if ((bt->rfk_info.map.run || bt->rfk_info.map.req) && + !bt->rfk_info.map.timeout) { + cx->cnt_wl[BTC_WCNT_RFK_REJECT]++; + } else { + cx->cnt_wl[BTC_WCNT_RFK_GO]++; + return true; + } + return false; +} + +static +void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + u8 mode = wl_rinfo->link_mode; + + lockdep_assert_held(&rtwdev->mutex); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n", + __func__, reason, mode); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n", + __func__, dm->wl_only, dm->bt_only); + + dm->run_reason = reason; + _update_dm_step(rtwdev, reason); + _update_btc_state_map(rtwdev); + + /* Be careful to change the following function sequence!! 
*/ + if (btc->ctrl.manual) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return for Manual CTRL!!\n", + __func__); + return; + } + + if (btc->ctrl.igno_bt && + (reason == BTC_RSN_UPDATE_BT_INFO || + reason == BTC_RSN_UPDATE_BT_SCBD)) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return for Stop Coex DM!!\n", + __func__); + return; + } + + if (!wl->status.map.init_ok) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return for WL init fail!!\n", + __func__); + return; + } + + if (wl->status.map.rf_off_pre == wl->status.map.rf_off && + wl->status.map.lps_pre == wl->status.map.lps && + (reason == BTC_RSN_NTFY_POWEROFF || + reason == BTC_RSN_NTFY_RADIO_STATE)) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return for WL rf off state no change!!\n", + __func__); + return; + } + + dm->cnt_dm[BTC_DCNT_RUN]++; + + if (btc->ctrl.always_freerun) { + _action_freerun(rtwdev); + btc->ctrl.igno_bt = true; + goto exit; + } + + if (dm->wl_only) { + _action_wl_only(rtwdev); + btc->ctrl.igno_bt = true; + goto exit; + } + + if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) { + _action_wl_off(rtwdev); + btc->ctrl.igno_bt = true; + goto exit; + } + + btc->ctrl.igno_bt = false; + dm->freerun = false; + + if (reason == BTC_RSN_NTFY_INIT) { + _action_wl_init(rtwdev); + goto exit; + } + + if (!cx->bt.enable.now && !cx->other.type) { + _action_bt_off(rtwdev); + goto exit; + } + + if (cx->bt.whql_test) { + _action_bt_whql(rtwdev); + goto exit; + } + + if (wl->rfk_info.state != BTC_WRFK_STOP) { + _action_wl_rfk(rtwdev); + goto exit; + } + + if (cx->state_map == BTC_WLINKING) { + if (mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_2G_STA || + mode == BTC_WLINK_5G) { + _action_wl_scan(rtwdev); + goto exit; + } + } + + if (wl->status.map.scan) { + _action_wl_scan(rtwdev); + goto exit; + } + + switch (mode) { + case BTC_WLINK_NOLINK: + _action_wl_nc(rtwdev); + break; + case BTC_WLINK_2G_STA: + _action_wl_2g_sta(rtwdev); + break; + case BTC_WLINK_2G_AP: + _action_wl_2g_ap(rtwdev); + break; + case BTC_WLINK_2G_GO: + _action_wl_2g_go(rtwdev); + break; + case BTC_WLINK_2G_GC: + _action_wl_2g_gc(rtwdev); + break; + case BTC_WLINK_2G_SCC: + _action_wl_2g_scc(rtwdev); + break; + case BTC_WLINK_2G_MCC: + _action_wl_2g_mcc(rtwdev); + break; + case BTC_WLINK_25G_MCC: + _action_wl_25g_mcc(rtwdev); + break; + case BTC_WLINK_5G: + _action_wl_5g(rtwdev); + break; + case BTC_WLINK_2G_NAN: + _action_wl_2g_nan(rtwdev); + break; + default: + _action_wl_other(rtwdev); + break; + } + +exit: + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): exit\n", __func__); + _action_common(rtwdev); +} + +void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + btc->dm.cnt_notify[BTC_NCNT_POWER_ON]++; +} + +void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__); + btc->dm.cnt_notify[BTC_NCNT_POWER_OFF]++; + + btc->cx.wl.status.map.rf_off = 1; + + _write_scbd(rtwdev, BTC_WSCB_ALL, false); + _run_coex(rtwdev, BTC_RSN_NTFY_POWEROFF); + + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, 0); + + btc->cx.wl.status.map.rf_off_pre = btc->cx.wl.status.map.rf_off; +} + +static void _set_init_info(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + 
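/* cache the coex bring-up state (wl_only/bt_only, WL init result, DBCC, AFH guard channel, module info) in dm->init_info */ + 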
dm->init_info.wl_only = (u8)dm->wl_only; + dm->init_info.bt_only = (u8)dm->bt_only; + dm->init_info.wl_init_ok = (u8)wl->status.map.init_ok; + dm->init_info.dbcc_en = rtwdev->dbcc_en; + dm->init_info.cx_other = btc->cx.other.type; + dm->init_info.wl_guard_ch = chip->afh_guard_ch; + dm->init_info.module = btc->mdinfo; +} + +void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + const struct rtw89_chip_info *chip = rtwdev->chip; + + _reset_btc_var(rtwdev, BTC_RESET_ALL); + btc->dm.run_reason = BTC_RSN_NONE; + btc->dm.run_action = BTC_ACT_NONE; + btc->ctrl.igno_bt = true; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): mode=%d\n", __func__, mode); + + dm->cnt_notify[BTC_NCNT_INIT_COEX]++; + dm->wl_only = mode == BTC_MODE_WL ? 1 : 0; + dm->bt_only = mode == BTC_MODE_BT ? 1 : 0; + wl->status.map.rf_off = mode == BTC_MODE_WLOFF ? 1 : 0; + + chip->ops->btc_set_rfe(rtwdev); + chip->ops->btc_init_cfg(rtwdev); + + if (!wl->status.map.init_ok) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return for WL init fail!!\n", + __func__); + dm->error.map.init = true; + return; + } + + _write_scbd(rtwdev, + BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true); + _update_bt_scbd(rtwdev, true); + if (rtw89_mac_get_ctrl_path(rtwdev)) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): PTA owner warning!!\n", + __func__); + dm->error.map.pta_owner = true; + } + + _set_init_info(rtwdev); + _set_wl_tx_power(rtwdev, RTW89_BTC_WL_DEF_TX_PWR); + rtw89_btc_fw_set_slots(rtwdev, CXST_MAX, dm->slot); + btc_fw_set_monreg(rtwdev); + _fw_set_drv_info(rtwdev, CXDRVINFO_INIT); + _fw_set_drv_info(rtwdev, CXDRVINFO_CTRL); + + _run_coex(rtwdev, BTC_RSN_NTFY_INIT); +} + +void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): phy_idx=%d, band=%d\n", + __func__, phy_idx, band); + btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++; + wl->status.map.scan = true; + wl->scan_info.band[phy_idx] = band; + wl->scan_info.phy_map |= BIT(phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN); + + if (rtwdev->dbcc_en) { + wl->dbcc_info.scan_band[phy_idx] = band; + _update_dbcc_band(rtwdev, phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); + } + + _run_coex(rtwdev, BTC_RSN_NTFY_SCAN_START); +} + +void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): phy_idx=%d\n", __func__, phy_idx); + btc->dm.cnt_notify[BTC_NCNT_SCAN_FINISH]++; + + wl->status.map.scan = false; + wl->scan_info.phy_map &= ~BIT(phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN); + + if (rtwdev->dbcc_en) { + _update_dbcc_band(rtwdev, phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); + } + + _run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH); +} + +void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): phy_idx=%d, band=%d\n", + __func__, phy_idx, band); + btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++; + + wl->scan_info.band[phy_idx] = band; + wl->scan_info.phy_map |= BIT(phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN); + + if (rtwdev->dbcc_en) 
{ + wl->dbcc_info.scan_band[phy_idx] = band; + _update_dbcc_band(rtwdev, phy_idx); + _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); + } + _run_coex(rtwdev, BTC_RSN_NTFY_SWBAND); +} + +void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev, + enum btc_pkt_type pkt_type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_bt_link_info *b = &cx->bt.link_info; + struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc; + struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc; + u32 cnt; + u32 delay = RTW89_COEX_ACT1_WORK_PERIOD; + bool delay_work = false; + + switch (pkt_type) { + case PACKET_DHCP: + cnt = ++cx->cnt_wl[BTC_WCNT_DHCP]; + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): DHCP cnt=%d\n", __func__, cnt); + wl->status.map.connecting = true; + delay_work = true; + break; + case PACKET_EAPOL: + cnt = ++cx->cnt_wl[BTC_WCNT_EAPOL]; + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): EAPOL cnt=%d\n", __func__, cnt); + wl->status.map._4way = true; + delay_work = true; + if (hfp->exist || hid->exist) + delay /= 2; + break; + case PACKET_EAPOL_END: + cnt = ++cx->cnt_wl[BTC_WCNT_EAPOL]; + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): EAPOL_End cnt=%d\n", + __func__, cnt); + wl->status.map._4way = false; + cancel_delayed_work(&rtwdev->coex_act1_work); + break; + case PACKET_ARP: + cnt = ++cx->cnt_wl[BTC_WCNT_ARP]; + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): ARP cnt=%d\n", __func__, cnt); + return; + case PACKET_ICMP: + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): ICMP pkt\n", __func__); + return; + default: + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): unknown packet type %d\n", + __func__, pkt_type); + return; + } + + if (delay_work) { + cancel_delayed_work(&rtwdev->coex_act1_work); + ieee80211_queue_delayed_work(rtwdev->hw, + &rtwdev->coex_act1_work, delay); + } + + btc->dm.cnt_notify[BTC_NCNT_SPECIAL_PACKET]++; + _run_coex(rtwdev, BTC_RSN_NTFY_SPECIFIC_PACKET); +} + +void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + btc.eapol_notify_work); + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL); + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + btc.arp_notify_work); + + mutex_lock(&rtwdev->mutex); + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ARP); + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + btc.dhcp_notify_work); + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_DHCP); + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + btc.icmp_notify_work); + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ICMP); + mutex_unlock(&rtwdev->mutex); +} + +static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_bt_link_info *b = &bt->link_info; + struct 
rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc; + struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc; + struct rtw89_btc_bt_a2dp_desc *a2dp = &b->a2dp_desc; + struct rtw89_btc_bt_pan_desc *pan = &b->pan_desc; + union btc_btinfo btinfo; + + if (buf[BTC_BTINFO_L1] != 6) + return; + + if (!memcmp(bt->raw_info, buf, BTC_BTINFO_MAX)) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by bt-info duplicate!!\n", + __func__); + cx->cnt_bt[BTC_BCNT_INFOSAME]++; + return; + } + + memcpy(bt->raw_info, buf, BTC_BTINFO_MAX); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): bt_info[2]=0x%02x\n", + __func__, bt->raw_info[2]); + + /* reset to mo-connect before update */ + b->status.val = BTC_BLINK_NOCONNECT; + b->profile_cnt.last = b->profile_cnt.now; + b->relink.last = b->relink.now; + a2dp->exist_last = a2dp->exist; + b->multi_link.last = b->multi_link.now; + bt->inq_pag.last = bt->inq_pag.now; + b->profile_cnt.now = 0; + hid->type = 0; + + /* parse raw info low-Byte2 */ + btinfo.val = bt->raw_info[BTC_BTINFO_L2]; + b->status.map.connect = btinfo.lb2.connect; + b->status.map.sco_busy = btinfo.lb2.sco_busy; + b->status.map.acl_busy = btinfo.lb2.acl_busy; + b->status.map.inq_pag = btinfo.lb2.inq_pag; + bt->inq_pag.now = btinfo.lb2.inq_pag; + cx->cnt_bt[BTC_BCNT_INQPAG] += !!(bt->inq_pag.now && !bt->inq_pag.last); + + hfp->exist = btinfo.lb2.hfp; + b->profile_cnt.now += (u8)hfp->exist; + hid->exist = btinfo.lb2.hid; + b->profile_cnt.now += (u8)hid->exist; + a2dp->exist = btinfo.lb2.a2dp; + b->profile_cnt.now += (u8)a2dp->exist; + pan->active = btinfo.lb2.pan; + + /* parse raw info low-Byte3 */ + btinfo.val = bt->raw_info[BTC_BTINFO_L3]; + if (btinfo.lb3.retry != 0) + cx->cnt_bt[BTC_BCNT_RETRY]++; + b->cqddr = btinfo.lb3.cqddr; + cx->cnt_bt[BTC_BCNT_INQ] += !!(btinfo.lb3.inq && !bt->inq); + bt->inq = btinfo.lb3.inq; + cx->cnt_bt[BTC_BCNT_PAGE] += !!(btinfo.lb3.pag && !bt->pag); + bt->pag = btinfo.lb3.pag; + + b->status.map.mesh_busy = btinfo.lb3.mesh_busy; + /* parse raw info high-Byte0 */ + btinfo.val = bt->raw_info[BTC_BTINFO_H0]; + /* raw val is dBm unit, translate from -100~ 0dBm to 0~100%*/ + b->rssi = chip->ops->btc_get_bt_rssi(rtwdev, btinfo.hb0.rssi); + + /* parse raw info high-Byte1 */ + btinfo.val = bt->raw_info[BTC_BTINFO_H1]; + b->status.map.ble_connect = btinfo.hb1.ble_connect; + if (btinfo.hb1.ble_connect) + hid->type |= (hid->exist ? BTC_HID_BLE : BTC_HID_RCU); + + cx->cnt_bt[BTC_BCNT_REINIT] += !!(btinfo.hb1.reinit && !bt->reinit); + bt->reinit = btinfo.hb1.reinit; + cx->cnt_bt[BTC_BCNT_RELINK] += !!(btinfo.hb1.relink && !b->relink.now); + b->relink.now = btinfo.hb1.relink; + cx->cnt_bt[BTC_BCNT_IGNOWL] += !!(btinfo.hb1.igno_wl && !bt->igno_wl); + bt->igno_wl = btinfo.hb1.igno_wl; + + if (bt->igno_wl && !cx->wl.status.map.rf_off) + _set_bt_ignore_wlan_act(rtwdev, false); + + hid->type |= (btinfo.hb1.voice ? 
BTC_HID_RCU_VOICE : 0); + bt->ble_scan_en = btinfo.hb1.ble_scan; + + cx->cnt_bt[BTC_BCNT_ROLESW] += !!(btinfo.hb1.role_sw && !b->role_sw); + b->role_sw = btinfo.hb1.role_sw; + + b->multi_link.now = btinfo.hb1.multi_link; + + /* parse raw info high-Byte2 */ + btinfo.val = bt->raw_info[BTC_BTINFO_H2]; + pan->exist = btinfo.hb2.pan_active; + b->profile_cnt.now += (u8)pan->exist; + + cx->cnt_bt[BTC_BCNT_AFH] += !!(btinfo.hb2.afh_update && !b->afh_update); + b->afh_update = btinfo.hb2.afh_update; + a2dp->active = btinfo.hb2.a2dp_active; + b->slave_role = btinfo.hb2.slave; + hid->slot_info = btinfo.hb2.hid_slot; + hid->pair_cnt = btinfo.hb2.hid_cnt; + hid->type |= (hid->slot_info == BTC_HID_218 ? + BTC_HID_218 : BTC_HID_418); + /* parse raw info high-Byte3 */ + btinfo.val = bt->raw_info[BTC_BTINFO_H3]; + a2dp->bitpool = btinfo.hb3.a2dp_bitpool; + + if (b->tx_3m != (u32)btinfo.hb3.tx_3m) + cx->cnt_bt[BTC_BCNT_RATECHG]++; + b->tx_3m = (u32)btinfo.hb3.tx_3m; + + a2dp->sink = btinfo.hb3.a2dp_sink; + + if (b->profile_cnt.now || b->status.map.ble_connect) + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 1); + else + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 0); + + if (!a2dp->exist_last && a2dp->exist) { + a2dp->vendor_id = 0; + a2dp->flush_time = 0; + a2dp->play_latency = 1; + ieee80211_queue_delayed_work(rtwdev->hw, + &rtwdev->coex_bt_devinfo_work, + RTW89_COEX_BT_DEVINFO_WORK_PERIOD); + } + + if (a2dp->exist && (a2dp->flush_time == 0 || a2dp->vendor_id == 0 || + a2dp->play_latency == 1)) + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 1); + else + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 0); + + _run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO); +} + +enum btc_wl_mode { + BTC_WL_MODE_HT = 0, + BTC_WL_MODE_VHT = 1, + BTC_WL_MODE_HE = 2, + BTC_WL_MODE_NUM, +}; + +void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, enum btc_role_state state) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_link_info r = {0}; + struct rtw89_btc_wl_link_info *wlinfo = NULL; + u8 mode = 0; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], role is STA=%d\n", + vif->type == NL80211_IFTYPE_STATION); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n", + hal->current_band_type, hal->current_channel, + hal->current_band_width); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n", + state == BTC_ROLE_MSTS_STA_CONN_END); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], bcn_period=%d dtim_period=%d\n", + vif->bss_conf.beacon_int, vif->bss_conf.dtim_period); + + if (rtwsta) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], STA mac_id=%d\n", + rtwsta->mac_id); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], STA support HE=%d VHT=%d HT=%d\n", + sta->he_cap.has_he, + sta->vht_cap.vht_supported, + sta->ht_cap.ht_supported); + if (sta->he_cap.has_he) + mode |= BIT(BTC_WL_MODE_HE); + if (sta->vht_cap.vht_supported) + mode |= BIT(BTC_WL_MODE_VHT); + if (sta->ht_cap.ht_supported) + mode |= BIT(BTC_WL_MODE_HT); + + r.mode = mode; + } + + if (rtwvif->wifi_role >= RTW89_WIFI_ROLE_MLME_MAX) + return; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], wifi_role=%d\n", rtwvif->wifi_role); + + r.role = 
rtwvif->wifi_role; + r.phy = rtwvif->phy_idx; + r.pid = rtwvif->port; + r.active = true; + r.connected = MLME_LINKED; + r.bcn_period = vif->bss_conf.beacon_int; + r.dtim_period = vif->bss_conf.dtim_period; + r.band = hal->current_band_type; + r.ch = hal->current_channel; + r.bw = hal->current_band_width; + ether_addr_copy(r.mac_addr, rtwvif->mac_addr); + + if (rtwsta && vif->type == NL80211_IFTYPE_STATION) + r.mac_id = rtwsta->mac_id; + + btc->dm.cnt_notify[BTC_NCNT_ROLE_INFO]++; + + wlinfo = &wl->link_info[r.pid]; + + memcpy(wlinfo, &r, sizeof(*wlinfo)); + _update_wl_info(rtwdev); + + if (wlinfo->role == RTW89_WIFI_ROLE_STATION && + wlinfo->connected == MLME_NO_LINK) + btc->dm.leak_ap = 0; + + if (state == BTC_ROLE_MSTS_STA_CONN_START) + wl->status.map.connecting = 1; + else + wl->status.map.connecting = 0; + + if (state == BTC_ROLE_MSTS_STA_DIS_CONN) + wl->status.map._4way = false; + + _run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO); +} + +void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_state) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rf_state = %d\n", + __func__, rf_state); + btc->dm.cnt_notify[BTC_NCNT_RADIO_STATE]++; + + switch (rf_state) { + case BTC_RFCTRL_WL_OFF: + wl->status.map.rf_off = 1; + wl->status.map.lps = 0; + break; + case BTC_RFCTRL_FW_CTRL: + wl->status.map.rf_off = 0; + wl->status.map.lps = 1; + break; + case BTC_RFCTRL_WL_ON: + default: + wl->status.map.rf_off = 0; + wl->status.map.lps = 0; + break; + } + + if (rf_state == BTC_RFCTRL_WL_ON) { + rtw89_btc_fw_en_rpt(rtwdev, + RPT_EN_MREG | RPT_EN_BT_VER_INFO, true); + _write_scbd(rtwdev, BTC_WSCB_ACTIVE, true); + _update_bt_scbd(rtwdev, true); + chip->ops->btc_init_cfg(rtwdev); + } else { + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false); + _write_scbd(rtwdev, BTC_WSCB_ACTIVE | BTC_WSCB_WLBUSY, false); + } + + _run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE); + + wl->status.map.rf_off_pre = wl->status.map.rf_off; + wl->status.map.lps_pre = wl->status.map.lps; +} + +static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path, + enum btc_wl_rfk_type type, + enum btc_wl_rfk_state state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + bool result = BTC_WRFK_REJECT; + + wl->rfk_info.type = type; + wl->rfk_info.path_map = FIELD_GET(BTC_RFK_PATH_MAP, phy_path); + wl->rfk_info.phy_map = FIELD_GET(BTC_RFK_PHY_MAP, phy_path); + wl->rfk_info.band = FIELD_GET(BTC_RFK_BAND_MAP, phy_path); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s()_start: phy=0x%x, path=0x%x, type=%d, state=%d\n", + __func__, wl->rfk_info.phy_map, wl->rfk_info.path_map, + type, state); + + switch (state) { + case BTC_WRFK_START: + result = _chk_wl_rfk_request(rtwdev); + wl->rfk_info.state = result ? 
BTC_WRFK_START : BTC_WRFK_STOP; + + _write_scbd(rtwdev, BTC_WSCB_WLRFK, result); + + btc->dm.cnt_notify[BTC_NCNT_WL_RFK]++; + break; + case BTC_WRFK_ONESHOT_START: + case BTC_WRFK_ONESHOT_STOP: + if (wl->rfk_info.state == BTC_WRFK_STOP) { + result = BTC_WRFK_REJECT; + } else { + result = BTC_WRFK_ALLOW; + wl->rfk_info.state = state; + } + break; + case BTC_WRFK_STOP: + result = BTC_WRFK_ALLOW; + wl->rfk_info.state = BTC_WRFK_STOP; + + _write_scbd(rtwdev, BTC_WSCB_WLRFK, false); + cancel_delayed_work(&rtwdev->coex_rfk_chk_work); + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s() warning state=%d\n", __func__, state); + break; + } + + if (result == BTC_WRFK_ALLOW) { + if (wl->rfk_info.state == BTC_WRFK_START || + wl->rfk_info.state == BTC_WRFK_STOP) + _run_coex(rtwdev, BTC_RSN_NTFY_WL_RFK); + + if (wl->rfk_info.state == BTC_WRFK_START) + ieee80211_queue_delayed_work(rtwdev->hw, + &rtwdev->coex_rfk_chk_work, + RTW89_COEX_RFK_CHK_WORK_PERIOD); + } + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s()_finish: rfk_cnt=%d, result=%d\n", + __func__, btc->dm.cnt_notify[BTC_NCNT_WL_RFK], result); + + return result == BTC_WRFK_ALLOW; +} + +void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map, + enum btc_wl_rfk_type type, + enum btc_wl_rfk_state state) +{ + u8 band; + bool allow; + int ret; + + band = FIELD_GET(BTC_RFK_BAND_MAP, phy_map); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] RFK notify (%s / PHY%u / K_type = %u / path_idx = %lu / process = %s)\n", + band == RTW89_BAND_2G ? "2G" : + band == RTW89_BAND_5G ? "5G" : "6G", + !!(FIELD_GET(BTC_RFK_PHY_MAP, phy_map) & BIT(RTW89_PHY_1)), + type, + FIELD_GET(BTC_RFK_PATH_MAP, phy_map), + state == BTC_WRFK_STOP ? "RFK_STOP" : + state == BTC_WRFK_START ? "RFK_START" : + state == BTC_WRFK_ONESHOT_START ? 
"ONE-SHOT_START" : + "ONE-SHOT_STOP"); + + if (state != BTC_WRFK_START || rtwdev->is_bt_iqk_timeout) { + _ntfy_wl_rfk(rtwdev, phy_map, type, state); + return; + } + + ret = read_poll_timeout(_ntfy_wl_rfk, allow, allow, 40, 100000, false, + rtwdev, phy_map, type, state); + if (ret) { + rtw89_warn(rtwdev, "RFK notify timeout\n"); + rtwdev->is_bt_iqk_timeout = true; + } +} + +struct rtw89_btc_wl_sta_iter_data { + struct rtw89_dev *rtwdev; + u8 busy_all; + u8 dir_all; + u8 rssi_map_all; + bool is_sta_change; + bool is_traffic_change; +}; + +static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_btc_wl_sta_iter_data *iter_data = + (struct rtw89_btc_wl_sta_iter_data *)data; + struct rtw89_dev *rtwdev = iter_data->rtwdev; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_link_info *link_info = NULL; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_traffic_stats *link_info_t = NULL; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_traffic_stats *stats = &rtwvif->stats; + const struct rtw89_chip_info *chip = rtwdev->chip; + u32 last_tx_rate, last_rx_rate; + u16 last_tx_lvl, last_rx_lvl; + u8 port = rtwvif->port; + u8 rssi; + u8 busy = 0; + u8 dir = 0; + u8 rssi_map = 0; + u8 i = 0; + bool is_sta_change = false, is_traffic_change = false; + + rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR; + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], rssi=%d\n", rssi); + + link_info = &wl->link_info[port]; + link_info->stat.traffic = rtwvif->stats; + link_info_t = &link_info->stat.traffic; + + if (link_info->connected == MLME_NO_LINK) { + link_info->rx_rate_drop_cnt = 0; + return; + } + + link_info->stat.rssi = rssi; + for (i = 0; i < BTC_WL_RSSI_THMAX; i++) { + link_info->rssi_state[i] = + _update_rssi_state(rtwdev, + link_info->rssi_state[i], + link_info->stat.rssi, + chip->wl_rssi_thres[i]); + if (BTC_RSSI_LOW(link_info->rssi_state[i])) + rssi_map |= BIT(i); + + if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED && + BTC_RSSI_CHANGE(link_info->rssi_state[i])) + is_sta_change = true; + } + iter_data->rssi_map_all |= rssi_map; + + last_tx_rate = link_info_t->tx_rate; + last_rx_rate = link_info_t->rx_rate; + last_tx_lvl = (u16)link_info_t->tx_tfc_lv; + last_rx_lvl = (u16)link_info_t->rx_tfc_lv; + + if (stats->tx_tfc_lv != RTW89_TFC_IDLE || + stats->rx_tfc_lv != RTW89_TFC_IDLE) + busy = 1; + + if (stats->tx_tfc_lv > stats->rx_tfc_lv) + dir = RTW89_TFC_UL; + else + dir = RTW89_TFC_DL; + + link_info = &wl->link_info[port]; + if (link_info->busy != busy || link_info->dir != dir) { + is_sta_change = true; + link_info->busy = busy; + link_info->dir = dir; + } + + iter_data->busy_all |= busy; + iter_data->dir_all |= BIT(dir); + + if (rtwsta->rx_hw_rate <= RTW89_HW_RATE_CCK2 && + last_rx_rate > RTW89_HW_RATE_CCK2 && + link_info_t->rx_tfc_lv > RTW89_TFC_IDLE) + link_info->rx_rate_drop_cnt++; + + if (last_tx_rate != rtwsta->ra_report.hw_rate || + last_rx_rate != rtwsta->rx_hw_rate || + last_tx_lvl != link_info_t->tx_tfc_lv || + last_rx_lvl != link_info_t->rx_tfc_lv) + is_traffic_change = true; + + link_info_t->tx_rate = rtwsta->ra_report.hw_rate; + link_info_t->rx_rate = rtwsta->rx_hw_rate; + + wl->role_info.active_role[port].tx_lvl = (u16)stats->tx_tfc_lv; + wl->role_info.active_role[port].rx_lvl = (u16)stats->rx_tfc_lv; + wl->role_info.active_role[port].tx_rate = rtwsta->ra_report.hw_rate; + wl->role_info.active_role[port].rx_rate = rtwsta->rx_hw_rate; + + if (is_sta_change) + 
iter_data->is_sta_change = true; + + if (is_traffic_change) + iter_data->is_traffic_change = true; +} + +#define BTC_NHM_CHK_INTVL 20 + +void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_sta_iter_data data = {.rtwdev = rtwdev}; + u8 i; + + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_btc_ntfy_wl_sta_iter, + &data); + + wl->rssi_level = 0; + btc->dm.cnt_notify[BTC_NCNT_WL_STA]++; + for (i = BTC_WL_RSSI_THMAX; i > 0; i--) { + /* set RSSI level 4 ~ 0 if rssi bit map match */ + if (data.rssi_map_all & BIT(i - 1)) { + wl->rssi_level = i; + break; + } + } + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): busy=%d\n", + __func__, !!wl->status.map.busy); + + _write_scbd(rtwdev, BTC_WSCB_WLBUSY, (!!wl->status.map.busy)); + + if (data.is_traffic_change) + _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE); + if (data.is_sta_change) { + wl->status.map.busy = data.busy_all; + wl->status.map.traffic_dir = data.dir_all; + _run_coex(rtwdev, BTC_RSN_NTFY_WL_STA); + } else if (btc->dm.cnt_notify[BTC_NCNT_WL_STA] >= + btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] + BTC_NHM_CHK_INTVL) { + btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] = + btc->dm.cnt_notify[BTC_NCNT_WL_STA]; + } else if (btc->dm.cnt_notify[BTC_NCNT_WL_STA] < + btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST]) { + btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] = + btc->dm.cnt_notify[BTC_NCNT_WL_STA]; + } +} + +void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + u8 *buf = &skb->data[RTW89_C2H_HEADER_LEN]; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): C2H BT len:%d class:%d fun:%d\n", + __func__, len, class, func); + + if (class != BTFC_FW_EVENT) + return; + + switch (func) { + case BTF_EVNT_RPT: + case BTF_EVNT_BUF_OVERFLOW: + pfwinfo->event[func]++; + /* Don't need rtw89_leave_ps_mode() */ + btc_fw_event(rtwdev, func, buf, len); + break; + case BTF_EVNT_BT_INFO: + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], handle C2H BT INFO with data %8ph\n", buf); + btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE]++; + rtw89_leave_ps_mode(rtwdev); + _update_bt_info(rtwdev, buf, len); + break; + case BTF_EVNT_BT_SCBD: + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], handle C2H BT SCBD with data %8ph\n", buf); + btc->cx.cnt_bt[BTC_BCNT_SCBDUPDATE]++; + rtw89_leave_ps_mode(rtwdev); + _update_bt_scbd(rtwdev, false); + break; + case BTF_EVNT_BT_PSD: + break; + case BTF_EVNT_BT_REG: + btc->dbg.rb_done = true; + btc->dbg.rb_val = le32_to_cpu(*((__le32 *)buf)); + + break; + case BTF_EVNT_C2H_LOOPBACK: + btc->dbg.rb_done = true; + btc->dbg.rb_val = buf[0]; + break; + case BTF_EVNT_CX_RUNINFO: + btc->dm.cnt_dm[BTC_DCNT_CX_RUNINFO]++; + break; + } +} + +#define BTC_CX_FW_OFFLOAD 0 + +static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0; + + if (!(dm->coex_info_map & BTC_COEX_INFO_CX)) + return; + + dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++; + + seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n", + chip->chip_id); + + ver_main = FIELD_GET(GENMASK(31, 24), chip->para_ver); + ver_sub = 
FIELD_GET(GENMASK(23, 16), chip->para_ver); + ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->para_ver); + id_branch = FIELD_GET(GENMASK(7, 0), chip->para_ver); + seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ", + "[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch); + + if (dm->wl_fw_cx_offload != BTC_CX_FW_OFFLOAD) + dm->error.map.offload_mismatch = true; + else + dm->error.map.offload_mismatch = false; + + ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex); + ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw_coex); + ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw_coex); + id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw_coex); + seq_printf(m, "WL_FW_coex:%d.%d.%d(branch:%d)", + ver_main, ver_sub, ver_hotfix, id_branch); + + ver_main = FIELD_GET(GENMASK(31, 24), chip->wlcx_desired); + ver_sub = FIELD_GET(GENMASK(23, 16), chip->wlcx_desired); + ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired); + seq_printf(m, "(%s, desired:%d.%d.%d), ", + (wl->ver_info.fw_coex >= chip->wlcx_desired ? + "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix); + + seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n", + bt->ver_info.fw_coex, + (bt->ver_info.fw_coex >= chip->btcx_desired ? + "Match" : "Mis-Match"), chip->btcx_desired); + + if (bt->enable.now && bt->ver_info.fw == 0) + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true); + else + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false); + + ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw); + ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw); + ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw); + id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw); + seq_printf(m, " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n", + "[sub_module]", + ver_main, ver_sub, ver_hotfix, id_branch, + bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM"); + + seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s", + "[hw_info]", btc->mdinfo.cv, btc->mdinfo.rfe_type, + btc->mdinfo.ant.isolation, btc->mdinfo.ant.num, + (btc->mdinfo.ant.num > 1 ? "" : (btc->mdinfo.ant.single_pos ? 
+ "1Ant_Pos:S1, " : "1Ant_Pos:S0, "))); + + seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n", + btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss, + hal->rx_nss); +} + +static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_link_info *plink = NULL; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + struct rtw89_traffic_stats *t; + u8 i; + + if (rtwdev->dbcc_en) { + seq_printf(m, + " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ", + "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0], + wl_dinfo->scan_band[RTW89_PHY_0], + wl_dinfo->real_band[RTW89_PHY_0]); + seq_printf(m, + "PHY1_band(op:%d/scan:%d/real:%d)\n", + wl_dinfo->op_band[RTW89_PHY_1], + wl_dinfo->scan_band[RTW89_PHY_1], + wl_dinfo->real_band[RTW89_PHY_1]); + } + + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) { + plink = &btc->cx.wl.link_info[i]; + + if (!plink->active) + continue; + + seq_printf(m, + " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d", + plink->pid, (u32)plink->role, plink->phy, + (u32)plink->connected, plink->client_cnt - 1, + (u32)plink->mode, plink->ch, (u32)plink->bw); + + if (plink->connected == MLME_NO_LINK) + continue; + + seq_printf(m, + ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n", + plink->mac_id, plink->tx_time, plink->tx_retry); + + seq_printf(m, + " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ", + plink->pid, 110 - plink->stat.rssi, + plink->stat.rssi, plink->busy, + plink->dir == RTW89_TFC_UL ? "UL" : "DL"); + + t = &plink->stat.traffic; + + seq_printf(m, + "tx[rate:%d/busy_level:%d], ", + (u32)t->tx_rate, t->tx_tfc_lv); + + seq_printf(m, "rx[rate:%d/busy_level:%d/drop:%d]\n", + (u32)t->rx_rate, + t->rx_tfc_lv, plink->rx_rate_drop_cnt); + } +} + +static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; + + if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL)) + return; + + seq_puts(m, "========== [WL Status] ==========\n"); + + seq_printf(m, " %-15s : link_mode:%d, ", + "[status]", (u32)wl_rinfo->link_mode); + + seq_printf(m, + "rf_off:%s, power_save:%s, scan:%s(band:%d/phy_map:0x%x), ", + wl->status.map.rf_off ? "Y" : "N", + wl->status.map.lps ? "Y" : "N", + wl->status.map.scan ? "Y" : "N", + wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map); + + seq_printf(m, + "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n", + wl->status.map.connecting ? "Y" : "N", + wl->status.map.roaming ? "Y" : "N", + wl->status.map._4way ? "Y" : "N", + wl->status.map.init_ok ? "Y" : "N"); + + _show_wl_role_info(rtwdev, m); +} + +enum btc_bt_a2dp_type { + BTC_A2DP_LEGACY = 0, + BTC_A2DP_TWS_SNIFF = 1, + BTC_A2DP_TWS_RELAY = 2, +}; + +static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; + struct rtw89_btc_bt_hfp_desc hfp = bt_linfo->hfp_desc; + struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc; + struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; + struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc; + + if (hfp.exist) { + seq_printf(m, " %-15s : type:%s, sut_pwr:%d, golden-rx:%d", + "[HFP]", (hfp.type == 0 ? 
"SCO" : "eSCO"), + bt_linfo->sut_pwr_level[0], + bt_linfo->golden_rx_shift[0]); + } + + if (hid.exist) { + seq_printf(m, + "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n", + "[HID]", + hid.type & BTC_HID_218 ? "2/18," : "", + hid.type & BTC_HID_418 ? "4/18," : "", + hid.type & BTC_HID_BLE ? "BLE," : "", + hid.type & BTC_HID_RCU ? "RCU," : "", + hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "", + hid.pair_cnt, bt_linfo->sut_pwr_level[1], + bt_linfo->golden_rx_shift[1]); + } + + if (a2dp.exist) { + seq_printf(m, + " %-15s : type:%s, bit-pool:%d, flush-time:%d, ", + "[A2DP]", + a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS", + a2dp.bitpool, a2dp.flush_time); + + seq_printf(m, + "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n", + a2dp.vendor_id, a2dp.device_name, + bt_linfo->sut_pwr_level[2], + bt_linfo->golden_rx_shift[2]); + } + + if (pan.exist) { + seq_printf(m, " %-15s : sut_pwr:%d, golden-rx:%d\n", + "[PAN]", + bt_linfo->sut_pwr_level[3], + bt_linfo->golden_rx_shift[3]); + } +} + +static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_module *module = &btc->mdinfo; + struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; + u8 *afh = bt_linfo->afh_map; + u16 polt_cnt = 0; + + if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT)) + return; + + seq_puts(m, "========== [BT Status] ==========\n"); + + seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ", + "[status]", bt->enable.now ? "Y" : "N", + bt->btg_type ? "Y" : "N", + (bt->enable.now && (bt->btg_type != module->bt_pos) ? + "(efuse-mismatch!!)" : ""), + (bt_linfo->status.map.connect ? "Y" : "N")); + + seq_printf(m, "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n", + bt->igno_wl ? "Y" : "N", + bt->mbx_avl ? "Y" : "N", bt->rfk_info.val); + + seq_printf(m, " %-15s : profile:%s%s%s%s%s ", + "[profile]", + (bt_linfo->profile_cnt.now == 0) ? "None," : "", + bt_linfo->hfp_desc.exist ? "HFP," : "", + bt_linfo->hid_desc.exist ? "HID," : "", + bt_linfo->a2dp_desc.exist ? + (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "", + bt_linfo->pan_desc.exist ? "PAN," : ""); + + seq_printf(m, + "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n", + bt_linfo->multi_link.now ? "Y" : "N", + bt_linfo->slave_role ? "Slave" : "Master", + bt_linfo->status.map.ble_connect ? "Y" : "N", + bt_linfo->cqddr ? "Y" : "N", + bt_linfo->a2dp_desc.active ? "Y" : "N", + bt_linfo->pan_desc.active ? "Y" : "N"); + + seq_printf(m, + " %-15s : rssi:%ddBm, tx_rate:%dM, %s%s%s", + "[link]", bt_linfo->rssi - 100, + bt_linfo->tx_3m ? 3 : 2, + bt_linfo->status.map.inq_pag ? " inq-page!!" : "", + bt_linfo->status.map.acl_busy ? " acl_busy!!" : "", + bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : ""); + + seq_printf(m, + "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ", + bt_linfo->relink.now ? " ReLink!!" 
: "", + afh[0], afh[1], afh[2], afh[3], afh[4], + afh[5], afh[6], afh[7], afh[8], afh[9]); + + seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n", + wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw); + + seq_printf(m, + " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ", + "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY], + cx->cnt_bt[BTC_BCNT_RELINK], cx->cnt_bt[BTC_BCNT_RATECHG], + cx->cnt_bt[BTC_BCNT_REINIT], cx->cnt_bt[BTC_BCNT_REENABLE]); + + seq_printf(m, + "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n", + cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH], + cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ], + cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]); + + _show_bt_profile_info(rtwdev, m); + + seq_printf(m, + " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n", + "[bt_info]", bt->raw_info[2], bt->raw_info[3], + bt->raw_info[4], bt->raw_info[5], bt->raw_info[6], + bt->raw_info[7], + bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply", + cx->cnt_bt[BTC_BCNT_INFOUPDATE], + cx->cnt_bt[BTC_BCNT_INFOSAME]); + + if (wl->status.map.lps || wl->status.map.rf_off) + return; + + chip->ops->btc_update_bt_cnt(rtwdev); + _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0); + + seq_printf(m, + " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)\n", + "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX], + cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX], + cx->cnt_bt[BTC_BCNT_LOPRI_TX], polt_cnt); +} + +#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e +#define CASE_BTC_ACT_STR(e) case BTC_ACT_ ## e | BTC_ACT_EXT_BIT: return #e +#define CASE_BTC_POLICY_STR(e) \ + case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e + +static const char *steps_to_str(u16 step) +{ + switch (step) { + CASE_BTC_RSN_STR(NONE); + CASE_BTC_RSN_STR(NTFY_INIT); + CASE_BTC_RSN_STR(NTFY_SWBAND); + CASE_BTC_RSN_STR(NTFY_WL_STA); + CASE_BTC_RSN_STR(NTFY_RADIO_STATE); + CASE_BTC_RSN_STR(UPDATE_BT_SCBD); + CASE_BTC_RSN_STR(NTFY_WL_RFK); + CASE_BTC_RSN_STR(UPDATE_BT_INFO); + CASE_BTC_RSN_STR(NTFY_SCAN_START); + CASE_BTC_RSN_STR(NTFY_SCAN_FINISH); + CASE_BTC_RSN_STR(NTFY_SPECIFIC_PACKET); + CASE_BTC_RSN_STR(NTFY_POWEROFF); + CASE_BTC_RSN_STR(NTFY_ROLE_INFO); + CASE_BTC_RSN_STR(CMD_SET_COEX); + CASE_BTC_RSN_STR(ACT1_WORK); + CASE_BTC_RSN_STR(BT_DEVINFO_WORK); + CASE_BTC_RSN_STR(RFK_CHK_WORK); + + CASE_BTC_ACT_STR(NONE); + CASE_BTC_ACT_STR(WL_ONLY); + CASE_BTC_ACT_STR(WL_5G); + CASE_BTC_ACT_STR(WL_OTHER); + CASE_BTC_ACT_STR(WL_IDLE); + CASE_BTC_ACT_STR(WL_NC); + CASE_BTC_ACT_STR(WL_RFK); + CASE_BTC_ACT_STR(WL_INIT); + CASE_BTC_ACT_STR(WL_OFF); + CASE_BTC_ACT_STR(FREERUN); + CASE_BTC_ACT_STR(BT_WHQL); + CASE_BTC_ACT_STR(BT_RFK); + CASE_BTC_ACT_STR(BT_OFF); + CASE_BTC_ACT_STR(BT_IDLE); + CASE_BTC_ACT_STR(BT_HFP); + CASE_BTC_ACT_STR(BT_HID); + CASE_BTC_ACT_STR(BT_A2DP); + CASE_BTC_ACT_STR(BT_A2DPSINK); + CASE_BTC_ACT_STR(BT_PAN); + CASE_BTC_ACT_STR(BT_A2DP_HID); + CASE_BTC_ACT_STR(BT_A2DP_PAN); + CASE_BTC_ACT_STR(BT_PAN_HID); + CASE_BTC_ACT_STR(BT_A2DP_PAN_HID); + CASE_BTC_ACT_STR(WL_25G_MCC); + CASE_BTC_ACT_STR(WL_2G_MCC); + CASE_BTC_ACT_STR(WL_2G_SCC); + CASE_BTC_ACT_STR(WL_2G_AP); + CASE_BTC_ACT_STR(WL_2G_GO); + CASE_BTC_ACT_STR(WL_2G_GC); + CASE_BTC_ACT_STR(WL_2G_NAN); + + CASE_BTC_POLICY_STR(OFF_BT); + CASE_BTC_POLICY_STR(OFF_WL); + CASE_BTC_POLICY_STR(OFF_EQ0); + CASE_BTC_POLICY_STR(OFF_EQ1); + CASE_BTC_POLICY_STR(OFF_EQ2); + CASE_BTC_POLICY_STR(OFF_EQ3); + CASE_BTC_POLICY_STR(OFF_BWB0); + CASE_BTC_POLICY_STR(OFF_BWB1); 
+ CASE_BTC_POLICY_STR(OFFB_BWB0); + CASE_BTC_POLICY_STR(OFFE_DEF); + CASE_BTC_POLICY_STR(OFFE_DEF2); + CASE_BTC_POLICY_STR(FIX_TD3030); + CASE_BTC_POLICY_STR(FIX_TD5050); + CASE_BTC_POLICY_STR(FIX_TD2030); + CASE_BTC_POLICY_STR(FIX_TD4010); + CASE_BTC_POLICY_STR(FIX_TD7010); + CASE_BTC_POLICY_STR(FIX_TD2060); + CASE_BTC_POLICY_STR(FIX_TD3060); + CASE_BTC_POLICY_STR(FIX_TD2080); + CASE_BTC_POLICY_STR(FIX_TDW1B1); + CASE_BTC_POLICY_STR(FIX_TD4020); + CASE_BTC_POLICY_STR(PFIX_TD3030); + CASE_BTC_POLICY_STR(PFIX_TD5050); + CASE_BTC_POLICY_STR(PFIX_TD2030); + CASE_BTC_POLICY_STR(PFIX_TD2060); + CASE_BTC_POLICY_STR(PFIX_TD3070); + CASE_BTC_POLICY_STR(PFIX_TD2080); + CASE_BTC_POLICY_STR(PFIX_TDW1B1); + CASE_BTC_POLICY_STR(AUTO_TD50200); + CASE_BTC_POLICY_STR(AUTO_TD60200); + CASE_BTC_POLICY_STR(AUTO_TD20200); + CASE_BTC_POLICY_STR(AUTO_TDW1B1); + CASE_BTC_POLICY_STR(PAUTO_TD50200); + CASE_BTC_POLICY_STR(PAUTO_TD60200); + CASE_BTC_POLICY_STR(PAUTO_TD20200); + CASE_BTC_POLICY_STR(PAUTO_TDW1B1); + CASE_BTC_POLICY_STR(AUTO2_TD3050); + CASE_BTC_POLICY_STR(AUTO2_TD3070); + CASE_BTC_POLICY_STR(AUTO2_TD5050); + CASE_BTC_POLICY_STR(AUTO2_TD6060); + CASE_BTC_POLICY_STR(AUTO2_TD2080); + CASE_BTC_POLICY_STR(AUTO2_TDW1B4); + CASE_BTC_POLICY_STR(PAUTO2_TD3050); + CASE_BTC_POLICY_STR(PAUTO2_TD3070); + CASE_BTC_POLICY_STR(PAUTO2_TD5050); + CASE_BTC_POLICY_STR(PAUTO2_TD6060); + CASE_BTC_POLICY_STR(PAUTO2_TD2080); + CASE_BTC_POLICY_STR(PAUTO2_TDW1B4); + default: + return "unknown step"; + } +} + +static +void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data, + u8 len, u8 seg_len, u8 start_idx, u8 ring_len) +{ + u8 i; + u8 cur_index; + + for (i = 0; i < len ; i++) { + if ((i % seg_len) == 0) + seq_printf(m, " %-15s : ", prefix); + cur_index = (start_idx + i) % ring_len; + if (i % 3 == 0) + seq_printf(m, "-> %-20s", + steps_to_str(*(data + cur_index))); + else if (i % 3 == 1) + seq_printf(m, "-> %-15s", + steps_to_str(*(data + cur_index))); + else + seq_printf(m, "-> %-13s", + steps_to_str(*(data + cur_index))); + if (i == (len - 1) || (i % seg_len) == (seg_len - 1)) + seq_puts(m, "\n"); + } +} + +static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + u8 start_idx; + u8 len; + + len = dm->dm_step.step_ov ? RTW89_BTC_DM_MAXSTEP : dm->dm_step.step_pos; + start_idx = dm->dm_step.step_ov ? dm->dm_step.step_pos : 0; + + seq_print_segment(m, "[dm_steps]", dm->dm_step.step, len, 6, start_idx, + ARRAY_SIZE(dm->dm_step.step)); +} + +static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_module *module = &btc->mdinfo; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + + if (!(dm->coex_info_map & BTC_COEX_INFO_DM)) + return; + + seq_printf(m, "========== [Mechanism Status %s] ==========\n", + (btc->ctrl.manual ? "(Manual)" : "(Auto)")); + + seq_printf(m, + " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%ld, run_cnt:%d\n", + "[status]", + module->ant.type == BTC_ANT_SHARED ? 
"shared" : "dedicated", + steps_to_str(dm->run_reason), + steps_to_str(dm->run_action | BTC_ACT_EXT_BIT), + FIELD_GET(GENMASK(7, 0), dm->set_ant_path), + dm->cnt_dm[BTC_DCNT_RUN]); + + _show_dm_step(rtwdev, m); + + seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ", + "[dm_flag]", dm->wl_only, dm->bt_only, btc->ctrl.igno_bt, + dm->freerun, btc->lps, dm->wl_mimo_ps); + + seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap, + (BTC_CX_FW_OFFLOAD ? "Y" : "N"), + (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ? + "" : "(Mis-Match!!)")); + + if (dm->rf_trx_para.wl_tx_power == 0xff) + seq_printf(m, + " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ", + "[trx_ctrl]", wl->rssi_level, dm->trx_para_level); + + else + seq_printf(m, + " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ", + "[trx_ctrl]", wl->rssi_level, dm->trx_para_level, + dm->rf_trx_para.wl_tx_power); + + seq_printf(m, + "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n", + dm->rf_trx_para.wl_rx_gain, dm->rf_trx_para.bt_tx_power, + dm->rf_trx_para.bt_rx_gain, + (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx); + + seq_printf(m, + " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU\n", + "[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time, + dm->wl_tx_limit.tx_retry, btc->bt_req_len); +} + +static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_fbtc_cysta *pcysta = NULL; + + pcysta = &pfwinfo->rpt_fbtc_cysta.finfo; + + if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && + pcysta->except_cnt == 0 && + !pfwinfo->len_mismch && !pfwinfo->fver_mismch) + return; + + seq_printf(m, " %-15s : ", "[error]"); + + if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]) { + seq_printf(m, + "overflow-cnt: %d, ", + pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]); + } + + if (pfwinfo->len_mismch) { + seq_printf(m, + "len-mismatch: 0x%x, ", + pfwinfo->len_mismch); + } + + if (pfwinfo->fver_mismch) { + seq_printf(m, + "fver-mismatch: 0x%x, ", + pfwinfo->fver_mismch); + } + + /* cycle statistics exceptions */ + if (pcysta->exception || pcysta->except_cnt) { + seq_printf(m, + "exception-type: 0x%x, exception-cnt = %d", + pcysta->exception, pcysta->except_cnt); + } + seq_puts(m, "\n"); +} + +static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_tdma *t = NULL; + struct rtw89_btc_fbtc_slot *s = NULL; + struct rtw89_btc_dm *dm = &btc->dm; + u8 i, cnt = 0; + + pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo; + if (!pcinfo->valid) + return; + + t = &pfwinfo->rpt_fbtc_tdma.finfo; + + seq_printf(m, + " %-15s : ", "[tdma_policy]"); + seq_printf(m, + "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ", + (u32)t->type, + t->rxflctrl, t->txpause); + + seq_printf(m, + "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ", + t->wtgle_n, t->leak_n, t->ext_ctrl); + + seq_printf(m, + "policy_type:%d", + (u32)btc->policy_type); + + s = pfwinfo->rpt_fbtc_slots.finfo.slot; + + for (i = 0; i < CXST_MAX; i++) { + if (dm->update_slot_map == BIT(CXST_MAX) - 1) + break; + + if (!(dm->update_slot_map & BIT(i))) + continue; + + if (cnt % 6 == 0) + seq_printf(m, + " %-15s : %d[%d/0x%x/%d]", + "[slot_policy]", + (u32)i, + s[i].dur, s[i].cxtbl, s[i].cxtype); + else + seq_printf(m, + ", %d[%d/0x%x/%d]", + 
(u32)i, + s[i].dur, s[i].cxtbl, s[i].cxtype); + if (cnt % 6 == 5) + seq_puts(m, "\n"); + cnt++; + } + seq_puts(m, "\n"); +} + +static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_slots *pslots = NULL; + struct rtw89_btc_fbtc_slot s; + u8 i = 0; + + pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo; + if (!pcinfo->valid) + return; + + pslots = &pfwinfo->rpt_fbtc_slots.finfo; + + for (i = 0; i < CXST_MAX; i++) { + s = pslots->slot[i]; + if (i % 6 == 0) + seq_printf(m, + " %-15s : %02d[%03d/0x%x/%d]", + "[slot_list]", + (u32)i, + s.dur, s.cxtbl, s.cxtype); + else + seq_printf(m, + ", %02d[%03d/0x%x/%d]", + (u32)i, + s.dur, s.cxtbl, s.cxtype); + if (i % 6 == 5) + seq_puts(m, "\n"); + } +} + +static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL; + struct rtw89_btc_fbtc_cysta_cpu pcysta[1]; + union rtw89_btc_fbtc_rxflct r; + u8 i, cnt = 0, slot_pair; + u16 cycle, c_begin, c_end, store_index; + + pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; + if (!pcinfo->valid) + return; + + pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo; + rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta); + seq_printf(m, + " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", + "[cycle_cnt]", pcysta->cycles, pcysta->bcn_cnt[CXBCN_ALL], + pcysta->bcn_cnt[CXBCN_ALL_OK], + pcysta->bcn_cnt[CXBCN_BT_SLOT], + pcysta->bcn_cnt[CXBCN_BT_OK]); + + _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles); + + for (i = 0; i < CXST_MAX; i++) { + if (!pcysta->slot_cnt[i]) + continue; + seq_printf(m, + ", %d:%d", (u32)i, pcysta->slot_cnt[i]); + } + + if (dm->tdma_now.rxflctrl) { + seq_printf(m, + ", leak_rx:%d", pcysta->leakrx_cnt); + } + + if (pcysta->collision_cnt) { + seq_printf(m, + ", collision:%d", pcysta->collision_cnt); + } + + if (pcysta->skip_cnt) { + seq_printf(m, + ", skip:%d", pcysta->skip_cnt); + } + seq_puts(m, "\n"); + + _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]); + _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]); + + seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_time]", + pcysta->tavg_cycle[CXT_WL], + pcysta->tavg_cycle[CXT_BT], + pcysta->tavg_lk / 1000, pcysta->tavg_lk % 1000); + seq_printf(m, + ", max_t[wl:%d/bt:%d/lk:%d.%03d]", + pcysta->tmax_cycle[CXT_WL], + pcysta->tmax_cycle[CXT_BT], + pcysta->tmax_lk / 1000, pcysta->tmax_lk % 1000); + seq_printf(m, + ", maxdiff_t[wl:%d/bt:%d]\n", + pcysta->tmaxdiff_cycle[CXT_WL], + pcysta->tmaxdiff_cycle[CXT_BT]); + + if (pcysta->cycles == 0) + return; + + /* 1 cycle record 1 wl-slot and 1 bt-slot */ + slot_pair = BTC_CYCLE_SLOT_MAX / 2; + + if (pcysta->cycles <= slot_pair) + c_begin = 1; + else + c_begin = pcysta->cycles - slot_pair + 1; + + c_end = pcysta->cycles; + + for (cycle = c_begin; cycle <= c_end; cycle++) { + cnt++; + store_index = ((cycle - 1) % slot_pair) * 2; + + if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1) + seq_printf(m, + " %-15s : ->b%02d->w%02d", "[cycle_step]", + pcysta->tslot_cycle[store_index], + pcysta->tslot_cycle[store_index + 1]); + else + seq_printf(m, + "->b%02d->w%02d", + 
pcysta->tslot_cycle[store_index], + pcysta->tslot_cycle[store_index + 1]); + if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end) + seq_puts(m, "\n"); + } + + if (a2dp->exist) { + seq_printf(m, + " %-15s : a2dp_ept:%d, a2dp_late:%d", + "[a2dp_t_sta]", + pcysta->a2dpept, pcysta->a2dpeptto); + + seq_printf(m, + ", avg_t:%d, max_t:%d", + pcysta->tavg_a2dpept, pcysta->tmax_a2dpept); + r.val = dm->tdma_now.rxflctrl; + + if (r.type && r.tgln_n) { + seq_printf(m, + ", cycle[PSTDMA:%d/TDMA:%d], ", + pcysta->cycles_a2dp[CXT_FLCTRL_ON], + pcysta->cycles_a2dp[CXT_FLCTRL_OFF]); + + seq_printf(m, + "avg_t[PSTDMA:%d/TDMA:%d], ", + pcysta->tavg_a2dp[CXT_FLCTRL_ON], + pcysta->tavg_a2dp[CXT_FLCTRL_OFF]); + + seq_printf(m, + "max_t[PSTDMA:%d/TDMA:%d]", + pcysta->tmax_a2dp[CXT_FLCTRL_ON], + pcysta->tmax_a2dp[CXT_FLCTRL_OFF]); + } + seq_puts(m, "\n"); + } +} + +static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_cynullsta *ns = NULL; + u8 i = 0; + + if (!btc->dm.tdma_now.rxflctrl) + return; + + pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo; + if (!pcinfo->valid) + return; + + ns = &pfwinfo->rpt_fbtc_nullsta.finfo; + + seq_printf(m, " %-15s : ", "[null_sta]"); + + for (i = 0; i < 2; i++) { + if (i != 0) + seq_printf(m, ", null-%d", i); + else + seq_printf(m, "null-%d", i); + seq_printf(m, "[ok:%d/", le32_to_cpu(ns->result[i][1])); + seq_printf(m, "fail:%d/", le32_to_cpu(ns->result[i][0])); + seq_printf(m, "on_time:%d/", le32_to_cpu(ns->result[i][2])); + seq_printf(m, "retry:%d/", le32_to_cpu(ns->result[i][3])); + seq_printf(m, "avg_t:%d.%03d/", + le32_to_cpu(ns->avg_t[i]) / 1000, + le32_to_cpu(ns->avg_t[i]) % 1000); + seq_printf(m, "max_t:%d.%03d]", + le32_to_cpu(ns->max_t[i]) / 1000, + le32_to_cpu(ns->max_t[i]) % 1000); + } + seq_puts(m, "\n"); +} + +static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_steps *pstep = NULL; + u8 type, val, cnt = 0, state = 0; + bool outloop = false; + u16 i, diff_t, n_start = 0, n_stop = 0; + u16 pos_old, pos_new; + + pcinfo = &pfwinfo->rpt_fbtc_step.cinfo; + if (!pcinfo->valid) + return; + + pstep = &pfwinfo->rpt_fbtc_step.finfo; + pos_old = le16_to_cpu(pstep->pos_old); + pos_new = le16_to_cpu(pstep->pos_new); + + if (pcinfo->req_fver != pstep->fver) + return; + + /* store step info by using ring instead of FIFO*/ + do { + switch (state) { + case 0: + n_start = pos_old; + if (pos_new >= pos_old) + n_stop = pos_new; + else + n_stop = btc->ctrl.trace_step - 1; + + state = 1; + break; + case 1: + for (i = n_start; i <= n_stop; i++) { + type = pstep->step[i].type; + val = pstep->step[i].val; + diff_t = le16_to_cpu(pstep->step[i].difft); + + if (type == CXSTEP_NONE || type >= CXSTEP_MAX) + continue; + + if (cnt % 10 == 0) + seq_printf(m, " %-15s : ", "[steps]"); + + seq_printf(m, "-> %s(%02d)(%02d)", + (type == CXSTEP_SLOT ? 
"SLT" : + "EVT"), (u32)val, diff_t); + if (cnt % 10 == 9) + seq_puts(m, "\n"); + cnt++; + } + + state = 2; + break; + case 2: + if (pos_new < pos_old && n_start != 0) { + n_start = 0; + n_stop = pos_new; + state = 1; + } else { + outloop = true; + } + break; + } + } while (!outloop); +} + +static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM)) + return; + + _show_error(rtwdev, m); + _show_fbtc_tdma(rtwdev, m); + _show_fbtc_slots(rtwdev, m); + _show_fbtc_cysta(rtwdev, m); + _show_fbtc_nullsta(rtwdev, m); + _show_fbtc_step(rtwdev, m); +} + +static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_mreg_val *pmreg = NULL; + struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_mac_ax_gnt gnt[2] = {0}; + u8 i = 0, type = 0, cnt = 0; + u32 val, offset; + + if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG)) + return; + + seq_puts(m, "========== [HW Status] ==========\n"); + + seq_printf(m, + " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n", + "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE], + bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], + cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); + + /* To avoid I/O if WL LPS or power-off */ + if (!wl->status.map.lps && !wl->status.map.rf_off) { + rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); + if (val & (B_AX_GNT_BT_RFC_S0_SW_VAL | + B_AX_GNT_BT_BB_S0_SW_VAL)) + gnt[0].gnt_bt = true; + if (val & (B_AX_GNT_BT_RFC_S0_SW_CTRL | + B_AX_GNT_BT_BB_S0_SW_CTRL)) + gnt[0].gnt_bt_sw_en = true; + if (val & (B_AX_GNT_WL_RFC_S0_SW_VAL | + B_AX_GNT_WL_BB_S0_SW_VAL)) + gnt[0].gnt_wl = true; + if (val & (B_AX_GNT_WL_RFC_S0_SW_CTRL | + B_AX_GNT_WL_BB_S0_SW_CTRL)) + gnt[0].gnt_wl_sw_en = true; + + if (val & (B_AX_GNT_BT_RFC_S1_SW_VAL | + B_AX_GNT_BT_BB_S1_SW_VAL)) + gnt[1].gnt_bt = true; + if (val & (B_AX_GNT_BT_RFC_S1_SW_CTRL | + B_AX_GNT_BT_BB_S1_SW_CTRL)) + gnt[1].gnt_bt_sw_en = true; + if (val & (B_AX_GNT_WL_RFC_S1_SW_VAL | + B_AX_GNT_WL_BB_S1_SW_VAL)) + gnt[1].gnt_wl = true; + if (val & (B_AX_GNT_WL_RFC_S1_SW_CTRL | + B_AX_GNT_WL_BB_S1_SW_CTRL)) + gnt[1].gnt_wl_sw_en = true; + + seq_printf(m, + " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ", + "[gnt_status]", + (rtw89_mac_get_ctrl_path(rtwdev) ? "WL" : "BT"), + (gnt[0].gnt_wl_sw_en ? "SW" : "HW"), gnt[0].gnt_wl, + (gnt[0].gnt_bt_sw_en ? "SW" : "HW"), gnt[0].gnt_bt); + + seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", + (gnt[1].gnt_wl_sw_en ? "SW" : "HW"), gnt[1].gnt_wl, + (gnt[1].gnt_bt_sw_en ? 
"SW" : "HW"), gnt[1].gnt_bt); + } + + pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; + if (!pcinfo->valid) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n", + __func__); + return; + } + + pmreg = &pfwinfo->rpt_fbtc_mregval.finfo; + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): rpt_fbtc_mregval reg_num = %d\n", + __func__, pmreg->reg_num); + + for (i = 0; i < pmreg->reg_num; i++) { + type = (u8)le16_to_cpu(chip->mon_reg[i].type); + offset = le32_to_cpu(chip->mon_reg[i].offset); + val = le32_to_cpu(pmreg->mreg_val[i]); + + if (cnt % 6 == 0) + seq_printf(m, " %-15s : %d_0x%04x=0x%08x", + "[reg]", (u32)type, offset, val); + else + seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type, + offset, val); + if (cnt % 6 == 5) + seq_puts(m, "\n"); + cnt++; + } + + pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; + if (!pcinfo->valid) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n", + __func__); + return; + } + + gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo; + if (!gdbg->en_map) + return; + + seq_printf(m, " %-15s : enable_map:0x%08x", + "[gpio_dbg]", gdbg->en_map); + + for (i = 0; i < BTC_DBG_MAX1; i++) { + if (!(gdbg->en_map & BIT(i))) + continue; + seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]); + } + seq_puts(m, "\n"); +} + +static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_rpt_ctrl *prptctrl = NULL; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_bt_info *bt = &cx->bt; + u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify; + u8 i; + + if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) + return; + + seq_puts(m, "========== [Statistics] ==========\n"); + + pcinfo = &pfwinfo->rpt_ctrl.cinfo; + if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) { + prptctrl = &pfwinfo->rpt_ctrl.finfo; + + seq_printf(m, + " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt, + pfwinfo->cnt_c2h, prptctrl->c2h_cnt); + + seq_printf(m, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x", + pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt, + prptctrl->rpt_enable, dm->error.val); + + _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE, + pfwinfo->event[BTF_EVNT_RPT]); + + if (dm->error.map.wl_fw_hang) + seq_puts(m, " (WL FW Hang!!)"); + seq_puts(m, "\n"); + seq_printf(m, + " %-15s : send_ok:%d, send_fail:%d, recv:%d", + "[mailbox]", prptctrl->mb_send_ok_cnt, + prptctrl->mb_send_fail_cnt, prptctrl->mb_recv_cnt); + + seq_printf(m, + "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n", + prptctrl->mb_a2dp_empty_cnt, + prptctrl->mb_a2dp_flct_cnt, + prptctrl->mb_a2dp_full_cnt); + + seq_printf(m, + " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]", + "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); + + seq_printf(m, + ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n", + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]); + + if (prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT] > 0) + bt->rfk_info.map.timeout = 1; + else + 
bt->rfk_info.map.timeout = 0; + + dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout; + } else { + seq_printf(m, + " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h, + pfwinfo->event[BTF_EVNT_RPT], + btc->fwinfo.rpt_en_map); + seq_puts(m, " (WL FW report invalid!!)\n"); + } + + for (i = 0; i < BTC_NCNT_NUM; i++) + cnt_sum += dm->cnt_notify[i]; + + seq_printf(m, + " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + seq_printf(m, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); + + seq_printf(m, + " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", + "[notify_cnt]", cnt[BTC_NCNT_SCAN_START], + cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND], + cnt[BTC_NCNT_SPECIAL_PACKET]); + + seq_printf(m, + "timer=%d, control=%d, customerize=%d\n", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], + cnt[BTC_NCNT_CUSTOMERIZE]); +} + +void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_bt_info *bt = &cx->bt; + + seq_puts(m, "=========================================\n"); + seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n", + fw_suit->major_ver, fw_suit->minor_ver, + fw_suit->sub_ver, fw_suit->sub_idex); + seq_printf(m, "manual %d\n", btc->ctrl.manual); + + seq_puts(m, "=========================================\n"); + + seq_printf(m, "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)", + "[bt_info]", + bt->raw_info[2], bt->raw_info[3], + bt->raw_info[4], bt->raw_info[5], + bt->raw_info[6], bt->raw_info[7], + bt->raw_info[0] == BTC_BTINFO_AUTO ? 
"auto" : "reply", + cx->cnt_bt[BTC_BCNT_INFOUPDATE], + cx->cnt_bt[BTC_BCNT_INFOSAME]); + + seq_puts(m, "\n=========================================\n"); + + _show_cx_info(rtwdev, m); + _show_wl_info(rtwdev, m); + _show_bt_info(rtwdev, m); + _show_dm_info(rtwdev, m); + _show_fw_dm_msg(rtwdev, m); + _show_mreg(rtwdev, m); + _show_summary(rtwdev, m); +} diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h new file mode 100644 index 00000000000000..4b4565d15c9eca --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_COEX_H__ +#define __RTW89_COEX_H__ + +#include "core.h" + +enum btc_mode { + BTC_MODE_NORMAL, + BTC_MODE_WL, + BTC_MODE_BT, + BTC_MODE_WLOFF, + BTC_MODE_MAX +}; + +enum btc_wl_rfk_type { + BTC_WRFKT_IQK = 0, + BTC_WRFKT_LCK = 1, + BTC_WRFKT_DPK = 2, + BTC_WRFKT_TXGAPK = 3, + BTC_WRFKT_DACK = 4, + BTC_WRFKT_RXDCK = 5, + BTC_WRFKT_TSSI = 6, +}; + +#define NM_EXEC false +#define FC_EXEC true + +#define RTW89_COEX_ACT1_WORK_PERIOD round_jiffies_relative(HZ * 4) +#define RTW89_COEX_BT_DEVINFO_WORK_PERIOD round_jiffies_relative(HZ * 16) +#define RTW89_COEX_RFK_CHK_WORK_PERIOD msecs_to_jiffies(300) +#define BTC_RFK_PATH_MAP GENMASK(3, 0) +#define BTC_RFK_PHY_MAP GENMASK(5, 4) +#define BTC_RFK_BAND_MAP GENMASK(7, 6) + +enum btc_wl_rfk_state { + BTC_WRFK_STOP = 0, + BTC_WRFK_START = 1, + BTC_WRFK_ONESHOT_START = 2, + BTC_WRFK_ONESHOT_STOP = 3, +}; + +enum btc_pri { + BTC_PRI_MASK_RX_RESP = 0, + BTC_PRI_MASK_TX_RESP, + BTC_PRI_MASK_BEACON, + BTC_PRI_MASK_RX_CCK, + BTC_PRI_MASK_TX_MNGQ, + BTC_PRI_MASK_MAX, +}; + +enum btc_bt_trs { + BTC_BT_SS_GROUP = 0x0, + BTC_BT_TX_GROUP = 0x2, + BTC_BT_RX_GROUP = 0x3, + BTC_BT_MAX_GROUP, +}; + +enum btc_rssi_st { + BTC_RSSI_ST_LOW = 0x0, + BTC_RSSI_ST_HIGH, + BTC_RSSI_ST_STAY_LOW, + BTC_RSSI_ST_STAY_HIGH, + BTC_RSSI_ST_MAX +}; + +#define BTC_RSSI_HIGH(_rssi_) \ + ({typeof(_rssi_) __rssi = (_rssi_); \ + ((__rssi == BTC_RSSI_ST_HIGH || \ + __rssi == BTC_RSSI_ST_STAY_HIGH) ? 1 : 0); }) + +#define BTC_RSSI_LOW(_rssi_) \ + ({typeof(_rssi_) __rssi = (_rssi_); \ + ((__rssi == BTC_RSSI_ST_LOW || \ + __rssi == BTC_RSSI_ST_STAY_LOW) ? 1 : 0); }) + +#define BTC_RSSI_CHANGE(_rssi_) \ + ({typeof(_rssi_) __rssi = (_rssi_); \ + ((__rssi == BTC_RSSI_ST_LOW || \ + __rssi == BTC_RSSI_ST_HIGH) ? 
1 : 0); }) + +enum btc_ant { + BTC_ANT_SHARED = 0, + BTC_ANT_DEDICATED, + BTC_ANTTYPE_MAX +}; + +enum btc_bt_btg { + BTC_BT_ALONE = 0, + BTC_BT_BTG +}; + +enum btc_switch { + BTC_SWITCH_INTERNAL = 0, + BTC_SWITCH_EXTERNAL +}; + +enum btc_pkt_type { + PACKET_DHCP, + PACKET_ARP, + PACKET_EAPOL, + PACKET_EAPOL_END, + PACKET_ICMP, + PACKET_MAX +}; + +enum btc_bt_mailbox_id { + BTC_BTINFO_REPLY = 0x23, + BTC_BTINFO_AUTO = 0x27 +}; + +enum btc_role_state { + BTC_ROLE_START, + BTC_ROLE_STOP, + BTC_ROLE_CHG_TYPE, + BTC_ROLE_MSTS_STA_CONN_START, + BTC_ROLE_MSTS_STA_CONN_END, + BTC_ROLE_MSTS_STA_DIS_CONN, + BTC_ROLE_MSTS_AP_START, + BTC_ROLE_MSTS_AP_STOP, + BTC_ROLE_STATE_UNKNOWN +}; + +enum btc_rfctrl { + BTC_RFCTRL_WL_OFF, + BTC_RFCTRL_WL_ON, + BTC_RFCTRL_FW_CTRL, + BTC_RFCTRL_MAX +}; + +void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev); +void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev); +void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode); +void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band); +void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx); +void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band); +void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev, + enum btc_pkt_type pkt_type); +void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work); +void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work); +void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work); +void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work); +void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, enum btc_role_state state); +void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_state); +void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map, + enum btc_wl_rfk_type type, + enum btc_wl_rfk_state state); +void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev); +void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func); +void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m); +void rtw89_coex_act1_work(struct work_struct *work); +void rtw89_coex_bt_devinfo_work(struct work_struct *work); +void rtw89_coex_rfk_chk_work(struct work_struct *work); +void rtw89_coex_power_on(struct rtw89_dev *rtwdev); + +static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + enum rtw89_rf_path_bit paths) +{ + struct rtw89_hal *hal = &rtwdev->hal; + u8 phy_map; + + phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) | + FIELD_PREP(BTC_RFK_PHY_MAP, BIT(phy_idx)) | + FIELD_PREP(BTC_RFK_BAND_MAP, hal->current_band_type); + + return phy_map; +} + +static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + enum rtw89_rf_path path) +{ + return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path)); +} + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c new file mode 100644 index 00000000000000..d02ec5a735cb15 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -0,0 +1,2502 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "coex.h" +#include "core.h" +#include "efuse.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "ps.h" +#include "reg.h" +#include "sar.h" +#include "ser.h" +#include "txrx.h" +#include "util.h" + +static bool rtw89_disable_ps_mode; 
+module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644); +MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode"); + +static struct ieee80211_channel rtw89_channels_2ghz[] = { + { .center_freq = 2412, .hw_value = 1, }, + { .center_freq = 2417, .hw_value = 2, }, + { .center_freq = 2422, .hw_value = 3, }, + { .center_freq = 2427, .hw_value = 4, }, + { .center_freq = 2432, .hw_value = 5, }, + { .center_freq = 2437, .hw_value = 6, }, + { .center_freq = 2442, .hw_value = 7, }, + { .center_freq = 2447, .hw_value = 8, }, + { .center_freq = 2452, .hw_value = 9, }, + { .center_freq = 2457, .hw_value = 10, }, + { .center_freq = 2462, .hw_value = 11, }, + { .center_freq = 2467, .hw_value = 12, }, + { .center_freq = 2472, .hw_value = 13, }, + { .center_freq = 2484, .hw_value = 14, }, +}; + +static struct ieee80211_channel rtw89_channels_5ghz[] = { + {.center_freq = 5180, .hw_value = 36,}, + {.center_freq = 5200, .hw_value = 40,}, + {.center_freq = 5220, .hw_value = 44,}, + {.center_freq = 5240, .hw_value = 48,}, + {.center_freq = 5260, .hw_value = 52,}, + {.center_freq = 5280, .hw_value = 56,}, + {.center_freq = 5300, .hw_value = 60,}, + {.center_freq = 5320, .hw_value = 64,}, + {.center_freq = 5500, .hw_value = 100,}, + {.center_freq = 5520, .hw_value = 104,}, + {.center_freq = 5540, .hw_value = 108,}, + {.center_freq = 5560, .hw_value = 112,}, + {.center_freq = 5580, .hw_value = 116,}, + {.center_freq = 5600, .hw_value = 120,}, + {.center_freq = 5620, .hw_value = 124,}, + {.center_freq = 5640, .hw_value = 128,}, + {.center_freq = 5660, .hw_value = 132,}, + {.center_freq = 5680, .hw_value = 136,}, + {.center_freq = 5700, .hw_value = 140,}, + {.center_freq = 5720, .hw_value = 144,}, + {.center_freq = 5745, .hw_value = 149,}, + {.center_freq = 5765, .hw_value = 153,}, + {.center_freq = 5785, .hw_value = 157,}, + {.center_freq = 5805, .hw_value = 161,}, + {.center_freq = 5825, .hw_value = 165, + .flags = IEEE80211_CHAN_NO_HT40MINUS}, +}; + +static struct ieee80211_rate rtw89_bitrates[] = { + { .bitrate = 10, .hw_value = 0x00, }, + { .bitrate = 20, .hw_value = 0x01, }, + { .bitrate = 55, .hw_value = 0x02, }, + { .bitrate = 110, .hw_value = 0x03, }, + { .bitrate = 60, .hw_value = 0x04, }, + { .bitrate = 90, .hw_value = 0x05, }, + { .bitrate = 120, .hw_value = 0x06, }, + { .bitrate = 180, .hw_value = 0x07, }, + { .bitrate = 240, .hw_value = 0x08, }, + { .bitrate = 360, .hw_value = 0x09, }, + { .bitrate = 480, .hw_value = 0x0a, }, + { .bitrate = 540, .hw_value = 0x0b, }, +}; + +u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate) +{ + struct ieee80211_rate rate; + + if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) { + rtw89_info(rtwdev, "invalid rpt rate %d\n", rpt_rate); + return 0; + } + + rate = rtw89_bitrates[rpt_rate]; + + return rate.bitrate; +} + +static struct ieee80211_supported_band rtw89_sband_2ghz = { + .band = NL80211_BAND_2GHZ, + .channels = rtw89_channels_2ghz, + .n_channels = ARRAY_SIZE(rtw89_channels_2ghz), + .bitrates = rtw89_bitrates, + .n_bitrates = ARRAY_SIZE(rtw89_bitrates), + .ht_cap = {0}, + .vht_cap = {0}, +}; + +static struct ieee80211_supported_band rtw89_sband_5ghz = { + .band = NL80211_BAND_5GHZ, + .channels = rtw89_channels_5ghz, + .n_channels = ARRAY_SIZE(rtw89_channels_5ghz), + + /* 5G has no CCK rates, 1M/2M/5.5M/11M */ + .bitrates = rtw89_bitrates + 4, + .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4, + .ht_cap = {0}, + .vht_cap = {0}, +}; + +static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev, + struct 
rtw89_traffic_stats *stats, + struct sk_buff *skb, bool tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_data(hdr->frame_control)) + return; + + if (is_broadcast_ether_addr(hdr->addr1) || + is_multicast_ether_addr(hdr->addr1)) + return; + + if (tx) { + stats->tx_cnt++; + stats->tx_unicast += skb->len; + } else { + stats->rx_cnt++; + stats->rx_unicast += skb->len; + } +} + +static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef, + struct rtw89_channel_params *chan_param) +{ + struct ieee80211_channel *channel = chandef->chan; + enum nl80211_chan_width width = chandef->width; + u8 *cch_by_bw = chan_param->cch_by_bw; + u32 primary_freq, center_freq; + u8 center_chan; + u8 bandwidth = RTW89_CHANNEL_WIDTH_20; + u8 primary_chan_idx = 0; + u8 i; + + center_chan = channel->hw_value; + primary_freq = channel->center_freq; + center_freq = chandef->center_freq1; + + /* assign the center channel used while 20M bw is selected */ + cch_by_bw[RTW89_CHANNEL_WIDTH_20] = channel->hw_value; + + switch (width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + bandwidth = RTW89_CHANNEL_WIDTH_20; + primary_chan_idx = RTW89_SC_DONT_CARE; + break; + case NL80211_CHAN_WIDTH_40: + bandwidth = RTW89_CHANNEL_WIDTH_40; + if (primary_freq > center_freq) { + primary_chan_idx = RTW89_SC_20_UPPER; + center_chan -= 2; + } else { + primary_chan_idx = RTW89_SC_20_LOWER; + center_chan += 2; + } + break; + case NL80211_CHAN_WIDTH_80: + bandwidth = RTW89_CHANNEL_WIDTH_80; + if (primary_freq > center_freq) { + if (primary_freq - center_freq == 10) { + primary_chan_idx = RTW89_SC_20_UPPER; + center_chan -= 2; + } else { + primary_chan_idx = RTW89_SC_20_UPMOST; + center_chan -= 6; + } + /* assign the center channel used + * while 40M bw is selected + */ + cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan + 4; + } else { + if (center_freq - primary_freq == 10) { + primary_chan_idx = RTW89_SC_20_LOWER; + center_chan += 2; + } else { + primary_chan_idx = RTW89_SC_20_LOWEST; + center_chan += 6; + } + /* assign the center channel used + * while 40M bw is selected + */ + cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan - 4; + } + break; + default: + center_chan = 0; + break; + } + + chan_param->center_chan = center_chan; + chan_param->primary_chan = channel->hw_value; + chan_param->bandwidth = bandwidth; + chan_param->pri_ch_idx = primary_chan_idx; + + /* assign the center channel used while current bw is selected */ + cch_by_bw[bandwidth] = center_chan; + + for (i = bandwidth + 1; i <= RTW89_MAX_CHANNEL_WIDTH; i++) + cch_by_bw[i] = 0; +} + +void rtw89_set_channel(struct rtw89_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_channel_params ch_param; + struct rtw89_channel_help_params bak; + u8 center_chan, bandwidth; + u8 band_type; + bool band_changed; + u8 i; + + rtw89_get_channel_params(&hw->conf.chandef, &ch_param); + if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) + return; + + center_chan = ch_param.center_chan; + bandwidth = ch_param.bandwidth; + band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G; + band_changed = hal->current_band_type != band_type || + hal->current_channel == 0; + + hal->current_band_width = bandwidth; + hal->current_channel = center_chan; + hal->current_primary_channel = ch_param.primary_chan; + hal->current_band_type = band_type; + + switch (center_chan) { + case 1 ... 
14: + hal->current_subband = RTW89_CH_2G; + break; + case 36 ... 64: + hal->current_subband = RTW89_CH_5G_BAND_1; + break; + case 100 ... 144: + hal->current_subband = RTW89_CH_5G_BAND_3; + break; + case 149 ... 177: + hal->current_subband = RTW89_CH_5G_BAND_4; + break; + } + + for (i = RTW89_CHANNEL_WIDTH_20; i <= RTW89_MAX_CHANNEL_WIDTH; i++) + hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; + + rtw89_chip_set_channel_prepare(rtwdev, &bak); + + chip->ops->set_channel(rtwdev, &ch_param); + + rtw89_chip_set_txpwr(rtwdev); + + rtw89_chip_set_channel_done(rtwdev, &bak); + + if (band_changed) { + rtw89_btc_ntfy_switch_band(rtwdev, RTW89_PHY_0, hal->current_band_type); + rtw89_chip_rfk_band_changed(rtwdev); + } +} + +static enum rtw89_core_tx_type +rtw89_core_get_tx_type(struct rtw89_dev *rtwdev, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + + if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc)) + return RTW89_CORE_TX_TYPE_MGMT; + + return RTW89_CORE_TX_TYPE_DATA; +} + +static void +rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req, u8 tid) +{ + struct ieee80211_sta *sta = tx_req->sta; + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct rtw89_sta *rtwsta; + u8 ampdu_num; + + if (!sta) { + rtw89_warn(rtwdev, "cannot set ampdu info without sta\n"); + return; + } + + rtwsta = (struct rtw89_sta *)sta->drv_priv; + + ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ? + rtwsta->ampdu_params[tid].agg_num : + 4 << sta->ht_cap.ampdu_factor) - 1); + + desc_info->agg_en = true; + desc_info->ampdu_density = sta->ht_cap.ampdu_density; + desc_info->ampdu_num = ampdu_num; +} + +static void +rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct ieee80211_vif *vif = tx_req->vif; + struct ieee80211_tx_info *info; + struct ieee80211_key_conf *key; + struct rtw89_vif *rtwvif; + struct rtw89_addr_cam_entry *addr_cam; + struct rtw89_sec_cam_entry *sec_cam; + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct sk_buff *skb = tx_req->skb; + u8 sec_type = RTW89_SEC_KEY_TYPE_NONE; + + if (!vif) { + rtw89_warn(rtwdev, "cannot set sec key without vif\n"); + return; + } + + rtwvif = (struct rtw89_vif *)vif->drv_priv; + addr_cam = &rtwvif->addr_cam; + + info = IEEE80211_SKB_CB(skb); + key = info->control.hw_key; + sec_cam = addr_cam->sec_entries[key->hw_key_idx]; + if (!sec_cam) { + rtw89_warn(rtwdev, "sec cam entry is empty\n"); + return; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + sec_type = RTW89_SEC_KEY_TYPE_WEP40; + break; + case WLAN_CIPHER_SUITE_WEP104: + sec_type = RTW89_SEC_KEY_TYPE_WEP104; + break; + case WLAN_CIPHER_SUITE_TKIP: + sec_type = RTW89_SEC_KEY_TYPE_TKIP; + break; + case WLAN_CIPHER_SUITE_CCMP: + sec_type = RTW89_SEC_KEY_TYPE_CCMP128; + break; + case WLAN_CIPHER_SUITE_CCMP_256: + sec_type = RTW89_SEC_KEY_TYPE_CCMP256; + break; + case WLAN_CIPHER_SUITE_GCMP: + sec_type = RTW89_SEC_KEY_TYPE_GCMP128; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + sec_type = RTW89_SEC_KEY_TYPE_GCMP256; + break; + default: + rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher); + return; + } + + desc_info->sec_en = true; + desc_info->sec_type = sec_type; + desc_info->sec_cam_idx = sec_cam->sec_cam_idx; +} + +static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + struct ieee80211_tx_info *tx_info = 
IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = tx_info->control.vif; + struct rtw89_hal *hal = &rtwdev->hal; + u16 lowest_rate = hal->current_band_type == RTW89_BAND_2G ? + RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6; + + if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta) + return lowest_rate; + + return __ffs(vif->bss_conf.basic_rates) + lowest_rate; +} + +static void +rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + u8 qsel, ch_dma; + + qsel = RTW89_TX_QSEL_B0_MGMT; + ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); + + desc_info->qsel = RTW89_TX_QSEL_B0_MGMT; + desc_info->ch_dma = ch_dma; + + /* fixed data rate for mgmt frames */ + desc_info->en_wd_info = true; + desc_info->use_rate = true; + desc_info->dis_data_fb = true; + desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req); + + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "tx mgmt frame with rate 0x%x on channel %d (bw %d)\n", + desc_info->data_rate, rtwdev->hal.current_channel, + rtwdev->hal.current_band_width); +} + +static void +rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + + desc_info->is_bmc = false; + desc_info->wd_page = false; + desc_info->ch_dma = RTW89_DMA_H2C; +} + +static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc) +{ + static const u8 rtw89_bandwidth_to_om[] = { + [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20, + [RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40, + [RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80, + [RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, + [RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, + }; + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_hal *hal = &rtwdev->hal; + u8 om_bandwidth; + + if (!chip->dis_2g_40m_ul_ofdma || + hal->current_band_type != RTW89_BAND_2G || + hal->current_band_width != RTW89_CHANNEL_WIDTH_40) + return; + + om_bandwidth = hal->current_band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ? 
+ rtw89_bandwidth_to_om[hal->current_band_width] : 0; + *htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | + le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) | + le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) | + le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) | + le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) | + le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) | + le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) | + le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) | + le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS); +} + +static bool +__rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req, + enum btc_pkt_type pkt_type) +{ + struct ieee80211_sta *sta = tx_req->sta; + struct sk_buff *skb = tx_req->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + + /* AP IOT issue with EAPoL, ARP and DHCP */ + if (pkt_type < PACKET_MAX) + return false; + + if (!sta || !sta->he_cap.has_he) + return false; + + if (!ieee80211_is_data_qos(fc)) + return false; + + if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN) + return false; + + return true; +} + +static void +__rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct ieee80211_sta *sta = tx_req->sta; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct sk_buff *skb = tx_req->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + void *data; + __le32 *htc; + u8 *qc; + int hdr_len; + + hdr_len = ieee80211_has_a4(fc) ? 32 : 26; + data = skb_push(skb, IEEE80211_HT_CTL_LEN); + memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len); + + hdr = data; + htc = data + hdr_len; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER); + *htc = rtwsta->htc_template ? 
rtwsta->htc_template : + le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | + le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID); + + qc = data + hdr_len - IEEE80211_QOS_CTL_LEN; + qc[0] |= IEEE80211_QOS_CTL_EOSP; +} + +static void +rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req, + enum btc_pkt_type pkt_type) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct ieee80211_vif *vif = tx_req->vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type)) + goto desc_bk; + + __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req); + + desc_info->pkt_size += IEEE80211_HT_CTL_LEN; + desc_info->a_ctrl_bsr = true; + +desc_bk: + if (!rtwvif || rtwvif->last_a_ctrl == desc_info->a_ctrl_bsr) + return; + + rtwvif->last_a_ctrl = desc_info->a_ctrl_bsr; + desc_info->bk = true; +} + +static void +rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct ieee80211_vif *vif = tx_req->vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct sk_buff *skb = tx_req->skb; + u8 tid, tid_indicate; + u8 qsel, ch_dma; + + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid); + qsel = rtw89_core_get_qsel(rtwdev, tid); + ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); + + desc_info->ch_dma = ch_dma; + desc_info->tid_indicate = tid_indicate; + desc_info->qsel = qsel; + + /* enable wd_info for AMPDU */ + desc_info->en_wd_info = true; + + if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU) + rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, tid); + if (IEEE80211_SKB_CB(skb)->control.hw_key) + rtw89_core_tx_update_sec_key(rtwdev, tx_req); + + if (rate_pattern->enable) + desc_info->data_retry_lowest_rate = rate_pattern->rate; + else if (hal->current_band_type == RTW89_BAND_2G) + desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1; + else + desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6; +} + +static enum btc_pkt_type +rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + struct udphdr *udphdr; + + if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { + ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work); + return PACKET_EAPOL; + } + + if (skb->protocol == htons(ETH_P_ARP)) { + ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work); + return PACKET_ARP; + } + + if (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_UDP) { + udphdr = udp_hdr(skb); + if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) || + (udphdr->source == htons(68) && udphdr->dest == htons(67))) && + skb->len > 282) { + ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work); + return PACKET_DHCP; + } + } + + if (skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol == IPPROTO_ICMP) { + ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work); + return PACKET_ICMP; + } + + return PACKET_MAX; +} + +static void +rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct sk_buff *skb = 
tx_req->skb; + struct ieee80211_hdr *hdr = (void *)skb->data; + enum rtw89_core_tx_type tx_type; + enum btc_pkt_type pkt_type; + bool is_bmc; + u16 seq; + + seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) { + tx_type = rtw89_core_get_tx_type(rtwdev, skb); + tx_req->tx_type = tx_type; + } + is_bmc = (is_broadcast_ether_addr(hdr->addr1) || + is_multicast_ether_addr(hdr->addr1)); + + desc_info->seq = seq; + desc_info->pkt_size = skb->len; + desc_info->is_bmc = is_bmc; + desc_info->wd_page = true; + + switch (tx_req->tx_type) { + case RTW89_CORE_TX_TYPE_MGMT: + rtw89_core_tx_update_mgmt_info(rtwdev, tx_req); + break; + case RTW89_CORE_TX_TYPE_DATA: + rtw89_core_tx_update_data_info(rtwdev, tx_req); + pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req); + rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type); + break; + case RTW89_CORE_TX_TYPE_FWCMD: + rtw89_core_tx_update_h2c_info(rtwdev, tx_req); + break; + } +} + +void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) +{ + u8 ch_dma; + + ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); + + rtw89_hci_tx_kick_off(rtwdev, ch_dma); +} + +int rtw89_h2c_tx(struct rtw89_dev *rtwdev, + struct sk_buff *skb, bool fwdl) +{ + struct rtw89_core_tx_request tx_req = {0}; + u32 cnt; + int ret; + + tx_req.skb = skb; + tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD; + if (fwdl) + tx_req.desc_info.fw_dl = true; + + rtw89_core_tx_update_desc_info(rtwdev, &tx_req); + + if (!fwdl) + rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len); + + cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12); + if (cnt == 0) { + rtw89_err(rtwdev, "no tx fwcmd resource\n"); + return -ENOSPC; + } + + ret = rtw89_hci_tx_write(rtwdev, &tx_req); + if (ret) { + rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); + return ret; + } + rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12); + + return 0; +} + +int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) +{ + struct rtw89_core_tx_request tx_req = {0}; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + int ret; + + tx_req.skb = skb; + tx_req.sta = sta; + tx_req.vif = vif; + + rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true); + rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true); + rtw89_core_tx_update_desc_info(rtwdev, &tx_req); + ret = rtw89_hci_tx_write(rtwdev, &tx_req); + if (ret) { + rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); + return ret; + } + + if (qsel) + *qsel = tx_req.desc_info.qsel; + + return 0; +} + +static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) | + FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | + FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | + FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | + FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | + FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) | + FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) | + FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info) +{ + 
u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) | + FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) | + FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) | + FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) | + FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) | + FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) | + FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE, + desc_info->data_retry_lowest_rate); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | + FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) | + FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) | + FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) | + FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1); + + return cpu_to_le32(dword); +} + +void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc; + struct rtw89_txwd_info *txwd_info; + + txwd_body->dword0 = rtw89_build_txwd_body0(desc_info); + txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); + txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); + + if (!desc_info->en_wd_info) + return; + + txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); + txwd_info->dword0 = rtw89_build_txwd_info0(desc_info); + txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); + txwd_info->dword2 = rtw89_build_txwd_info2(desc_info); + txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); + +} +EXPORT_SYMBOL(rtw89_core_fill_txdesc); + +static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, + struct sk_buff *skb, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + bool rx_cnt_valid = false; + u8 plcp_size = 0; + u8 usr_num = 0; + u8 *phy_sts; + + rx_cnt_valid = RTW89_GET_RXINFO_RX_CNT_VLD(skb->data); + plcp_size = RTW89_GET_RXINFO_PLCP_LEN(skb->data) << 3; + usr_num = RTW89_GET_RXINFO_USR_NUM(skb->data); + if (usr_num > RTW89_PPDU_MAX_USR) { + rtw89_warn(rtwdev, "Invalid user number in mac info\n"); + return -EINVAL; + } + + phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE; + phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE; + /* 8-byte alignment */ + if (usr_num & BIT(0)) + phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE; + if (rx_cnt_valid) + phy_sts += RTW89_PPDU_MAC_RX_CNT_SIZE; + phy_sts += plcp_size; + + phy_ppdu->buf = phy_sts; + phy_ppdu->len = skb->data + skb->len - phy_sts; + + return 0; +} + +static void rtw89_core_rx_process_phy_ppdu_iter(void *data, + struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data; + + if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) + ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg); +} + +#define VAR_LEN 0xff +#define VAR_LEN_UNIT 8 +static 
u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, u8 *addr) +{ + static const u8 physts_ie_len_tab[32] = { + 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN, + VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN, + VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32 + }; + u16 ie_len; + u8 ie; + + ie = RTW89_GET_PHY_STS_IE_TYPE(addr); + if (physts_ie_len_tab[ie] != VAR_LEN) + ie_len = physts_ie_len_tab[ie]; + else + ie_len = RTW89_GET_PHY_STS_IE_LEN(addr) * VAR_LEN_UNIT; + + return ie_len; +} + +static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + s16 cfo; + + /* sign conversion for S(12,2) */ + cfo = sign_extend32(RTW89_GET_PHY_STS_IE0_CFO(addr), 11); + rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu); +} + +static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + u8 ie; + + ie = RTW89_GET_PHY_STS_IE_TYPE(addr); + switch (ie) { + case RTW89_PHYSTS_IE01_CMN_OFDM: + rtw89_core_parse_phy_status_ie01(rtwdev, addr, phy_ppdu); + break; + default: + break; + } + + return 0; +} + +static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + s8 *rssi = phy_ppdu->rssi; + u8 *buf = phy_ppdu->buf; + + phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf); + rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf)); + rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf)); + rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf)); + rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf)); +} + +static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + if (RTW89_GET_PHY_STS_LEN(phy_ppdu->buf) << 3 != phy_ppdu->len) { + rtw89_warn(rtwdev, "phy ppdu len mismatch\n"); + return -EINVAL; + } + rtw89_core_update_phy_ppdu(phy_ppdu); + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_core_rx_process_phy_ppdu_iter, + phy_ppdu); + + return 0; +} + +static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + u16 ie_len; + u8 *pos, *end; + + if (!phy_ppdu->to_self) + return 0; + + pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN; + end = (u8 *)phy_ppdu->buf + phy_ppdu->len; + while (pos < end) { + ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, pos); + rtw89_core_process_phy_status_ie(rtwdev, pos, phy_ppdu); + pos += ie_len; + if (pos > end || ie_len == 0) { + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "phy status parse failed\n"); + return -EINVAL; + } + } + + return 0; +} + +static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + int ret; + + ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n"); + else + phy_ppdu->valid = true; +} + +static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev, + const struct rtw89_rx_desc_info *desc_info, + bool rx_status) +{ + switch (desc_info->gi_ltf) { + case RTW89_GILTF_SGI_4XHE08: + case RTW89_GILTF_2XHE08: + case RTW89_GILTF_1XHE08: + return NL80211_RATE_INFO_HE_GI_0_8; + case RTW89_GILTF_2XHE16: + case RTW89_GILTF_1XHE16: + return NL80211_RATE_INFO_HE_GI_1_6; + case RTW89_GILTF_LGI_4XHE32: + return NL80211_RATE_INFO_HE_GI_3_2; + default: + rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf); + return rx_status ? 
NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX; + } +} + +static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct ieee80211_rx_status *status) +{ + u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; + u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf; + u16 data_rate; + bool ret; + + data_rate = desc_info->data_rate; + data_rate_mode = GET_DATA_RATE_MODE(data_rate); + if (data_rate_mode == DATA_RATE_MODE_NON_HT) { + rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); + /* No 4 CCK rates for 5G */ + if (status->band == NL80211_BAND_5GHZ) + rate_idx -= 4; + } else if (data_rate_mode == DATA_RATE_MODE_HT) { + rate_idx = GET_DATA_RATE_HT_IDX(data_rate); + } else if (data_rate_mode == DATA_RATE_MODE_VHT) { + rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); + } else if (data_rate_mode == DATA_RATE_MODE_HE) { + rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); + } else { + rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); + } + + if (desc_info->bw == RTW89_CHANNEL_WIDTH_80) + bw = RATE_INFO_BW_80; + else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40) + bw = RATE_INFO_BW_40; + else + bw = RATE_INFO_BW_20; + + gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false); + ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt && + status->rate_idx == rate_idx && + status->he_gi == gi_ltf && + status->bw == bw; + + return ret; +} + +struct rtw89_vif_rx_stats_iter_data { + struct rtw89_dev *rtwdev; + struct rtw89_rx_phy_ppdu *phy_ppdu; + struct rtw89_rx_desc_info *desc_info; + struct sk_buff *skb; + const u8 *bssid; +}; + +static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + struct rtw89_vif_rx_stats_iter_data *iter_data = data; + struct rtw89_dev *rtwdev = iter_data->rtwdev; + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; + struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; + struct sk_buff *skb = iter_data->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + const u8 *bssid = iter_data->bssid; + + if (!ether_addr_equal(vif->bss_conf.bssid, bssid)) + return; + + if (ieee80211_is_beacon(hdr->frame_control)) + pkt_stat->beacon_nr++; + + if (!ether_addr_equal(vif->addr, hdr->addr1)) + return; + + if (desc_info->data_rate < RTW89_HW_RATE_NR) + pkt_stat->rx_rate_cnt[desc_info->data_rate]++; + + rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false); +} + +static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb) +{ + struct rtw89_vif_rx_stats_iter_data iter_data; + + rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, false); + + iter_data.rtwdev = rtwdev; + iter_data.phy_ppdu = phy_ppdu; + iter_data.desc_info = desc_info; + iter_data.skb = skb; + iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data); + rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data); +} + +static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb) +{ + u8 band = desc_info->bb_sel ? 
RTW89_PHY_1 : RTW89_PHY_0; + int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band]; + struct sk_buff *skb_ppdu = NULL, *tmp; + struct ieee80211_rx_status *rx_status; + + if (curr > RTW89_MAX_PPDU_CNT) + return; + + skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) { + skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]); + rx_status = IEEE80211_SKB_RXCB(skb_ppdu); + if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status)) + rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status); + rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); + ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi); + rtwdev->napi_budget_countdown--; + } +} + +static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb) +{ + struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false, + .len = skb->len, + .to_self = desc_info->addr1_match, + .mac_id = desc_info->mac_id}; + int ret; + + if (desc_info->mac_info_valid) + rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu); + ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_TXRX, "process ppdu failed\n"); + + rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu); + rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb); + dev_kfree_skb_any(skb); +} + +static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb) +{ + switch (desc_info->pkt_type) { + case RTW89_CORE_RX_TYPE_C2H: + rtw89_fw_c2h_irqsafe(rtwdev, skb); + break; + case RTW89_CORE_RX_TYPE_PPDU_STAT: + rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb); + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n", + desc_info->pkt_type); + dev_kfree_skb_any(skb); + break; + } +} + +void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + u8 *data, u32 data_offset) +{ + struct rtw89_rxdesc_short *rxd_s; + struct rtw89_rxdesc_long *rxd_l; + u8 shift_len, drv_info_len; + + rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset); + desc_info->pkt_size = RTW89_GET_RXWD_PKT_SIZE(rxd_s); + desc_info->drv_info_size = RTW89_GET_RXWD_DRV_INFO_SIZE(rxd_s); + desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s); + desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s); + desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s); + desc_info->bw = RTW89_GET_RXWD_BW(rxd_s); + desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s); + desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s); + desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s); + desc_info->sr_en = RTW89_GET_RXWD_SR_EN(rxd_s); + desc_info->ppdu_cnt = RTW89_GET_RXWD_PPDU_CNT(rxd_s); + desc_info->ppdu_type = RTW89_GET_RXWD_PPDU_TYPE(rxd_s); + desc_info->free_run_cnt = RTW89_GET_RXWD_FREE_RUN_CNT(rxd_s); + desc_info->icv_err = RTW89_GET_RXWD_ICV_ERR(rxd_s); + desc_info->crc32_err = RTW89_GET_RXWD_CRC32_ERR(rxd_s); + desc_info->hw_dec = RTW89_GET_RXWD_HW_DEC(rxd_s); + desc_info->sw_dec = RTW89_GET_RXWD_SW_DEC(rxd_s); + desc_info->addr1_match = RTW89_GET_RXWD_A1_MATCH(rxd_s); + + shift_len = desc_info->shift << 1; /* 2-byte unit */ + drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */ + desc_info->offset = data_offset + shift_len + drv_info_len; + desc_info->ready = true; + + if (!desc_info->long_rxdesc) + return; + + rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset); + desc_info->frame_type = RTW89_GET_RXWD_TYPE(rxd_l); + desc_info->addr_cam_valid 
= RTW89_GET_RXWD_ADDR_CAM_VLD(rxd_l); + desc_info->addr_cam_id = RTW89_GET_RXWD_ADDR_CAM_ID(rxd_l); + desc_info->sec_cam_id = RTW89_GET_RXWD_SEC_CAM_ID(rxd_l); + desc_info->mac_id = RTW89_GET_RXWD_MAC_ID(rxd_l); + desc_info->rx_pl_id = RTW89_GET_RXWD_RX_PL_ID(rxd_l); +} +EXPORT_SYMBOL(rtw89_core_query_rxdesc); + +struct rtw89_core_iter_rx_status { + struct rtw89_dev *rtwdev; + struct ieee80211_rx_status *rx_status; + struct rtw89_rx_desc_info *desc_info; + u8 mac_id; +}; + +static +void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_core_iter_rx_status *iter_data = + (struct rtw89_core_iter_rx_status *)data; + struct ieee80211_rx_status *rx_status = iter_data->rx_status; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; + u8 mac_id = iter_data->mac_id; + + if (mac_id != rtwsta->mac_id) + return; + + rtwsta->rx_status = *rx_status; + rtwsta->rx_hw_rate = desc_info->data_rate; +} + +static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct ieee80211_rx_status *rx_status) +{ + struct rtw89_core_iter_rx_status iter_data; + + if (!desc_info->addr1_match || !desc_info->long_rxdesc) + return; + + if (desc_info->frame_type != RTW89_RX_TYPE_DATA) + return; + + iter_data.rtwdev = rtwdev; + iter_data.rx_status = rx_status; + iter_data.desc_info = desc_info; + iter_data.mac_id = desc_info->mac_id; + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_core_stats_sta_rx_status_iter, + &iter_data); +} + +static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_hw *hw = rtwdev->hw; + u16 data_rate; + u8 data_rate_mode; + + /* currently using single PHY */ + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + + if (desc_info->icv_err || desc_info->crc32_err) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (desc_info->hw_dec && + !(desc_info->sw_dec || desc_info->icv_err)) + rx_status->flag |= RX_FLAG_DECRYPTED; + + if (desc_info->bw == RTW89_CHANNEL_WIDTH_80) + rx_status->bw = RATE_INFO_BW_80; + else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40) + rx_status->bw = RATE_INFO_BW_40; + else + rx_status->bw = RATE_INFO_BW_20; + + data_rate = desc_info->data_rate; + data_rate_mode = GET_DATA_RATE_MODE(data_rate); + if (data_rate_mode == DATA_RATE_MODE_NON_HT) { + rx_status->encoding = RX_ENC_LEGACY; + rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); + /* No 4 CCK rates for 5G */ + if (rx_status->band == NL80211_BAND_5GHZ) + rx_status->rate_idx -= 4; + if (rtwdev->scanning) + rx_status->rate_idx = min_t(u8, rx_status->rate_idx, + ARRAY_SIZE(rtw89_bitrates) - 5); + } else if (data_rate_mode == DATA_RATE_MODE_HT) { + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate); + if (desc_info->gi_ltf) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + } else if (data_rate_mode == DATA_RATE_MODE_VHT) { + rx_status->encoding = RX_ENC_VHT; + rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); + rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1; + if (desc_info->gi_ltf) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + } else if (data_rate_mode == DATA_RATE_MODE_HE) { + rx_status->encoding = RX_ENC_HE; + rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); + rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1; + } else { + 
rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); + } + + /* he_gi is used to match ppdu, so we always fill it. */ + rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true); + rx_status->flag |= RX_FLAG_MACTIME_START; + rx_status->mactime = desc_info->free_run_cnt; + + rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status); +} + +static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (rtw89_disable_ps_mode || !chip->ps_mode_supported) + return RTW89_PS_MODE_NONE; + + if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) + return RTW89_PS_MODE_PWR_GATED; + + if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED)) + return RTW89_PS_MODE_CLK_GATED; + + if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF)) + return RTW89_PS_MODE_RFOFF; + + return RTW89_PS_MODE_NONE; +} + +static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info) +{ + struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; + u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; + struct sk_buff *skb_ppdu, *tmp; + + skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) { + skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]); + rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu); + ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi); + rtwdev->napi_budget_countdown--; + } +} + +void rtw89_core_rx(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; + u8 ppdu_cnt = desc_info->ppdu_cnt; + u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; + + if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) { + rtw89_core_rx_process_report(rtwdev, desc_info, skb); + return; + } + + if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) { + rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info); + ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + memset(rx_status, 0, sizeof(*rx_status)); + rtw89_core_update_rx_status(rtwdev, desc_info, rx_status); + if (desc_info->long_rxdesc && + BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) { + skb_queue_tail(&ppdu_sts->rx_queue[band], skb); + } else { + rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb); + ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi); + rtwdev->napi_budget_countdown--; + } +} +EXPORT_SYMBOL(rtw89_core_rx); + +void rtw89_core_napi_start(struct rtw89_dev *rtwdev) +{ + if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) + return; + + napi_enable(&rtwdev->napi); +} +EXPORT_SYMBOL(rtw89_core_napi_start); + +void rtw89_core_napi_stop(struct rtw89_dev *rtwdev) +{ + if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) + return; + + napi_synchronize(&rtwdev->napi); + napi_disable(&rtwdev->napi); +} +EXPORT_SYMBOL(rtw89_core_napi_stop); + +void rtw89_core_napi_init(struct rtw89_dev *rtwdev) +{ + init_dummy_netdev(&rtwdev->netdev); + netif_napi_add(&rtwdev->netdev, &rtwdev->napi, + rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT); +} +EXPORT_SYMBOL(rtw89_core_napi_init); + +void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev) +{ + rtw89_core_napi_stop(rtwdev); + netif_napi_del(&rtwdev->napi); +} +EXPORT_SYMBOL(rtw89_core_napi_deinit); + +static void rtw89_core_ba_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = + container_of(work, struct rtw89_dev, ba_work); + struct rtw89_txq *rtwtxq, 
*tmp; + int ret; + + spin_lock_bh(&rtwdev->ba_lock); + list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { + struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); + struct ieee80211_sta *sta = txq->sta; + struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL; + u8 tid = txq->tid; + + if (!sta) { + rtw89_warn(rtwdev, "cannot start BA without sta\n"); + goto skip_ba_work; + } + + if (rtwsta->disassoc) { + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "cannot start BA with disassoc sta\n"); + goto skip_ba_work; + } + + ret = ieee80211_start_tx_ba_session(sta, tid, 0); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "failed to setup BA session for %pM:%2d: %d\n", + sta->addr, tid, ret); + if (ret == -EINVAL) + set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags); + } +skip_ba_work: + list_del_init(&rtwtxq->list); + } + spin_unlock_bh(&rtwdev->ba_lock); +} + +static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev, + struct ieee80211_sta *sta) +{ + struct rtw89_txq *rtwtxq, *tmp; + + spin_lock_bh(&rtwdev->ba_lock); + list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { + struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); + + if (sta == txq->sta) + list_del_init(&rtwtxq->list); + } + spin_unlock_bh(&rtwdev->ba_lock); +} + +static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev, + struct rtw89_txq *rtwtxq, + struct sk_buff *skb) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); + struct ieee80211_sta *sta = txq->sta; + struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL; + + if (unlikely(skb_get_queue_mapping(skb) == IEEE80211_AC_VO)) + return; + + if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) + return; + + if (unlikely(!sta)) + return; + + if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags))) + return; + + if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) { + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU; + return; + } + + spin_lock_bh(&rtwdev->ba_lock); + if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) { + list_add_tail(&rtwtxq->list, &rtwdev->ba_list); + ieee80211_queue_work(hw, &rtwdev->ba_work); + } + spin_unlock_bh(&rtwdev->ba_lock); +} + +static void rtw89_core_txq_push(struct rtw89_dev *rtwdev, + struct rtw89_txq *rtwtxq, + unsigned long frame_cnt, + unsigned long byte_cnt) +{ + struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); + struct ieee80211_vif *vif = txq->vif; + struct ieee80211_sta *sta = txq->sta; + struct sk_buff *skb; + unsigned long i; + int ret; + + for (i = 0; i < frame_cnt; i++) { + skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq); + if (!skb) { + rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n"); + return; + } + rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb); + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL); + if (ret) { + rtw89_err(rtwdev, "failed to push txq: %d\n", ret); + ieee80211_free_txskb(rtwdev->hw, skb); + break; + } + } +} + +static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid) +{ + u8 qsel, ch_dma; + + qsel = rtw89_core_get_qsel(rtwdev, tid); + ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); + + return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma); +} + +static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev, + struct ieee80211_txq *txq, + unsigned long *frame_cnt, + bool *sched_txq, bool *reinvoke) +{ + struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv; + struct ieee80211_sta *sta = txq->sta; + struct rtw89_sta *rtwsta = sta ? 
(struct rtw89_sta *)sta->drv_priv : NULL; + + if (!sta || rtwsta->max_agg_wait <= 0) + return false; + + if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID) + return false; + + if (*frame_cnt > 1) { + *frame_cnt -= 1; + *sched_txq = true; + *reinvoke = true; + rtwtxq->wait_cnt = 1; + return false; + } + + if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta->max_agg_wait) { + *reinvoke = true; + rtwtxq->wait_cnt++; + return true; + } + + rtwtxq->wait_cnt = 0; + return false; +} + +static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct ieee80211_txq *txq; + struct rtw89_txq *rtwtxq; + unsigned long frame_cnt; + unsigned long byte_cnt; + u32 tx_resource; + bool sched_txq; + + ieee80211_txq_schedule_start(hw, ac); + while ((txq = ieee80211_next_txq(hw, ac))) { + rtwtxq = (struct rtw89_txq *)txq->drv_priv; + tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid); + sched_txq = false; + + ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); + if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) { + ieee80211_return_txq(hw, txq, true); + continue; + } + frame_cnt = min_t(unsigned long, frame_cnt, tx_resource); + rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt); + ieee80211_return_txq(hw, txq, sched_txq); + if (frame_cnt != 0) + rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid)); + } + ieee80211_txq_schedule_end(hw, ac); +} + +static void rtw89_core_txq_work(struct work_struct *w) +{ + struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work); + bool reinvoke = false; + u8 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + rtw89_core_txq_schedule(rtwdev, ac, &reinvoke); + + if (reinvoke) { + /* reinvoke to process the last frame */ + mod_delayed_work(rtwdev->txq_wq, &rtwdev->txq_reinvoke_work, 1); + } +} + +static void rtw89_core_txq_reinvoke_work(struct work_struct *w) +{ + struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, + txq_reinvoke_work.work); + + queue_work(rtwdev->txq_wq, &rtwdev->txq_work); +} + +static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev, + u32 throughput, u64 cnt) +{ + if (cnt < 100) + return RTW89_TFC_IDLE; + if (throughput > 50) + return RTW89_TFC_HIGH; + if (throughput > 10) + return RTW89_TFC_MID; + if (throughput > 2) + return RTW89_TFC_LOW; + return RTW89_TFC_ULTRA_LOW; +} + +static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev, + struct rtw89_traffic_stats *stats) +{ + enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; + enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; + + stats->tx_throughput_raw = (u32)(stats->tx_unicast >> RTW89_TP_SHIFT); + stats->rx_throughput_raw = (u32)(stats->rx_unicast >> RTW89_TP_SHIFT); + + ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw); + ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw); + + stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); + stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); + stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput, + stats->tx_cnt); + stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput, + stats->rx_cnt); + stats->tx_avg_len = stats->tx_cnt ? + DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0; + stats->rx_avg_len = stats->rx_cnt ? 
+ DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0; + + stats->tx_unicast = 0; + stats->rx_unicast = 0; + stats->tx_cnt = 0; + stats->rx_cnt = 0; + + if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv) + return true; + + return false; +} + +static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + bool tfc_changed; + + tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats); + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats); + + return tfc_changed; +} + +static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION) + return; + + if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE && + rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE) + rtw89_enter_lps(rtwdev, rtwvif->mac_id); +} + +static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_vif_enter_lps(rtwdev, rtwvif); +} + +void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, + struct rtw89_traffic_stats *stats) +{ + stats->tx_unicast = 0; + stats->rx_unicast = 0; + stats->tx_cnt = 0; + stats->rx_cnt = 0; + ewma_tp_init(&stats->tx_ewma_tp); + ewma_tp_init(&stats->rx_ewma_tp); +} + +static void rtw89_track_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + track_work.work); + bool tfc_changed; + + mutex_lock(&rtwdev->mutex); + + if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) + goto out; + + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, + RTW89_TRACK_WORK_PERIOD); + + tfc_changed = rtw89_traffic_stats_track(rtwdev); + if (rtwdev->scanning) + goto out; + + rtw89_leave_lps(rtwdev); + + if (tfc_changed) { + rtw89_hci_recalc_int_mit(rtwdev); + rtw89_btc_ntfy_wl_sta(rtwdev); + } + rtw89_mac_bf_monitor_track(rtwdev); + rtw89_phy_stat_track(rtwdev); + rtw89_phy_env_monitor_track(rtwdev); + rtw89_phy_dig(rtwdev); + rtw89_chip_rfk_track(rtwdev); + rtw89_phy_ra_update(rtwdev); + rtw89_phy_cfo_track(rtwdev); + + if (rtwdev->lps_enabled && !rtwdev->btc.lps) + rtw89_enter_lps_track(rtwdev); + +out: + mutex_unlock(&rtwdev->mutex); +} + +u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size) +{ + unsigned long bit; + + bit = find_first_zero_bit(addr, size); + if (bit < size) + set_bit(bit, addr); + + return bit; +} + +void rtw89_core_release_bit_map(unsigned long *addr, u8 bit) +{ + clear_bit(bit, addr); +} + +void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits) +{ + bitmap_zero(addr, nbits); +} + +#define RTW89_TYPE_MAPPING(_type) \ + case NL80211_IFTYPE_ ## _type: \ + rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \ + break +void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + switch (vif->type) { + RTW89_TYPE_MAPPING(ADHOC); + RTW89_TYPE_MAPPING(STATION); + RTW89_TYPE_MAPPING(AP); + RTW89_TYPE_MAPPING(MONITOR); + RTW89_TYPE_MAPPING(MESH_POINT); + default: + WARN_ON(1); + break; + } + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + rtwvif->net_type = RTW89_NET_TYPE_AP_MODE; + rtwvif->self_role = RTW89_SELF_ROLE_AP; + break; + case NL80211_IFTYPE_ADHOC: + rtwvif->net_type = RTW89_NET_TYPE_AD_HOC; + rtwvif->self_role = RTW89_SELF_ROLE_CLIENT; + break; + case NL80211_IFTYPE_STATION: + if (assoc) { + rtwvif->net_type = RTW89_NET_TYPE_INFRA; + rtwvif->trigger = 
vif->bss_conf.he_support;
+		} else {
+			rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
+			rtwvif->trigger = false;
+		}
+		rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
+		rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
+		       struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta)
+{
+	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+	int i;
+
+	rtwsta->rtwvif = rtwvif;
+	rtwsta->prev_rssi = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		rtw89_core_txq_init(rtwdev, sta->txq[i]);
+
+	ewma_rssi_init(&rtwsta->avg_rssi);
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		rtwvif->mgd.ap = sta;
+		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
+					 BTC_ROLE_MSTS_STA_CONN_START);
+		rtw89_chip_rfk_channel(rtwdev);
+	}
+
+	return 0;
+}
+
+int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta)
+{
+	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+	rtwdev->total_sta_assoc--;
+	rtwsta->disassoc = true;
+
+	return 0;
+}
+
+int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta)
+{
+	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+	int ret;
+
+	rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
+	rtw89_mac_bf_disassoc(rtwdev, vif, sta);
+	rtw89_core_free_sta_pending_ba(rtwdev, sta);
+
+	rtw89_vif_type_mapping(vif, false);
+
+	ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+		return ret;
+	}
+
+	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c join info\n");
+		return ret;
+	}
+
+	/* update cam aid mac_id net_type */
+	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta)
+{
+	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+	int ret;
+
+	rtw89_vif_type_mapping(vif, true);
+
+	ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+		return ret;
+	}
+
+	/* for station mode, assign the mac_id from itself */
+	if (vif->type == NL80211_IFTYPE_STATION)
+		rtwsta->mac_id = rtwvif->mac_id;
+
+	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c join info\n");
+		return ret;
+	}
+
+	/* update cam aid mac_id net_type */
+	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+		return ret;
+	}
+
+	ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwsta->mac_id);
+	if (ret) {
+		rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+		return ret;
+	}
+
+	rtwdev->total_sta_assoc++;
+	rtw89_phy_ra_assoc(rtwdev, sta);
+	rtw89_mac_bf_assoc(rtwdev, vif, sta);
+	rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
+					 BTC_ROLE_MSTS_STA_CONN_END);
+		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
+	}
+
+	return ret;
+}
+
+int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+			  struct ieee80211_vif *vif,
+			  struct ieee80211_sta *sta)
+{
+	struct rtw89_vif *rtwvif =
(struct rtw89_vif *)vif->drv_priv; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + + if (vif->type == NL80211_IFTYPE_STATION) + rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, + BTC_ROLE_MSTS_STA_DIS_CONN); + + return 0; +} + +static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev, + struct ieee80211_sta_ht_cap *ht_cap) +{ + static const __le16 highest[RF_PATH_MAX] = { + cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600), + }; + struct rtw89_hal *hal = &rtwdev->hal; + u8 nss = hal->rx_nss; + int i; + + ht_cap->ht_supported = true; + ht_cap->cap = 0; + ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_MAX_AMSDU | + IEEE80211_HT_CAP_TX_STBC | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; + ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_DSSSCCK40 | + IEEE80211_HT_CAP_SGI_40; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + for (i = 0; i < nss; i++) + ht_cap->mcs.rx_mask[i] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = highest[nss - 1]; +} + +static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev, + struct ieee80211_sta_vht_cap *vht_cap) +{ + static const __le16 highest[RF_PATH_MAX] = { + cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733), + }; + struct rtw89_hal *hal = &rtwdev->hal; + u16 tx_mcs_map = 0, rx_mcs_map = 0; + u8 sts_cap = 3; + int i; + + for (i = 0; i < 8; i++) { + if (i < hal->tx_nss) + tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + if (i < hal->rx_nss) + rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); + } + + vht_cap->vht_supported = true; + vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + 0; + vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map); + vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1]; + vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1]; +} + +#define RTW89_SBAND_IFTYPES_NR 2 + +static void rtw89_init_he_cap(struct rtw89_dev *rtwdev, + enum nl80211_band band, + struct ieee80211_supported_band *sband) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_hal *hal = &rtwdev->hal; + struct ieee80211_sband_iftype_data *iftype_data; + bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) || + (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV); + u16 mcs_map = 0; + int i; + int nss = hal->rx_nss; + int idx = 0; + + iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL); + if (!iftype_data) + return; + + for (i = 0; i < 8; i++) { + if (i < nss) + mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); + else + mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); + } + + for (i = 0; i < NUM_NL80211_IFTYPES; i++) { + struct ieee80211_sta_he_cap *he_cap; + u8 *mac_cap_info; + u8 *phy_cap_info; + + switch (i) { + case 
NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + break; + default: + continue; + } + + if (idx >= RTW89_SBAND_IFTYPES_NR) { + rtw89_warn(rtwdev, "run out of iftype_data\n"); + break; + } + + iftype_data[idx].types_mask = BIT(i); + he_cap = &iftype_data[idx].he_cap; + mac_cap_info = he_cap->he_cap_elem.mac_cap_info; + phy_cap_info = he_cap->he_cap_elem.phy_cap_info; + + he_cap->has_he = true; + if (i == NL80211_IFTYPE_AP) + mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; + if (i == NL80211_IFTYPE_STATION) + mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; + mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK | + IEEE80211_HE_MAC_CAP2_BSR; + mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2; + if (i == NL80211_IFTYPE_AP) + mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL; + mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS | + IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; + if (i == NL80211_IFTYPE_STATION) + mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; + phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; + phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_DOPPLER_TX; + phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM; + if (i == NL80211_IFTYPE_STATION) + phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2; + if (i == NL80211_IFTYPE_AP) + phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; + phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; + phy_cap_info[5] = no_ng16 ? 
0 : + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; + phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE; + phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP7_MAX_NC_1; + phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996; + phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; + if (i == NL80211_IFTYPE_STATION) + phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; + he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map); + he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map); + + idx++; + } + + sband->iftype_data = iftype_data; + sband->n_iftype_data = idx; +} + +static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL; + u32 size = sizeof(struct ieee80211_supported_band); + + sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL); + if (!sband_2ghz) + goto err; + rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap); + rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz); + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz; + + sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL); + if (!sband_5ghz) + goto err; + rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap); + rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap); + rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz); + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz; + + return 0; + +err: + hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; + hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; + if (sband_2ghz) + kfree(sband_2ghz->iftype_data); + if (sband_5ghz) + kfree(sband_5ghz->iftype_data); + kfree(sband_2ghz); + kfree(sband_5ghz); + return -ENOMEM; +} + +static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + + kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data); + kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data); + kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); + kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); + hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; + hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; +} + +static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev) +{ + int i; + + for (i = 0; i < RTW89_PHY_MAX; i++) + skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]); + for (i = 0; i < RTW89_PHY_MAX; i++) + rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX; +} + +int rtw89_core_start(struct rtw89_dev *rtwdev) +{ + int ret; + + rtwdev->mac.qta_mode = RTW89_QTA_SCC; + ret = rtw89_mac_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret); + return ret; + } + + rtw89_btc_ntfy_poweron(rtwdev); + + /* efuse process */ + + /* pre-config BB/RF, BB reset/RFC reset */ + rtw89_mac_disable_bb_rf(rtwdev); + rtw89_mac_enable_bb_rf(rtwdev); + rtw89_phy_init_bb_reg(rtwdev); + rtw89_phy_init_rf_reg(rtwdev); + + rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL); + + 
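+	/* BB/RF are up at this point; initialize the PHY dynamic mechanisms
+	 * before enabling PPDU status reports and starting the HCI layer.
+	 */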
rtw89_phy_dm_init(rtwdev);
+
+	rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+	rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0);
+
+	ret = rtw89_hci_start(rtwdev);
+	if (ret) {
+		rtw89_err(rtwdev, "failed to start hci\n");
+		return ret;
+	}
+
+	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
+				     RTW89_TRACK_WORK_PERIOD);
+
+	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+
+	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+
+	return 0;
+}
+
+void rtw89_core_stop(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_btc *btc = &rtwdev->btc;
+
+	/* Prevent stopping twice; enter_ips and ops_stop */
+	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+		return;
+
+	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
+
+	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+
+	mutex_unlock(&rtwdev->mutex);
+
+	cancel_work_sync(&rtwdev->c2h_work);
+	cancel_work_sync(&btc->eapol_notify_work);
+	cancel_work_sync(&btc->arp_notify_work);
+	cancel_work_sync(&btc->dhcp_notify_work);
+	cancel_work_sync(&btc->icmp_notify_work);
+	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
+	cancel_delayed_work_sync(&rtwdev->track_work);
+	cancel_delayed_work_sync(&rtwdev->coex_act1_work);
+	cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
+	cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
+	cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+
+	mutex_lock(&rtwdev->mutex);
+
+	rtw89_btc_ntfy_poweroff(rtwdev);
+	rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
+	rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
+	rtw89_hci_stop(rtwdev);
+	rtw89_hci_deinit(rtwdev);
+	rtw89_mac_pwr_off(rtwdev);
+	rtw89_hci_reset(rtwdev);
+}
+
+int rtw89_core_init(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_btc *btc = &rtwdev->btc;
+	int ret;
+
+	INIT_LIST_HEAD(&rtwdev->ba_list);
+	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
+	INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+	INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
+	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
+	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
+	INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
+	INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
+	INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
+	INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
+	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+	spin_lock_init(&rtwdev->ba_lock);
+	mutex_init(&rtwdev->mutex);
+	mutex_init(&rtwdev->rf_mutex);
+	rtwdev->total_sta_assoc = 0;
+
+	INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+	skb_queue_head_init(&rtwdev->c2h_queue);
+	rtw89_core_ppdu_sts_init(rtwdev);
+	rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
+
+	rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+	rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+
+	INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
+	INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
+	INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
+	INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
+
+	ret = rtw89_load_firmware(rtwdev);
+	if (ret) {
+		rtw89_warn(rtwdev, "no firmware loaded\n");
+		return ret;
+	}
+	rtw89_ser_init(rtwdev);
+
+	return 0;
+}
+EXPORT_SYMBOL(rtw89_core_init);
+
+void rtw89_core_deinit(struct rtw89_dev *rtwdev)
+{
+	rtw89_ser_deinit(rtwdev);
+
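+	/* unload firmware and free queued early H2C commands before tearing
+	 * down the TX workqueue and mutexes
+	 */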
rtw89_unload_firmware(rtwdev); + rtw89_fw_free_all_early_h2c(rtwdev); + + destroy_workqueue(rtwdev->txq_wq); + mutex_destroy(&rtwdev->rf_mutex); + mutex_destroy(&rtwdev->mutex); +} +EXPORT_SYMBOL(rtw89_core_deinit); + +static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev) +{ + u8 cv; + + cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK); + if (cv <= CHIP_CBV) { + if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD) + cv = CHIP_CAV; + else + cv = CHIP_CBV; + } + + rtwdev->hal.cv = cv; +} + +static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_partial_init(rtwdev); + if (ret) + return ret; + + ret = rtw89_parse_efuse_map(rtwdev); + if (ret) + return ret; + + ret = rtw89_parse_phycap_map(rtwdev); + if (ret) + return ret; + + ret = rtw89_mac_setup_phycap(rtwdev); + if (ret) + return ret; + + rtw89_mac_pwr_off(rtwdev); + + return 0; +} + +static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev) +{ + rtw89_chip_fem_setup(rtwdev); + + return 0; +} + +int rtw89_chip_info_setup(struct rtw89_dev *rtwdev) +{ + int ret; + + rtw89_read_chip_ver(rtwdev); + + ret = rtw89_wait_firmware_completion(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to wait firmware completion\n"); + return ret; + } + + ret = rtw89_fw_recognize(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to recognize firmware\n"); + return ret; + } + + ret = rtw89_chip_efuse_info_setup(rtwdev); + if (ret) + return ret; + + ret = rtw89_chip_board_info_setup(rtwdev); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(rtw89_chip_info_setup); + +static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + struct rtw89_efuse *efuse = &rtwdev->efuse; + int ret; + int tx_headroom = IEEE80211_HT_CTL_LEN; + + hw->vif_data_size = sizeof(struct rtw89_vif); + hw->sta_data_size = sizeof(struct rtw89_sta); + hw->txq_data_size = sizeof(struct rtw89_txq); + + SET_IEEE80211_PERM_ADDR(hw, efuse->addr); + + hw->extra_tx_headroom = tx_headroom; + hw->queues = IEEE80211_NUM_ACS; + hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM; + hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM; + + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, MFP_CAPABLE); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1; + hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1; + + hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + + ret = rtw89_core_set_supported_band(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to set supported band\n"); + return ret; + } + + hw->wiphy->reg_notifier = rtw89_regd_notifier; + hw->wiphy->sar_capa = &rtw89_sar_capa; + + ret = ieee80211_register_hw(hw); + if (ret) { + rtw89_err(rtwdev, "failed to register hw\n"); + goto err; + } + + ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier); + if (ret) { + rtw89_err(rtwdev, "failed to init regd\n"); + goto err; + } + + return 0; + +err: + return ret; +} + +static void 
rtw89_core_unregister_hw(struct rtw89_dev *rtwdev) +{ + struct ieee80211_hw *hw = rtwdev->hw; + + ieee80211_unregister_hw(hw); + rtw89_core_clr_supported_band(rtwdev); +} + +int rtw89_core_register(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_core_register_hw(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to register core hw\n"); + return ret; + } + + rtw89_debugfs_init(rtwdev); + + return 0; +} +EXPORT_SYMBOL(rtw89_core_register); + +void rtw89_core_unregister(struct rtw89_dev *rtwdev) +{ + rtw89_core_unregister_hw(rtwdev); +} +EXPORT_SYMBOL(rtw89_core_unregister); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless core module"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h new file mode 100644 index 00000000000000..c2885e4dd882f0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -0,0 +1,3384 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_CORE_H__ +#define __RTW89_CORE_H__ + +#include +#include +#include +#include +#include +#include + +struct rtw89_dev; + +extern const struct ieee80211_ops rtw89_ops; +extern const struct rtw89_chip_info rtw8852a_chip_info; + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKBYTE4 0xff00000000ULL +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define RFREG_MASK 0xfffff +#define INV_RF_DATA 0xffffffff + +#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2) +#define CFO_TRACK_MAX_USER 64 +#define MAX_RSSI 110 +#define RSSI_FACTOR 1 +#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI) +#define RTW89_MAX_HW_PORT_NUM 5 + +#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0) +#define RTW89_HTC_VARIANT_HE 3 +#define RTW89_HTC_MASK_CTL_ID GENMASK(5, 2) +#define RTW89_HTC_VARIANT_HE_CID_OM 1 +#define RTW89_HTC_VARIANT_HE_CID_CAS 6 +#define RTW89_HTC_MASK_CTL_INFO GENMASK(31, 6) + +#define RTW89_HTC_MASK_HTC_OM_RX_NSS GENMASK(8, 6) +enum htc_om_channel_width { + HTC_OM_CHANNEL_WIDTH_20 = 0, + HTC_OM_CHANNEL_WIDTH_40 = 1, + HTC_OM_CHANNEL_WIDTH_80 = 2, + HTC_OM_CHANNEL_WIDTH_160_OR_80_80 = 3, +}; +#define RTW89_HTC_MASK_HTC_OM_CH_WIDTH GENMASK(10, 9) +#define RTW89_HTC_MASK_HTC_OM_UL_MU_DIS BIT(11) +#define RTW89_HTC_MASK_HTC_OM_TX_NSTS GENMASK(14, 12) +#define RTW89_HTC_MASK_HTC_OM_ER_SU_DIS BIT(15) +#define RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR BIT(16) +#define RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS BIT(17) + +enum rtw89_subband { + RTW89_CH_2G = 0, + RTW89_CH_5G_BAND_1 = 1, + /* RTW89_CH_5G_BAND_2 = 2, unused */ + RTW89_CH_5G_BAND_3 = 3, + RTW89_CH_5G_BAND_4 = 4, + + RTW89_SUBBAND_NR, +}; + +enum rtw89_hci_type { + RTW89_HCI_TYPE_PCIE, + RTW89_HCI_TYPE_USB, + RTW89_HCI_TYPE_SDIO, +}; + +enum rtw89_core_chip_id { + RTL8852A, + RTL8852B, + RTL8852C, +}; + +enum rtw89_cv { + CHIP_CAV, + CHIP_CBV, + CHIP_CCV, + CHIP_CDV, + CHIP_CEV, + CHIP_CFV, + CHIP_CV_MAX, + CHIP_CV_INVALID = CHIP_CV_MAX, +}; + +enum rtw89_core_tx_type { + RTW89_CORE_TX_TYPE_DATA, + RTW89_CORE_TX_TYPE_MGMT, + RTW89_CORE_TX_TYPE_FWCMD, +}; + +enum rtw89_core_rx_type { + RTW89_CORE_RX_TYPE_WIFI = 0, + RTW89_CORE_RX_TYPE_PPDU_STAT = 1, + RTW89_CORE_RX_TYPE_CHAN_INFO = 2, + RTW89_CORE_RX_TYPE_BB_SCOPE = 3, + RTW89_CORE_RX_TYPE_F2P_TXCMD = 4, + RTW89_CORE_RX_TYPE_SS2FW = 5, + RTW89_CORE_RX_TYPE_TX_REPORT = 6, + 
RTW89_CORE_RX_TYPE_TX_REL_HOST = 7, + RTW89_CORE_RX_TYPE_DFS_REPORT = 8, + RTW89_CORE_RX_TYPE_TX_REL_CPU = 9, + RTW89_CORE_RX_TYPE_C2H = 10, + RTW89_CORE_RX_TYPE_CSI = 11, + RTW89_CORE_RX_TYPE_CQI = 12, +}; + +enum rtw89_txq_flags { + RTW89_TXQ_F_AMPDU = 0, + RTW89_TXQ_F_BLOCK_BA = 1, +}; + +enum rtw89_net_type { + RTW89_NET_TYPE_NO_LINK = 0, + RTW89_NET_TYPE_AD_HOC = 1, + RTW89_NET_TYPE_INFRA = 2, + RTW89_NET_TYPE_AP_MODE = 3, +}; + +enum rtw89_wifi_role { + RTW89_WIFI_ROLE_NONE, + RTW89_WIFI_ROLE_STATION, + RTW89_WIFI_ROLE_AP, + RTW89_WIFI_ROLE_AP_VLAN, + RTW89_WIFI_ROLE_ADHOC, + RTW89_WIFI_ROLE_ADHOC_MASTER, + RTW89_WIFI_ROLE_MESH_POINT, + RTW89_WIFI_ROLE_MONITOR, + RTW89_WIFI_ROLE_P2P_DEVICE, + RTW89_WIFI_ROLE_P2P_CLIENT, + RTW89_WIFI_ROLE_P2P_GO, + RTW89_WIFI_ROLE_NAN, + RTW89_WIFI_ROLE_MLME_MAX +}; + +enum rtw89_upd_mode { + RTW89_VIF_CREATE, + RTW89_VIF_REMOVE, + RTW89_VIF_TYPE_CHANGE, + RTW89_VIF_INFO_CHANGE, + RTW89_VIF_CON_DISCONN +}; + +enum rtw89_self_role { + RTW89_SELF_ROLE_CLIENT, + RTW89_SELF_ROLE_AP, + RTW89_SELF_ROLE_AP_CLIENT +}; + +enum rtw89_msk_sO_el { + RTW89_NO_MSK, + RTW89_SMA, + RTW89_TMA, + RTW89_BSSID +}; + +enum rtw89_sch_tx_sel { + RTW89_SCH_TX_SEL_ALL, + RTW89_SCH_TX_SEL_HIQ, + RTW89_SCH_TX_SEL_MG0, + RTW89_SCH_TX_SEL_MACID, +}; + +/* RTW89_ADDR_CAM_SEC_NONE : not enabled + * RTW89_ADDR_CAM_SEC_ALL_UNI : 0 - 6 unicast + * RTW89_ADDR_CAM_SEC_NORMAL : 0 - 1 unicast, 2 - 4 group, 5 - 6 BIP + * RTW89_ADDR_CAM_SEC_4GROUP : 0 - 1 unicast, 2 - 5 group, 6 BIP + */ +enum rtw89_add_cam_sec_mode { + RTW89_ADDR_CAM_SEC_NONE = 0, + RTW89_ADDR_CAM_SEC_ALL_UNI = 1, + RTW89_ADDR_CAM_SEC_NORMAL = 2, + RTW89_ADDR_CAM_SEC_4GROUP = 3, +}; + +enum rtw89_sec_key_type { + RTW89_SEC_KEY_TYPE_NONE = 0, + RTW89_SEC_KEY_TYPE_WEP40 = 1, + RTW89_SEC_KEY_TYPE_WEP104 = 2, + RTW89_SEC_KEY_TYPE_TKIP = 3, + RTW89_SEC_KEY_TYPE_WAPI = 4, + RTW89_SEC_KEY_TYPE_GCMSMS4 = 5, + RTW89_SEC_KEY_TYPE_CCMP128 = 6, + RTW89_SEC_KEY_TYPE_CCMP256 = 7, + RTW89_SEC_KEY_TYPE_GCMP128 = 8, + RTW89_SEC_KEY_TYPE_GCMP256 = 9, + RTW89_SEC_KEY_TYPE_BIP_CCMP128 = 10, +}; + +enum rtw89_port { + RTW89_PORT_0 = 0, + RTW89_PORT_1 = 1, + RTW89_PORT_2 = 2, + RTW89_PORT_3 = 3, + RTW89_PORT_4 = 4, + RTW89_PORT_NUM +}; + +enum rtw89_band { + RTW89_BAND_2G = 0, + RTW89_BAND_5G = 1, + RTW89_BAND_MAX, +}; + +enum rtw89_hw_rate { + RTW89_HW_RATE_CCK1 = 0x0, + RTW89_HW_RATE_CCK2 = 0x1, + RTW89_HW_RATE_CCK5_5 = 0x2, + RTW89_HW_RATE_CCK11 = 0x3, + RTW89_HW_RATE_OFDM6 = 0x4, + RTW89_HW_RATE_OFDM9 = 0x5, + RTW89_HW_RATE_OFDM12 = 0x6, + RTW89_HW_RATE_OFDM18 = 0x7, + RTW89_HW_RATE_OFDM24 = 0x8, + RTW89_HW_RATE_OFDM36 = 0x9, + RTW89_HW_RATE_OFDM48 = 0xA, + RTW89_HW_RATE_OFDM54 = 0xB, + RTW89_HW_RATE_MCS0 = 0x80, + RTW89_HW_RATE_MCS1 = 0x81, + RTW89_HW_RATE_MCS2 = 0x82, + RTW89_HW_RATE_MCS3 = 0x83, + RTW89_HW_RATE_MCS4 = 0x84, + RTW89_HW_RATE_MCS5 = 0x85, + RTW89_HW_RATE_MCS6 = 0x86, + RTW89_HW_RATE_MCS7 = 0x87, + RTW89_HW_RATE_MCS8 = 0x88, + RTW89_HW_RATE_MCS9 = 0x89, + RTW89_HW_RATE_MCS10 = 0x8A, + RTW89_HW_RATE_MCS11 = 0x8B, + RTW89_HW_RATE_MCS12 = 0x8C, + RTW89_HW_RATE_MCS13 = 0x8D, + RTW89_HW_RATE_MCS14 = 0x8E, + RTW89_HW_RATE_MCS15 = 0x8F, + RTW89_HW_RATE_MCS16 = 0x90, + RTW89_HW_RATE_MCS17 = 0x91, + RTW89_HW_RATE_MCS18 = 0x92, + RTW89_HW_RATE_MCS19 = 0x93, + RTW89_HW_RATE_MCS20 = 0x94, + RTW89_HW_RATE_MCS21 = 0x95, + RTW89_HW_RATE_MCS22 = 0x96, + RTW89_HW_RATE_MCS23 = 0x97, + RTW89_HW_RATE_MCS24 = 0x98, + RTW89_HW_RATE_MCS25 = 0x99, + RTW89_HW_RATE_MCS26 = 0x9A, + RTW89_HW_RATE_MCS27 = 0x9B, + RTW89_HW_RATE_MCS28 = 0x9C, + 
RTW89_HW_RATE_MCS29 = 0x9D, + RTW89_HW_RATE_MCS30 = 0x9E, + RTW89_HW_RATE_MCS31 = 0x9F, + RTW89_HW_RATE_VHT_NSS1_MCS0 = 0x100, + RTW89_HW_RATE_VHT_NSS1_MCS1 = 0x101, + RTW89_HW_RATE_VHT_NSS1_MCS2 = 0x102, + RTW89_HW_RATE_VHT_NSS1_MCS3 = 0x103, + RTW89_HW_RATE_VHT_NSS1_MCS4 = 0x104, + RTW89_HW_RATE_VHT_NSS1_MCS5 = 0x105, + RTW89_HW_RATE_VHT_NSS1_MCS6 = 0x106, + RTW89_HW_RATE_VHT_NSS1_MCS7 = 0x107, + RTW89_HW_RATE_VHT_NSS1_MCS8 = 0x108, + RTW89_HW_RATE_VHT_NSS1_MCS9 = 0x109, + RTW89_HW_RATE_VHT_NSS2_MCS0 = 0x110, + RTW89_HW_RATE_VHT_NSS2_MCS1 = 0x111, + RTW89_HW_RATE_VHT_NSS2_MCS2 = 0x112, + RTW89_HW_RATE_VHT_NSS2_MCS3 = 0x113, + RTW89_HW_RATE_VHT_NSS2_MCS4 = 0x114, + RTW89_HW_RATE_VHT_NSS2_MCS5 = 0x115, + RTW89_HW_RATE_VHT_NSS2_MCS6 = 0x116, + RTW89_HW_RATE_VHT_NSS2_MCS7 = 0x117, + RTW89_HW_RATE_VHT_NSS2_MCS8 = 0x118, + RTW89_HW_RATE_VHT_NSS2_MCS9 = 0x119, + RTW89_HW_RATE_VHT_NSS3_MCS0 = 0x120, + RTW89_HW_RATE_VHT_NSS3_MCS1 = 0x121, + RTW89_HW_RATE_VHT_NSS3_MCS2 = 0x122, + RTW89_HW_RATE_VHT_NSS3_MCS3 = 0x123, + RTW89_HW_RATE_VHT_NSS3_MCS4 = 0x124, + RTW89_HW_RATE_VHT_NSS3_MCS5 = 0x125, + RTW89_HW_RATE_VHT_NSS3_MCS6 = 0x126, + RTW89_HW_RATE_VHT_NSS3_MCS7 = 0x127, + RTW89_HW_RATE_VHT_NSS3_MCS8 = 0x128, + RTW89_HW_RATE_VHT_NSS3_MCS9 = 0x129, + RTW89_HW_RATE_VHT_NSS4_MCS0 = 0x130, + RTW89_HW_RATE_VHT_NSS4_MCS1 = 0x131, + RTW89_HW_RATE_VHT_NSS4_MCS2 = 0x132, + RTW89_HW_RATE_VHT_NSS4_MCS3 = 0x133, + RTW89_HW_RATE_VHT_NSS4_MCS4 = 0x134, + RTW89_HW_RATE_VHT_NSS4_MCS5 = 0x135, + RTW89_HW_RATE_VHT_NSS4_MCS6 = 0x136, + RTW89_HW_RATE_VHT_NSS4_MCS7 = 0x137, + RTW89_HW_RATE_VHT_NSS4_MCS8 = 0x138, + RTW89_HW_RATE_VHT_NSS4_MCS9 = 0x139, + RTW89_HW_RATE_HE_NSS1_MCS0 = 0x180, + RTW89_HW_RATE_HE_NSS1_MCS1 = 0x181, + RTW89_HW_RATE_HE_NSS1_MCS2 = 0x182, + RTW89_HW_RATE_HE_NSS1_MCS3 = 0x183, + RTW89_HW_RATE_HE_NSS1_MCS4 = 0x184, + RTW89_HW_RATE_HE_NSS1_MCS5 = 0x185, + RTW89_HW_RATE_HE_NSS1_MCS6 = 0x186, + RTW89_HW_RATE_HE_NSS1_MCS7 = 0x187, + RTW89_HW_RATE_HE_NSS1_MCS8 = 0x188, + RTW89_HW_RATE_HE_NSS1_MCS9 = 0x189, + RTW89_HW_RATE_HE_NSS1_MCS10 = 0x18A, + RTW89_HW_RATE_HE_NSS1_MCS11 = 0x18B, + RTW89_HW_RATE_HE_NSS2_MCS0 = 0x190, + RTW89_HW_RATE_HE_NSS2_MCS1 = 0x191, + RTW89_HW_RATE_HE_NSS2_MCS2 = 0x192, + RTW89_HW_RATE_HE_NSS2_MCS3 = 0x193, + RTW89_HW_RATE_HE_NSS2_MCS4 = 0x194, + RTW89_HW_RATE_HE_NSS2_MCS5 = 0x195, + RTW89_HW_RATE_HE_NSS2_MCS6 = 0x196, + RTW89_HW_RATE_HE_NSS2_MCS7 = 0x197, + RTW89_HW_RATE_HE_NSS2_MCS8 = 0x198, + RTW89_HW_RATE_HE_NSS2_MCS9 = 0x199, + RTW89_HW_RATE_HE_NSS2_MCS10 = 0x19A, + RTW89_HW_RATE_HE_NSS2_MCS11 = 0x19B, + RTW89_HW_RATE_HE_NSS3_MCS0 = 0x1A0, + RTW89_HW_RATE_HE_NSS3_MCS1 = 0x1A1, + RTW89_HW_RATE_HE_NSS3_MCS2 = 0x1A2, + RTW89_HW_RATE_HE_NSS3_MCS3 = 0x1A3, + RTW89_HW_RATE_HE_NSS3_MCS4 = 0x1A4, + RTW89_HW_RATE_HE_NSS3_MCS5 = 0x1A5, + RTW89_HW_RATE_HE_NSS3_MCS6 = 0x1A6, + RTW89_HW_RATE_HE_NSS3_MCS7 = 0x1A7, + RTW89_HW_RATE_HE_NSS3_MCS8 = 0x1A8, + RTW89_HW_RATE_HE_NSS3_MCS9 = 0x1A9, + RTW89_HW_RATE_HE_NSS3_MCS10 = 0x1AA, + RTW89_HW_RATE_HE_NSS3_MCS11 = 0x1AB, + RTW89_HW_RATE_HE_NSS4_MCS0 = 0x1B0, + RTW89_HW_RATE_HE_NSS4_MCS1 = 0x1B1, + RTW89_HW_RATE_HE_NSS4_MCS2 = 0x1B2, + RTW89_HW_RATE_HE_NSS4_MCS3 = 0x1B3, + RTW89_HW_RATE_HE_NSS4_MCS4 = 0x1B4, + RTW89_HW_RATE_HE_NSS4_MCS5 = 0x1B5, + RTW89_HW_RATE_HE_NSS4_MCS6 = 0x1B6, + RTW89_HW_RATE_HE_NSS4_MCS7 = 0x1B7, + RTW89_HW_RATE_HE_NSS4_MCS8 = 0x1B8, + RTW89_HW_RATE_HE_NSS4_MCS9 = 0x1B9, + RTW89_HW_RATE_HE_NSS4_MCS10 = 0x1BA, + RTW89_HW_RATE_HE_NSS4_MCS11 = 0x1BB, + RTW89_HW_RATE_NR, + + RTW89_HW_RATE_MASK_MOD = GENMASK(8, 7), + 
RTW89_HW_RATE_MASK_VAL = GENMASK(6, 0), +}; + +/* 2G channels, + * 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define RTW89_2G_CH_NUM 14 + +/* 5G channels, + * 36, 38, 40, 42, 44, 46, 48, 50, + * 52, 54, 56, 58, 60, 62, 64, + * 100, 102, 104, 106, 108, 110, 112, 114, + * 116, 118, 120, 122, 124, 126, 128, 130, + * 132, 134, 136, 138, 140, 142, 144, + * 149, 151, 153, 155, 157, 159, 161, 163, + * 165, 167, 169, 171, 173, 175, 177 + */ +#define RTW89_5G_CH_NUM 53 + +enum rtw89_rate_section { + RTW89_RS_CCK, + RTW89_RS_OFDM, + RTW89_RS_MCS, /* for HT/VHT/HE */ + RTW89_RS_HEDCM, + RTW89_RS_OFFSET, + RTW89_RS_MAX, + RTW89_RS_LMT_NUM = RTW89_RS_MCS + 1, +}; + +enum rtw89_rate_max { + RTW89_RATE_CCK_MAX = 4, + RTW89_RATE_OFDM_MAX = 8, + RTW89_RATE_MCS_MAX = 12, + RTW89_RATE_HEDCM_MAX = 4, /* for HEDCM MCS0/1/3/4 */ + RTW89_RATE_OFFSET_MAX = 5, /* for HE(HEDCM)/VHT/HT/OFDM/CCK offset */ +}; + +enum rtw89_nss { + RTW89_NSS_1 = 0, + RTW89_NSS_2 = 1, + /* HE DCM only support 1ss and 2ss */ + RTW89_NSS_HEDCM_MAX = RTW89_NSS_2 + 1, + RTW89_NSS_3 = 2, + RTW89_NSS_4 = 3, + RTW89_NSS_MAX, +}; + +enum rtw89_ntx { + RTW89_1TX = 0, + RTW89_2TX = 1, + RTW89_NTX_NUM, +}; + +enum rtw89_beamforming_type { + RTW89_NONBF = 0, + RTW89_BF = 1, + RTW89_BF_NUM, +}; + +enum rtw89_regulation_type { + RTW89_WW = 0, + RTW89_ETSI = 1, + RTW89_FCC = 2, + RTW89_MKK = 3, + RTW89_NA = 4, + RTW89_IC = 5, + RTW89_KCC = 6, + RTW89_NCC = 7, + RTW89_CHILE = 8, + RTW89_ACMA = 9, + RTW89_MEXICO = 10, + RTW89_UKRAINE = 11, + RTW89_CN = 12, + RTW89_REGD_NUM, +}; + +extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX]; +extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX]; + +struct rtw89_txpwr_byrate { + s8 cck[RTW89_RATE_CCK_MAX]; + s8 ofdm[RTW89_RATE_OFDM_MAX]; + s8 mcs[RTW89_NSS_MAX][RTW89_RATE_MCS_MAX]; + s8 hedcm[RTW89_NSS_HEDCM_MAX][RTW89_RATE_HEDCM_MAX]; + s8 offset[RTW89_RATE_OFFSET_MAX]; +}; + +enum rtw89_bandwidth_section_num { + RTW89_BW20_SEC_NUM = 8, + RTW89_BW40_SEC_NUM = 4, + RTW89_BW80_SEC_NUM = 2, +}; + +struct rtw89_txpwr_limit { + s8 cck_20m[RTW89_BF_NUM]; + s8 cck_40m[RTW89_BF_NUM]; + s8 ofdm[RTW89_BF_NUM]; + s8 mcs_20m[RTW89_BW20_SEC_NUM][RTW89_BF_NUM]; + s8 mcs_40m[RTW89_BW40_SEC_NUM][RTW89_BF_NUM]; + s8 mcs_80m[RTW89_BW80_SEC_NUM][RTW89_BF_NUM]; + s8 mcs_160m[RTW89_BF_NUM]; + s8 mcs_40m_0p5[RTW89_BF_NUM]; + s8 mcs_40m_2p5[RTW89_BF_NUM]; +}; + +#define RTW89_RU_SEC_NUM 8 + +struct rtw89_txpwr_limit_ru { + s8 ru26[RTW89_RU_SEC_NUM]; + s8 ru52[RTW89_RU_SEC_NUM]; + s8 ru106[RTW89_RU_SEC_NUM]; +}; + +struct rtw89_rate_desc { + enum rtw89_nss nss; + enum rtw89_rate_section rs; + u8 idx; +}; + +#define PHY_STS_HDR_LEN 8 +#define RF_PATH_MAX 4 +#define RTW89_MAX_PPDU_CNT 8 +struct rtw89_rx_phy_ppdu { + u8 *buf; + u32 len; + u8 rssi_avg; + s8 rssi[RF_PATH_MAX]; + u8 mac_id; + bool to_self; + bool valid; +}; + +enum rtw89_mac_idx { + RTW89_MAC_0 = 0, + RTW89_MAC_1 = 1, +}; + +enum rtw89_phy_idx { + RTW89_PHY_0 = 0, + RTW89_PHY_1 = 1, + RTW89_PHY_MAX +}; + +enum rtw89_rf_path { + RF_PATH_A = 0, + RF_PATH_B = 1, + RF_PATH_C = 2, + RF_PATH_D = 3, + RF_PATH_AB, + RF_PATH_AC, + RF_PATH_AD, + RF_PATH_BC, + RF_PATH_BD, + RF_PATH_CD, + RF_PATH_ABC, + RF_PATH_ABD, + RF_PATH_ACD, + RF_PATH_BCD, + RF_PATH_ABCD, +}; + +enum rtw89_rf_path_bit { + RF_A = BIT(0), + RF_B = BIT(1), + RF_C = BIT(2), + RF_D = BIT(3), + + RF_AB = (RF_A | RF_B), + RF_AC = (RF_A | RF_C), + RF_AD = (RF_A | RF_D), + RF_BC = (RF_B | RF_C), + RF_BD = (RF_B | RF_D), + RF_CD = (RF_C | RF_D), + + RF_ABC = (RF_A | RF_B | RF_C), + RF_ABD = (RF_A | RF_B | RF_D), + RF_ACD = 
(RF_A | RF_C | RF_D), + RF_BCD = (RF_B | RF_C | RF_D), + + RF_ABCD = (RF_A | RF_B | RF_C | RF_D), +}; + +enum rtw89_bandwidth { + RTW89_CHANNEL_WIDTH_20 = 0, + RTW89_CHANNEL_WIDTH_40 = 1, + RTW89_CHANNEL_WIDTH_80 = 2, + RTW89_CHANNEL_WIDTH_160 = 3, + RTW89_CHANNEL_WIDTH_80_80 = 4, + RTW89_CHANNEL_WIDTH_5 = 5, + RTW89_CHANNEL_WIDTH_10 = 6, +}; + +enum rtw89_ps_mode { + RTW89_PS_MODE_NONE = 0, + RTW89_PS_MODE_RFOFF = 1, + RTW89_PS_MODE_CLK_GATED = 2, + RTW89_PS_MODE_PWR_GATED = 3, +}; + +#define RTW89_MAX_CHANNEL_WIDTH RTW89_CHANNEL_WIDTH_80 +#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1) +#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1) +#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1) + +enum rtw89_ru_bandwidth { + RTW89_RU26 = 0, + RTW89_RU52 = 1, + RTW89_RU106 = 2, + RTW89_RU_NUM, +}; + +enum rtw89_sc_offset { + RTW89_SC_DONT_CARE = 0, + RTW89_SC_20_UPPER = 1, + RTW89_SC_20_LOWER = 2, + RTW89_SC_20_UPMOST = 3, + RTW89_SC_20_LOWEST = 4, + RTW89_SC_40_UPPER = 9, + RTW89_SC_40_LOWER = 10, +}; + +struct rtw89_channel_params { + u8 center_chan; + u8 primary_chan; + u8 bandwidth; + u8 pri_ch_idx; + u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1]; +}; + +struct rtw89_channel_help_params { + u16 tx_en; +}; + +struct rtw89_port_reg { + u32 port_cfg; + u32 tbtt_prohib; + u32 bcn_area; + u32 bcn_early; + u32 tbtt_early; + u32 tbtt_agg; + u32 bcn_space; + u32 bcn_forcetx; + u32 bcn_err_cnt; + u32 bcn_err_flag; + u32 dtim_ctrl; + u32 tbtt_shift; + u32 bcn_cnt_tmr; + u32 tsftr_l; + u32 tsftr_h; +}; + +struct rtw89_txwd_body { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; +} __packed; + +struct rtw89_txwd_info { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; +} __packed; + +struct rtw89_rx_desc_info { + u16 pkt_size; + u8 pkt_type; + u8 drv_info_size; + u8 shift; + u8 wl_hd_iv_len; + bool long_rxdesc; + bool bb_sel; + bool mac_info_valid; + u16 data_rate; + u8 gi_ltf; + u8 bw; + u32 free_run_cnt; + u8 user_id; + bool sr_en; + u8 ppdu_cnt; + u8 ppdu_type; + bool icv_err; + bool crc32_err; + bool hw_dec; + bool sw_dec; + bool addr1_match; + u8 frag; + u16 seq; + u8 frame_type; + u8 rx_pl_id; + bool addr_cam_valid; + u8 addr_cam_id; + u8 sec_cam_id; + u8 mac_id; + u16 offset; + bool ready; +}; + +struct rtw89_rxdesc_short { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; +} __packed; + +struct rtw89_rxdesc_long { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + +struct rtw89_tx_desc_info { + u16 pkt_size; + u8 wp_offset; + u8 qsel; + u8 ch_dma; + u8 hdr_llc_len; + bool is_bmc; + bool en_wd_info; + bool wd_page; + bool use_rate; + bool dis_data_fb; + bool tid_indicate; + bool agg_en; + bool bk; + u8 ampdu_density; + u8 ampdu_num; + bool sec_en; + u8 sec_type; + u8 sec_cam_idx; + u16 data_rate; + u16 data_retry_lowest_rate; + bool fw_dl; + u16 seq; + bool a_ctrl_bsr; +}; + +struct rtw89_core_tx_request { + enum rtw89_core_tx_type tx_type; + + struct sk_buff *skb; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct rtw89_tx_desc_info desc_info; +}; + +struct rtw89_txq { + struct list_head list; + unsigned long flags; + int wait_cnt; +}; + +struct rtw89_mac_ax_gnt { + u8 gnt_bt_sw_en; + u8 gnt_bt; + u8 gnt_wl_sw_en; + u8 gnt_wl; +}; + +#define RTW89_MAC_AX_COEX_GNT_NR 2 +struct rtw89_mac_ax_coex_gnt { + struct rtw89_mac_ax_gnt 
band[RTW89_MAC_AX_COEX_GNT_NR]; +}; + +enum rtw89_btc_ncnt { + BTC_NCNT_POWER_ON = 0x0, + BTC_NCNT_POWER_OFF, + BTC_NCNT_INIT_COEX, + BTC_NCNT_SCAN_START, + BTC_NCNT_SCAN_FINISH, + BTC_NCNT_SPECIAL_PACKET, + BTC_NCNT_SWITCH_BAND, + BTC_NCNT_RFK_TIMEOUT, + BTC_NCNT_SHOW_COEX_INFO, + BTC_NCNT_ROLE_INFO, + BTC_NCNT_CONTROL, + BTC_NCNT_RADIO_STATE, + BTC_NCNT_CUSTOMERIZE, + BTC_NCNT_WL_RFK, + BTC_NCNT_WL_STA, + BTC_NCNT_FWINFO, + BTC_NCNT_TIMER, + BTC_NCNT_NUM +}; + +enum rtw89_btc_btinfo { + BTC_BTINFO_L0 = 0, + BTC_BTINFO_L1, + BTC_BTINFO_L2, + BTC_BTINFO_L3, + BTC_BTINFO_H0, + BTC_BTINFO_H1, + BTC_BTINFO_H2, + BTC_BTINFO_H3, + BTC_BTINFO_MAX +}; + +enum rtw89_btc_dcnt { + BTC_DCNT_RUN = 0x0, + BTC_DCNT_CX_RUNINFO, + BTC_DCNT_RPT, + BTC_DCNT_RPT_FREEZE, + BTC_DCNT_CYCLE, + BTC_DCNT_CYCLE_FREEZE, + BTC_DCNT_W1, + BTC_DCNT_W1_FREEZE, + BTC_DCNT_B1, + BTC_DCNT_B1_FREEZE, + BTC_DCNT_TDMA_NONSYNC, + BTC_DCNT_SLOT_NONSYNC, + BTC_DCNT_BTCNT_FREEZE, + BTC_DCNT_WL_SLOT_DRIFT, + BTC_DCNT_WL_STA_LAST, + BTC_DCNT_NUM, +}; + +enum rtw89_btc_wl_state_cnt { + BTC_WCNT_SCANAP = 0x0, + BTC_WCNT_DHCP, + BTC_WCNT_EAPOL, + BTC_WCNT_ARP, + BTC_WCNT_SCBDUPDATE, + BTC_WCNT_RFK_REQ, + BTC_WCNT_RFK_GO, + BTC_WCNT_RFK_REJECT, + BTC_WCNT_RFK_TIMEOUT, + BTC_WCNT_CH_UPDATE, + BTC_WCNT_NUM +}; + +enum rtw89_btc_bt_state_cnt { + BTC_BCNT_RETRY = 0x0, + BTC_BCNT_REINIT, + BTC_BCNT_REENABLE, + BTC_BCNT_SCBDREAD, + BTC_BCNT_RELINK, + BTC_BCNT_IGNOWL, + BTC_BCNT_INQPAG, + BTC_BCNT_INQ, + BTC_BCNT_PAGE, + BTC_BCNT_ROLESW, + BTC_BCNT_AFH, + BTC_BCNT_INFOUPDATE, + BTC_BCNT_INFOSAME, + BTC_BCNT_SCBDUPDATE, + BTC_BCNT_HIPRI_TX, + BTC_BCNT_HIPRI_RX, + BTC_BCNT_LOPRI_TX, + BTC_BCNT_LOPRI_RX, + BTC_BCNT_RATECHG, + BTC_BCNT_NUM +}; + +enum rtw89_btc_bt_profile { + BTC_BT_NOPROFILE = 0, + BTC_BT_HFP = BIT(0), + BTC_BT_HID = BIT(1), + BTC_BT_A2DP = BIT(2), + BTC_BT_PAN = BIT(3), + BTC_PROFILE_MAX = 4, +}; + +struct rtw89_btc_ant_info { + u8 type; /* shared, dedicated */ + u8 num; + u8 isolation; + + u8 single_pos: 1;/* Single antenna at S0 or S1 */ + u8 diversity: 1; +}; + +enum rtw89_tfc_dir { + RTW89_TFC_UL, + RTW89_TFC_DL, +}; + +struct rtw89_btc_wl_smap { + u32 busy: 1; + u32 scan: 1; + u32 connecting: 1; + u32 roaming: 1; + u32 _4way: 1; + u32 rf_off: 1; + u32 lps: 1; + u32 ips: 1; + u32 init_ok: 1; + u32 traffic_dir : 2; + u32 rf_off_pre: 1; + u32 lps_pre: 1; +}; + +enum rtw89_tfc_lv { + RTW89_TFC_IDLE, + RTW89_TFC_ULTRA_LOW, + RTW89_TFC_LOW, + RTW89_TFC_MID, + RTW89_TFC_HIGH, +}; + +#define RTW89_TP_SHIFT 18 /* bytes/2s --> Mbps */ +DECLARE_EWMA(tp, 10, 2); + +struct rtw89_traffic_stats { + /* units in bytes */ + u64 tx_unicast; + u64 rx_unicast; + u32 tx_avg_len; + u32 rx_avg_len; + + /* count for packets */ + u64 tx_cnt; + u64 rx_cnt; + + /* units in Mbps */ + u32 tx_throughput; + u32 rx_throughput; + u32 tx_throughput_raw; + u32 rx_throughput_raw; + enum rtw89_tfc_lv tx_tfc_lv; + enum rtw89_tfc_lv rx_tfc_lv; + struct ewma_tp tx_ewma_tp; + struct ewma_tp rx_ewma_tp; + + u16 tx_rate; + u16 rx_rate; +}; + +struct rtw89_btc_statistic { + u8 rssi; /* 0%~110% (dBm = rssi -110) */ + struct rtw89_traffic_stats traffic; +}; + +#define BTC_WL_RSSI_THMAX 4 + +struct rtw89_btc_wl_link_info { + struct rtw89_btc_statistic stat; + enum rtw89_tfc_dir dir; + u8 rssi_state[BTC_WL_RSSI_THMAX]; + u8 mac_addr[ETH_ALEN]; + u8 busy; + u8 ch; + u8 bw; + u8 band; + u8 role; + u8 pid; + u8 phy; + u8 dtim_period; + u8 mode; + + u8 mac_id; + u8 tx_retry; + + u32 bcn_period; + u32 busy_t; + u32 tx_time; + u32 client_cnt; + u32 rx_rate_drop_cnt; + + u32 
active: 1; + u32 noa: 1; + u32 client_ps: 1; + u32 connected: 2; +}; + +union rtw89_btc_wl_state_map { + u32 val; + struct rtw89_btc_wl_smap map; +}; + +struct rtw89_btc_bt_hfp_desc { + u32 exist: 1; + u32 type: 2; + u32 rsvd: 29; +}; + +struct rtw89_btc_bt_hid_desc { + u32 exist: 1; + u32 slot_info: 2; + u32 pair_cnt: 2; + u32 type: 8; + u32 rsvd: 19; +}; + +struct rtw89_btc_bt_a2dp_desc { + u8 exist: 1; + u8 exist_last: 1; + u8 play_latency: 1; + u8 type: 3; + u8 active: 1; + u8 sink: 1; + + u8 bitpool; + u16 vendor_id; + u32 device_name; + u32 flush_time; +}; + +struct rtw89_btc_bt_pan_desc { + u32 exist: 1; + u32 type: 1; + u32 active: 1; + u32 rsvd: 29; +}; + +struct rtw89_btc_bt_rfk_info { + u32 run: 1; + u32 req: 1; + u32 timeout: 1; + u32 rsvd: 29; +}; + +union rtw89_btc_bt_rfk_info_map { + u32 val; + struct rtw89_btc_bt_rfk_info map; +}; + +struct rtw89_btc_bt_ver_info { + u32 fw_coex; /* match with which coex_ver */ + u32 fw; +}; + +struct rtw89_btc_bool_sta_chg { + u32 now: 1; + u32 last: 1; + u32 remain: 1; + u32 srvd: 29; +}; + +struct rtw89_btc_u8_sta_chg { + u8 now; + u8 last; + u8 remain; + u8 rsvd; +}; + +struct rtw89_btc_wl_scan_info { + u8 band[RTW89_PHY_MAX]; + u8 phy_map; + u8 rsvd; +}; + +struct rtw89_btc_wl_dbcc_info { + u8 op_band[RTW89_PHY_MAX]; /* op band in each phy */ + u8 scan_band[RTW89_PHY_MAX]; /* scan band in each phy */ + u8 real_band[RTW89_PHY_MAX]; + u8 role[RTW89_PHY_MAX]; /* role in each phy */ +}; + +struct rtw89_btc_wl_active_role { + u8 connected: 1; + u8 pid: 3; + u8 phy: 1; + u8 noa: 1; + u8 band: 2; + + u8 client_ps: 1; + u8 bw: 7; + + u8 role; + u8 ch; + + u16 tx_lvl; + u16 rx_lvl; + u16 tx_rate; + u16 rx_rate; +}; + +struct rtw89_btc_wl_role_info_bpos { + u16 none: 1; + u16 station: 1; + u16 ap: 1; + u16 vap: 1; + u16 adhoc: 1; + u16 adhoc_master: 1; + u16 mesh: 1; + u16 moniter: 1; + u16 p2p_device: 1; + u16 p2p_gc: 1; + u16 p2p_go: 1; + u16 nan: 1; +}; + +union rtw89_btc_wl_role_info_map { + u16 val; + struct rtw89_btc_wl_role_info_bpos role; +}; + +struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */ + u8 connect_cnt; + u8 link_mode; + union rtw89_btc_wl_role_info_map role_map; + struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM]; +}; + +struct rtw89_btc_wl_ver_info { + u32 fw_coex; /* match with which coex_ver */ + u32 fw; + u32 mac; + u32 bb; + u32 rf; +}; + +struct rtw89_btc_wl_afh_info { + u8 en; + u8 ch; + u8 bw; + u8 rsvd; +} __packed; + +struct rtw89_btc_wl_rfk_info { + u32 state: 2; + u32 path_map: 4; + u32 phy_map: 2; + u32 band: 2; + u32 type: 8; + u32 rsvd: 14; +}; + +struct rtw89_btc_bt_smap { + u32 connect: 1; + u32 ble_connect: 1; + u32 acl_busy: 1; + u32 sco_busy: 1; + u32 mesh_busy: 1; + u32 inq_pag: 1; +}; + +union rtw89_btc_bt_state_map { + u32 val; + struct rtw89_btc_bt_smap map; +}; + +#define BTC_BT_RSSI_THMAX 4 +#define BTC_BT_AFH_GROUP 12 + +struct rtw89_btc_bt_link_info { + struct rtw89_btc_u8_sta_chg profile_cnt; + struct rtw89_btc_bool_sta_chg multi_link; + struct rtw89_btc_bool_sta_chg relink; + struct rtw89_btc_bt_hfp_desc hfp_desc; + struct rtw89_btc_bt_hid_desc hid_desc; + struct rtw89_btc_bt_a2dp_desc a2dp_desc; + struct rtw89_btc_bt_pan_desc pan_desc; + union rtw89_btc_bt_state_map status; + + u8 sut_pwr_level[BTC_PROFILE_MAX]; + u8 golden_rx_shift[BTC_PROFILE_MAX]; + u8 rssi_state[BTC_BT_RSSI_THMAX]; + u8 afh_map[BTC_BT_AFH_GROUP]; + + u32 role_sw: 1; + u32 slave_role: 1; + u32 afh_update: 1; + u32 cqddr: 1; + u32 rssi: 8; + u32 tx_3m: 1; + u32 rsvd: 19; +}; + +struct 
rtw89_btc_3rdcx_info { + u8 type; /* 0: none, 1:zigbee, 2:LTE */ + u8 hw_coex; + u16 rsvd; +}; + +struct rtw89_btc_dm_emap { + u32 init: 1; + u32 pta_owner: 1; + u32 wl_rfk_timeout: 1; + u32 bt_rfk_timeout: 1; + + u32 wl_fw_hang: 1; + u32 offload_mismatch: 1; + u32 cycle_hang: 1; + u32 w1_hang: 1; + + u32 b1_hang: 1; + u32 tdma_no_sync: 1; + u32 wl_slot_drift: 1; +}; + +union rtw89_btc_dm_error_map { + u32 val; + struct rtw89_btc_dm_emap map; +}; + +struct rtw89_btc_rf_para { + u32 tx_pwr_freerun; + u32 rx_gain_freerun; + u32 tx_pwr_perpkt; + u32 rx_gain_perpkt; +}; + +struct rtw89_btc_wl_info { + struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM]; + struct rtw89_btc_wl_rfk_info rfk_info; + struct rtw89_btc_wl_ver_info ver_info; + struct rtw89_btc_wl_afh_info afh_info; + struct rtw89_btc_wl_role_info role_info; + struct rtw89_btc_wl_scan_info scan_info; + struct rtw89_btc_wl_dbcc_info dbcc_info; + struct rtw89_btc_rf_para rf_para; + union rtw89_btc_wl_state_map status; + + u8 port_id[RTW89_WIFI_ROLE_MLME_MAX]; + u8 rssi_level; + + u32 scbd; +}; + +struct rtw89_btc_module { + struct rtw89_btc_ant_info ant; + u8 rfe_type; + u8 cv; + + u8 bt_solo: 1; + u8 bt_pos: 1; + u8 switch_type: 1; + + u8 rsvd; +}; + +#define RTW89_BTC_DM_MAXSTEP 30 +#define RTW89_BTC_DM_CNT_MAX (RTW89_BTC_DM_MAXSTEP * 8) + +struct rtw89_btc_dm_step { + u16 step[RTW89_BTC_DM_MAXSTEP]; + u8 step_pos; + bool step_ov; +}; + +struct rtw89_btc_init_info { + struct rtw89_btc_module module; + u8 wl_guard_ch; + + u8 wl_only: 1; + u8 wl_init_ok: 1; + u8 dbcc_en: 1; + u8 cx_other: 1; + u8 bt_only: 1; + + u16 rsvd; +}; + +struct rtw89_btc_wl_tx_limit_para { + u16 enable; + u32 tx_time; /* unit: us */ + u16 tx_retry; +}; + +struct rtw89_btc_bt_scan_info { + u16 win; + u16 intvl; + u32 enable: 1; + u32 interlace: 1; + u32 rsvd: 30; +}; + +enum rtw89_btc_bt_scan_type { + BTC_SCAN_INQ = 0, + BTC_SCAN_PAGE, + BTC_SCAN_BLE, + BTC_SCAN_INIT, + BTC_SCAN_TV, + BTC_SCAN_ADV, + BTC_SCAN_MAX1, +}; + +struct rtw89_btc_bt_info { + struct rtw89_btc_bt_link_info link_info; + struct rtw89_btc_bt_scan_info scan_info[BTC_SCAN_MAX1]; + struct rtw89_btc_bt_ver_info ver_info; + struct rtw89_btc_bool_sta_chg enable; + struct rtw89_btc_bool_sta_chg inq_pag; + struct rtw89_btc_rf_para rf_para; + union rtw89_btc_bt_rfk_info_map rfk_info; + + u8 raw_info[BTC_BTINFO_MAX]; /* raw bt info from mailbox */ + + u32 scbd; + u32 feature; + + u32 mbx_avl: 1; + u32 whql_test: 1; + u32 igno_wl: 1; + u32 reinit: 1; + u32 ble_scan_en: 1; + u32 btg_type: 1; + u32 inq: 1; + u32 pag: 1; + u32 run_patch_code: 1; + u32 hi_lna_rx: 1; + u32 rsvd: 22; +}; + +struct rtw89_btc_cx { + struct rtw89_btc_wl_info wl; + struct rtw89_btc_bt_info bt; + struct rtw89_btc_3rdcx_info other; + u32 state_map; + u32 cnt_bt[BTC_BCNT_NUM]; + u32 cnt_wl[BTC_WCNT_NUM]; +}; + +struct rtw89_btc_fbtc_tdma { + u8 type; + u8 rxflctrl; + u8 txpause; + u8 wtgle_n; + u8 leak_n; + u8 ext_ctrl; + u8 rsvd0; + u8 rsvd1; +} __packed; + +#define CXMREG_MAX 30 +#define FCXMAX_STEP 255 /*STEP trace record cnt, Max:65535, default:255*/ +#define BTCRPT_VER 1 +#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */ + +enum rtw89_btc_bt_rfk_counter { + BTC_BCNT_RFK_REQ = 0, + BTC_BCNT_RFK_GO = 1, + BTC_BCNT_RFK_REJECT = 2, + BTC_BCNT_RFK_FAIL = 3, + BTC_BCNT_RFK_TIMEOUT = 4, + BTC_BCNT_RFK_MAX +}; + +struct rtw89_btc_fbtc_rpt_ctrl { + u16 fver; + u16 rpt_cnt; /* tmr counters */ + u32 wl_fw_coex_ver; /* match which driver's coex version */ + u32 wl_fw_cx_offload; + u32 wl_fw_ver; + u32 rpt_enable; 
+ u32 rpt_para; /* ms */ + u32 mb_send_fail_cnt; /* fw send mailbox fail counter */ + u32 mb_send_ok_cnt; /* fw send mailbox ok counter */ + u32 mb_recv_cnt; /* fw recv mailbox counter */ + u32 mb_a2dp_empty_cnt; /* a2dp empty count */ + u32 mb_a2dp_flct_cnt; /* a2dp empty flow control counter */ + u32 mb_a2dp_full_cnt; /* a2dp full counter */ + u32 bt_rfk_cnt[BTC_BCNT_RFK_MAX]; + u32 c2h_cnt; /* fw send c2h counter */ + u32 h2c_cnt; /* fw recv h2c counter */ +} __packed; + +enum rtw89_fbtc_ext_ctrl_type { + CXECTL_OFF = 0x0, /* tdma off */ + CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */ + CXECTL_EXT = 0x2, + CXECTL_MAX +}; + +union rtw89_btc_fbtc_rxflct { + u8 val; + u8 type: 3; + u8 tgln_n: 5; +}; + +enum rtw89_btc_cxst_state { + CXST_OFF = 0x0, + CXST_B2W = 0x1, + CXST_W1 = 0x2, + CXST_W2 = 0x3, + CXST_W2B = 0x4, + CXST_B1 = 0x5, + CXST_B2 = 0x6, + CXST_B3 = 0x7, + CXST_B4 = 0x8, + CXST_LK = 0x9, + CXST_BLK = 0xa, + CXST_E2G = 0xb, + CXST_E5G = 0xc, + CXST_EBT = 0xd, + CXST_ENULL = 0xe, + CXST_WLK = 0xf, + CXST_W1FDD = 0x10, + CXST_B1FDD = 0x11, + CXST_MAX = 0x12, +}; + +enum { + CXBCN_ALL = 0x0, + CXBCN_ALL_OK, + CXBCN_BT_SLOT, + CXBCN_BT_OK, + CXBCN_MAX +}; + +enum btc_slot_type { + SLOT_MIX = 0x0, /* accept BT Lower-Pri Tx/Rx request 0x778 = 1 */ + SLOT_ISO = 0x1, /* do not accept BT Lower-Pri Tx/Rx request 0x778 = d */ + CXSTYPE_NUM, +}; + +enum { /* TIME */ + CXT_BT = 0x0, + CXT_WL = 0x1, + CXT_MAX +}; + +enum { /* TIME-A2DP */ + CXT_FLCTRL_OFF = 0x0, + CXT_FLCTRL_ON = 0x1, + CXT_FLCTRL_MAX +}; + +enum { /* STEP TYPE */ + CXSTEP_NONE = 0x0, + CXSTEP_EVNT = 0x1, + CXSTEP_SLOT = 0x2, + CXSTEP_MAX, +}; + +#define FCXGPIODBG_VER 1 +#define BTC_DBG_MAX1 32 +struct rtw89_btc_fbtc_gpio_dbg { + u8 fver; + u8 rsvd; + u16 rsvd2; + u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enabled */ + u32 pre_state; /* the debug signal is 1 or 0 */ + u8 gpio_map[BTC_DBG_MAX1]; /* the debug signals to GPIO position */ +} __packed; + +#define FCXMREG_VER 1 +struct rtw89_btc_fbtc_mreg_val { + u8 fver; + u8 reg_num; + __le16 rsvd; + __le32 mreg_val[CXMREG_MAX]; +} __packed; + +#define RTW89_DEF_FBTC_MREG(__type, __bytes, __offset) \ + { .type = cpu_to_le16(__type), .bytes = cpu_to_le16(__bytes), \ + .offset = cpu_to_le32(__offset), } + +struct rtw89_btc_fbtc_mreg { + __le16 type; + __le16 bytes; + __le32 offset; +} __packed; + +struct rtw89_btc_fbtc_slot { + __le16 dur; + __le32 cxtbl; + __le16 cxtype; +} __packed; + +#define FCXSLOTS_VER 1 +struct rtw89_btc_fbtc_slots { + u8 fver; + u8 tbl_num; + __le16 rsvd; + __le32 update_map; + struct rtw89_btc_fbtc_slot slot[CXST_MAX]; +} __packed; + +#define FCXSTEP_VER 2 +struct rtw89_btc_fbtc_step { + u8 type; + u8 val; + __le16 difft; +} __packed; + +struct rtw89_btc_fbtc_steps { + u8 fver; + u8 rsvd; + __le16 cnt; + __le16 pos_old; + __le16 pos_new; + struct rtw89_btc_fbtc_step step[FCXMAX_STEP]; +} __packed; + +#define FCXCYSTA_VER 2 +struct rtw89_btc_fbtc_cysta { /* statistics for cycles */ + u8 fver; + u8 rsvd; + __le16 cycles; /* total cycle number */ + __le16 cycles_a2dp[CXT_FLCTRL_MAX]; + __le16 a2dpept; /* a2dp empty cnt */ + __le16 a2dpeptto; /* a2dp empty timeout cnt */ + __le16 tavg_cycle[CXT_MAX]; /* avg wl/bt cycle time */ + __le16 tmax_cycle[CXT_MAX]; /* max wl/bt cycle time */ + __le16 tmaxdiff_cycle[CXT_MAX]; /* max wl-wl bt-bt cycle diff time */ + __le16 tavg_a2dp[CXT_FLCTRL_MAX]; /* avg a2dp PSTDMA/TDMA time */ + __le16 tmax_a2dp[CXT_FLCTRL_MAX]; /* max a2dp PSTDMA/TDMA time */ + __le16 tavg_a2dpept; /* avg a2dp empty time */ + 
__le16 tmax_a2dpept; /* max a2dp empty time */ + __le16 tavg_lk; /* avg leak-slot time */ + __le16 tmax_lk; /* max leak-slot time */ + __le32 slot_cnt[CXST_MAX]; /* slot count */ + __le32 bcn_cnt[CXBCN_MAX]; + __le32 leakrx_cnt; /* the rximr occur at leak slot */ + __le32 collision_cnt; /* counter for event/timer occur at same time */ + __le32 skip_cnt; + __le32 exception; + __le32 except_cnt; + __le16 tslot_cycle[BTC_CYCLE_SLOT_MAX]; +} __packed; + +#define FCXNULLSTA_VER 1 +struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */ + u8 fver; + u8 rsvd; + __le16 rsvd2; + __le32 max_t[2]; /* max_t for 0:null0/1:null1 */ + __le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */ + __le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */ +} __packed; + +#define FCX_BTVER_VER 1 +struct rtw89_btc_fbtc_btver { + u8 fver; + u8 rsvd; + __le16 rsvd2; + __le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */ + __le32 fw_ver; + __le32 feature; +} __packed; + +#define FCX_BTSCAN_VER 1 +struct rtw89_btc_fbtc_btscan { + u8 fver; + u8 rsvd; + __le16 rsvd2; + u8 scan[6]; +} __packed; + +#define FCX_BTAFH_VER 1 +struct rtw89_btc_fbtc_btafh { + u8 fver; + u8 rsvd; + __le16 rsvd2; + u8 afh_l[4]; /*bit0:2402, bit1: 2403.... bit31:2433 */ + u8 afh_m[4]; /*bit0:2434, bit1: 2435.... bit31:2465 */ + u8 afh_h[4]; /*bit0:2466, bit1:2467......bit14:2480 */ +} __packed; + +#define FCX_BTDEVINFO_VER 1 +struct rtw89_btc_fbtc_btdevinfo { + u8 fver; + u8 rsvd; + __le16 vendor_id; + __le32 dev_name; /* only 24 bits valid */ + __le32 flush_time; +} __packed; + +#define RTW89_BTC_WL_DEF_TX_PWR GENMASK(7, 0) +struct rtw89_btc_rf_trx_para { + u32 wl_tx_power; /* absolute Tx power (dBm), 0xff-> no BTC control */ + u32 wl_rx_gain; /* rx gain table index (TBD.) */ + u8 bt_tx_power; /* decrease Tx power (dB) */ + u8 bt_rx_gain; /* LNA constrain level */ +}; + +struct rtw89_btc_dm { + struct rtw89_btc_fbtc_slot slot[CXST_MAX]; + struct rtw89_btc_fbtc_slot slot_now[CXST_MAX]; + struct rtw89_btc_fbtc_tdma tdma; + struct rtw89_btc_fbtc_tdma tdma_now; + struct rtw89_mac_ax_coex_gnt gnt; + struct rtw89_btc_init_info init_info; /* pass to wl_fw if offload */ + struct rtw89_btc_rf_trx_para rf_trx_para; + struct rtw89_btc_wl_tx_limit_para wl_tx_limit; + struct rtw89_btc_dm_step dm_step; + union rtw89_btc_dm_error_map error; + u32 cnt_dm[BTC_DCNT_NUM]; + u32 cnt_notify[BTC_NCNT_NUM]; + + u32 update_slot_map; + u32 set_ant_path; + + u32 wl_only: 1; + u32 wl_fw_cx_offload: 1; + u32 freerun: 1; + u32 wl_ps_ctrl: 2; + u32 wl_mimo_ps: 1; + u32 leak_ap: 1; + u32 noisy_level: 3; + u32 coex_info_map: 8; + u32 bt_only: 1; + u32 wl_btg_rx: 1; + u32 trx_para_level: 8; + u32 wl_stb_chg: 1; + u32 rsvd: 3; + + u16 slot_dur[CXST_MAX]; + + u8 run_reason; + u8 run_action; +}; + +struct rtw89_btc_ctrl { + u32 manual: 1; + u32 igno_bt: 1; + u32 always_freerun: 1; + u32 trace_step: 16; + u32 rsvd: 12; +}; + +struct rtw89_btc_dbg { + /* cmd "rb" */ + bool rb_done; + u32 rb_val; +}; + +#define FCXTDMA_VER 1 + +enum rtw89_btc_btf_fw_event { + BTF_EVNT_RPT = 0, + BTF_EVNT_BT_INFO = 1, + BTF_EVNT_BT_SCBD = 2, + BTF_EVNT_BT_REG = 3, + BTF_EVNT_CX_RUNINFO = 4, + BTF_EVNT_BT_PSD = 5, + BTF_EVNT_BUF_OVERFLOW, + BTF_EVNT_C2H_LOOPBACK, + BTF_EVNT_MAX, +}; + +enum btf_fw_event_report { + BTC_RPT_TYPE_CTRL = 0x0, + BTC_RPT_TYPE_TDMA, + BTC_RPT_TYPE_SLOT, + BTC_RPT_TYPE_CYSTA, + BTC_RPT_TYPE_STEP, + BTC_RPT_TYPE_NULLSTA, + BTC_RPT_TYPE_MREG, + BTC_RPT_TYPE_GPIO_DBG, + BTC_RPT_TYPE_BT_VER, + BTC_RPT_TYPE_BT_SCAN, + BTC_RPT_TYPE_BT_AFH, + BTC_RPT_TYPE_BT_DEVICE, + 
BTC_RPT_TYPE_TEST, + BTC_RPT_TYPE_MAX = 31 +}; + +enum rtw_btc_btf_reg_type { + REG_MAC = 0x0, + REG_BB = 0x1, + REG_RF = 0x2, + REG_BT_RF = 0x3, + REG_BT_MODEM = 0x4, + REG_BT_BLUEWIZE = 0x5, + REG_BT_VENDOR = 0x6, + REG_BT_LE = 0x7, + REG_MAX_TYPE, +}; + +struct rtw89_btc_rpt_cmn_info { + u32 rx_cnt; + u32 rx_len; + u32 req_len; /* expected rsp len */ + u8 req_fver; /* expected rsp fver */ + u8 rsp_fver; /* fver from fw */ + u8 valid; +} __packed; + +struct rtw89_btc_report_ctrl_state { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_tdma { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_tdma finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_slots { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_slots finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_cysta { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_cysta finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_step { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_steps finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_nullsta { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_mreg { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_mreg_val finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_gpio_dbg { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_btver { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_btver finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_btscan { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_btscan finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_btafh { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_btafh finfo; /* info from fw */ +}; + +struct rtw89_btc_rpt_fbtc_btdev { + struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ + struct rtw89_btc_fbtc_btdevinfo finfo; /* info from fw */ +}; + +enum rtw89_btc_btfre_type { + BTFRE_INVALID_INPUT = 0x0, /* invalid input parameters */ + BTFRE_UNDEF_TYPE, + BTFRE_EXCEPTION, + BTFRE_MAX, +}; + +struct rtw89_btc_btf_fwinfo { + u32 cnt_c2h; + u32 cnt_h2c; + u32 cnt_h2c_fail; + u32 event[BTF_EVNT_MAX]; + + u32 err[BTFRE_MAX]; + u32 len_mismch; + u32 fver_mismch; + u32 rpt_en_map; + + struct rtw89_btc_report_ctrl_state rpt_ctrl; + struct rtw89_btc_rpt_fbtc_tdma rpt_fbtc_tdma; + struct rtw89_btc_rpt_fbtc_slots rpt_fbtc_slots; + struct rtw89_btc_rpt_fbtc_cysta rpt_fbtc_cysta; + struct rtw89_btc_rpt_fbtc_step rpt_fbtc_step; + struct rtw89_btc_rpt_fbtc_nullsta rpt_fbtc_nullsta; + struct rtw89_btc_rpt_fbtc_mreg rpt_fbtc_mregval; + struct rtw89_btc_rpt_fbtc_gpio_dbg rpt_fbtc_gpio_dbg; + struct rtw89_btc_rpt_fbtc_btver rpt_fbtc_btver; + struct rtw89_btc_rpt_fbtc_btscan rpt_fbtc_btscan; + struct rtw89_btc_rpt_fbtc_btafh rpt_fbtc_btafh; + struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev; +}; + +#define RTW89_BTC_POLICY_MAXLEN 512 + +struct rtw89_btc { + struct rtw89_btc_cx cx; + struct rtw89_btc_dm dm; + struct rtw89_btc_ctrl ctrl; + 
struct rtw89_btc_module mdinfo; + struct rtw89_btc_btf_fwinfo fwinfo; + struct rtw89_btc_dbg dbg; + + struct work_struct eapol_notify_work; + struct work_struct arp_notify_work; + struct work_struct dhcp_notify_work; + struct work_struct icmp_notify_work; + + u32 bt_req_len; + + u8 policy[RTW89_BTC_POLICY_MAXLEN]; + u16 policy_len; + u16 policy_type; + bool bt_req_en; + bool update_policy_force; + bool lps; +}; + +enum rtw89_ra_mode { + RTW89_RA_MODE_CCK = BIT(0), + RTW89_RA_MODE_OFDM = BIT(1), + RTW89_RA_MODE_HT = BIT(2), + RTW89_RA_MODE_VHT = BIT(3), + RTW89_RA_MODE_HE = BIT(4), +}; + +enum rtw89_ra_report_mode { + RTW89_RA_RPT_MODE_LEGACY, + RTW89_RA_RPT_MODE_HT, + RTW89_RA_RPT_MODE_VHT, + RTW89_RA_RPT_MODE_HE, +}; + +enum rtw89_dig_noisy_level { + RTW89_DIG_NOISY_LEVEL0 = -1, + RTW89_DIG_NOISY_LEVEL1 = 0, + RTW89_DIG_NOISY_LEVEL2 = 1, + RTW89_DIG_NOISY_LEVEL3 = 2, + RTW89_DIG_NOISY_LEVEL_MAX = 3, +}; + +enum rtw89_gi_ltf { + RTW89_GILTF_LGI_4XHE32 = 0, + RTW89_GILTF_SGI_4XHE08 = 1, + RTW89_GILTF_2XHE16 = 2, + RTW89_GILTF_2XHE08 = 3, + RTW89_GILTF_1XHE16 = 4, + RTW89_GILTF_1XHE08 = 5, + RTW89_GILTF_MAX +}; + +enum rtw89_rx_frame_type { + RTW89_RX_TYPE_MGNT = 0, + RTW89_RX_TYPE_CTRL = 1, + RTW89_RX_TYPE_DATA = 2, + RTW89_RX_TYPE_RSVD = 3, +}; + +struct rtw89_ra_info { + u8 is_dis_ra:1; + /* Bit0 : CCK + * Bit1 : OFDM + * Bit2 : HT + * Bit3 : VHT + * Bit4 : HE + */ + u8 mode_ctrl:5; + u8 bw_cap:2; + u8 macid; + u8 dcm_cap:1; + u8 er_cap:1; + u8 init_rate_lv:2; + u8 upd_all:1; + u8 en_sgi:1; + u8 ldpc_cap:1; + u8 stbc_cap:1; + u8 ss_num:3; + u8 giltf:3; + u8 upd_bw_nss_mask:1; + u8 upd_mask:1; + u64 ra_mask; /* 63 bits ra_mask + 1 bit CSI ctrl */ + /* BFee CSI */ + u8 band_num; + u8 ra_csi_rate_en:1; + u8 fixed_csi_rate_en:1; + u8 cr_tbl_sel:1; + u8 rsvd2:5; + u8 csi_mcs_ss_idx; + u8 csi_mode:2; + u8 csi_gi_ltf:3; + u8 csi_bw:3; +}; + +#define RTW89_PPDU_MAX_USR 4 +#define RTW89_PPDU_MAC_INFO_USR_SIZE 4 +#define RTW89_PPDU_MAC_INFO_SIZE 8 +#define RTW89_PPDU_MAC_RX_CNT_SIZE 96 + +#define RTW89_MAX_RX_AGG_NUM 64 +#define RTW89_MAX_TX_AGG_NUM 128 + +struct rtw89_ampdu_params { + u16 agg_num; + bool amsdu; +}; + +struct rtw89_ra_report { + struct rate_info txrate; + u32 bit_rate; + u16 hw_rate; +}; + +DECLARE_EWMA(rssi, 10, 16); + +struct rtw89_sta { + u8 mac_id; + bool disassoc; + struct rtw89_vif *rtwvif; + struct rtw89_ra_info ra; + struct rtw89_ra_report ra_report; + int max_agg_wait; + u8 prev_rssi; + struct ewma_rssi avg_rssi; + struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS]; + struct ieee80211_rx_status rx_status; + u16 rx_hw_rate; + __le32 htc_template; + + bool use_cfg_mask; + struct cfg80211_bitrate_mask mask; + + bool cctl_tx_time; + u32 ampdu_max_time:4; + bool cctl_tx_retry_limit; + u32 data_tx_cnt_lmt:6; +}; + +#define RTW89_MAX_ADDR_CAM_NUM 128 +#define RTW89_MAX_BSSID_CAM_NUM 20 +#define RTW89_MAX_SEC_CAM_NUM 128 +#define RTW89_SEC_CAM_IN_ADDR_CAM 7 + +struct rtw89_addr_cam_entry { + u8 addr_cam_idx; + u8 offset; + u8 len; + u8 valid : 1; + u8 addr_mask : 6; + u8 wapi : 1; + u8 mask_sel : 2; + u8 bssid_cam_idx: 6; + u8 tma[ETH_ALEN]; + u8 sma[ETH_ALEN]; + + u8 sec_ent_mode; + DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM); + u8 sec_ent_keyid[RTW89_SEC_CAM_IN_ADDR_CAM]; + u8 sec_ent[RTW89_SEC_CAM_IN_ADDR_CAM]; + struct rtw89_sec_cam_entry *sec_entries[RTW89_SEC_CAM_IN_ADDR_CAM]; +}; + +struct rtw89_bssid_cam_entry { + u8 bssid[ETH_ALEN]; + u8 phy_idx; + u8 bssid_cam_idx; + u8 offset; + u8 len; + u8 valid : 1; + u8 num; +}; + +struct rtw89_sec_cam_entry { + 
u8 sec_cam_idx; + u8 offset; + u8 len; + u8 type : 4; + u8 ext_key : 1; + u8 spp_mode : 1; + /* 256 bits */ + u8 key[32]; +}; + +struct rtw89_efuse { + bool valid; + u8 xtal_cap; + u8 addr[ETH_ALEN]; + u8 rfe_type; + char country_code[2]; +}; + +struct rtw89_phy_rate_pattern { + u64 ra_mask; + u16 rate; + u8 ra_mode; + bool enable; +}; + +struct rtw89_vif { + struct list_head list; + u8 mac_id; + u8 port; + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + u8 phy_idx; + u8 mac_idx; + u8 net_type; + u8 wifi_role; + u8 self_role; + u8 wmm; + u8 bcn_hit_cond; + u8 hit_rule; + bool trigger; + bool lsig_txop; + u8 tgt_ind; + u8 frm_tgt_ind; + bool wowlan_pattern; + bool wowlan_uc; + bool wowlan_magic; + bool is_hesta; + bool last_a_ctrl; + union { + struct { + struct ieee80211_sta *ap; + } mgd; + struct { + struct list_head sta_list; + } ap; + }; + struct rtw89_addr_cam_entry addr_cam; + struct rtw89_bssid_cam_entry bssid_cam; + struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; + struct rtw89_traffic_stats stats; + struct rtw89_phy_rate_pattern rate_pattern; +}; + +enum rtw89_lv1_rcvy_step { + RTW89_LV1_RCVY_STEP_1, + RTW89_LV1_RCVY_STEP_2, +}; + +struct rtw89_hci_ops { + int (*tx_write)(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req); + void (*tx_kick_off)(struct rtw89_dev *rtwdev, u8 txch); + void (*flush_queues)(struct rtw89_dev *rtwdev, u32 queues, bool drop); + void (*reset)(struct rtw89_dev *rtwdev); + int (*start)(struct rtw89_dev *rtwdev); + void (*stop)(struct rtw89_dev *rtwdev); + void (*recalc_int_mit)(struct rtw89_dev *rtwdev); + + u8 (*read8)(struct rtw89_dev *rtwdev, u32 addr); + u16 (*read16)(struct rtw89_dev *rtwdev, u32 addr); + u32 (*read32)(struct rtw89_dev *rtwdev, u32 addr); + void (*write8)(struct rtw89_dev *rtwdev, u32 addr, u8 data); + void (*write16)(struct rtw89_dev *rtwdev, u32 addr, u16 data); + void (*write32)(struct rtw89_dev *rtwdev, u32 addr, u32 data); + + int (*mac_pre_init)(struct rtw89_dev *rtwdev); + int (*mac_post_init)(struct rtw89_dev *rtwdev); + int (*deinit)(struct rtw89_dev *rtwdev); + + u32 (*check_and_reclaim_tx_resource)(struct rtw89_dev *rtwdev, u8 txch); + int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step); + void (*dump_err_status)(struct rtw89_dev *rtwdev); + int (*napi_poll)(struct napi_struct *napi, int budget); +}; + +struct rtw89_hci_info { + const struct rtw89_hci_ops *ops; + enum rtw89_hci_type type; + u32 rpwm_addr; + u32 cpwm_addr; +}; + +struct rtw89_chip_ops { + void (*bb_reset)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx); + void (*bb_sethw)(struct rtw89_dev *rtwdev); + u32 (*read_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask); + bool (*write_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask, u32 data); + void (*set_channel)(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param); + void (*set_channel_help)(struct rtw89_dev *rtwdev, bool enter, + struct rtw89_channel_help_params *p); + int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map); + int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map); + void (*fem_setup)(struct rtw89_dev *rtwdev); + void (*rfk_init)(struct rtw89_dev *rtwdev); + void (*rfk_channel)(struct rtw89_dev *rtwdev); + void (*rfk_band_changed)(struct rtw89_dev *rtwdev); + void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start); + void (*rfk_track)(struct rtw89_dev *rtwdev); + void (*power_trim)(struct rtw89_dev *rtwdev); + void (*set_txpwr)(struct rtw89_dev *rtwdev); + void 
(*set_txpwr_ctrl)(struct rtw89_dev *rtwdev); + int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path); + void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg); + void (*query_ppdu)(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status); + void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en); + void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, + s16 pw_ofst, enum rtw89_mac_idx mac_idx); + + void (*btc_set_rfe)(struct rtw89_dev *rtwdev); + void (*btc_init_cfg)(struct rtw89_dev *rtwdev); + void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state); + void (*btc_set_wl_txpwr_ctrl)(struct rtw89_dev *rtwdev, u32 txpwr_val); + s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val); + void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev); + void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev); + void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state); +}; + +enum rtw89_dma_ch { + RTW89_DMA_ACH0 = 0, + RTW89_DMA_ACH1 = 1, + RTW89_DMA_ACH2 = 2, + RTW89_DMA_ACH3 = 3, + RTW89_DMA_ACH4 = 4, + RTW89_DMA_ACH5 = 5, + RTW89_DMA_ACH6 = 6, + RTW89_DMA_ACH7 = 7, + RTW89_DMA_B0MG = 8, + RTW89_DMA_B0HI = 9, + RTW89_DMA_B1MG = 10, + RTW89_DMA_B1HI = 11, + RTW89_DMA_H2C = 12, + RTW89_DMA_CH_NUM = 13 +}; + +enum rtw89_qta_mode { + RTW89_QTA_SCC, + RTW89_QTA_DLFW, + + /* keep last */ + RTW89_QTA_INVALID, +}; + +struct rtw89_hfc_ch_cfg { + u16 min; + u16 max; +#define grp_0 0 +#define grp_1 1 +#define grp_num 2 + u8 grp; +}; + +struct rtw89_hfc_ch_info { + u16 aval; + u16 used; +}; + +struct rtw89_hfc_pub_cfg { + u16 grp0; + u16 grp1; + u16 pub_max; + u16 wp_thrd; +}; + +struct rtw89_hfc_pub_info { + u16 g0_used; + u16 g1_used; + u16 g0_aval; + u16 g1_aval; + u16 pub_aval; + u16 wp_aval; +}; + +struct rtw89_hfc_prec_cfg { + u16 ch011_prec; + u16 h2c_prec; + u16 wp_ch07_prec; + u16 wp_ch811_prec; + u8 ch011_full_cond; + u8 h2c_full_cond; + u8 wp_ch07_full_cond; + u8 wp_ch811_full_cond; +}; + +struct rtw89_hfc_param { + bool en; + bool h2c_en; + u8 mode; + const struct rtw89_hfc_ch_cfg *ch_cfg; + struct rtw89_hfc_ch_info ch_info[RTW89_DMA_CH_NUM]; + struct rtw89_hfc_pub_cfg pub_cfg; + struct rtw89_hfc_pub_info pub_info; + struct rtw89_hfc_prec_cfg prec_cfg; +}; + +struct rtw89_hfc_param_ini { + const struct rtw89_hfc_ch_cfg *ch_cfg; + const struct rtw89_hfc_pub_cfg *pub_cfg; + const struct rtw89_hfc_prec_cfg *prec_cfg; + u8 mode; +}; + +struct rtw89_dle_size { + u16 pge_size; + u16 lnk_pge_num; + u16 unlnk_pge_num; +}; + +struct rtw89_wde_quota { + u16 hif; + u16 wcpu; + u16 pkt_in; + u16 cpu_io; +}; + +struct rtw89_ple_quota { + u16 cma0_tx; + u16 cma1_tx; + u16 c2h; + u16 h2c; + u16 wcpu; + u16 mpdu_proc; + u16 cma0_dma; + u16 cma1_dma; + u16 bb_rpt; + u16 wd_rel; + u16 cpu_io; +}; + +struct rtw89_dle_mem { + enum rtw89_qta_mode mode; + const struct rtw89_dle_size *wde_size; + const struct rtw89_dle_size *ple_size; + const struct rtw89_wde_quota *wde_min_qt; + const struct rtw89_wde_quota *wde_max_qt; + const struct rtw89_ple_quota *ple_min_qt; + const struct rtw89_ple_quota *ple_max_qt; +}; + +struct rtw89_reg_def { + u32 addr; + u32 mask; +}; + +struct rtw89_reg2_def { + u32 addr; + u32 data; +}; + +struct rtw89_reg3_def { + u32 addr; + u32 mask; + u32 data; +}; + +struct rtw89_reg5_def { + u8 flag; /* recognized by parsers */ + u8 path; + u32 addr; + u32 mask; + u32 data; +}; + +struct rtw89_phy_table { + const struct rtw89_reg2_def *regs; + 
u32 n_regs; + enum rtw89_rf_path rf_path; +}; + +struct rtw89_txpwr_table { + const void *data; + u32 size; + void (*load)(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl); +}; + +struct rtw89_chip_info { + enum rtw89_core_chip_id chip_id; + const struct rtw89_chip_ops *ops; + const char *fw_name; + u32 fifo_size; + u16 max_amsdu_limit; + bool dis_2g_40m_ul_ofdma; + const struct rtw89_hfc_param_ini *hfc_param_ini; + const struct rtw89_dle_mem *dle_mem; + u32 rf_base_addr[2]; + u8 rf_path_num; + u8 tx_nss; + u8 rx_nss; + u8 acam_num; + u8 bcam_num; + u8 scam_num; + + u8 sec_ctrl_efuse_size; + u32 physical_efuse_size; + u32 logical_efuse_size; + u32 limit_efuse_size; + u32 phycap_addr; + u32 phycap_size; + + const struct rtw89_pwr_cfg * const *pwr_on_seq; + const struct rtw89_pwr_cfg * const *pwr_off_seq; + const struct rtw89_phy_table *bb_table; + const struct rtw89_phy_table *rf_table[RF_PATH_MAX]; + const struct rtw89_phy_table *nctl_table; + const struct rtw89_txpwr_table *byr_table; + const struct rtw89_phy_dig_gain_table *dig_table; + const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; + const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; + const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; + const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; + + u8 txpwr_factor_rf; + u8 txpwr_factor_mac; + + u32 para_ver; + u32 wlcx_desired; + u8 btcx_desired; + u8 scbd; + u8 mailbox; + + u8 afh_guard_ch; + const u8 *wl_rssi_thres; + const u8 *bt_rssi_thres; + u8 rssi_tol; + + u8 mon_reg_num; + const struct rtw89_btc_fbtc_mreg *mon_reg; + u8 rf_para_ulink_num; + const struct rtw89_btc_rf_trx_para *rf_para_ulink; + u8 rf_para_dlink_num; + const struct rtw89_btc_rf_trx_para *rf_para_dlink; + u8 ps_mode_supported; +}; + +enum rtw89_hcifc_mode { + RTW89_HCIFC_POH = 0, + RTW89_HCIFC_STF = 1, + RTW89_HCIFC_SDIO = 2, + + /* keep last */ + RTW89_HCIFC_MODE_INVALID, +}; + +struct rtw89_dle_info { + enum rtw89_qta_mode qta_mode; + u16 wde_pg_size; + u16 ple_pg_size; + u16 c0_rx_qta; + u16 c1_rx_qta; +}; + +enum rtw89_host_rpr_mode { + RTW89_RPR_MODE_POH = 0, + RTW89_RPR_MODE_STF +}; + +struct rtw89_mac_info { + struct rtw89_dle_info dle_info; + struct rtw89_hfc_param hfc_param; + enum rtw89_qta_mode qta_mode; + u8 rpwm_seq_num; + u8 cpwm_seq_num; +}; + +enum rtw89_fw_type { + RTW89_FW_NORMAL = 1, + RTW89_FW_WOWLAN = 3, +}; + +struct rtw89_fw_suit { + const u8 *data; + u32 size; + u8 major_ver; + u8 minor_ver; + u8 sub_ver; + u8 sub_idex; + u16 build_year; + u16 build_mon; + u16 build_date; + u16 build_hour; + u16 build_min; + u8 cmd_ver; +}; + +#define RTW89_FW_VER_CODE(major, minor, sub, idx) \ + (((major) << 24) | ((minor) << 16) | ((sub) << 8) | (idx)) +#define RTW89_FW_SUIT_VER_CODE(s) \ + RTW89_FW_VER_CODE((s)->major_ver, (s)->minor_ver, (s)->sub_ver, (s)->sub_idex) + +struct rtw89_fw_info { + const struct firmware *firmware; + struct rtw89_dev *rtwdev; + struct completion completion; + u8 h2c_seq; + u8 rec_seq; + struct rtw89_fw_suit normal; + struct rtw89_fw_suit wowlan; + bool fw_log_enable; + bool old_ht_ra_format; +}; + +struct rtw89_cam_info { + DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM); + DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM); + DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM); +}; + +enum rtw89_sar_sources { + 
RTW89_SAR_SOURCE_NONE, + RTW89_SAR_SOURCE_COMMON, + + RTW89_SAR_SOURCE_NR, +}; + +struct rtw89_sar_cfg_common { + bool set[RTW89_SUBBAND_NR]; + s32 cfg[RTW89_SUBBAND_NR]; +}; + +struct rtw89_sar_info { + /* used to decide how to access SAR cfg union */ + enum rtw89_sar_sources src; + + /* reserved for different kinds of SAR cfg struct. + * it is assumed that a single cfg struct cannot handle various SAR sources. + */ + union { + struct rtw89_sar_cfg_common cfg_common; + }; +}; + +struct rtw89_hal { + u32 rx_fltr; + u8 cv; + u8 current_channel; + u8 current_primary_channel; + enum rtw89_subband current_subband; + u8 current_band_width; + u8 current_band_type; + /* center channel for different available bandwidths, + * val of (bw > current_band_width) is invalid + */ + u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1]; + u32 sw_amsdu_max_size; + u32 antenna_tx; + u32 antenna_rx; + u8 tx_nss; + u8 rx_nss; +}; + +#define RTW89_MAX_MAC_ID_NUM 128 + +enum rtw89_flags { + RTW89_FLAG_POWERON, + RTW89_FLAG_FW_RDY, + RTW89_FLAG_RUNNING, + RTW89_FLAG_BFEE_MON, + RTW89_FLAG_BFEE_EN, + RTW89_FLAG_NAPI_RUNNING, + RTW89_FLAG_LEISURE_PS, + RTW89_FLAG_LOW_POWER_MODE, + RTW89_FLAG_INACTIVE_PS, + + NUM_OF_RTW89_FLAGS, +}; + +struct rtw89_pkt_stat { + u16 beacon_nr; + u32 rx_rate_cnt[RTW89_HW_RATE_NR]; +}; + +DECLARE_EWMA(thermal, 4, 4); + +struct rtw89_phy_stat { + struct ewma_thermal avg_thermal[RF_PATH_MAX]; + struct rtw89_pkt_stat cur_pkt_stat; + struct rtw89_pkt_stat last_pkt_stat; +}; + +#define RTW89_DACK_PATH_NR 2 +#define RTW89_DACK_IDX_NR 2 +#define RTW89_DACK_MSBK_NR 16 +struct rtw89_dack_info { + bool dack_done; + u8 msbk_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR][RTW89_DACK_MSBK_NR]; + u8 dadck_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR]; + u16 addck_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR]; + u16 biask_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR]; + u32 dack_cnt; + bool addck_timeout[RTW89_DACK_PATH_NR]; + bool dadck_timeout[RTW89_DACK_PATH_NR]; + bool msbk_timeout[RTW89_DACK_PATH_NR]; +}; + +#define RTW89_IQK_CHS_NR 2 +#define RTW89_IQK_PATH_NR 4 +struct rtw89_iqk_info { + bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + bool iqk_tx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + bool iqk_rx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + u32 iqk_fail_cnt; + bool is_iqk_init; + u32 iqk_channel[RTW89_IQK_CHS_NR]; + u8 iqk_band[RTW89_IQK_PATH_NR]; + u8 iqk_ch[RTW89_IQK_PATH_NR]; + u8 iqk_bw[RTW89_IQK_PATH_NR]; + u8 kcount; + u8 iqk_times; + u8 version; + u32 nb_txcfir[RTW89_IQK_PATH_NR]; + u32 nb_rxcfir[RTW89_IQK_PATH_NR]; + u32 bp_txkresult[RTW89_IQK_PATH_NR]; + u32 bp_rxkresult[RTW89_IQK_PATH_NR]; + u32 bp_iqkenable[RTW89_IQK_PATH_NR]; + bool is_wb_txiqk[RTW89_IQK_PATH_NR]; + bool is_wb_rxiqk[RTW89_IQK_PATH_NR]; + bool is_nbiqk; + bool iqk_fft_en; + bool iqk_xym_en; + bool iqk_sram_en; + bool iqk_cfir_en; + u8 thermal[RTW89_IQK_PATH_NR]; + bool thermal_rek_en; + u32 syn1to2; + u8 iqk_mcc_ch[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + u8 iqk_table_idx[RTW89_IQK_PATH_NR]; +}; + +#define RTW89_DPK_RF_PATH 2 +#define RTW89_DPK_AVG_THERMAL_NUM 8 +#define RTW89_DPK_BKUP_NUM 2 +struct rtw89_dpk_bkup_para { + enum rtw89_band band; + enum rtw89_bandwidth bw; + u8 ch; + bool path_ok; + u8 txagc_dpk; + u8 ther_dpk; + u8 gs; + u16 pwsf; +}; + +struct rtw89_dpk_info { + bool is_dpk_enable; + bool is_dpk_reload_en; + u16 dc_i[RTW89_DPK_RF_PATH]; + u16 dc_q[RTW89_DPK_RF_PATH]; + u8 corr_val[RTW89_DPK_RF_PATH]; + u8 corr_idx[RTW89_DPK_RF_PATH]; + u8 cur_idx[RTW89_DPK_RF_PATH]; 
+ struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; +}; + +struct rtw89_fem_info { + bool elna_2g; + bool elna_5g; + bool epa_2g; + bool epa_5g; +}; + +struct rtw89_phy_ch_info { + u8 rssi_min; + u16 rssi_min_macid; + u8 pre_rssi_min; + u8 rssi_max; + u16 rssi_max_macid; + u8 rxsc_160; + u8 rxsc_80; + u8 rxsc_40; + u8 rxsc_20; + u8 rxsc_l; + u8 is_noisy; +}; + +struct rtw89_agc_gaincode_set { + u8 lna_idx; + u8 tia_idx; + u8 rxb_idx; +}; + +#define IGI_RSSI_TH_NUM 5 +#define FA_TH_NUM 4 +#define LNA_GAIN_NUM 7 +#define TIA_GAIN_NUM 2 +struct rtw89_dig_info { + struct rtw89_agc_gaincode_set cur_gaincode; + bool force_gaincode_idx_en; + struct rtw89_agc_gaincode_set force_gaincode; + u8 igi_rssi_th[IGI_RSSI_TH_NUM]; + u16 fa_th[FA_TH_NUM]; + u8 igi_rssi; + u8 igi_fa_rssi; + u8 fa_rssi_ofst; + u8 dyn_igi_max; + u8 dyn_igi_min; + bool dyn_pd_th_en; + u8 dyn_pd_th_max; + u8 pd_low_th_ofst; + u8 ib_pbk; + s8 ib_pkpwr; + s8 lna_gain_a[LNA_GAIN_NUM]; + s8 lna_gain_g[LNA_GAIN_NUM]; + s8 *lna_gain; + s8 tia_gain_a[TIA_GAIN_NUM]; + s8 tia_gain_g[TIA_GAIN_NUM]; + s8 *tia_gain; + bool is_linked_pre; + bool bypass_dig; +}; + +enum rtw89_multi_cfo_mode { + RTW89_PKT_BASED_AVG_MODE = 0, + RTW89_ENTRY_BASED_AVG_MODE = 1, + RTW89_TP_BASED_AVG_MODE = 2, +}; + +enum rtw89_phy_cfo_status { + RTW89_PHY_DCFO_STATE_NORMAL = 0, + RTW89_PHY_DCFO_STATE_ENHANCE = 1, + RTW89_PHY_DCFO_STATE_MAX +}; + +struct rtw89_cfo_tracking_info { + u16 cfo_timer_ms; + bool cfo_trig_by_timer_en; + enum rtw89_phy_cfo_status phy_cfo_status; + u8 phy_cfo_trk_cnt; + bool is_adjust; + enum rtw89_multi_cfo_mode rtw89_multi_cfo_mode; + bool apply_compensation; + u8 crystal_cap; + u8 crystal_cap_default; + u8 def_x_cap; + s8 x_cap_ofst; + u32 sta_cfo_tolerance; + s32 cfo_tail[CFO_TRACK_MAX_USER]; + u16 cfo_cnt[CFO_TRACK_MAX_USER]; + s32 cfo_avg_pre; + s32 cfo_avg[CFO_TRACK_MAX_USER]; + s32 pre_cfo_avg[CFO_TRACK_MAX_USER]; + u32 packet_count; + u32 packet_count_pre; + s32 residual_cfo_acc; + u8 phy_cfotrk_state; + u8 phy_cfotrk_cnt; +}; + +/* 2GL, 2GH, 5GL1, 5GL2, 5GM1, 5GM2, 5GH1, 5GH2 */ +#define TSSI_TRIM_CH_GROUP_NUM 8 + +#define TSSI_CCK_CH_GROUP_NUM 6 +#define TSSI_MCS_2G_CH_GROUP_NUM 5 +#define TSSI_MCS_5G_CH_GROUP_NUM 14 +#define TSSI_MCS_CH_GROUP_NUM \ + (TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM) + +struct rtw89_tssi_info { + u8 thermal[RF_PATH_MAX]; + s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM]; + s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM]; + s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM]; + s8 extra_ofst[RF_PATH_MAX]; + bool tssi_tracking_check[RF_PATH_MAX]; + u8 default_txagc_offset[RF_PATH_MAX]; + u32 base_thermal[RF_PATH_MAX]; +}; + +struct rtw89_power_trim_info { + bool pg_thermal_trim; + bool pg_pa_bias_trim; + u8 thermal_trim[RF_PATH_MAX]; + u8 pa_bias_trim[RF_PATH_MAX]; +}; + +struct rtw89_regulatory { + char alpha2[3]; + u8 txpwr_regd[RTW89_BAND_MAX]; +}; + +enum rtw89_ifs_clm_application { + RTW89_IFS_CLM_INIT = 0, + RTW89_IFS_CLM_BACKGROUND = 1, + RTW89_IFS_CLM_ACS = 2, + RTW89_IFS_CLM_DIG = 3, + RTW89_IFS_CLM_TDMA_DIG = 4, + RTW89_IFS_CLM_DBG = 5, + RTW89_IFS_CLM_DBG_MANUAL = 6 +}; + +enum rtw89_env_racing_lv { + RTW89_RAC_RELEASE = 0, + RTW89_RAC_LV_1 = 1, + RTW89_RAC_LV_2 = 2, + RTW89_RAC_LV_3 = 3, + RTW89_RAC_LV_4 = 4, + RTW89_RAC_MAX_NUM = 5 +}; + +struct rtw89_ccx_para_info { + enum rtw89_env_racing_lv rac_lv; + u16 mntr_time; + u8 nhm_manual_th_ofst; + u8 nhm_manual_th0; + enum rtw89_ifs_clm_application ifs_clm_app; + u32 ifs_clm_manual_th_times; + u32 ifs_clm_manual_th0; 
+ u8 fahm_manual_th_ofst; + u8 fahm_manual_th0; + u8 fahm_numer_opt; + u8 fahm_denom_opt; +}; + +enum rtw89_ccx_edcca_opt_sc_idx { + RTW89_CCX_EDCCA_SEG0_P0 = 0, + RTW89_CCX_EDCCA_SEG0_S1 = 1, + RTW89_CCX_EDCCA_SEG0_S2 = 2, + RTW89_CCX_EDCCA_SEG0_S3 = 3, + RTW89_CCX_EDCCA_SEG1_P0 = 4, + RTW89_CCX_EDCCA_SEG1_S1 = 5, + RTW89_CCX_EDCCA_SEG1_S2 = 6, + RTW89_CCX_EDCCA_SEG1_S3 = 7 +}; + +enum rtw89_ccx_edcca_opt_bw_idx { + RTW89_CCX_EDCCA_BW20_0 = 0, + RTW89_CCX_EDCCA_BW20_1 = 1, + RTW89_CCX_EDCCA_BW20_2 = 2, + RTW89_CCX_EDCCA_BW20_3 = 3, + RTW89_CCX_EDCCA_BW20_4 = 4, + RTW89_CCX_EDCCA_BW20_5 = 5, + RTW89_CCX_EDCCA_BW20_6 = 6, + RTW89_CCX_EDCCA_BW20_7 = 7 +}; + +#define RTW89_NHM_TH_NUM 11 +#define RTW89_FAHM_TH_NUM 11 +#define RTW89_NHM_RPT_NUM 12 +#define RTW89_FAHM_RPT_NUM 12 +#define RTW89_IFS_CLM_NUM 4 +struct rtw89_env_monitor_info { + u32 ccx_trigger_time; + u64 start_time; + u8 ccx_rpt_stamp; + u8 ccx_watchdog_result; + bool ccx_ongoing; + u8 ccx_rac_lv; + bool ccx_manual_ctrl; + u8 ccx_pre_rssi; + u16 clm_mntr_time; + u16 nhm_mntr_time; + u16 ifs_clm_mntr_time; + enum rtw89_ifs_clm_application ifs_clm_app; + u16 fahm_mntr_time; + u16 edcca_clm_mntr_time; + u16 ccx_period; + u8 ccx_unit_idx; + enum rtw89_ccx_edcca_opt_bw_idx ccx_edcca_opt_bw_idx; + u8 nhm_th[RTW89_NHM_TH_NUM]; + u16 ifs_clm_th_l[RTW89_IFS_CLM_NUM]; + u16 ifs_clm_th_h[RTW89_IFS_CLM_NUM]; + u8 fahm_numer_opt; + u8 fahm_denom_opt; + u8 fahm_th[RTW89_FAHM_TH_NUM]; + u16 clm_result; + u16 nhm_result[RTW89_NHM_RPT_NUM]; + u8 nhm_wgt[RTW89_NHM_RPT_NUM]; + u16 nhm_tx_cnt; + u16 nhm_cca_cnt; + u16 nhm_idle_cnt; + u16 ifs_clm_tx; + u16 ifs_clm_edcca_excl_cca; + u16 ifs_clm_ofdmfa; + u16 ifs_clm_ofdmcca_excl_fa; + u16 ifs_clm_cckfa; + u16 ifs_clm_cckcca_excl_fa; + u16 ifs_clm_total_ifs; + u8 ifs_clm_his[RTW89_IFS_CLM_NUM]; + u16 ifs_clm_avg[RTW89_IFS_CLM_NUM]; + u16 ifs_clm_cca[RTW89_IFS_CLM_NUM]; + u16 fahm_result[RTW89_FAHM_RPT_NUM]; + u16 fahm_denom_result; + u16 edcca_clm_result; + u8 clm_ratio; + u8 nhm_rpt[RTW89_NHM_RPT_NUM]; + u8 nhm_tx_ratio; + u8 nhm_cca_ratio; + u8 nhm_idle_ratio; + u8 nhm_ratio; + u16 nhm_result_sum; + u8 nhm_pwr; + u8 ifs_clm_tx_ratio; + u8 ifs_clm_edcca_excl_cca_ratio; + u8 ifs_clm_cck_fa_ratio; + u8 ifs_clm_ofdm_fa_ratio; + u8 ifs_clm_cck_cca_excl_fa_ratio; + u8 ifs_clm_ofdm_cca_excl_fa_ratio; + u16 ifs_clm_cck_fa_permil; + u16 ifs_clm_ofdm_fa_permil; + u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM]; + u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM]; + u8 fahm_rpt[RTW89_FAHM_RPT_NUM]; + u16 fahm_result_sum; + u8 fahm_ratio; + u8 fahm_denom_ratio; + u8 fahm_pwr; + u8 edcca_clm_ratio; +}; + +enum rtw89_ser_rcvy_step { + RTW89_SER_DRV_STOP_TX, + RTW89_SER_DRV_STOP_RX, + RTW89_SER_DRV_STOP_RUN, + RTW89_SER_HAL_STOP_DMA, + RTW89_NUM_OF_SER_FLAGS +}; + +struct rtw89_ser { + u8 state; + u8 alarm_event; + + struct work_struct ser_hdl_work; + struct delayed_work ser_alarm_work; + struct state_ent *st_tbl; + struct event_ent *ev_tbl; + struct list_head msg_q; + spinlock_t msg_q_lock; /* lock when read/write ser msg */ + DECLARE_BITMAP(flags, RTW89_NUM_OF_SER_FLAGS); +}; + +enum rtw89_mac_ax_ps_mode { + RTW89_MAC_AX_PS_MODE_ACTIVE = 0, + RTW89_MAC_AX_PS_MODE_LEGACY = 1, + RTW89_MAC_AX_PS_MODE_WMMPS = 2, + RTW89_MAC_AX_PS_MODE_MAX = 3, +}; + +enum rtw89_last_rpwm_mode { + RTW89_LAST_RPWM_PS = 0x0, + RTW89_LAST_RPWM_ACTIVE = 0x6, +}; + +struct rtw89_lps_parm { + u8 macid; + u8 psmode; /* enum rtw89_mac_ax_ps_mode */ + u8 lastrpwm; /* enum rtw89_last_rpwm_mode */ +}; + +struct rtw89_ppdu_sts_info { + struct sk_buff_head 
rx_queue[RTW89_PHY_MAX]; + u8 curr_rx_ppdu_cnt[RTW89_PHY_MAX]; +}; + +struct rtw89_early_h2c { + struct list_head list; + u8 *h2c; + u16 h2c_len; +}; + +struct rtw89_dev { + struct ieee80211_hw *hw; + struct device *dev; + + bool dbcc_en; + const struct rtw89_chip_info *chip; + struct rtw89_hal hal; + struct rtw89_mac_info mac; + struct rtw89_fw_info fw; + struct rtw89_hci_info hci; + struct rtw89_efuse efuse; + struct rtw89_traffic_stats stats; + + /* ensures exclusive access from mac80211 callbacks */ + struct mutex mutex; + struct list_head rtwvifs_list; + /* used to protect rf read write */ + struct mutex rf_mutex; + struct workqueue_struct *txq_wq; + struct work_struct txq_work; + struct delayed_work txq_reinvoke_work; + /* used to protect ba_list */ + spinlock_t ba_lock; + /* txqs to setup ba session */ + struct list_head ba_list; + struct work_struct ba_work; + + struct rtw89_cam_info cam_info; + + struct sk_buff_head c2h_queue; + struct work_struct c2h_work; + + struct list_head early_h2c_list; + + struct rtw89_ser ser; + + DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM); + DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM); + DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS); + + struct rtw89_phy_stat phystat; + struct rtw89_dack_info dack; + struct rtw89_iqk_info iqk; + struct rtw89_dpk_info dpk; + bool is_tssi_mode[RF_PATH_MAX]; + bool is_bt_iqk_timeout; + + struct rtw89_fem_info fem; + struct rtw89_txpwr_byrate byr[RTW89_BAND_MAX]; + struct rtw89_tssi_info tssi; + struct rtw89_power_trim_info pwr_trim; + + struct rtw89_cfo_tracking_info cfo_tracking; + struct rtw89_env_monitor_info env_monitor; + struct rtw89_dig_info dig; + struct rtw89_phy_ch_info ch_info; + struct delayed_work track_work; + struct delayed_work coex_act1_work; + struct delayed_work coex_bt_devinfo_work; + struct delayed_work coex_rfk_chk_work; + struct delayed_work cfo_track_work; + struct rtw89_ppdu_sts_info ppdu_sts; + u8 total_sta_assoc; + bool scanning; + + const struct rtw89_regulatory *regd; + struct rtw89_sar_info sar; + + struct rtw89_btc btc; + enum rtw89_ps_mode ps_mode; + bool lps_enabled; + + /* napi structure */ + struct net_device netdev; + struct napi_struct napi; + int napi_budget_countdown; + + /* HCI related data, keep last */ + u8 priv[0] __aligned(sizeof(void *)); +}; + +static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, + struct rtw89_core_tx_request *tx_req) +{ + return rtwdev->hci.ops->tx_write(rtwdev, tx_req); +} + +static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev) +{ + rtwdev->hci.ops->reset(rtwdev); +} + +static inline int rtw89_hci_start(struct rtw89_dev *rtwdev) +{ + return rtwdev->hci.ops->start(rtwdev); +} + +static inline void rtw89_hci_stop(struct rtw89_dev *rtwdev) +{ + rtwdev->hci.ops->stop(rtwdev); +} + +static inline int rtw89_hci_deinit(struct rtw89_dev *rtwdev) +{ + return rtwdev->hci.ops->deinit(rtwdev); +} + +static inline void rtw89_hci_recalc_int_mit(struct rtw89_dev *rtwdev) +{ + rtwdev->hci.ops->recalc_int_mit(rtwdev); +} + +static inline u32 rtw89_hci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 txch) +{ + return rtwdev->hci.ops->check_and_reclaim_tx_resource(rtwdev, txch); +} + +static inline void rtw89_hci_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) +{ + return rtwdev->hci.ops->tx_kick_off(rtwdev, txch); +} + +static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues, + bool drop) +{ + if (rtwdev->hci.ops->flush_queues) + return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); +} + +static inline u8 
rtw89_read8(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read8(rtwdev, addr); +} + +static inline u16 rtw89_read16(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read16(rtwdev, addr); +} + +static inline u32 rtw89_read32(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtwdev->hci.ops->read32(rtwdev, addr); +} + +static inline void rtw89_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) +{ + rtwdev->hci.ops->write8(rtwdev, addr, data); +} + +static inline void rtw89_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) +{ + rtwdev->hci.ops->write16(rtwdev, addr, data); +} + +static inline void rtw89_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) +{ + rtwdev->hci.ops->write32(rtwdev, addr, data); +} + +static inline void +rtw89_write8_set(struct rtw89_dev *rtwdev, u32 addr, u8 bit) +{ + u8 val; + + val = rtw89_read8(rtwdev, addr); + rtw89_write8(rtwdev, addr, val | bit); +} + +static inline void +rtw89_write16_set(struct rtw89_dev *rtwdev, u32 addr, u16 bit) +{ + u16 val; + + val = rtw89_read16(rtwdev, addr); + rtw89_write16(rtwdev, addr, val | bit); +} + +static inline void +rtw89_write32_set(struct rtw89_dev *rtwdev, u32 addr, u32 bit) +{ + u32 val; + + val = rtw89_read32(rtwdev, addr); + rtw89_write32(rtwdev, addr, val | bit); +} + +static inline void +rtw89_write8_clr(struct rtw89_dev *rtwdev, u32 addr, u8 bit) +{ + u8 val; + + val = rtw89_read8(rtwdev, addr); + rtw89_write8(rtwdev, addr, val & ~bit); +} + +static inline void +rtw89_write16_clr(struct rtw89_dev *rtwdev, u32 addr, u16 bit) +{ + u16 val; + + val = rtw89_read16(rtwdev, addr); + rtw89_write16(rtwdev, addr, val & ~bit); +} + +static inline void +rtw89_write32_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bit) +{ + u32 val; + + val = rtw89_read32(rtwdev, addr); + rtw89_write32(rtwdev, addr, val & ~bit); +} + +static inline u32 +rtw89_read32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw89_read32(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline u16 +rtw89_read16_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw89_read16(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline u8 +rtw89_read8_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 ret; + + orig = rtw89_read8(rtwdev, addr); + ret = (orig & mask) >> shift; + + return ret; +} + +static inline void +rtw89_write32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data) +{ + u32 shift = __ffs(mask); + u32 orig; + u32 set; + + WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr); + + orig = rtw89_read32(rtwdev, addr); + set = (orig & ~mask) | ((data << shift) & mask); + rtw89_write32(rtwdev, addr, set); +} + +static inline void +rtw89_write16_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u16 data) +{ + u32 shift; + u16 orig, set; + + mask &= 0xffff; + shift = __ffs(mask); + + orig = rtw89_read16(rtwdev, addr); + set = (orig & ~mask) | ((data << shift) & mask); + rtw89_write16(rtwdev, addr, set); +} + +static inline void +rtw89_write8_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u8 data) +{ + u32 shift; + u8 orig, set; + + mask &= 0xff; + shift = __ffs(mask); + + orig = rtw89_read8(rtwdev, addr); + set = (orig & ~mask) | ((data << shift) & mask); + rtw89_write8(rtwdev, addr, set); +} + +static inline u32 +rtw89_read_rf(struct rtw89_dev *rtwdev, enum 
rtw89_rf_path rf_path, + u32 addr, u32 mask) +{ + u32 val; + + mutex_lock(&rtwdev->rf_mutex); + val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask); + mutex_unlock(&rtwdev->rf_mutex); + + return val; +} + +static inline void +rtw89_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask, u32 data) +{ + mutex_lock(&rtwdev->rf_mutex); + rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data); + mutex_unlock(&rtwdev->rf_mutex); +} + +static inline struct ieee80211_txq *rtw89_txq_to_txq(struct rtw89_txq *rtwtxq) +{ + void *p = rtwtxq; + + return container_of(p, struct ieee80211_txq, drv_priv); +} + +static inline void rtw89_core_txq_init(struct rtw89_dev *rtwdev, + struct ieee80211_txq *txq) +{ + struct rtw89_txq *rtwtxq; + + if (!txq) + return; + + rtwtxq = (struct rtw89_txq *)txq->drv_priv; + INIT_LIST_HEAD(&rtwtxq->list); +} + +static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw89_vif *rtwvif) +{ + void *p = rtwvif; + + return container_of(p, struct ieee80211_vif, drv_priv); +} + +static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta) +{ + void *p = rtwsta; + + return container_of(p, struct ieee80211_sta, drv_priv); +} + +static inline +void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev, + struct rtw89_channel_help_params *p) +{ + rtwdev->chip->ops->set_channel_help(rtwdev, true, p); +} + +static inline +void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev, + struct rtw89_channel_help_params *p) +{ + rtwdev->chip->ops->set_channel_help(rtwdev, false, p); +} + +static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->fem_setup) + chip->ops->fem_setup(rtwdev); +} + +static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->bb_sethw) + chip->ops->bb_sethw(rtwdev); +} + +static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->rfk_init) + chip->ops->rfk_init(rtwdev); +} + +static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->rfk_channel) + chip->ops->rfk_channel(rtwdev); +} + +static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->rfk_band_changed) + chip->ops->rfk_band_changed(rtwdev); +} + +static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->rfk_scan) + chip->ops->rfk_scan(rtwdev, start); +} + +static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->rfk_track) + chip->ops->rfk_track(rtwdev); +} + +static inline void rtw89_chip_set_txpwr_ctrl(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->set_txpwr_ctrl) + chip->ops->set_txpwr_ctrl(rtwdev); +} + +static inline void rtw89_chip_set_txpwr(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 ch = rtwdev->hal.current_channel; + + if (!ch) + return; + + if (chip->ops->set_txpwr) + chip->ops->set_txpwr(rtwdev); +} + +static inline void rtw89_chip_power_trim(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->power_trim) + 
chip->ops->power_trim(rtwdev); +} + +static inline void rtw89_chip_init_txpwr_unit(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->init_txpwr_unit) + chip->ops->init_txpwr_unit(rtwdev, phy_idx); +} + +static inline u8 rtw89_chip_get_thermal(struct rtw89_dev *rtwdev, + enum rtw89_rf_path rf_path) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (!chip->ops->get_thermal) + return 0x10; + + return chip->ops->get_thermal(rtwdev, rf_path); +} + +static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->query_ppdu) + chip->ops->query_ppdu(rtwdev, phy_ppdu, status); +} + +static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, + bool bt_en) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->bb_ctrl_btc_preagc) + chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en); +} + +static inline +void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (!vif->bss_conf.he_support || !vif->bss_conf.assoc) + return; + + if (chip->ops->set_txpwr_ul_tb_offset) + chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx); +} + +static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl) +{ + tbl->load(rtwdev, tbl); +} + +static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band) +{ + return rtwdev->regd->txpwr_regd[band]; +} + +static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->ctrl_btg) + chip->ops->ctrl_btg(rtwdev, btg); +} + +static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr) +{ + __le16 fc = hdr->frame_control; + + if (ieee80211_has_tods(fc)) + return hdr->addr1; + else if (ieee80211_has_fromds(fc)) + return hdr->addr2; + else + return hdr->addr3; +} + +static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta) +{ + if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) || + (sta->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || + (sta->he_cap.he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) + return true; + return false; +} + +static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev, + enum rtw89_fw_type type) +{ + struct rtw89_fw_info *fw_info = &rtwdev->fw; + + if (type == RTW89_FW_WOWLAN) + return &fw_info->wowlan; + return &fw_info->normal; +} + +int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel); +int rtw89_h2c_tx(struct rtw89_dev *rtwdev, + struct sk_buff *skb, bool fwdl); +void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel); +void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); +void rtw89_core_rx(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + struct sk_buff *skb); +void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + u8 *data, u32 data_offset); +void rtw89_core_napi_start(struct rtw89_dev *rtwdev); +void 
rtw89_core_napi_stop(struct rtw89_dev *rtwdev); +void rtw89_core_napi_init(struct rtw89_dev *rtwdev); +void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev); +int rtw89_core_sta_add(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_core_init(struct rtw89_dev *rtwdev); +void rtw89_core_deinit(struct rtw89_dev *rtwdev); +int rtw89_core_register(struct rtw89_dev *rtwdev); +void rtw89_core_unregister(struct rtw89_dev *rtwdev); +void rtw89_set_channel(struct rtw89_dev *rtwdev); +u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size); +void rtw89_core_release_bit_map(unsigned long *addr, u8 bit); +void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits); +void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc); +int rtw89_chip_info_setup(struct rtw89_dev *rtwdev); +u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate); +int rtw89_regd_init(struct rtw89_dev *rtwdev, + void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)); +void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request); +void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, + struct rtw89_traffic_stats *stats); +int rtw89_core_start(struct rtw89_dev *rtwdev); +void rtw89_core_stop(struct rtw89_dev *rtwdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c new file mode 100644 index 00000000000000..29eb188c888c7f --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -0,0 +1,2489 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "ps.h" +#include "reg.h" +#include "sar.h" + +#ifdef CONFIG_RTW89_DEBUGMSG +unsigned int rtw89_debug_mask; +EXPORT_SYMBOL(rtw89_debug_mask); +module_param_named(debug_mask, rtw89_debug_mask, uint, 0644); +MODULE_PARM_DESC(debug_mask, "Debugging mask"); +#endif + +#ifdef CONFIG_RTW89_DEBUGFS +struct rtw89_debugfs_priv { + struct rtw89_dev *rtwdev; + int (*cb_read)(struct seq_file *m, void *v); + ssize_t (*cb_write)(struct file *filp, const char __user *buffer, + size_t count, loff_t *loff); + union { + u32 cb_data; + struct { + u32 addr; + u8 len; + } read_reg; + struct { + u32 addr; + u32 mask; + u8 path; + } read_rf; + struct { + u8 ss_dbg:1; + u8 dle_dbg:1; + u8 dmac_dbg:1; + u8 cmac_dbg:1; + u8 dbg_port:1; + } dbgpkg_en; + struct { + u32 start; + u32 len; + u8 sel; + } mac_mem; + }; +}; + +static int rtw89_debugfs_single_show(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + + return debugfs_priv->cb_read(m, v); +} + +static ssize_t rtw89_debugfs_single_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + + return debugfs_priv->cb_write(filp, buffer, count, loff); +} + +static ssize_t rtw89_debugfs_seq_file_write(struct file *filp, + const char __user 
*buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private; + + return debugfs_priv->cb_write(filp, buffer, count, loff); +} + +static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, rtw89_debugfs_single_show, inode->i_private); +} + +static int rtw89_debugfs_close(struct inode *inode, struct file *filp) +{ + return 0; +} + +static const struct file_operations file_ops_single_r = { + .owner = THIS_MODULE, + .open = rtw89_debugfs_single_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations file_ops_common_rw = { + .owner = THIS_MODULE, + .open = rtw89_debugfs_single_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, + .write = rtw89_debugfs_seq_file_write, +}; + +static const struct file_operations file_ops_single_w = { + .owner = THIS_MODULE, + .write = rtw89_debugfs_single_write, + .open = simple_open, + .release = rtw89_debugfs_close, +}; + +static ssize_t +rtw89_debug_priv_read_reg_select(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + u32 addr, len; + int num; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%x %x", &addr, &len); + if (num != 2) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + debugfs_priv->read_reg.addr = addr; + debugfs_priv->read_reg.len = len; + + rtw89_info(rtwdev, "select read %d bytes from 0x%08x\n", len, addr); + + return count; +} + +static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + u32 addr, data; + u8 len; + + len = debugfs_priv->read_reg.len; + addr = debugfs_priv->read_reg.addr; + + switch (len) { + case 1: + data = rtw89_read8(rtwdev, addr); + break; + case 2: + data = rtw89_read16(rtwdev, addr); + break; + case 4: + data = rtw89_read32(rtwdev, addr); + break; + default: + rtw89_info(rtwdev, "invalid read reg len %d\n", len); + return -EINVAL; + } + + seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data); + + return 0; +} + +static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + u32 addr, val, len; + int num; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%x %x %x", &addr, &val, &len); + if (num != 3) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + switch (len) { + case 1: + rtw89_info(rtwdev, "reg write8 0x%08x: 0x%02x\n", addr, val); + rtw89_write8(rtwdev, addr, (u8)val); + break; + case 2: + rtw89_info(rtwdev, "reg write16 0x%08x: 0x%04x\n", addr, val); + rtw89_write16(rtwdev, addr, (u16)val); + break; + case 4: + rtw89_info(rtwdev, "reg write32 0x%08x: 0x%08x\n", addr, val); + rtw89_write32(rtwdev, addr, (u32)val); + break; + default: + 
rtw89_info(rtwdev, "invalid read write len %d\n", len); + break; + } + + return count; +} + +static ssize_t +rtw89_debug_priv_read_rf_select(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + u32 addr, mask; + u8 path; + int num; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask); + if (num != 3) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + if (path >= rtwdev->chip->rf_path_num) { + rtw89_info(rtwdev, "wrong rf path\n"); + return -EINVAL; + } + debugfs_priv->read_rf.addr = addr; + debugfs_priv->read_rf.mask = mask; + debugfs_priv->read_rf.path = path; + + rtw89_info(rtwdev, "select read rf path %d from 0x%08x\n", path, addr); + + return count; +} + +static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + u32 addr, data, mask; + u8 path; + + addr = debugfs_priv->read_rf.addr; + mask = debugfs_priv->read_rf.mask; + path = debugfs_priv->read_rf.path; + + data = rtw89_read_rf(rtwdev, path, addr, mask); + + seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data); + + return 0; +} + +static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + u32 addr, val, mask; + u8 path; + int num; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val); + if (num != 4) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + if (path >= rtwdev->chip->rf_path_num) { + rtw89_info(rtwdev, "wrong rf path\n"); + return -EINVAL; + } + + rtw89_info(rtwdev, "path %d, rf register write 0x%08x=0x%08x (mask = 0x%08x)\n", + path, addr, val, mask); + rtw89_write_rf(rtwdev, path, addr, mask, val); + + return count; +} + +static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + const struct rtw89_chip_info *chip = rtwdev->chip; + u32 addr, offset, data; + u8 path; + + for (path = 0; path < chip->rf_path_num; path++) { + seq_printf(m, "RF path %d:\n\n", path); + for (addr = 0; addr < 0x100; addr += 4) { + seq_printf(m, "0x%08x: ", addr); + for (offset = 0; offset < 4; offset++) { + data = rtw89_read_rf(rtwdev, path, + addr + offset, RFREG_MASK); + seq_printf(m, "0x%05x ", data); + } + seq_puts(m, "\n"); + } + seq_puts(m, "\n"); + } + + return 0; +} + +struct txpwr_ent { + const char *txt; + u8 len; +}; + +struct txpwr_map { + const struct txpwr_ent *ent; + u8 size; + u32 addr_from; + u32 addr_to; +}; + +#define __GEN_TXPWR_ENT2(_t, _e0, _e1) \ + { .len = 2, .txt = _t "\t- " _e0 " " _e1 } + +#define __GEN_TXPWR_ENT4(_t, _e0, _e1, _e2, _e3) \ + { .len = 4, .txt = _t "\t- " _e0 " " _e1 " " _e2 " " _e3 } + +#define __GEN_TXPWR_ENT8(_t, _e0, _e1, _e2, _e3, _e4, _e5, _e6, _e7) \ + { .len = 8, .txt = 
_t "\t- " \ + _e0 " " _e1 " " _e2 " " _e3 " " \ + _e4 " " _e5 " " _e6 " " _e7 } + +static const struct txpwr_ent __txpwr_ent_byr[] = { + __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + /* 1NSS */ + __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT4("HEDCM_1NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + /* 2NSS */ + __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT4("HEDCM_2NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), +}; + +static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) == + (R_AX_PWR_BY_RATE_MAX - R_AX_PWR_BY_RATE + 4)); + +static const struct txpwr_map __txpwr_map_byr = { + .ent = __txpwr_ent_byr, + .size = ARRAY_SIZE(__txpwr_ent_byr), + .addr_from = R_AX_PWR_BY_RATE, + .addr_to = R_AX_PWR_BY_RATE_MAX, +}; + +static const struct txpwr_ent __txpwr_ent_lmt[] = { + /* 1TX */ + __GEN_TXPWR_ENT2("CCK_1TX_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_1TX_40M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("OFDM_1TX ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_20M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_80M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_80M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_160M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_0p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_1TX_40M_2p5", "NON_BF", "BF"), + /* 2TX */ + __GEN_TXPWR_ENT2("CCK_2TX_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_2TX_40M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("OFDM_2TX ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_20M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_80M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_80M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_160M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_0p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_2TX_40M_2p5", "NON_BF", "BF"), +}; + +static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) == + (R_AX_PWR_LMT_MAX - R_AX_PWR_LMT + 4)); + +static const struct txpwr_map 
__txpwr_map_lmt = { + .ent = __txpwr_ent_lmt, + .size = ARRAY_SIZE(__txpwr_ent_lmt), + .addr_from = R_AX_PWR_LMT, + .addr_to = R_AX_PWR_LMT_MAX, +}; + +static const struct txpwr_ent __txpwr_ent_lmt_ru[] = { + /* 1TX */ + __GEN_TXPWR_ENT8("1TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3", + "RU26__4", "RU26__5", "RU26__6", "RU26__7"), + __GEN_TXPWR_ENT8("1TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3", + "RU52__4", "RU52__5", "RU52__6", "RU52__7"), + __GEN_TXPWR_ENT8("1TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3", + "RU106_4", "RU106_5", "RU106_6", "RU106_7"), + /* 2TX */ + __GEN_TXPWR_ENT8("2TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3", + "RU26__4", "RU26__5", "RU26__6", "RU26__7"), + __GEN_TXPWR_ENT8("2TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3", + "RU52__4", "RU52__5", "RU52__6", "RU52__7"), + __GEN_TXPWR_ENT8("2TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3", + "RU106_4", "RU106_5", "RU106_6", "RU106_7"), +}; + +static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) == + (R_AX_PWR_RU_LMT_MAX - R_AX_PWR_RU_LMT + 4)); + +static const struct txpwr_map __txpwr_map_lmt_ru = { + .ent = __txpwr_ent_lmt_ru, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ru), + .addr_from = R_AX_PWR_RU_LMT, + .addr_to = R_AX_PWR_RU_LMT_MAX, +}; + +static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, + const u8 *buf, const u8 cur) +{ + char *fmt; + + switch (ent->len) { + case 2: + fmt = "%s\t| %3d, %3d,\tdBm\n"; + seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]); + return 2; + case 4: + fmt = "%s\t| %3d, %3d, %3d, %3d,\tdBm\n"; + seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1], + buf[cur + 2], buf[cur + 3]); + return 4; + case 8: + fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\tdBm\n"; + seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1], + buf[cur + 2], buf[cur + 3], buf[cur + 4], + buf[cur + 5], buf[cur + 6], buf[cur + 7]); + return 8; + default: + return 0; + } +} + +static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, + const struct txpwr_map *map) +{ + u8 fct = rtwdev->chip->txpwr_factor_mac; + u8 *buf, cur, i; + u32 val, addr; + int ret; + + buf = vzalloc(map->addr_to - map->addr_from + 4); + if (!buf) + return -ENOMEM; + + for (addr = map->addr_from; addr <= map->addr_to; addr += 4) { + ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val); + if (ret) + val = MASKDWORD; + + cur = addr - map->addr_from; + for (i = 0; i < 4; i++, val >>= 8) + buf[cur + i] = FIELD_GET(MASKBYTE0, val) >> fct; + } + + for (cur = 0, i = 0; i < map->size; i++) + cur += __print_txpwr_ent(m, &map->ent[i], buf, cur); + + vfree(buf); + return 0; +} + +#define case_REGD(_regd) \ + case RTW89_ ## _regd: \ + seq_puts(m, #_regd "\n"); \ + break + +static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev) +{ + u8 band = rtwdev->hal.current_band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + + switch (regd) { + default: + seq_printf(m, "UNKNOWN: %d\n", regd); + break; + case_REGD(WW); + case_REGD(ETSI); + case_REGD(FCC); + case_REGD(MKK); + case_REGD(NA); + case_REGD(IC); + case_REGD(KCC); + case_REGD(NCC); + case_REGD(CHILE); + case_REGD(ACMA); + case_REGD(MEXICO); + case_REGD(UKRAINE); + case_REGD(CN); + } +} + +#undef case_REGD + +static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + seq_puts(m, "[Regulatory] "); + __print_regd(m, rtwdev); + + 
seq_puts(m, "[SAR]\n"); + rtw89_print_sar(m, rtwdev); + + seq_puts(m, "\n[TX power byrate]\n"); + ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr); + if (ret) + goto err; + + seq_puts(m, "\n[TX power limit]\n"); + ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt); + if (ret) + goto err; + + seq_puts(m, "\n[TX power limit_ru]\n"); + ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt_ru); + if (ret) + goto err; + +err: + mutex_unlock(&rtwdev->mutex); + return ret; +} + +static ssize_t +rtw89_debug_priv_mac_reg_dump_select(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + int sel; + int ret; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + ret = kstrtoint(buf, 0, &sel); + if (ret) + return ret; + + if (sel < RTW89_DBG_SEL_MAC_00 || sel > RTW89_DBG_SEL_RFC) { + rtw89_info(rtwdev, "invalid args: %d\n", sel); + return -EINVAL; + } + + debugfs_priv->cb_data = sel; + rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data); + + return count; +} + +#define RTW89_MAC_PAGE_SIZE 0x100 + +static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data; + u32 start, end; + u32 i, j, k, page; + u32 val; + + switch (reg_sel) { + case RTW89_DBG_SEL_MAC_00: + seq_puts(m, "Debug selected MAC page 0x00\n"); + start = 0x000; + end = 0x014; + break; + case RTW89_DBG_SEL_MAC_40: + seq_puts(m, "Debug selected MAC page 0x40\n"); + start = 0x040; + end = 0x07f; + break; + case RTW89_DBG_SEL_MAC_80: + seq_puts(m, "Debug selected MAC page 0x80\n"); + start = 0x080; + end = 0x09f; + break; + case RTW89_DBG_SEL_MAC_C0: + seq_puts(m, "Debug selected MAC page 0xc0\n"); + start = 0x0c0; + end = 0x0df; + break; + case RTW89_DBG_SEL_MAC_E0: + seq_puts(m, "Debug selected MAC page 0xe0\n"); + start = 0x0e0; + end = 0x0ff; + break; + case RTW89_DBG_SEL_BB: + seq_puts(m, "Debug selected BB register\n"); + start = 0x100; + end = 0x17f; + break; + case RTW89_DBG_SEL_IQK: + seq_puts(m, "Debug selected IQK register\n"); + start = 0x180; + end = 0x1bf; + break; + case RTW89_DBG_SEL_RFC: + seq_puts(m, "Debug selected RFC register\n"); + start = 0x1c0; + end = 0x1ff; + break; + default: + seq_puts(m, "Selected invalid register page\n"); + return -EINVAL; + } + + for (i = start; i <= end; i++) { + page = i << 8; + for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) { + seq_printf(m, "%08xh : ", 0x18600000 + j); + for (k = 0; k < 4; k++) { + val = rtw89_read32(rtwdev, j + (k << 2)); + seq_printf(m, "%08x ", val); + } + seq_puts(m, "\n"); + } + } + + return 0; +} + +static ssize_t +rtw89_debug_priv_mac_mem_dump_select(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + u32 sel, start_addr, len; + int num; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%x %x %x", &sel, 
&start_addr, &len); + if (num != 3) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + debugfs_priv->mac_mem.sel = sel; + debugfs_priv->mac_mem.start = start_addr; + debugfs_priv->mac_mem.len = len; + + rtw89_info(rtwdev, "select mem %d start %d len %d\n", + sel, start_addr, len); + + return count; +} + +static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { + [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, + [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, + [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, + [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR, + [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR, + [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR, + [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR, + [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR, + [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR, + [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, + [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, + [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, +}; + +static void rtw89_debug_dump_mac_mem(struct seq_file *m, + struct rtw89_dev *rtwdev, + u8 sel, u32 start_addr, u32 len) +{ + u32 base_addr, start_page, residue; + u32 i, j, p, pages; + u32 dump_len, remain; + u32 val; + + remain = len; + pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1; + start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; + residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; + base_addr = mac_mem_base_addr_table[sel]; + base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; + + for (p = 0; p < pages; p++) { + dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE); + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); + for (i = R_AX_INDIR_ACCESS_ENTRY + residue; + i < R_AX_INDIR_ACCESS_ENTRY + dump_len;) { + seq_printf(m, "%08xh:", i); + for (j = 0; + j < 4 && i < R_AX_INDIR_ACCESS_ENTRY + dump_len; + j++, i += 4) { + val = rtw89_read32(rtwdev, i); + seq_printf(m, " %08x", val); + remain -= 4; + } + seq_puts(m, "\n"); + } + base_addr += MAC_MEM_DUMP_PAGE_SIZE; + } +} + +static int +rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtw89_debug_dump_mac_mem(m, rtwdev, + debugfs_priv->mac_mem.sel, + debugfs_priv->mac_mem.start, + debugfs_priv->mac_mem.len); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static ssize_t +rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char buf[32]; + size_t buf_size; + int sel, set; + int num; + bool enable; + + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + num = sscanf(buf, "%d %d", &sel, &set); + if (num != 2) { + rtw89_info(rtwdev, "invalid format: \n"); + return -EINVAL; + } + + enable = set == 0 ? 
false : true; + switch (sel) { + case 0: + debugfs_priv->dbgpkg_en.ss_dbg = enable; + break; + case 1: + debugfs_priv->dbgpkg_en.dle_dbg = enable; + break; + case 2: + debugfs_priv->dbgpkg_en.dmac_dbg = enable; + break; + case 3: + debugfs_priv->dbgpkg_en.cmac_dbg = enable; + break; + case 4: + debugfs_priv->dbgpkg_en.dbg_port = enable; + break; + default: + rtw89_info(rtwdev, "invalid args: sel %d set %d\n", sel, set); + return -EINVAL; + } + + rtw89_info(rtwdev, "%s debug port dump %d\n", + enable ? "Enable" : "Disable", sel); + + return count; +} + +static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev, + struct seq_file *m) +{ + return 0; +} + +static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, + struct seq_file *m) +{ +#define DLE_DFI_DUMP(__type, __target, __sel) \ +({ \ + u32 __ctrl; \ + u32 __reg_ctrl = R_AX_##__type##_DBG_FUN_INTF_CTL; \ + u32 __reg_data = R_AX_##__type##_DBG_FUN_INTF_DATA; \ + u32 __data, __val32; \ + int __ret; \ + \ + __ctrl = FIELD_PREP(B_AX_##__type##_DFI_TRGSEL_MASK, \ + DLE_DFI_TYPE_##__target) | \ + FIELD_PREP(B_AX_##__type##_DFI_ADDR_MASK, __sel) | \ + B_AX_WDE_DFI_ACTIVE; \ + rtw89_write32(rtwdev, __reg_ctrl, __ctrl); \ + __ret = read_poll_timeout(rtw89_read32, __val32, \ + !(__val32 & B_AX_##__type##_DFI_ACTIVE), \ + 1000, 50000, false, \ + rtwdev, __reg_ctrl); \ + if (__ret) { \ + rtw89_err(rtwdev, "failed to dump DLE %s %s %d\n", \ + #__type, #__target, __sel); \ + return __ret; \ + } \ + \ + __data = rtw89_read32(rtwdev, __reg_data); \ + __data; \ +}) + +#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \ +({ \ + u32 __freepg, __pubpg; \ + u32 __freepg_head, __freepg_tail, __pubpg_num; \ + \ + __freepg = DLE_DFI_DUMP(__type, FREEPG, 0); \ + __pubpg = DLE_DFI_DUMP(__type, FREEPG, 1); \ + __freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \ + __freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \ + __pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \ + seq_printf(__m, "[%s] freepg head: %d\n", \ + #__type, __freepg_head); \ + seq_printf(__m, "[%s] freepg tail: %d\n", \ + #__type, __freepg_tail); \ + seq_printf(__m, "[%s] pubpg num : %d\n", \ + #__type, __pubpg_num); \ +}) + +#define case_QUOTA(__m, __type, __id) \ + case __type##_QTAID_##__id: \ + val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \ + rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \ + use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \ + seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \ + #__type, #__id, rsv_pgnum); \ + seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \ + #__type, #__id, use_pgnum); \ + break + u32 quota_id; + u32 val32; + u16 rsv_pgnum, use_pgnum; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL); + if (ret) { + seq_puts(m, "[DLE] : DMAC not enabled\n"); + return ret; + } + + DLE_DFI_FREE_PAGE_DUMP(m, WDE); + DLE_DFI_FREE_PAGE_DUMP(m, PLE); + for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) { + switch (quota_id) { + case_QUOTA(m, WDE, HOST_IF); + case_QUOTA(m, WDE, WLAN_CPU); + case_QUOTA(m, WDE, DATA_CPU); + case_QUOTA(m, WDE, PKTIN); + case_QUOTA(m, WDE, CPUIO); + } + } + for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) { + switch (quota_id) { + case_QUOTA(m, PLE, B0_TXPL); + case_QUOTA(m, PLE, B1_TXPL); + case_QUOTA(m, PLE, C2H); + case_QUOTA(m, PLE, H2C); + case_QUOTA(m, PLE, WLAN_CPU); + case_QUOTA(m, PLE, MPDU); + case_QUOTA(m, PLE, CMAC0_RX); + case_QUOTA(m, PLE, CMAC1_RX); + case_QUOTA(m, PLE, CMAC1_BBRPT); + case_QUOTA(m, PLE, WDRLS); + case_QUOTA(m, PLE, CPUIO); + } + } + + 
return 0; + +#undef case_QUOTA +#undef DLE_DFI_DUMP +#undef DLE_DFI_FREE_PAGE_DUMP +} + +static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev, + struct seq_file *m) +{ + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL); + if (ret) { + seq_puts(m, "[DMAC] : DMAC not enabled\n"); + return ret; + } + + seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR)); + seq_printf(m, "[0]R_AX_WDRLS_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); + seq_printf(m, "[1]R_AX_SEC_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ERR_IMR_ISR)); + seq_printf(m, "[2.1]R_AX_MPDU_TX_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); + seq_printf(m, "[2.2]R_AX_MPDU_RX_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); + seq_printf(m, "[3]R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); + seq_printf(m, "[4]R_AX_WDE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); + seq_printf(m, "[5.1]R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); + seq_printf(m, "[5.2]R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); + seq_printf(m, "[6]R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); + seq_printf(m, "[7]R_AX_PKTIN_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); + seq_printf(m, "[8.1]R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); + seq_printf(m, "[8.2]R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); + seq_printf(m, "[8.3]R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); + seq_printf(m, "[10]R_AX_CPUIO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR)); + seq_printf(m, "[11.1]R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); + seq_printf(m, "[11.2]R_AX_BBRPT_CHINFO_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR_ISR)); + seq_printf(m, "[11.3]R_AX_BBRPT_DFS_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR_ISR)); + seq_printf(m, "[11.4]R_AX_LA_ERRFLAG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_LA_ERRFLAG)); + + return 0; +} + +static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev, + struct seq_file *m) +{ + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL); + if (ret) { + seq_puts(m, "[CMAC] : CMAC 0 not enabled\n"); + return ret; + } + + seq_printf(m, "R_AX_CMAC_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR)); + seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR)); + seq_printf(m, "[1]R_AX_PTCL_ISR0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PTCL_ISR0)); + seq_printf(m, "[3]R_AX_DLE_CTRL=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DLE_CTRL)); + seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR)); + seq_printf(m, "[5]R_AX_TXPWR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPWR_ISR)); + seq_printf(m, "[6]R_AX_RMAC_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR)); + seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR)); + + ret = rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL); + if (ret) { + seq_puts(m, "[CMAC] : CMAC 1 not enabled\n"); + return ret; + } + + seq_printf(m, "R_AX_CMAC_ERR_ISR_C1=0x%08x\n", + rtw89_read32(rtwdev, 
R_AX_CMAC_ERR_ISR_C1)); + seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR_C1)); + seq_printf(m, "[1]R_AX_PTCL_ISR0_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PTCL_ISR0_C1)); + seq_printf(m, "[3]R_AX_DLE_CTRL_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DLE_CTRL_C1)); + seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR_C1=0x%02x\n", + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR_C1)); + seq_printf(m, "[5]R_AX_TXPWR_ISR_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPWR_ISR_C1)); + seq_printf(m, "[6]R_AX_RMAC_ERR_ISR_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR_C1)); + seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR_C1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR_C1)); + + return 0; +} + +static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = { + .sel_addr = R_AX_PTCL_DBG, + .sel_byte = 1, + .sel_msk = B_AX_PTCL_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x3F, + .rd_addr = R_AX_PTCL_DBG_INFO, + .rd_byte = 4, + .rd_msk = B_AX_PTCL_DBG_INFO_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c1 = { + .sel_addr = R_AX_PTCL_DBG_C1, + .sel_byte = 1, + .sel_msk = B_AX_PTCL_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x3F, + .rd_addr = R_AX_PTCL_DBG_INFO_C1, + .rd_byte = 4, + .rd_msk = B_AX_PTCL_DBG_INFO_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_sch_c0 = { + .sel_addr = R_AX_SCH_DBG_SEL, + .sel_byte = 1, + .sel_msk = B_AX_SCH_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x2F, + .rd_addr = R_AX_SCH_DBG, + .rd_byte = 4, + .rd_msk = B_AX_SCHEDULER_DBG_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_sch_c1 = { + .sel_addr = R_AX_SCH_DBG_SEL_C1, + .sel_byte = 1, + .sel_msk = B_AX_SCH_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x2F, + .rd_addr = R_AX_SCH_DBG_C1, + .rd_byte = 4, + .rd_msk = B_AX_SCHEDULER_DBG_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c0 = { + .sel_addr = R_AX_MACTX_DBG_SEL_CNT, + .sel_byte = 1, + .sel_msk = B_AX_DBGSEL_MACTX_MASK, + .srt = 0x00, + .end = 0x19, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c1 = { + .sel_addr = R_AX_MACTX_DBG_SEL_CNT_C1, + .sel_byte = 1, + .sel_msk = B_AX_DBGSEL_MACTX_MASK, + .srt = 0x00, + .end = 0x19, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c0 = { + .sel_addr = R_AX_RX_DEBUG_SELECT, + .sel_byte = 1, + .sel_msk = B_AX_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x58, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c1 = { + .sel_addr = R_AX_RX_DEBUG_SELECT_C1, + .sel_byte = 1, + .sel_msk = B_AX_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x58, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c0 = { + .sel_addr = R_AX_RX_STATE_MONITOR, + .sel_byte = 1, + .sel_msk = B_AX_STATE_SEL_MASK, + .srt = 0x00, + .end = 0x17, + .rd_addr = R_AX_RX_STATE_MONITOR, + .rd_byte = 4, + .rd_msk = B_AX_RX_STATE_MONITOR_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c1 = { + .sel_addr = R_AX_RX_STATE_MONITOR_C1, + .sel_byte = 1, + .sel_msk = B_AX_STATE_SEL_MASK, + .srt = 0x00, + .end = 0x17, + .rd_addr = R_AX_RX_STATE_MONITOR_C1, + .rd_byte = 4, + .rd_msk = B_AX_RX_STATE_MONITOR_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c0 = { + 
.sel_addr = R_AX_RMAC_PLCP_MON, + .sel_byte = 4, + .sel_msk = B_AX_PCLP_MON_SEL_MASK, + .srt = 0x0, + .end = 0xF, + .rd_addr = R_AX_RMAC_PLCP_MON, + .rd_byte = 4, + .rd_msk = B_AX_RMAC_PLCP_MON_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c1 = { + .sel_addr = R_AX_RMAC_PLCP_MON_C1, + .sel_byte = 4, + .sel_msk = B_AX_PCLP_MON_SEL_MASK, + .srt = 0x0, + .end = 0xF, + .rd_addr = R_AX_RMAC_PLCP_MON_C1, + .rd_byte = 4, + .rd_msk = B_AX_RMAC_PLCP_MON_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c0 = { + .sel_addr = R_AX_DBGSEL_TRXPTCL, + .sel_byte = 1, + .sel_msk = B_AX_DBGSEL_TRXPTCL_MASK, + .srt = 0x08, + .end = 0x10, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c1 = { + .sel_addr = R_AX_DBGSEL_TRXPTCL_C1, + .sel_byte = 1, + .sel_msk = B_AX_DBGSEL_TRXPTCL_MASK, + .srt = 0x08, + .end = 0x10, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c0 = { + .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG, + .sel_byte = 1, + .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x07, + .rd_addr = R_AX_WMAC_TX_INFO0_DEBUG, + .rd_byte = 4, + .rd_msk = B_AX_TX_CTRL_INFO_P0_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c0 = { + .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG, + .sel_byte = 1, + .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x07, + .rd_addr = R_AX_WMAC_TX_INFO1_DEBUG, + .rd_byte = 4, + .rd_msk = B_AX_TX_CTRL_INFO_P1_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c1 = { + .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1, + .sel_byte = 1, + .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x07, + .rd_addr = R_AX_WMAC_TX_INFO0_DEBUG_C1, + .rd_byte = 4, + .rd_msk = B_AX_TX_CTRL_INFO_P0_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c1 = { + .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1, + .sel_byte = 1, + .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK, + .srt = 0x00, + .end = 0x07, + .rd_addr = R_AX_WMAC_TX_INFO1_DEBUG_C1, + .rd_byte = 4, + .rd_msk = B_AX_TX_CTRL_INFO_P1_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c0 = { + .sel_addr = R_AX_WMAC_TX_TF_INFO_0, + .sel_byte = 1, + .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK, + .srt = 0x00, + .end = 0x04, + .rd_addr = R_AX_WMAC_TX_TF_INFO_1, + .rd_byte = 4, + .rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c0 = { + .sel_addr = R_AX_WMAC_TX_TF_INFO_0, + .sel_byte = 1, + .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK, + .srt = 0x00, + .end = 0x04, + .rd_addr = R_AX_WMAC_TX_TF_INFO_2, + .rd_byte = 4, + .rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c1 = { + .sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1, + .sel_byte = 1, + .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK, + .srt = 0x00, + .end = 0x04, + .rd_addr = R_AX_WMAC_TX_TF_INFO_1_C1, + .rd_byte = 4, + .rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c1 = { + .sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1, + .sel_byte = 1, + .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK, + .srt = 0x00, + .end = 0x04, + .rd_addr = R_AX_WMAC_TX_TF_INFO_2_C1, + .rd_byte = 4, + .rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_freepg = { + .sel_addr = 
R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80000000, + .end = 0x80000001, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_quota = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80010000, + .end = 0x80010004, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pagellt = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80020000, + .end = 0x80020FFF, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pktinfo = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80030000, + .end = 0x80030FFF, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_prepkt = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80040000, + .end = 0x80040FFF, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_nxtpkt = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80050000, + .end = 0x80050FFF, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qlnktbl = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80060000, + .end = 0x80060453, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qempty = { + .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_WDE_DFI_DATA_MASK, + .srt = 0x80070000, + .end = 0x80070011, + .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_WDE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_freepg = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80000000, + .end = 0x80000001, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_quota = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80010000, + .end = 0x8001000A, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pagellt = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80020000, + .end = 0x80020DBF, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pktinfo = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80030000, + .end = 0x80030DBF, + 
.rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_prepkt = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80040000, + .end = 0x80040DBF, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_nxtpkt = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80050000, + .end = 0x80050DBF, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qlnktbl = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80060000, + .end = 0x80060041, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qempty = { + .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_PLE_DFI_DATA_MASK, + .srt = 0x80070000, + .end = 0x80070001, + .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_PLE_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pktinfo = { + .sel_addr = R_AX_DBG_FUN_INTF_CTL, + .sel_byte = 4, + .sel_msk = B_AX_DFI_DATA_MASK, + .srt = 0x80000000, + .end = 0x8000017f, + .rd_addr = R_AX_DBG_FUN_INTF_DATA, + .rd_byte = 4, + .rd_msk = B_AX_DFI_DATA_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_txdma = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x03, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_rxdma = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x04, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cvt = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x01, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cxpl = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x05, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_io = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x05, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc = { + .sel_addr = R_AX_PCIE_DBG_CTRL, + .sel_byte = 2, + .sel_msk = B_AX_DBG_SEL_MASK, + .srt = 0x00, + .end = 0x06, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = { + .sel_addr = R_AX_DBG_CTRL, + .sel_byte = 1, + .sel_msk = B_AX_DBG_SEL0, + .srt = 0x34, + .end = 0x3C, + .rd_addr = R_AX_DBG_PORT_SEL, + .rd_byte = 4, + .rd_msk = B_AX_DEBUG_ST_MASK +}; + +static const struct rtw89_mac_dbg_port_info * 
+rtw89_debug_mac_dbg_port_sel(struct seq_file *m, + struct rtw89_dev *rtwdev, u32 sel) +{ + const struct rtw89_mac_dbg_port_info *info; + u32 val32; + u16 val16; + u8 val8; + + switch (sel) { + case RTW89_DBG_PORT_SEL_PTCL_C0: + info = &dbg_port_ptcl_c0; + val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG); + val16 |= B_AX_PTCL_DBG_EN; + rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16); + seq_puts(m, "Enable PTCL C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_PTCL_C1: + info = &dbg_port_ptcl_c1; + val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1); + val16 |= B_AX_PTCL_DBG_EN; + rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16); + seq_puts(m, "Enable PTCL C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_SCH_C0: + info = &dbg_port_sch_c0; + val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL); + val32 |= B_AX_SCH_DBG_EN; + rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32); + seq_puts(m, "Enable SCH C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_SCH_C1: + info = &dbg_port_sch_c1; + val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1); + val32 |= B_AX_SCH_DBG_EN; + rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32); + seq_puts(m, "Enable SCH C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_TMAC_C0: + info = &dbg_port_tmac_c0; + val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL); + val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + seq_puts(m, "Enable TMAC C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_TMAC_C1: + info = &dbg_port_tmac_c1; + val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1); + val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32); + + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + seq_puts(m, "Enable TMAC C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMAC_C0: + info = &dbg_port_rmac_c0; + val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL); + val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + + val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL); + val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8); + seq_puts(m, "Enable RMAC C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMAC_C1: + info = &dbg_port_rmac_c1; + val32 = 
rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1); + val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32); + + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + + val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1); + val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL, + B_AX_DBGSEL_TRXPTCL_MASK); + rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8); + seq_puts(m, "Enable RMAC C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMACST_C0: + info = &dbg_port_rmacst_c0; + seq_puts(m, "Enable RMAC state C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMACST_C1: + info = &dbg_port_rmacst_c1; + seq_puts(m, "Enable RMAC state C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMAC_PLCP_C0: + info = &dbg_port_rmac_plcp_c0; + seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1: + info = &dbg_port_rmac_plcp_c1; + seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_TRXPTCL_C0: + info = &dbg_port_trxptcl_c0; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + seq_puts(m, "Enable TRXPTCL C0 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_TRXPTCL_C1: + info = &dbg_port_trxptcl_c1; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + + val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); + val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); + rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); + seq_puts(m, "Enable TRXPTCL C1 dbgport.\n"); + break; + case RTW89_DBG_PORT_SEL_TX_INFOL_C0: + info = &dbg_port_tx_infol_c0; + val32 = rtw89_read32(rtwdev, R_AX_TCR1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1, val32); + seq_puts(m, "Enable tx infol dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TX_INFOH_C0: + info = &dbg_port_tx_infoh_c0; + val32 = rtw89_read32(rtwdev, R_AX_TCR1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1, val32); + seq_puts(m, "Enable tx infoh dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TX_INFOL_C1: + info = &dbg_port_tx_infol_c1; + val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); + seq_puts(m, "Enable tx infol dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TX_INFOH_C1: + info = &dbg_port_tx_infoh_c1; + val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); + seq_puts(m, "Enable tx infoh dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0: + info = &dbg_port_txtf_infol_c0; + val32 = rtw89_read32(rtwdev, R_AX_TCR1); + val32 |= 
B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1, val32); + seq_puts(m, "Enable tx tf infol dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0: + info = &dbg_port_txtf_infoh_c0; + val32 = rtw89_read32(rtwdev, R_AX_TCR1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1, val32); + seq_puts(m, "Enable tx tf infoh dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1: + info = &dbg_port_txtf_infol_c1; + val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); + seq_puts(m, "Enable tx tf infol dump.\n"); + break; + case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1: + info = &dbg_port_txtf_infoh_c1; + val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); + val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; + rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); + seq_puts(m, "Enable tx tf infoh dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG: + info = &dbg_port_wde_bufmgn_freepg; + seq_puts(m, "Enable wde bufmgn freepg dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA: + info = &dbg_port_wde_bufmgn_quota; + seq_puts(m, "Enable wde bufmgn quota dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT: + info = &dbg_port_wde_bufmgn_pagellt; + seq_puts(m, "Enable wde bufmgn pagellt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO: + info = &dbg_port_wde_bufmgn_pktinfo; + seq_puts(m, "Enable wde bufmgn pktinfo dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT: + info = &dbg_port_wde_quemgn_prepkt; + seq_puts(m, "Enable wde quemgn prepkt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT: + info = &dbg_port_wde_quemgn_nxtpkt; + seq_puts(m, "Enable wde quemgn nxtpkt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL: + info = &dbg_port_wde_quemgn_qlnktbl; + seq_puts(m, "Enable wde quemgn qlnktbl dump.\n"); + break; + case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY: + info = &dbg_port_wde_quemgn_qempty; + seq_puts(m, "Enable wde quemgn qempty dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG: + info = &dbg_port_ple_bufmgn_freepg; + seq_puts(m, "Enable ple bufmgn freepg dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA: + info = &dbg_port_ple_bufmgn_quota; + seq_puts(m, "Enable ple bufmgn quota dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT: + info = &dbg_port_ple_bufmgn_pagellt; + seq_puts(m, "Enable ple bufmgn pagellt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO: + info = &dbg_port_ple_bufmgn_pktinfo; + seq_puts(m, "Enable ple bufmgn pktinfo dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT: + info = &dbg_port_ple_quemgn_prepkt; + seq_puts(m, "Enable ple quemgn prepkt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT: + info = &dbg_port_ple_quemgn_nxtpkt; + seq_puts(m, "Enable ple quemgn nxtpkt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL: + info = &dbg_port_ple_quemgn_qlnktbl; + seq_puts(m, "Enable ple quemgn qlnktbl dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY: + info = &dbg_port_ple_quemgn_qempty; + seq_puts(m, "Enable ple quemgn qempty dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PKTINFO: + info = &dbg_port_pktinfo; + seq_puts(m, "Enable pktinfo dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_TXDMA: + info = &dbg_port_pcie_txdma; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, 
PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie txdma dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_RXDMA: + info = &dbg_port_pcie_rxdma; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie rxdma dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_CVT: + info = &dbg_port_pcie_cvt; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie cvt dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_CXPL: + info = &dbg_port_pcie_cxpl; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie cxpl dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_IO: + info = &dbg_port_pcie_io; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie io dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_MISC: + info = &dbg_port_pcie_misc; + val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL); + val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0); + val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1); + rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); + seq_puts(m, "Enable pcie misc dump.\n"); + break; + case RTW89_DBG_PORT_SEL_PCIE_MISC2: + info = &dbg_port_pcie_misc2; + val16 = rtw89_read16(rtwdev, R_AX_PCIE_DBG_CTRL); + val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL, + B_AX_DBG_SEL_MASK); + rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16); + seq_puts(m, "Enable pcie misc2 dump.\n"); + break; + default: + seq_puts(m, "Dbg port select err\n"); + return NULL; + } + + return info; +} + +static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel) +{ + if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE && + sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA && + sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2) + return false; + if (rtwdev->chip->chip_id == RTL8852B && + sel >= RTW89_DBG_PORT_SEL_PTCL_C1 && + sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1) + return false; + if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL) && + sel >= RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG && + sel <= RTW89_DBG_PORT_SEL_PKTINFO) + return false; + if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL) && + sel >= RTW89_DBG_PORT_SEL_PTCL_C0 && + sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C0) + return false; + if (rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL) && + sel >= RTW89_DBG_PORT_SEL_PTCL_C1 && + sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1) + return false; + + return true; +} + +static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev, + struct seq_file *m, u32 sel) +{ + const struct rtw89_mac_dbg_port_info *info; + u8 val8; + u16 val16; + u32 val32; + u32 i; + + info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel); + if (!info) { + rtw89_err(rtwdev, "failed to select debug port %d\n", sel); + return -EINVAL; + } + +#define case_DBG_SEL(__sel) \ + case RTW89_DBG_PORT_SEL_##__sel: \ + 
seq_puts(m, "Dump debug port " #__sel ":\n"); \ + break + + switch (sel) { + case_DBG_SEL(PTCL_C0); + case_DBG_SEL(PTCL_C1); + case_DBG_SEL(SCH_C0); + case_DBG_SEL(SCH_C1); + case_DBG_SEL(TMAC_C0); + case_DBG_SEL(TMAC_C1); + case_DBG_SEL(RMAC_C0); + case_DBG_SEL(RMAC_C1); + case_DBG_SEL(RMACST_C0); + case_DBG_SEL(RMACST_C1); + case_DBG_SEL(TRXPTCL_C0); + case_DBG_SEL(TRXPTCL_C1); + case_DBG_SEL(TX_INFOL_C0); + case_DBG_SEL(TX_INFOH_C0); + case_DBG_SEL(TX_INFOL_C1); + case_DBG_SEL(TX_INFOH_C1); + case_DBG_SEL(TXTF_INFOL_C0); + case_DBG_SEL(TXTF_INFOH_C0); + case_DBG_SEL(TXTF_INFOL_C1); + case_DBG_SEL(TXTF_INFOH_C1); + case_DBG_SEL(WDE_BUFMGN_FREEPG); + case_DBG_SEL(WDE_BUFMGN_QUOTA); + case_DBG_SEL(WDE_BUFMGN_PAGELLT); + case_DBG_SEL(WDE_BUFMGN_PKTINFO); + case_DBG_SEL(WDE_QUEMGN_PREPKT); + case_DBG_SEL(WDE_QUEMGN_NXTPKT); + case_DBG_SEL(WDE_QUEMGN_QLNKTBL); + case_DBG_SEL(WDE_QUEMGN_QEMPTY); + case_DBG_SEL(PLE_BUFMGN_FREEPG); + case_DBG_SEL(PLE_BUFMGN_QUOTA); + case_DBG_SEL(PLE_BUFMGN_PAGELLT); + case_DBG_SEL(PLE_BUFMGN_PKTINFO); + case_DBG_SEL(PLE_QUEMGN_PREPKT); + case_DBG_SEL(PLE_QUEMGN_NXTPKT); + case_DBG_SEL(PLE_QUEMGN_QLNKTBL); + case_DBG_SEL(PLE_QUEMGN_QEMPTY); + case_DBG_SEL(PKTINFO); + case_DBG_SEL(PCIE_TXDMA); + case_DBG_SEL(PCIE_RXDMA); + case_DBG_SEL(PCIE_CVT); + case_DBG_SEL(PCIE_CXPL); + case_DBG_SEL(PCIE_IO); + case_DBG_SEL(PCIE_MISC); + case_DBG_SEL(PCIE_MISC2); + } + +#undef case_DBG_SEL + + seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr); + seq_printf(m, "Read addr = 0x%X\n", info->rd_addr); + + for (i = info->srt; i <= info->end; i++) { + switch (info->sel_byte) { + case 1: + default: + rtw89_write8_mask(rtwdev, info->sel_addr, + info->sel_msk, i); + seq_printf(m, "0x%02X: ", i); + break; + case 2: + rtw89_write16_mask(rtwdev, info->sel_addr, + info->sel_msk, i); + seq_printf(m, "0x%04X: ", i); + break; + case 4: + rtw89_write32_mask(rtwdev, info->sel_addr, + info->sel_msk, i); + seq_printf(m, "0x%04X: ", i); + break; + } + + udelay(10); + + switch (info->rd_byte) { + case 1: + default: + val8 = rtw89_read8_mask(rtwdev, + info->rd_addr, info->rd_msk); + seq_printf(m, "0x%02X\n", val8); + break; + case 2: + val16 = rtw89_read16_mask(rtwdev, + info->rd_addr, info->rd_msk); + seq_printf(m, "0x%04X\n", val16); + break; + case 4: + val32 = rtw89_read32_mask(rtwdev, + info->rd_addr, info->rd_msk); + seq_printf(m, "0x%08X\n", val32); + break; + } + } + + return 0; +} + +static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev, + struct seq_file *m) +{ + u32 sel; + int ret = 0; + + for (sel = RTW89_DBG_PORT_SEL_PTCL_C0; + sel < RTW89_DBG_PORT_SEL_LAST; sel++) { + if (!is_dbg_port_valid(rtwdev, sel)) + continue; + ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel); + if (ret) { + rtw89_err(rtwdev, + "failed to dump debug port %d\n", sel); + break; + } + } + + return ret; +} + +static int +rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + + if (debugfs_priv->dbgpkg_en.ss_dbg) + rtw89_debug_mac_dump_ss_dbg(rtwdev, m); + if (debugfs_priv->dbgpkg_en.dle_dbg) + rtw89_debug_mac_dump_dle_dbg(rtwdev, m); + if (debugfs_priv->dbgpkg_en.dmac_dbg) + rtw89_debug_mac_dump_dmac_dbg(rtwdev, m); + if (debugfs_priv->dbgpkg_en.cmac_dbg) + rtw89_debug_mac_dump_cmac_dbg(rtwdev, m); + if (debugfs_priv->dbgpkg_en.dbg_port) + rtw89_debug_mac_dump_dbg_port(rtwdev, m); + + return 0; +}; + +static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev, + const char __user 
*user_buf, size_t count) +{ + char *buf; + u8 *bin; + int num; + int err = 0; + + buf = memdup_user(user_buf, count); + if (IS_ERR(buf)) + return buf; + + num = count / 2; + bin = kmalloc(num, GFP_KERNEL); + if (!bin) { + err = -EFAULT; + goto out; + } + + if (hex2bin(bin, buf, num)) { + rtw89_info(rtwdev, "valid format: H1H2H3...\n"); + kfree(bin); + err = -EINVAL; + } + +out: + kfree(buf); + + return err ? ERR_PTR(err) : bin; +} + +static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + u8 *h2c; + u16 h2c_len = count / 2; + + h2c = rtw89_hex2bin_user(rtwdev, user_buf, count); + if (IS_ERR(h2c)) + return -EFAULT; + + rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len); + + kfree(h2c); + + return count; +} + +static int +rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw89_early_h2c *early_h2c; + int seq = 0; + + mutex_lock(&rtwdev->mutex); + list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) + seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static ssize_t +rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw89_early_h2c *early_h2c; + u8 *h2c; + u16 h2c_len = count / 2; + + h2c = rtw89_hex2bin_user(rtwdev, user_buf, count); + if (IS_ERR(h2c)) + return -EFAULT; + + if (h2c_len >= 2 && h2c[0] == 0x00 && h2c[1] == 0x00) { + kfree(h2c); + rtw89_fw_free_all_early_h2c(rtwdev); + goto out; + } + + early_h2c = kmalloc(sizeof(*early_h2c), GFP_KERNEL); + if (!early_h2c) { + kfree(h2c); + return -EFAULT; + } + + early_h2c->h2c = h2c; + early_h2c->h2c_len = h2c_len; + + mutex_lock(&rtwdev->mutex); + list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list); + mutex_unlock(&rtwdev->mutex); + +out: + return count; +} + +static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + + rtw89_btc_dump_info(rtwdev, m); + + return 0; +} + +static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw89_btc *btc = &rtwdev->btc; + bool btc_manual; + + if (kstrtobool_from_user(user_buf, count, &btc_manual)) + goto out; + + btc->ctrl.manual = btc_manual; +out: + return count; +} + +static ssize_t rtw89_debug_fw_log_btc_manual_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw89_fw_info *fw_info = &rtwdev->fw; + bool fw_log_manual; + + if (kstrtobool_from_user(user_buf, count, &fw_log_manual)) + goto out; + + mutex_lock(&rtwdev->mutex); + fw_info->fw_log_enable = fw_log_manual; + rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual); + mutex_unlock(&rtwdev->mutex); +out: + return count; +} + +static void 
rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ static const char * const he_gi_str[] = {
+ [NL80211_RATE_INFO_HE_GI_0_8] = "0.8",
+ [NL80211_RATE_INFO_HE_GI_1_6] = "1.6",
+ [NL80211_RATE_INFO_HE_GI_3_2] = "3.2",
+ };
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rate_info *rate = &rtwsta->ra_report.txrate;
+ struct ieee80211_rx_status *status = &rtwsta->rx_status;
+ struct seq_file *m = (struct seq_file *)data;
+ u8 rssi;
+
+ seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
+
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ seq_printf(m, "HT MCS-%d%s", rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
+ rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[rate->he_gi] : "N/A");
+ else
+ seq_printf(m, "Legacy %d", rate->legacy);
+ seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
+ seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
+ sta->max_rc_amsdu_len);
+
+ seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
+
+ switch (status->encoding) {
+ case RX_ENC_LEGACY:
+ seq_printf(m, "Legacy %d", status->rate_idx +
+ (status->band == NL80211_BAND_5GHZ ? 4 : 0));
+ break;
+ case RX_ENC_HT:
+ seq_printf(m, "HT MCS-%d%s", status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ break;
+ case RX_ENC_VHT:
+ seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ break;
+ case RX_ENC_HE:
+ seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
+ status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[status->he_gi] : "N/A");
+ break;
+ }
+ seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
+
+ rssi = ewma_rssi_read(&rtwsta->avg_rssi);
+ seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n",
+ RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
+}
+
+static void
+rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
+ enum rtw89_hw_rate first_rate, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ seq_printf(m, "%s%u", i == 0 ?
"" : ", ", + pkt_stat->rx_rate_cnt[first_rate + i]); +} + +static const struct rtw89_rx_rate_cnt_info { + enum rtw89_hw_rate first_rate; + int len; + const char *rate_mode; +} rtw89_rx_rate_cnt_infos[] = { + {RTW89_HW_RATE_CCK1, 4, "Legacy:"}, + {RTW89_HW_RATE_OFDM6, 8, "OFDM:"}, + {RTW89_HW_RATE_MCS0, 8, "HT 0:"}, + {RTW89_HW_RATE_MCS8, 8, "HT 1:"}, + {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"}, + {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"}, + {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"}, + {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2ss:"}, +}; + +static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw89_traffic_stats *stats = &rtwdev->stats; + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; + const struct rtw89_rx_rate_cnt_info *info; + int i; + + seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n", + stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv, + stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv); + seq_printf(m, "Beacon: %u\n", pkt_stat->beacon_nr); + seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len, + stats->rx_avg_len); + + seq_puts(m, "RX count:\n"); + for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) { + info = &rtw89_rx_rate_cnt_infos[i]; + seq_printf(m, "%10s [", info->rate_mode); + rtw89_debug_append_rx_rate(m, pkt_stat, + info->first_rate, info->len); + seq_puts(m, "]\n"); + } + + ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m); + + return 0; +} + +static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = { + .cb_read = rtw89_debug_priv_read_reg_get, + .cb_write = rtw89_debug_priv_read_reg_select, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = { + .cb_write = rtw89_debug_priv_write_reg_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = { + .cb_read = rtw89_debug_priv_read_rf_get, + .cb_write = rtw89_debug_priv_read_rf_select, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = { + .cb_write = rtw89_debug_priv_write_rf_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = { + .cb_read = rtw89_debug_priv_rf_reg_dump_get, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = { + .cb_read = rtw89_debug_priv_txpwr_table_get, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = { + .cb_read = rtw89_debug_priv_mac_reg_dump_get, + .cb_write = rtw89_debug_priv_mac_reg_dump_select, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = { + .cb_read = rtw89_debug_priv_mac_mem_dump_get, + .cb_write = rtw89_debug_priv_mac_mem_dump_select, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = { + .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get, + .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = { + .cb_write = rtw89_debug_priv_send_h2c_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = { + .cb_read = rtw89_debug_priv_early_h2c_get, + .cb_write = rtw89_debug_priv_early_h2c_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = { + .cb_read = rtw89_debug_priv_btc_info_get, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = { + .cb_write = rtw89_debug_priv_btc_manual_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = { 
+ .cb_write = rtw89_debug_fw_log_btc_manual_set, +}; + +static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = { + .cb_read = rtw89_debug_priv_phy_info_get, +}; + +#define rtw89_debugfs_add(name, mode, fopname, parent) \ + do { \ + rtw89_debug_priv_ ##name.rtwdev = rtwdev; \ + if (!debugfs_create_file(#name, mode, \ + parent, &rtw89_debug_priv_ ##name, \ + &file_ops_ ##fopname)) \ + pr_debug("Unable to initialize debugfs:%s\n", #name); \ + } while (0) + +#define rtw89_debugfs_add_w(name) \ + rtw89_debugfs_add(name, S_IFREG | 0222, single_w, debugfs_topdir) +#define rtw89_debugfs_add_rw(name) \ + rtw89_debugfs_add(name, S_IFREG | 0666, common_rw, debugfs_topdir) +#define rtw89_debugfs_add_r(name) \ + rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir) + +void rtw89_debugfs_init(struct rtw89_dev *rtwdev) +{ + struct dentry *debugfs_topdir; + + debugfs_topdir = debugfs_create_dir("rtw89", + rtwdev->hw->wiphy->debugfsdir); + + rtw89_debugfs_add_rw(read_reg); + rtw89_debugfs_add_w(write_reg); + rtw89_debugfs_add_rw(read_rf); + rtw89_debugfs_add_w(write_rf); + rtw89_debugfs_add_r(rf_reg_dump); + rtw89_debugfs_add_r(txpwr_table); + rtw89_debugfs_add_rw(mac_reg_dump); + rtw89_debugfs_add_rw(mac_mem_dump); + rtw89_debugfs_add_rw(mac_dbg_port_dump); + rtw89_debugfs_add_w(send_h2c); + rtw89_debugfs_add_rw(early_h2c); + rtw89_debugfs_add_r(btc_info); + rtw89_debugfs_add_w(btc_manual); + rtw89_debugfs_add_w(fw_log_manual); + rtw89_debugfs_add_r(phy_info); +} +#endif + +#ifdef CONFIG_RTW89_DEBUGMSG +void __rtw89_debug(struct rtw89_dev *rtwdev, + enum rtw89_debug_mask mask, + const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + + va_list args; + + va_start(args, fmt); + vaf.va = &args; + + if (rtw89_debug_mask & mask) + dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(__rtw89_debug); +#endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h new file mode 100644 index 00000000000000..f14b726c1a9fb2 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/debug.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_DEBUG_H__ +#define __RTW89_DEBUG_H__ + +#include "core.h" + +enum rtw89_debug_mask { + RTW89_DBG_TXRX = BIT(0), + RTW89_DBG_RFK = BIT(1), + RTW89_DBG_RFK_TRACK = BIT(2), + RTW89_DBG_CFO = BIT(3), + RTW89_DBG_TSSI = BIT(4), + RTW89_DBG_TXPWR = BIT(5), + RTW89_DBG_HCI = BIT(6), + RTW89_DBG_RA = BIT(7), + RTW89_DBG_REGD = BIT(8), + RTW89_DBG_PHY_TRACK = BIT(9), + RTW89_DBG_DIG = BIT(10), + RTW89_DBG_SER = BIT(11), + RTW89_DBG_FW = BIT(12), + RTW89_DBG_BTC = BIT(13), + RTW89_DBG_BF = BIT(14), +}; + +enum rtw89_debug_mac_reg_sel { + RTW89_DBG_SEL_MAC_00, + RTW89_DBG_SEL_MAC_40, + RTW89_DBG_SEL_MAC_80, + RTW89_DBG_SEL_MAC_C0, + RTW89_DBG_SEL_MAC_E0, + RTW89_DBG_SEL_BB, + RTW89_DBG_SEL_IQK, + RTW89_DBG_SEL_RFC, +}; + +#ifdef CONFIG_RTW89_DEBUGFS +void rtw89_debugfs_init(struct rtw89_dev *rtwdev); +#else +static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {} +#endif + +#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a) +#define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a) +#define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a) + +#ifdef CONFIG_RTW89_DEBUGMSG +extern unsigned int rtw89_debug_mask; +#define rtw89_debug(rtwdev, a...) 
__rtw89_debug(rtwdev, ##a) + +__printf(3, 4) +void __rtw89_debug(struct rtw89_dev *rtwdev, + enum rtw89_debug_mask mask, + const char *fmt, ...); +static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev, + enum rtw89_debug_mask mask, + const char *prefix_str, + const void *buf, size_t len) +{ + if (!(rtw89_debug_mask & mask)) + return; + + print_hex_dump_bytes(prefix_str, DUMP_PREFIX_OFFSET, buf, len); +} +#else +static inline void rtw89_debug(struct rtw89_dev *rtwdev, + enum rtw89_debug_mask mask, + const char *fmt, ...) {} +static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev, + enum rtw89_debug_mask mask, + const char *prefix_str, + const void *buf, size_t len) {} +#endif + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c new file mode 100644 index 00000000000000..c0b80f3da56c2c --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/efuse.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "debug.h" +#include "efuse.h" +#include "reg.h" + +enum rtw89_efuse_bank { + RTW89_EFUSE_BANK_WIFI, + RTW89_EFUSE_BANK_BT, +}; + +static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev, + enum rtw89_efuse_bank bank) +{ + u8 val; + + val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1, + B_AX_EF_CELL_SEL_MASK); + if (bank == val) + return 0; + + rtw89_write32_mask(rtwdev, R_AX_EFUSE_CTRL_1, B_AX_EF_CELL_SEL_MASK, + bank); + + val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1, + B_AX_EF_CELL_SEL_MASK); + if (bank == val) + return 0; + + return -EBUSY; +} + +static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map, + u32 dump_addr, u32 dump_size) +{ + u32 efuse_ctl; + u32 addr; + int ret; + + rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI); + + for (addr = dump_addr; addr < dump_addr + dump_size; addr++) { + efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK); + rtw89_write32(rtwdev, R_AX_EFUSE_CTRL, efuse_ctl & ~B_AX_EF_RDY); + + ret = read_poll_timeout_atomic(rtw89_read32, efuse_ctl, + efuse_ctl & B_AX_EF_RDY, 1, 1000000, + true, rtwdev, R_AX_EFUSE_CTRL); + if (ret) + return -EBUSY; + + *map++ = (u8)(efuse_ctl & 0xff); + } + + return 0; +} + +#define invalid_efuse_header(hdr1, hdr2) \ + ((hdr1) == 0xff || (hdr2) == 0xff) +#define invalid_efuse_content(word_en, i) \ + (((word_en) & BIT(i)) != 0x0) +#define get_efuse_blk_idx(hdr1, hdr2) \ + ((((hdr2) & 0xf0) >> 4) | (((hdr1) & 0x0f) << 4)) +#define block_idx_to_logical_idx(blk_idx, i) \ + (((blk_idx) << 3) + ((i) << 1)) +static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map, + u8 *log_map) +{ + u32 physical_size = rtwdev->chip->physical_efuse_size; + u32 logical_size = rtwdev->chip->logical_efuse_size; + u8 sec_ctrl_size = rtwdev->chip->sec_ctrl_efuse_size; + u32 phy_idx = sec_ctrl_size; + u32 log_idx; + u8 hdr1, hdr2; + u8 blk_idx; + u8 word_en; + int i; + + while (phy_idx < physical_size - sec_ctrl_size) { + hdr1 = phy_map[phy_idx]; + hdr2 = phy_map[phy_idx + 1]; + if (invalid_efuse_header(hdr1, hdr2)) + break; + + blk_idx = get_efuse_blk_idx(hdr1, hdr2); + word_en = hdr2 & 0xf; + phy_idx += 2; + + for (i = 0; i < 4; i++) { + if (invalid_efuse_content(word_en, i)) + continue; + + log_idx = block_idx_to_logical_idx(blk_idx, i); + if (phy_idx + 1 > physical_size - sec_ctrl_size - 1 || + log_idx + 1 > logical_size) + return -EINVAL; + + log_map[log_idx] = phy_map[phy_idx]; + log_map[log_idx + 1] = phy_map[phy_idx + 1]; + phy_idx += 2; + } + } + return 
0; +} + +int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev) +{ + u32 phy_size = rtwdev->chip->physical_efuse_size; + u32 log_size = rtwdev->chip->logical_efuse_size; + u8 *phy_map = NULL; + u8 *log_map = NULL; + int ret; + + if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS) + rtwdev->efuse.valid = true; + else + rtw89_warn(rtwdev, "failed to check efuse autoload\n"); + + phy_map = kmalloc(phy_size, GFP_KERNEL); + log_map = kmalloc(log_size, GFP_KERNEL); + + if (!phy_map || !log_map) { + ret = -ENOMEM; + goto out_free; + } + + ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size); + if (ret) { + rtw89_warn(rtwdev, "failed to dump efuse physical map\n"); + goto out_free; + } + + memset(log_map, 0xff, log_size); + ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map); + if (ret) { + rtw89_warn(rtwdev, "failed to dump efuse logical map\n"); + goto out_free; + } + + rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size); + + ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map); + if (ret) { + rtw89_warn(rtwdev, "failed to read efuse map\n"); + goto out_free; + } + +out_free: + kfree(log_map); + kfree(phy_map); + + return ret; +} + +int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev) +{ + u32 phycap_addr = rtwdev->chip->phycap_addr; + u32 phycap_size = rtwdev->chip->phycap_size; + u8 *phycap_map = NULL; + int ret = 0; + + if (!phycap_size) + return 0; + + phycap_map = kmalloc(phycap_size, GFP_KERNEL); + if (!phycap_map) + return -ENOMEM; + + ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map, + phycap_addr, phycap_size); + if (ret) { + rtw89_warn(rtwdev, "failed to dump phycap map\n"); + goto out_free; + } + + ret = rtwdev->chip->ops->read_phycap(rtwdev, phycap_map); + if (ret) { + rtw89_warn(rtwdev, "failed to read phycap map\n"); + goto out_free; + } + +out_free: + kfree(phycap_map); + + return ret; +} diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h new file mode 100644 index 00000000000000..622ff95e74763a --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/efuse.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_EFUSE_H__ +#define __RTW89_EFUSE_H__ + +#include "core.h" + +int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev); +int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c new file mode 100644 index 00000000000000..212aaf577d3c5e --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -0,0 +1,1641 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "cam.h" +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" + +static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header) +{ + struct sk_buff *skb; + u32 header_len = 0; + + if (header) + header_len = H2C_HEADER_LEN; + + skb = dev_alloc_skb(len + header_len + 24); + if (!skb) + return NULL; + skb_reserve(skb, header_len + 24); + memset(skb->data, 0, len); + + return skb; +} + +struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len) +{ + return rtw89_fw_h2c_alloc_skb(len, true); +} + +struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len) +{ + return rtw89_fw_h2c_alloc_skb(len, false); +} + +static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) +{ + u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); + + 
return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); +} + +#define FWDL_WAIT_CNT 400000 +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) +{ + u8 val; + int ret; + + ret = read_poll_timeout_atomic(_fw_get_rdy, val, + val == RTW89_FWDL_WCPU_FW_INIT_RDY, + 1, FWDL_WAIT_CNT, false, rtwdev); + if (ret) { + switch (val) { + case RTW89_FWDL_CHECKSUM_FAIL: + rtw89_err(rtwdev, "fw checksum fail\n"); + return -EINVAL; + + case RTW89_FWDL_SECURITY_FAIL: + rtw89_err(rtwdev, "fw security fail\n"); + return -EINVAL; + + case RTW89_FWDL_CV_NOT_MATCH: + rtw89_err(rtwdev, "fw cv not match\n"); + return -EINVAL; + + default: + return -EBUSY; + } + } + + set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); + + return 0; +} + +static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, + struct rtw89_fw_bin_info *info) +{ + struct rtw89_fw_hdr_section_info *section_info; + const u8 *fw_end = fw + len; + const u8 *bin; + u32 i; + + if (!info) + return -EINVAL; + + info->section_num = GET_FW_HDR_SEC_NUM(fw); + info->hdr_len = RTW89_FW_HDR_SIZE + + info->section_num * RTW89_FW_SECTION_HDR_SIZE; + SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN); + + bin = fw + info->hdr_len; + + /* jump to section header */ + fw += RTW89_FW_HDR_SIZE; + section_info = info->section_info; + for (i = 0; i < info->section_num; i++) { + section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw); + if (GET_FWSECTION_HDR_CHECKSUM(fw)) + section_info->len += FWDL_SECTION_CHKSUM_LEN; + section_info->redl = GET_FWSECTION_HDR_REDL(fw); + section_info->dladdr = + GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff; + section_info->addr = bin; + bin += section_info->len; + fw += RTW89_FW_SECTION_HDR_SIZE; + section_info++; + } + + if (fw_end != bin) { + rtw89_err(rtwdev, "[ERR]fw bin size\n"); + return -EINVAL; + } + + return 0; +} + +static +int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + struct rtw89_fw_suit *fw_suit) +{ + struct rtw89_fw_info *fw_info = &rtwdev->fw; + const u8 *mfw = fw_info->firmware->data; + u32 mfw_len = fw_info->firmware->size; + const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; + const struct rtw89_mfw_info *mfw_info; + int i; + + if (mfw_hdr->sig != RTW89_MFW_SIG) { + rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); + /* legacy firmware support normal type only */ + if (type != RTW89_FW_NORMAL) + return -EINVAL; + fw_suit->data = mfw; + fw_suit->size = mfw_len; + return 0; + } + + for (i = 0; i < mfw_hdr->fw_nr; i++) { + mfw_info = &mfw_hdr->info[i]; + if (mfw_info->cv != rtwdev->hal.cv || + mfw_info->type != type || + mfw_info->mp) + continue; + + fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); + fw_suit->size = le32_to_cpu(mfw_info->size); + return 0; + } + + rtw89_err(rtwdev, "no suitable firmware found\n"); + return -ENOENT; +} + +static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, + enum rtw89_fw_type type, + struct rtw89_fw_suit *fw_suit) +{ + const u8 *hdr = fw_suit->data; + + fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr); + fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr); + fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr); + fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr); + fw_suit->build_year = GET_FW_HDR_YEAR(hdr); + fw_suit->build_mon = GET_FW_HDR_MONTH(hdr); + fw_suit->build_date = GET_FW_HDR_DATE(hdr); + fw_suit->build_hour = GET_FW_HDR_HOUR(hdr); + fw_suit->build_min = GET_FW_HDR_MIN(hdr); + fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr); + + rtw89_info(rtwdev, + "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", + 
fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, + fw_suit->sub_idex, fw_suit->cmd_ver, type); +} + +static +int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +{ + struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + int ret; + + ret = rtw89_mfw_recognize(rtwdev, type, fw_suit); + if (ret) + return ret; + + rtw89_fw_update_ver(rtwdev, type, fw_suit); + + return 0; +} + +static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); + + if (chip->chip_id == RTL8852A && + RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0)) + rtwdev->fw.old_ht_ra_format = true; +} + +int rtw89_fw_recognize(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL); + if (ret) + return ret; + + /* It still works if wowlan firmware isn't existing. */ + __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN); + + rtw89_fw_recognize_features(rtwdev); + + return 0; +} + +void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u8 type, u8 cat, u8 class, u8 func, + bool rack, bool dack, u32 len) +{ + struct fwcmd_hdr *hdr; + + hdr = (struct fwcmd_hdr *)skb_push(skb, 8); + + if (!(rtwdev->fw.h2c_seq % 4)) + rack = true; + hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | + FIELD_PREP(H2C_HDR_CAT, cat) | + FIELD_PREP(H2C_HDR_CLASS, class) | + FIELD_PREP(H2C_HDR_FUNC, func) | + FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); + + hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, + len + H2C_HEADER_LEN) | + (rack ? H2C_HDR_REC_ACK : 0) | + (dack ? H2C_HDR_DONE_ACK : 0)); + + rtwdev->fw.h2c_seq++; +} + +static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, + struct sk_buff *skb, + u8 type, u8 cat, u8 class, u8 func, + u32 len) +{ + struct fwcmd_hdr *hdr; + + hdr = (struct fwcmd_hdr *)skb_push(skb, 8); + + hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | + FIELD_PREP(H2C_HDR_CAT, cat) | + FIELD_PREP(H2C_HDR_CLASS, class) | + FIELD_PREP(H2C_HDR_FUNC, func) | + FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); + + hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, + len + H2C_HEADER_LEN)); +} + +static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) +{ + struct sk_buff *skb; + u32 ret = 0; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); + return -ENOMEM; + } + + skb_put_data(skb, fw, len); + rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FWDL, + H2C_FUNC_MAC_FWHDR_DL, len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + ret = -1; + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} + +static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) +{ + u8 val; + int ret; + + ret = __rtw89_fw_download_hdr(rtwdev, fw, len); + if (ret) { + rtw89_err(rtwdev, "[ERR]FW header download\n"); + return ret; + } + + ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, + 1, FWDL_WAIT_CNT, false, + rtwdev, R_AX_WCPU_FW_CTRL); + if (ret) { + rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); + return ret; + } + + rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); + rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); + + return 0; +} + +static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, + struct 
rtw89_fw_hdr_section_info *info) +{ + struct sk_buff *skb; + const u8 *section = info->addr; + u32 residue_len = info->len; + u32 pkt_len; + int ret; + + while (residue_len) { + if (residue_len >= FWDL_SECTION_PER_PKT_LEN) + pkt_len = FWDL_SECTION_PER_PKT_LEN; + else + pkt_len = residue_len; + + skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put_data(skb, section, pkt_len); + + ret = rtw89_h2c_tx(rtwdev, skb, true); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + ret = -1; + goto fail; + } + + section += pkt_len; + residue_len -= pkt_len; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} + +static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, + struct rtw89_fw_bin_info *info) +{ + struct rtw89_fw_hdr_section_info *section_info = info->section_info; + u8 section_num = info->section_num; + int ret; + + while (section_num--) { + ret = __rtw89_fw_download_main(rtwdev, section_info); + if (ret) + return ret; + section_info++; + } + + mdelay(5); + + ret = rtw89_fw_check_rdy(rtwdev); + if (ret) { + rtw89_warn(rtwdev, "download firmware fail\n"); + return ret; + } + + return 0; +} + +static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) +{ + u32 val32; + u16 index; + + rtw89_write32(rtwdev, R_AX_DBG_CTRL, + FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | + FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); + rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); + + for (index = 0; index < 15; index++) { + val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL); + rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); + fsleep(10); + } +} + +static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) +{ + u32 val32; + u16 val16; + + val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); + rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); + + val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); + rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); + + rtw89_fw_prog_cnt_dump(rtwdev); +} + +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +{ + struct rtw89_fw_info *fw_info = &rtwdev->fw; + struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + struct rtw89_fw_bin_info info; + const u8 *fw = fw_suit->data; + u32 len = fw_suit->size; + u8 val; + int ret; + + if (!fw || !len) { + rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); + return -ENOENT; + } + + ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); + if (ret) { + rtw89_err(rtwdev, "parse fw header fail\n"); + goto fwdl_err; + } + + ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, + 1, FWDL_WAIT_CNT, false, + rtwdev, R_AX_WCPU_FW_CTRL); + if (ret) { + rtw89_err(rtwdev, "[ERR]H2C path ready\n"); + goto fwdl_err; + } + + ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len); + if (ret) { + ret = -EBUSY; + goto fwdl_err; + } + + ret = rtw89_fw_download_main(rtwdev, fw, &info); + if (ret) { + ret = -EBUSY; + goto fwdl_err; + } + + fw_info->h2c_seq = 0; + fw_info->rec_seq = 0; + rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; + rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; + + return ret; + +fwdl_err: + rtw89_fw_dl_fail_dump(rtwdev); + return ret; +} + +int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_info *fw = &rtwdev->fw; + + wait_for_completion(&fw->completion); + if (!fw->firmware) + return -EINVAL; + + return 0; +} + +static void rtw89_load_firmware_cb(const struct firmware *firmware, void 
*context) +{ + struct rtw89_fw_info *fw = context; + struct rtw89_dev *rtwdev = fw->rtwdev; + + if (!firmware || !firmware->data) { + rtw89_err(rtwdev, "failed to request firmware\n"); + complete_all(&fw->completion); + return; + } + + fw->firmware = firmware; + complete_all(&fw->completion); +} + +int rtw89_load_firmware(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_info *fw = &rtwdev->fw; + const char *fw_name = rtwdev->chip->fw_name; + int ret; + + fw->rtwdev = rtwdev; + init_completion(&fw->completion); + + ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, + GFP_KERNEL, fw, rtw89_load_firmware_cb); + if (ret) { + rtw89_err(rtwdev, "failed to async firmware request\n"); + return ret; + } + + return 0; +} + +void rtw89_unload_firmware(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_info *fw = &rtwdev->fw; + + rtw89_wait_firmware_completion(rtwdev); + + if (fw->firmware) + release_firmware(fw->firmware); +} + +#define H2C_CAM_LEN 60 +int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_CAM_LEN); + rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, skb->data); + rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_ADDR_CAM_UPDATE, + H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, + H2C_CAM_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_BA_CAM_LEN 4 +int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid, + struct ieee80211_ampdu_params *params) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); + return -ENOMEM; + } + skb_put(skb, H2C_BA_CAM_LEN); + SET_BA_CAM_MACID(skb->data, macid); + if (!valid) + goto end; + SET_BA_CAM_VALID(skb->data, valid); + SET_BA_CAM_TID(skb->data, params->tid); + if (params->buf_size > 64) + SET_BA_CAM_BMAP_SIZE(skb->data, 4); + else + SET_BA_CAM_BMAP_SIZE(skb->data, 0); + /* If init req is set, hw will set the ssn */ + SET_BA_CAM_INIT_REQ(skb->data, 0); + SET_BA_CAM_SSN(skb->data, params->ssn); + +end: + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_BA_CAM, + H2C_FUNC_MAC_BA_CAM, 0, 1, + H2C_BA_CAM_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LOG_CFG_LEN 12 +int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) +{ + struct sk_buff *skb; + u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | + BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); + return -ENOMEM; + } + + skb_put(skb, H2C_LOG_CFG_LEN); + SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); + SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); + SET_LOG_CFG_COMP(skb->data, comp); + SET_LOG_CFG_COMP_EXT(skb->data, 0); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_FW_INFO, + H2C_FUNC_LOG_CFG, 0, 0, + H2C_LOG_CFG_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_GENERAL_PKT_LEN 6 +#define H2C_GENERAL_PKT_ID_UND 0xff +int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_GENERAL_PKT_LEN); + SET_GENERAL_PKT_MACID(skb->data, macid); + SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); + SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); + SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); + SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND); + SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_FW_INFO, + H2C_FUNC_MAC_GENERAL_PKT, 0, 1, + H2C_GENERAL_PKT_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LPS_PARM_LEN 8 +int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, + struct rtw89_lps_parm *lps_param) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_LPS_PARM_LEN); + + SET_LPS_PARM_MACID(skb->data, lps_param->macid); + SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); + SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); + SET_LPS_PARM_RLBM(skb->data, 1); + SET_LPS_PARM_SMARTPS(skb->data, 1); + SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); + SET_LPS_PARM_VOUAPSD(skb->data, 0); + SET_LPS_PARM_VIUAPSD(skb->data, 0); + SET_LPS_PARM_BEUAPSD(skb->data, 0); + SET_LPS_PARM_BKUAPSD(skb->data, 0); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_PS, + H2C_FUNC_MAC_LPS_PARM, 0, 1, + H2C_LPS_PARM_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_CMC_TBL_LEN 68 +int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct sk_buff *skb; + u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; + u8 map_b = hal->antenna_tx == RF_AB ? 
1 : 0; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_CMC_TBL_LEN); + SET_CTRL_INFO_MACID(skb->data, macid); + SET_CTRL_INFO_OPERATION(skb->data, 1); + SET_CMC_TBL_TXPWR_MODE(skb->data, 0); + SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); + SET_CMC_TBL_PATH_MAP_A(skb->data, 0); + SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); + SET_CMC_TBL_PATH_MAP_C(skb->data, 0); + SET_CMC_TBL_PATH_MAP_D(skb->data, 0); + SET_CMC_TBL_ANTSEL_A(skb->data, 0); + SET_CMC_TBL_ANTSEL_B(skb->data, 0); + SET_CMC_TBL_ANTSEL_C(skb->data, 0); + SET_CMC_TBL_ANTSEL_D(skb->data, 0); + SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); + SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, + H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + H2C_CMC_TBL_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, + struct ieee80211_sta *sta, u8 *pads) +{ + bool ppe_th; + u8 ppe16, ppe8; + u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1; + u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0]; + u8 ru_bitmap; + u8 n, idx, sh; + u16 ppe; + int i; + + if (!sta->he_cap.has_he) + return; + + ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, + sta->he_cap.he_cap_elem.phy_cap_info[6]); + if (!ppe_th) { + u8 pad; + + pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK, + sta->he_cap.he_cap_elem.phy_cap_info[9]); + + for (i = 0; i < RTW89_PPE_BW_NUM; i++) + pads[i] = pad; + } + + ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); + n = hweight8(ru_bitmap); + n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; + + for (i = 0; i < RTW89_PPE_BW_NUM; i++) { + if (!(ru_bitmap & BIT(i))) { + pads[i] = 1; + continue; + } + + idx = n >> 3; + sh = n & 7; + n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; + + ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx])); + ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; + sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; + + if (ppe16 != 7 && ppe8 == 7) + pads[i] = 2; + else if (ppe8 != 7) + pads[i] = 1; + else + pads[i] = 0; + } +} + +int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + struct sk_buff *skb; + u8 pads[RTW89_PPE_BW_NUM]; + + memset(pads, 0, sizeof(pads)); + __get_sta_he_pkt_padding(rtwdev, sta, pads); + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_CMC_TBL_LEN); + SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); + SET_CTRL_INFO_OPERATION(skb->data, 1); + SET_CMC_TBL_DISRTSFB(skb->data, 1); + SET_CMC_TBL_DISDATAFB(skb->data, 1); + if (hal->current_band_type == RTW89_BAND_2G) + SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1); + else + SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6); + SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); + SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); + if (vif->type == NL80211_IFTYPE_STATION) + SET_CMC_TBL_ULDL(skb->data, 
1); + else + SET_CMC_TBL_ULDL(skb->data, 0); + SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); + SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); + SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); + SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); + SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, + H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + H2C_CMC_TBL_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_CMC_TBL_LEN); + SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); + SET_CTRL_INFO_OPERATION(skb->data, 1); + if (rtwsta->cctl_tx_time) { + SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); + SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); + } + if (rtwsta->cctl_tx_retry_limit) { + SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); + SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, + H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + H2C_CMC_TBL_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_VIF_MAINTAIN_LEN 4 +int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + enum rtw89_upd_mode upd_mode) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); + return -ENOMEM; + } + skb_put(skb, H2C_VIF_MAINTAIN_LEN); + SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id); + SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role); + SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); + SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, + H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, + H2C_VIF_MAINTAIN_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_JOIN_INFO_LEN 4 +int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u8 dis_conn) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); + return -ENOMEM; + } + skb_put(skb, H2C_JOIN_INFO_LEN); + SET_JOININFO_MACID(skb->data, rtwvif->mac_id); + SET_JOININFO_OP(skb->data, dis_conn); + SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); + SET_JOININFO_WMM(skb->data, rtwvif->wmm); + SET_JOININFO_TGR(skb->data, rtwvif->trigger); + SET_JOININFO_ISHESTA(skb->data, 0); + SET_JOININFO_DLBW(skb->data, 0); + SET_JOININFO_TF_MAC_PAD(skb->data, 0); + SET_JOININFO_DL_T_PE(skb->data, 0); + SET_JOININFO_PORT_ID(skb->data, rtwvif->port); + SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type); + SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 
+ SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, + H2C_FUNC_MAC_JOININFO, 0, 1, + H2C_JOIN_INFO_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, + bool pause) +{ + struct rtw89_fw_macid_pause_grp h2c = {{0}}; + u8 len = sizeof(struct rtw89_fw_macid_pause_grp); + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); + return -ENOMEM; + } + h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); + if (pause) + h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); + skb_put_data(skb, &h2c, len); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, + H2C_FUNC_MAC_MACID_PAUSE, 1, 0, + len); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_EDCA_LEN 12 +int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u8 ac, u32 val) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); + return -ENOMEM; + } + skb_put(skb, H2C_EDCA_LEN); + RTW89_SET_EDCA_SEL(skb->data, 0); + RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); + RTW89_SET_EDCA_WMM(skb->data, 0); + RTW89_SET_EDCA_AC(skb->data, ac); + RTW89_SET_EDCA_PARAM(skb->data, val); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, + H2C_FUNC_USR_EDCA, 0, 1, + H2C_EDCA_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_OFLD_CFG_LEN 8 +int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) +{ + static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); + return -ENOMEM; + } + skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, + H2C_FUNC_OFLD_CFG, 0, 1, + H2C_OFLD_CFG_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_RA_LEN 16 +int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) +{ + struct sk_buff *skb; + u8 *cmd; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); + return -ENOMEM; + } + skb_put(skb, H2C_RA_LEN); + cmd = skb->data; + rtw89_debug(rtwdev, RTW89_DBG_RA, + "ra cmd msk: %llx ", ra->ra_mask); + + RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); + RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); + RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); + RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); + RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); + RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); + RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); + RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); + RTW89_SET_FWCMD_RA_LDPC(cmd, 
ra->ldpc_cap); + RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); + RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); + RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); + RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); + RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); + RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); + RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); + RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); + RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); + RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); + + if (csi) { + RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); + RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); + RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); + RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); + RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); + RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); + RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); + RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); + RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, + H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, + H2C_RA_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LEN_CXDRVHDR 2 +#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR) +int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_init_info *init_info = &dm->init_info; + struct rtw89_btc_module *module = &init_info->module; + struct rtw89_btc_ant_info *ant = &module->ant; + struct sk_buff *skb; + u8 *cmd; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); + return -ENOMEM; + } + skb_put(skb, H2C_LEN_CXDRVINFO_INIT); + cmd = skb->data; + + RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT); + RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR); + + RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type); + RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num); + RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation); + RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos); + RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity); + + RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type); + RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv); + RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo); + RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos); + RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type); + + RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch); + RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only); + RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok); + RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en); + RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other); + RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + H2C_LEN_CXDRVINFO_INIT); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + 
H2C_LEN_CXDRVHDR) +int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_role_info *role_info = &wl->role_info; + struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; + struct rtw89_btc_wl_active_role *active = role_info->active_role; + struct sk_buff *skb; + u8 *cmd; + int i; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); + return -ENOMEM; + } + skb_put(skb, H2C_LEN_CXDRVINFO_ROLE); + cmd = skb->data; + + RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); + RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR); + + RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); + RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); + + RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); + RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); + RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); + RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); + RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); + RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); + RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); + RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); + RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); + RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); + RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); + RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); + + for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) { + RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i); + RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i); + RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i); + RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i); + RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i); + RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i); + RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i); + RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i); + RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i); + RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i); + RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i); + RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i); + RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i); + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + H2C_LEN_CXDRVINFO_ROLE); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) +int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_ctrl *ctrl = &btc->ctrl; + struct sk_buff *skb; + u8 *cmd; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); + cmd = skb->data; + + RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); + RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); + + RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); + RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); + RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); + RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); + + 
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + H2C_LEN_CXDRVINFO_CTRL); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) +int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; + struct sk_buff *skb; + u8 *cmd; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + return -ENOMEM; + } + skb_put(skb, H2C_LEN_CXDRVINFO_RFK); + cmd = skb->data; + + RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); + RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); + + RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); + RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); + RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); + RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); + RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + H2C_LEN_CXDRVINFO_RFK); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, + struct rtw89_fw_h2c_rf_reg_info *info, + u16 len, u8 page) +{ + struct sk_buff *skb; + u8 class = info->rf_path == RF_PATH_A ? + H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); + return -ENOMEM; + } + skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, class, page, 0, 0, + len); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, + u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, + bool rack, bool dack) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); + return -ENOMEM; + } + skb_put_data(skb, buf, len); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, + len); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_no_hdr(len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); + return -ENOMEM; + } + skb_put_data(skb, buf, len); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} + +void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) +{ + struct rtw89_early_h2c *early_h2c; + + lockdep_assert_held(&rtwdev->mutex); + + list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { + rtw89_fw_h2c_raw(rtwdev, 
early_h2c->h2c, early_h2c->h2c_len); + } +} + +void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) +{ + struct rtw89_early_h2c *early_h2c, *tmp; + + mutex_lock(&rtwdev->mutex); + list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { + list_del(&early_h2c->list); + kfree(early_h2c->h2c); + kfree(early_h2c); + } + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) +{ + skb_queue_tail(&rtwdev->c2h_queue, c2h); + ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); +} + +static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, + struct sk_buff *skb) +{ + u8 category = RTW89_GET_C2H_CATEGORY(skb->data); + u8 class = RTW89_GET_C2H_CLASS(skb->data); + u8 func = RTW89_GET_C2H_FUNC(skb->data); + u16 len = RTW89_GET_C2H_LEN(skb->data); + bool dump = true; + + if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) + return; + + switch (category) { + case RTW89_C2H_CAT_TEST: + break; + case RTW89_C2H_CAT_MAC: + rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); + if (class == RTW89_MAC_C2H_CLASS_INFO && + func == RTW89_MAC_C2H_FUNC_C2H_LOG) + dump = false; + break; + case RTW89_C2H_CAT_OUTSRC: + if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && + class <= RTW89_PHY_C2H_CLASS_BTC_MAX) + rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); + else + rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); + break; + } + + if (dump) + rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); +} + +void rtw89_fw_c2h_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + c2h_work); + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { + skb_unlink(skb, &rtwdev->c2h_queue); + mutex_lock(&rtwdev->mutex); + rtw89_fw_c2h_cmd_handle(rtwdev, skb); + mutex_unlock(&rtwdev->mutex); + dev_kfree_skb_any(skb); + } +} + +static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, + struct rtw89_mac_h2c_info *info) +{ + static const u32 h2c_reg[RTW89_H2CREG_MAX] = { + R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, + R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3 + }; + u8 i, val, len; + int ret; + + ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, + rtwdev, R_AX_H2CREG_CTRL); + if (ret) { + rtw89_warn(rtwdev, "FW does not process h2c registers\n"); + return ret; + } + + len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, + sizeof(info->h2creg[0])); + + RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); + RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); + for (i = 0; i < RTW89_H2CREG_MAX; i++) + rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); + + rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER); + + return 0; +} + +static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, + struct rtw89_mac_c2h_info *info) +{ + static const u32 c2h_reg[RTW89_C2HREG_MAX] = { + R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, + R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3 + }; + u32 ret; + u8 i, val; + + info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; + + ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, + RTW89_C2H_TIMEOUT, false, rtwdev, + R_AX_C2HREG_CTRL); + if (ret) { + rtw89_warn(rtwdev, "c2h reg timeout\n"); + return ret; + } + + for (i = 0; i < RTW89_C2HREG_MAX; i++) + info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); + + rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0); + + info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); + info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - + RTW89_C2HREG_HDR_LEN; + + return 0; +} + +int rtw89_fw_msg_reg(struct 
rtw89_dev *rtwdev, + struct rtw89_mac_h2c_info *h2c_info, + struct rtw89_mac_c2h_info *c2h_info) +{ + u32 ret; + + if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) + lockdep_assert_held(&rtwdev->mutex); + + if (!h2c_info && !c2h_info) + return -EINVAL; + + if (!h2c_info) + goto recv_c2h; + + ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); + if (ret) + return ret; + +recv_c2h: + if (!c2h_info) + return 0; + + ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); + if (ret) + return ret; + + return 0; +} + +void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) +{ + if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { + rtw89_err(rtwdev, "[ERR]pwr is off\n"); + return; + } + + rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); + rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); + rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); + rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); + rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", + rtw89_read32(rtwdev, R_AX_HALT_C2H)); + rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", + rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); + + rtw89_fw_prog_cnt_dump(rtwdev); +} diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h new file mode 100644 index 00000000000000..7ee0d932331075 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -0,0 +1,1378 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_FW_H__ +#define __RTW89_FW_H__ + +#include "core.h" + +enum rtw89_fw_dl_status { + RTW89_FWDL_INITIAL_STATE = 0, + RTW89_FWDL_FWDL_ONGOING = 1, + RTW89_FWDL_CHECKSUM_FAIL = 2, + RTW89_FWDL_SECURITY_FAIL = 3, + RTW89_FWDL_CV_NOT_MATCH = 4, + RTW89_FWDL_RSVD0 = 5, + RTW89_FWDL_WCPU_FWDL_RDY = 6, + RTW89_FWDL_WCPU_FW_INIT_RDY = 7 +}; + +#define RTW89_GET_C2H_HDR_FUNC(info) \ + u32_get_bits(info, GENMASK(6, 0)) +#define RTW89_GET_C2H_HDR_LEN(info) \ + u32_get_bits(info, GENMASK(11, 8)) + +#define RTW89_SET_H2CREG_HDR_FUNC(info, val) \ + u32p_replace_bits(info, val, GENMASK(6, 0)) +#define RTW89_SET_H2CREG_HDR_LEN(info, val) \ + u32p_replace_bits(info, val, GENMASK(11, 8)) + +#define RTW89_H2CREG_MAX 4 +#define RTW89_C2HREG_MAX 4 +#define RTW89_C2HREG_HDR_LEN 2 +#define RTW89_H2CREG_HDR_LEN 2 +#define RTW89_C2H_TIMEOUT 1000000 +struct rtw89_mac_c2h_info { + u8 id; + u8 content_len; + u32 c2hreg[RTW89_C2HREG_MAX]; +}; + +struct rtw89_mac_h2c_info { + u8 id; + u8 content_len; + u32 h2creg[RTW89_H2CREG_MAX]; +}; + +enum rtw89_mac_h2c_type { + RTW89_FWCMD_H2CREG_FUNC_H2CREG_LB = 0, + RTW89_FWCMD_H2CREG_FUNC_CNSL_CMD, + RTW89_FWCMD_H2CREG_FUNC_FWERR, + RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE, + RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM, + RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN +}; + +enum rtw89_mac_c2h_type { + RTW89_FWCMD_C2HREG_FUNC_C2HREG_LB = 0, + RTW89_FWCMD_C2HREG_FUNC_ERR_RPT, + RTW89_FWCMD_C2HREG_FUNC_ERR_MSG, + RTW89_FWCMD_C2HREG_FUNC_PHY_CAP, + RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT, + RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF +}; + +struct rtw89_c2h_phy_cap { + u32 func:7; + u32 ack:1; + u32 len:4; + u32 seq:4; + u32 rx_nss:8; + u32 bw:8; + + u32 tx_nss:8; + u32 prot:8; + u32 nic:8; + u32 wl_func:8; + + u32 hw_type:8; +} __packed; + +enum rtw89_fw_c2h_category { + RTW89_C2H_CAT_TEST, + RTW89_C2H_CAT_MAC, + RTW89_C2H_CAT_OUTSRC, +}; + +enum rtw89_fw_log_level { + RTW89_FW_LOG_LEVEL_OFF, + RTW89_FW_LOG_LEVEL_CRT, + RTW89_FW_LOG_LEVEL_SER, + 
RTW89_FW_LOG_LEVEL_WARN, + RTW89_FW_LOG_LEVEL_LOUD, + RTW89_FW_LOG_LEVEL_TR, +}; + +enum rtw89_fw_log_path { + RTW89_FW_LOG_LEVEL_UART, + RTW89_FW_LOG_LEVEL_C2H, + RTW89_FW_LOG_LEVEL_SNI, +}; + +enum rtw89_fw_log_comp { + RTW89_FW_LOG_COMP_VER, + RTW89_FW_LOG_COMP_INIT, + RTW89_FW_LOG_COMP_TASK, + RTW89_FW_LOG_COMP_CNS, + RTW89_FW_LOG_COMP_H2C, + RTW89_FW_LOG_COMP_C2H, + RTW89_FW_LOG_COMP_TX, + RTW89_FW_LOG_COMP_RX, + RTW89_FW_LOG_COMP_IPSEC, + RTW89_FW_LOG_COMP_TIMER, + RTW89_FW_LOG_COMP_DBGPKT, + RTW89_FW_LOG_COMP_PS, + RTW89_FW_LOG_COMP_ERROR, + RTW89_FW_LOG_COMP_WOWLAN, + RTW89_FW_LOG_COMP_SECURE_BOOT, + RTW89_FW_LOG_COMP_BTC, + RTW89_FW_LOG_COMP_BB, + RTW89_FW_LOG_COMP_TWT, + RTW89_FW_LOG_COMP_RF, + RTW89_FW_LOG_COMP_MCC = 20, +}; + +#define FWDL_SECTION_MAX_NUM 10 +#define FWDL_SECTION_CHKSUM_LEN 8 +#define FWDL_SECTION_PER_PKT_LEN 2020 + +struct rtw89_fw_hdr_section_info { + u8 redl; + const u8 *addr; + u32 len; + u32 dladdr; +}; + +struct rtw89_fw_bin_info { + u8 section_num; + u32 hdr_len; + struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM]; +}; + +struct rtw89_fw_macid_pause_grp { + __le32 pause_grp[4]; + __le32 mask_grp[4]; +} __packed; + +struct rtw89_h2creg_sch_tx_en { + u8 func:7; + u8 ack:1; + u8 total_len:4; + u8 seq_num:4; + u16 tx_en:16; + u16 mask:16; + u8 band:1; + u16 rsvd:15; +} __packed; + +#define RTW89_SET_FWCMD_RA_IS_DIS(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0)) +#define RTW89_SET_FWCMD_RA_MODE(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1)) +#define RTW89_SET_FWCMD_RA_BW_CAP(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6)) +#define RTW89_SET_FWCMD_RA_MACID(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)) +#define RTW89_SET_FWCMD_RA_DCM(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16)) +#define RTW89_SET_FWCMD_RA_ER(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17)) +#define RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18)) +#define RTW89_SET_FWCMD_RA_UPD_ALL(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20)) +#define RTW89_SET_FWCMD_RA_SGI(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21)) +#define RTW89_SET_FWCMD_RA_LDPC(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22)) +#define RTW89_SET_FWCMD_RA_STBC(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23)) +#define RTW89_SET_FWCMD_RA_SS_NUM(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24)) +#define RTW89_SET_FWCMD_RA_GILTF(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27)) +#define RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30)) +#define RTW89_SET_FWCMD_RA_UPD_MASK(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31)) +#define RTW89_SET_FWCMD_RA_MASK_0(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_RA_MASK_1(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8)) +#define RTW89_SET_FWCMD_RA_MASK_2(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16)) +#define RTW89_SET_FWCMD_RA_MASK_3(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24)) +#define RTW89_SET_FWCMD_RA_MASK_4(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0)) 
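+/* CSI feedback related RA fields occupy words 2 and 3 of the command
+ * payload; as with the fields above, le32p_replace_bits() packs each
+ * host-order value into its little-endian destination word.
+ */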
+#define RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31)) +#define RTW89_SET_FWCMD_RA_BAND_NUM(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8)) +#define RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9)) +#define RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10)) +#define RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16)) +#define RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24)) +#define RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26)) +#define RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29)) + +#define RTW89_SET_FWCMD_SEC_IDX(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_SEC_OFFSET(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)) +#define RTW89_SET_FWCMD_SEC_LEN(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16)) +#define RTW89_SET_FWCMD_SEC_TYPE(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0)) +#define RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4)) +#define RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5)) +#define RTW89_SET_FWCMD_SEC_KEY0(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0)) +#define RTW89_SET_FWCMD_SEC_KEY1(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0)) +#define RTW89_SET_FWCMD_SEC_KEY2(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0)) +#define RTW89_SET_FWCMD_SEC_KEY3(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0)) + +#define RTW89_SET_EDCA_SEL(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0)) +#define RTW89_SET_EDCA_BAND(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3)) +#define RTW89_SET_EDCA_WMM(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4)) +#define RTW89_SET_EDCA_AC(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5)) +#define RTW89_SET_EDCA_PARAM(cmd, val) \ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 0)) +#define FW_EDCA_PARAM_TXOPLMT_MSK GENMASK(26, 16) +#define FW_EDCA_PARAM_CWMAX_MSK GENMASK(15, 12) +#define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8) +#define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0) + +#define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 0)) +#define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(28)) +#define GET_FWSECTION_HDR_REDL(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(29)) +#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr)), GENMASK(31, 0)) + +#define GET_FW_HDR_MAJOR_VERSION(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(7, 0)) +#define GET_FW_HDR_MINOR_VERSION(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(15, 8)) +#define GET_FW_HDR_SUBVERSION(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 
1), GENMASK(23, 16)) +#define GET_FW_HDR_SUBINDEX(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(31, 24)) +#define GET_FW_HDR_MONTH(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(7, 0)) +#define GET_FW_HDR_DATE(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(15, 8)) +#define GET_FW_HDR_HOUR(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(23, 16)) +#define GET_FW_HDR_MIN(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(31, 24)) +#define GET_FW_HDR_YEAR(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 5), GENMASK(31, 0)) +#define GET_FW_HDR_SEC_NUM(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8)) +#define GET_FW_HDR_CMD_VERSERION(fwhdr) \ + le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24)) +#define SET_FW_HDR_PART_SIZE(fwhdr, val) \ + le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0)) + +#define SET_CTRL_INFO_MACID(table, val) \ + le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)) +#define SET_CTRL_INFO_OPERATION(table, val) \ + le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7)) +#define SET_CMC_TBL_MASK_DATARATE GENMASK(8, 0) +#define SET_CMC_TBL_DATARATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE, \ + GENMASK(8, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_FORCE_TXOP BIT(0) +#define SET_CMC_TBL_FORCE_TXOP(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP, \ + BIT(9)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_BW GENMASK(1, 0) +#define SET_CMC_TBL_DATA_BW(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW, \ + GENMASK(11, 10)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_GI_LTF GENMASK(2, 0) +#define SET_CMC_TBL_DATA_GI_LTF(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF, \ + GENMASK(14, 12)); \ +} while (0) +#define SET_CMC_TBL_MASK_DARF_TC_INDEX BIT(0) +#define SET_CMC_TBL_DARF_TC_INDEX(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX, \ + BIT(15)); \ +} while (0) +#define SET_CMC_TBL_MASK_ARFR_CTRL GENMASK(3, 0) +#define SET_CMC_TBL_ARFR_CTRL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL, \ + GENMASK(19, 16)); \ +} while (0) +#define SET_CMC_TBL_MASK_ACQ_RPT_EN BIT(0) +#define SET_CMC_TBL_ACQ_RPT_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN, \ + BIT(20)); \ +} while (0) +#define SET_CMC_TBL_MASK_MGQ_RPT_EN BIT(0) +#define SET_CMC_TBL_MGQ_RPT_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN, \ + BIT(21)); \ +} while (0) +#define SET_CMC_TBL_MASK_ULQ_RPT_EN BIT(0) +#define SET_CMC_TBL_ULQ_RPT_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN, \ + BIT(22)); \ +} while (0) +#define 
SET_CMC_TBL_MASK_TWTQ_RPT_EN BIT(0) +#define SET_CMC_TBL_TWTQ_RPT_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN, \ + BIT(23)); \ +} while (0) +#define SET_CMC_TBL_MASK_DISRTSFB BIT(0) +#define SET_CMC_TBL_DISRTSFB(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB, \ + BIT(25)); \ +} while (0) +#define SET_CMC_TBL_MASK_DISDATAFB BIT(0) +#define SET_CMC_TBL_DISDATAFB(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB, \ + BIT(26)); \ +} while (0) +#define SET_CMC_TBL_MASK_TRYRATE BIT(0) +#define SET_CMC_TBL_TRYRATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE, \ + BIT(27)); \ +} while (0) +#define SET_CMC_TBL_MASK_AMPDU_DENSITY GENMASK(3, 0) +#define SET_CMC_TBL_AMPDU_DENSITY(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28)); \ + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY, \ + GENMASK(31, 28)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE GENMASK(8, 0) +#define SET_CMC_TBL_DATA_RTY_LOWEST_RATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE, \ + GENMASK(8, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_AMPDU_TIME_SEL BIT(0) +#define SET_CMC_TBL_AMPDU_TIME_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL, \ + BIT(9)); \ +} while (0) +#define SET_CMC_TBL_MASK_AMPDU_LEN_SEL BIT(0) +#define SET_CMC_TBL_AMPDU_LEN_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL, \ + BIT(10)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL BIT(0) +#define SET_CMC_TBL_RTS_TXCNT_LMT_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL, \ + BIT(11)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT GENMASK(3, 0) +#define SET_CMC_TBL_RTS_TXCNT_LMT(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT, \ + GENMASK(15, 12)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTSRATE GENMASK(8, 0) +#define SET_CMC_TBL_RTSRATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE, \ + GENMASK(24, 16)); \ +} while (0) +#define SET_CMC_TBL_MASK_VCS_STBC BIT(0) +#define SET_CMC_TBL_VCS_STBC(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC, \ + BIT(27)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE GENMASK(3, 0) +#define SET_CMC_TBL_RTS_RTY_LOWEST_RATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28)); \ + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE, \ + 
GENMASK(31, 28)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_TX_CNT_LMT GENMASK(5, 0) +#define SET_CMC_TBL_DATA_TX_CNT_LMT(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT, \ + GENMASK(5, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL BIT(0) +#define SET_CMC_TBL_DATA_TXCNT_LMT_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL, \ + BIT(6)); \ +} while (0) +#define SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL BIT(0) +#define SET_CMC_TBL_MAX_AGG_NUM_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL, \ + BIT(7)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTS_EN BIT(0) +#define SET_CMC_TBL_RTS_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN, \ + BIT(8)); \ +} while (0) +#define SET_CMC_TBL_MASK_CTS2SELF_EN BIT(0) +#define SET_CMC_TBL_CTS2SELF_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN, \ + BIT(9)); \ +} while (0) +#define SET_CMC_TBL_MASK_CCA_RTS GENMASK(1, 0) +#define SET_CMC_TBL_CCA_RTS(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS, \ + GENMASK(11, 10)); \ +} while (0) +#define SET_CMC_TBL_MASK_HW_RTS_EN BIT(0) +#define SET_CMC_TBL_HW_RTS_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN, \ + BIT(12)); \ +} while (0) +#define SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE GENMASK(1, 0) +#define SET_CMC_TBL_RTS_DROP_DATA_MODE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE, \ + GENMASK(14, 13)); \ +} while (0) +#define SET_CMC_TBL_MASK_AMPDU_MAX_LEN GENMASK(10, 0) +#define SET_CMC_TBL_AMPDU_MAX_LEN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN, \ + GENMASK(26, 16)); \ +} while (0) +#define SET_CMC_TBL_MASK_UL_MU_DIS BIT(0) +#define SET_CMC_TBL_UL_MU_DIS(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS, \ + BIT(27)); \ +} while (0) +#define SET_CMC_TBL_MASK_AMPDU_MAX_TIME GENMASK(3, 0) +#define SET_CMC_TBL_AMPDU_MAX_TIME(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28)); \ + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME, \ + GENMASK(31, 28)); \ +} while (0) +#define SET_CMC_TBL_MASK_MAX_AGG_NUM GENMASK(7, 0) +#define SET_CMC_TBL_MAX_AGG_NUM(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM, \ + GENMASK(7, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_BA_BMAP GENMASK(1, 0) +#define SET_CMC_TBL_BA_BMAP(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8)); \ + 
le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP, \ + GENMASK(9, 8)); \ +} while (0) +#define SET_CMC_TBL_MASK_VO_LFTIME_SEL GENMASK(2, 0) +#define SET_CMC_TBL_VO_LFTIME_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL, \ + GENMASK(18, 16)); \ +} while (0) +#define SET_CMC_TBL_MASK_VI_LFTIME_SEL GENMASK(2, 0) +#define SET_CMC_TBL_VI_LFTIME_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL, \ + GENMASK(21, 19)); \ +} while (0) +#define SET_CMC_TBL_MASK_BE_LFTIME_SEL GENMASK(2, 0) +#define SET_CMC_TBL_BE_LFTIME_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL, \ + GENMASK(24, 22)); \ +} while (0) +#define SET_CMC_TBL_MASK_BK_LFTIME_SEL GENMASK(2, 0) +#define SET_CMC_TBL_BK_LFTIME_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL, \ + GENMASK(27, 25)); \ +} while (0) +#define SET_CMC_TBL_MASK_SECTYPE GENMASK(3, 0) +#define SET_CMC_TBL_SECTYPE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28)); \ + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE, \ + GENMASK(31, 28)); \ +} while (0) +#define SET_CMC_TBL_MASK_MULTI_PORT_ID GENMASK(2, 0) +#define SET_CMC_TBL_MULTI_PORT_ID(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID, \ + GENMASK(2, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_BMC BIT(0) +#define SET_CMC_TBL_BMC(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC, \ + BIT(3)); \ +} while (0) +#define SET_CMC_TBL_MASK_MBSSID GENMASK(3, 0) +#define SET_CMC_TBL_MBSSID(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID, \ + GENMASK(7, 4)); \ +} while (0) +#define SET_CMC_TBL_MASK_NAVUSEHDR BIT(0) +#define SET_CMC_TBL_NAVUSEHDR(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR, \ + BIT(8)); \ +} while (0) +#define SET_CMC_TBL_MASK_TXPWR_MODE GENMASK(2, 0) +#define SET_CMC_TBL_TXPWR_MODE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE, \ + GENMASK(11, 9)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_DCM BIT(0) +#define SET_CMC_TBL_DATA_DCM(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM, \ + BIT(12)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_ER BIT(0) +#define SET_CMC_TBL_DATA_ER(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER, \ + BIT(13)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_LDPC BIT(0) +#define SET_CMC_TBL_DATA_LDPC(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, 
BIT(14)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC, \ + BIT(14)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_STBC BIT(0) +#define SET_CMC_TBL_DATA_STBC(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC, \ + BIT(15)); \ +} while (0) +#define SET_CMC_TBL_MASK_A_CTRL_BQR BIT(0) +#define SET_CMC_TBL_A_CTRL_BQR(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR, \ + BIT(16)); \ +} while (0) +#define SET_CMC_TBL_MASK_A_CTRL_UPH BIT(0) +#define SET_CMC_TBL_A_CTRL_UPH(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH, \ + BIT(17)); \ +} while (0) +#define SET_CMC_TBL_MASK_A_CTRL_BSR BIT(0) +#define SET_CMC_TBL_A_CTRL_BSR(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR, \ + BIT(18)); \ +} while (0) +#define SET_CMC_TBL_MASK_A_CTRL_CAS BIT(0) +#define SET_CMC_TBL_A_CTRL_CAS(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS, \ + BIT(19)); \ +} while (0) +#define SET_CMC_TBL_MASK_DATA_BW_ER BIT(0) +#define SET_CMC_TBL_DATA_BW_ER(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER, \ + BIT(20)); \ +} while (0) +#define SET_CMC_TBL_MASK_LSIG_TXOP_EN BIT(0) +#define SET_CMC_TBL_LSIG_TXOP_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN, \ + BIT(21)); \ +} while (0) +#define SET_CMC_TBL_MASK_CTRL_CNT_VLD BIT(0) +#define SET_CMC_TBL_CTRL_CNT_VLD(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD, \ + BIT(27)); \ +} while (0) +#define SET_CMC_TBL_MASK_CTRL_CNT GENMASK(3, 0) +#define SET_CMC_TBL_CTRL_CNT(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28)); \ + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT, \ + GENMASK(31, 28)); \ +} while (0) +#define SET_CMC_TBL_MASK_RESP_REF_RATE GENMASK(8, 0) +#define SET_CMC_TBL_RESP_REF_RATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE, \ + GENMASK(8, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_ALL_ACK_SUPPORT BIT(0) +#define SET_CMC_TBL_ALL_ACK_SUPPORT(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT, \ + BIT(12)); \ +} while (0) +#define SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT BIT(0) +#define SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT, \ + BIT(13)); \ +} while (0) +#define SET_CMC_TBL_MASK_NTX_PATH_EN GENMASK(3, 0) +#define SET_CMC_TBL_NTX_PATH_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16)); \ + 
le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN, \ + GENMASK(19, 16)); \ +} while (0) +#define SET_CMC_TBL_MASK_PATH_MAP_A GENMASK(1, 0) +#define SET_CMC_TBL_PATH_MAP_A(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A, \ + GENMASK(21, 20)); \ +} while (0) +#define SET_CMC_TBL_MASK_PATH_MAP_B GENMASK(1, 0) +#define SET_CMC_TBL_PATH_MAP_B(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B, \ + GENMASK(23, 22)); \ +} while (0) +#define SET_CMC_TBL_MASK_PATH_MAP_C GENMASK(1, 0) +#define SET_CMC_TBL_PATH_MAP_C(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C, \ + GENMASK(25, 24)); \ +} while (0) +#define SET_CMC_TBL_MASK_PATH_MAP_D GENMASK(1, 0) +#define SET_CMC_TBL_PATH_MAP_D(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D, \ + GENMASK(27, 26)); \ +} while (0) +#define SET_CMC_TBL_MASK_ANTSEL_A BIT(0) +#define SET_CMC_TBL_ANTSEL_A(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A, \ + BIT(28)); \ +} while (0) +#define SET_CMC_TBL_MASK_ANTSEL_B BIT(0) +#define SET_CMC_TBL_ANTSEL_B(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B, \ + BIT(29)); \ +} while (0) +#define SET_CMC_TBL_MASK_ANTSEL_C BIT(0) +#define SET_CMC_TBL_ANTSEL_C(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C, \ + BIT(30)); \ +} while (0) +#define SET_CMC_TBL_MASK_ANTSEL_D BIT(0) +#define SET_CMC_TBL_ANTSEL_D(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31)); \ + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, \ + BIT(31)); \ +} while (0) +#define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0) +#define SET_CMC_TBL_ADDR_CAM_INDEX(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX, \ + GENMASK(7, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_PAID GENMASK(8, 0) +#define SET_CMC_TBL_PAID(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID, \ + GENMASK(16, 8)); \ +} while (0) +#define SET_CMC_TBL_MASK_ULDL BIT(0) +#define SET_CMC_TBL_ULDL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL, \ + BIT(17)); \ +} while (0) +#define SET_CMC_TBL_MASK_DOPPLER_CTRL GENMASK(1, 0) +#define SET_CMC_TBL_DOPPLER_CTRL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, \ + GENMASK(19, 18)); \ +} while (0) +#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0) +#define SET_CMC_TBL_NOMINAL_PKT_PADDING(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 
20)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ + GENMASK(21, 20)); \ +} while (0) +#define SET_CMC_TBL_NOMINAL_PKT_PADDING40(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ + GENMASK(23, 22)); \ +} while (0) +#define SET_CMC_TBL_MASK_TXPWR_TOLERENCE GENMASK(3, 0) +#define SET_CMC_TBL_TXPWR_TOLERENCE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE, \ + GENMASK(27, 24)); \ +} while (0) +#define SET_CMC_TBL_NOMINAL_PKT_PADDING80(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30)); \ + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ + GENMASK(31, 30)); \ +} while (0) +#define SET_CMC_TBL_MASK_NC GENMASK(2, 0) +#define SET_CMC_TBL_NC(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC, \ + GENMASK(2, 0)); \ +} while (0) +#define SET_CMC_TBL_MASK_NR GENMASK(2, 0) +#define SET_CMC_TBL_NR(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR, \ + GENMASK(5, 3)); \ +} while (0) +#define SET_CMC_TBL_MASK_NG GENMASK(1, 0) +#define SET_CMC_TBL_NG(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG, \ + GENMASK(7, 6)); \ +} while (0) +#define SET_CMC_TBL_MASK_CB GENMASK(1, 0) +#define SET_CMC_TBL_CB(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB, \ + GENMASK(9, 8)); \ +} while (0) +#define SET_CMC_TBL_MASK_CS GENMASK(1, 0) +#define SET_CMC_TBL_CS(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS, \ + GENMASK(11, 10)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_TXBF_EN BIT(0) +#define SET_CMC_TBL_CSI_TXBF_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN, \ + BIT(12)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_STBC_EN BIT(0) +#define SET_CMC_TBL_CSI_STBC_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN, \ + BIT(13)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_LDPC_EN BIT(0) +#define SET_CMC_TBL_CSI_LDPC_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN, \ + BIT(14)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_PARA_EN BIT(0) +#define SET_CMC_TBL_CSI_PARA_EN(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN, \ + BIT(15)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_FIX_RATE GENMASK(8, 0) +#define SET_CMC_TBL_CSI_FIX_RATE(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE, \ + GENMASK(24, 
16)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_GI_LTF GENMASK(2, 0) +#define SET_CMC_TBL_CSI_GI_LTF(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, \ + GENMASK(27, 25)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0) +#define SET_CMC_TBL_CSI_GID_SEL(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, \ + BIT(29)); \ +} while (0) +#define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0) +#define SET_CMC_TBL_CSI_BW(table, val) \ +do { \ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30)); \ + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW, \ + GENMASK(31, 30)); \ +} while (0) + +#define SET_FWROLE_MAINTAIN_MACID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) +#define SET_FWROLE_MAINTAIN_SELF_ROLE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8)) +#define SET_FWROLE_MAINTAIN_UPD_MODE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10)) +#define SET_FWROLE_MAINTAIN_WIFI_ROLE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13)) + +#define SET_JOININFO_MACID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) +#define SET_JOININFO_OP(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(8)) +#define SET_JOININFO_BAND(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(9)) +#define SET_JOININFO_WMM(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10)) +#define SET_JOININFO_TGR(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(12)) +#define SET_JOININFO_ISHESTA(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(13)) +#define SET_JOININFO_DLBW(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14)) +#define SET_JOININFO_TF_MAC_PAD(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16)) +#define SET_JOININFO_DL_T_PE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18)) +#define SET_JOININFO_PORT_ID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21)) +#define SET_JOININFO_NET_TYPE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24)) +#define SET_JOININFO_WIFI_ROLE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26)) +#define SET_JOININFO_SELF_ROLE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30)) + +#define SET_GENERAL_PKT_MACID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) +#define SET_GENERAL_PKT_PROBRSP_ID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) +#define SET_GENERAL_PKT_PSPOLL_ID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16)) +#define SET_GENERAL_PKT_NULL_ID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)) +#define SET_GENERAL_PKT_QOS_NULL_ID(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0)) +#define SET_GENERAL_PKT_CTS2SELF_ID(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)) + +#define SET_LOG_CFG_LEVEL(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) +#define SET_LOG_CFG_PATH(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) +#define SET_LOG_CFG_COMP(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0)) +#define SET_LOG_CFG_COMP_EXT(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 2, val, 
GENMASK(31, 0)) + +#define SET_BA_CAM_VALID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(0)) +#define SET_BA_CAM_INIT_REQ(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, BIT(1)) +#define SET_BA_CAM_ENTRY_IDX(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2)) +#define SET_BA_CAM_TID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4)) +#define SET_BA_CAM_MACID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) +#define SET_BA_CAM_BMAP_SIZE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)) +#define SET_BA_CAM_SSN(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20)) + +#define SET_LPS_PARM_MACID(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) +#define SET_LPS_PARM_PSMODE(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) +#define SET_LPS_PARM_RLBM(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)) +#define SET_LPS_PARM_SMARTPS(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20)) +#define SET_LPS_PARM_AWAKEINTERVAL(h2c, val) \ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)) +#define SET_LPS_PARM_VOUAPSD(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0)) +#define SET_LPS_PARM_VIUAPSD(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1)) +#define SET_LPS_PARM_BEUAPSD(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(2)) +#define SET_LPS_PARM_BKUAPSD(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3)) +#define SET_LPS_PARM_LASTRPWM(h2c, val) \ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)) + +enum rtw89_btc_btf_h2c_class { + BTFC_SET = 0x10, + BTFC_GET = 0x11, + BTFC_FW_EVENT = 0x12, +}; + +enum rtw89_btc_btf_set { + SET_REPORT_EN = 0x0, + SET_SLOT_TABLE, + SET_MREG_TABLE, + SET_CX_POLICY, + SET_GPIO_DBG, + SET_DRV_INFO, + SET_DRV_EVENT, + SET_BT_WREG_ADDR, + SET_BT_WREG_VAL, + SET_BT_RREG_ADDR, + SET_BT_WL_CH_INFO, + SET_BT_INFO_REPORT, + SET_BT_IGNORE_WLAN_ACT, + SET_BT_TX_PWR, + SET_BT_LNA_CONSTRAIN, + SET_BT_GOLDEN_RX_RANGE, + SET_BT_PSD_REPORT, + SET_H2C_TEST, + SET_MAX1, +}; + +enum rtw89_btc_cxdrvinfo { + CXDRVINFO_INIT = 0, + CXDRVINFO_ROLE, + CXDRVINFO_DBCC, + CXDRVINFO_SMAP, + CXDRVINFO_RFK, + CXDRVINFO_RUN, + CXDRVINFO_CTRL, + CXDRVINFO_SCAN, + CXDRVINFO_MAX, +}; + +#define RTW89_SET_FWCMD_CXHDR_TYPE(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXHDR_LEN(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0)) + +#define RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0)) +#define RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1)) +#define RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0)) +#define RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1)) +#define 
RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2)) +#define RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0)) +#define RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1)) +#define RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2)) +#define RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3)) +#define RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4)) + +#define RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, val) \ + u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(8)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10)) +#define RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, val) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11)) +#define RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1)) +#define RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4)) +#define RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5)) +#define RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6)) +#define RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1)) +#define RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, val, n) \ + u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, val, n) \ + le16p_replace_bits((__le16 *)((u8 
*)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, val, n) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, val, n) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0)) +#define RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, val, n) \ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0)) + +#define RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0)) +#define RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1)) +#define RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2)) +#define RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3)) + +#define RTW89_SET_FWCMD_CXRFK_STATE(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0)) +#define RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2)) +#define RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6)) +#define RTW89_SET_FWCMD_CXRFK_BAND(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8)) +#define RTW89_SET_FWCMD_CXRFK_TYPE(cmd, val) \ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10)) + +#define RTW89_C2H_HEADER_LEN 8 + +#define RTW89_GET_C2H_CATEGORY(c2h) \ + le32_get_bits(*((__le32 *)c2h), GENMASK(1, 0)) +#define RTW89_GET_C2H_CLASS(c2h) \ + le32_get_bits(*((__le32 *)c2h), GENMASK(7, 2)) +#define RTW89_GET_C2H_FUNC(c2h) \ + le32_get_bits(*((__le32 *)c2h), GENMASK(15, 8)) +#define RTW89_GET_C2H_LEN(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 1), GENMASK(13, 0)) + +#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2) +#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN) + +#define RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0)) +#define RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2)) +#define RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8)) +#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) +#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(31, 24)) + +#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0)) +#define RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2)) +#define RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8)) +#define RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) + +#define RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 0)) +#define RTW89_GET_PHY_C2H_RA_RPT_RETRY_RATIO(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) +#define RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(6, 0)) +#define RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(9, 8)) +#define RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(12, 
10)) +#define RTW89_GET_PHY_C2H_RA_RPT_BW(c2h) \ + le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(14, 13)) + +/* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS + * HT-new: [6:5]: NA, [4:0]: MCS + */ +#define RTW89_RA_RATE_MASK_NSS GENMASK(6, 4) +#define RTW89_RA_RATE_MASK_MCS GENMASK(3, 0) +#define RTW89_RA_RATE_MASK_HT_MCS GENMASK(4, 0) +#define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \ + FIELD_PREP(GENMASK(2, 0), mcs)) + +#define RTW89_FW_HDR_SIZE 32 +#define RTW89_FW_SECTION_HDR_SIZE 16 + +#define RTW89_MFW_SIG 0xFF + +struct rtw89_mfw_info { + u8 cv; + u8 type; /* enum rtw89_fw_type */ + u8 mp; + u8 rsvd; + __le32 shift; + __le32 size; + u8 rsvd2[4]; +} __packed; + +struct rtw89_mfw_hdr { + u8 sig; /* RTW89_MFW_SIG */ + u8 fw_nr; + u8 rsvd[14]; + struct rtw89_mfw_info info[]; +} __packed; + +struct fwcmd_hdr { + __le32 hdr0; + __le32 hdr1; +}; + +#define RTW89_H2C_RF_PAGE_SIZE 500 +#define RTW89_H2C_RF_PAGE_NUM 3 +struct rtw89_fw_h2c_rf_reg_info { + enum rtw89_rf_path rf_path; + __le32 rtw89_phy_config_rf_h2c[RTW89_H2C_RF_PAGE_NUM][RTW89_H2C_RF_PAGE_SIZE]; + u16 curr_idx; +}; + +#define H2C_SEC_CAM_LEN 24 + +#define H2C_HEADER_LEN 8 +#define H2C_HDR_CAT GENMASK(1, 0) +#define H2C_HDR_CLASS GENMASK(7, 2) +#define H2C_HDR_FUNC GENMASK(15, 8) +#define H2C_HDR_DEL_TYPE GENMASK(19, 16) +#define H2C_HDR_H2C_SEQ GENMASK(31, 24) +#define H2C_HDR_TOTAL_LEN GENMASK(13, 0) +#define H2C_HDR_REC_ACK BIT(14) +#define H2C_HDR_DONE_ACK BIT(15) + +#define FWCMD_TYPE_H2C 0 + +#define H2C_CAT_MAC 0x1 + +/* CLASS 0 - FW INFO */ +#define H2C_CL_FW_INFO 0x0 +#define H2C_FUNC_LOG_CFG 0x0 +#define H2C_FUNC_MAC_GENERAL_PKT 0x1 + +/* CLASS 2 - PS */ +#define H2C_CL_MAC_PS 0x2 +#define H2C_FUNC_MAC_LPS_PARM 0x0 + +/* CLASS 3 - FW download */ +#define H2C_CL_MAC_FWDL 0x3 +#define H2C_FUNC_MAC_FWHDR_DL 0x0 + +/* CLASS 5 - Frame Exchange */ +#define H2C_CL_MAC_FR_EXCHG 0x5 +#define H2C_FUNC_MAC_CCTLINFO_UD 0x2 + +/* CLASS 6 - Address CAM */ +#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6 +#define H2C_FUNC_MAC_ADDR_CAM_UPD 0x0 + +/* CLASS 8 - Media Status Report */ +#define H2C_CL_MAC_MEDIA_RPT 0x8 +#define H2C_FUNC_MAC_JOININFO 0x0 +#define H2C_FUNC_MAC_FWROLE_MAINTAIN 0x4 + +/* CLASS 9 - FW offload */ +#define H2C_CL_MAC_FW_OFLD 0x9 +#define H2C_FUNC_MAC_MACID_PAUSE 0x8 +#define H2C_FUNC_USR_EDCA 0xF +#define H2C_FUNC_OFLD_CFG 0x14 + +/* CLASS 10 - Security CAM */ +#define H2C_CL_MAC_SEC_CAM 0xa +#define H2C_FUNC_MAC_SEC_UPD 0x1 + +/* CLASS 12 - BA CAM */ +#define H2C_CL_BA_CAM 0xc +#define H2C_FUNC_MAC_BA_CAM 0x0 + +#define H2C_CAT_OUTSRC 0x2 + +#define H2C_CL_OUTSRC_RA 0x1 +#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0 + +#define H2C_CL_OUTSRC_RF_REG_A 0x8 +#define H2C_CL_OUTSRC_RF_REG_B 0x9 + +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev); +int rtw89_fw_recognize(struct rtw89_dev *rtwdev); +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type); +int rtw89_load_firmware(struct rtw89_dev *rtwdev); +void rtw89_unload_firmware(struct rtw89_dev *rtwdev); +int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev); +void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u8 type, u8 cat, u8 class, u8 func, + bool rack, bool dack, u32 len); +int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid); +int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta); +int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct 
rtw89_vif *vif); +void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h); +void rtw89_fw_c2h_work(struct work_struct *work); +int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + enum rtw89_upd_mode upd_mode); +int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u8 dis_conn); +int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, + bool pause); +int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u8 ac, u32 val); +int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi); +int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, + struct rtw89_fw_h2c_rf_reg_info *info, + u16 len, u8 page); +int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, + u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, + bool rack, bool dack); +int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len); +void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev); +void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid); +int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid, + struct ieee80211_ampdu_params *params); +int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, + struct rtw89_lps_parm *lps_param); +struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len); +struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len); +int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, + struct rtw89_mac_h2c_info *h2c_info, + struct rtw89_mac_c2h_info *c2h_info); +int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable); +void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c new file mode 100644 index 00000000000000..afcd07ab1de791 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -0,0 +1,3836 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "cam.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "ps.h" +#include "reg.h" +#include "util.h" + +int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, + enum rtw89_mac_hwmod_sel sel) +{ + u32 val, r_val; + + if (sel == RTW89_DMAC_SEL) { + r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN); + val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN); + } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) { + r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN); + val = B_AX_CMAC_EN; + } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) { + r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND); + val = B_AX_CMAC1_FEN; + } else { + return -EINVAL; + } + if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD || + (val & r_val) != val) + return -EFAULT; + + return 0; +} + +int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val) +{ + u8 lte_ctrl; + int ret; + + ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, + 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); + if (ret) + rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); + + rtw89_write32(rtwdev, R_AX_LTE_WDATA, val); + rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset); + + return ret; +} + +int 
rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val) +{ + u8 lte_ctrl; + int ret; + + ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, + 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); + if (ret) + rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); + + rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset); + *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA); + + return ret; +} + +static +int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl) +{ + u32 ctrl_reg, data_reg, ctrl_data; + u32 val; + int ret; + + switch (ctrl->type) { + case DLE_CTRL_TYPE_WDE: + ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL; + data_reg = R_AX_WDE_DBG_FUN_INTF_DATA; + ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) | + FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) | + B_AX_WDE_DFI_ACTIVE; + break; + case DLE_CTRL_TYPE_PLE: + ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL; + data_reg = R_AX_PLE_DBG_FUN_INTF_DATA; + ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) | + FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) | + B_AX_PLE_DFI_ACTIVE; + break; + default: + rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type); + return -EINVAL; + } + + rtw89_write32(rtwdev, ctrl_reg, ctrl_data); + + ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE), + 1, 1000, false, rtwdev, ctrl_reg); + if (ret) { + rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n", + ctrl_reg, ctrl_data); + return ret; + } + + ctrl->out_data = rtw89_read32(rtwdev, data_reg); + return 0; +} + +static int dle_dfi_quota(struct rtw89_dev *rtwdev, + struct rtw89_mac_dle_dfi_quota *quota) +{ + struct rtw89_mac_dle_dfi_ctrl ctrl; + int ret; + + ctrl.type = quota->dle_type; + ctrl.target = DLE_DFI_TYPE_QUOTA; + ctrl.addr = quota->qtaid; + ret = dle_dfi_ctrl(rtwdev, &ctrl); + if (ret) { + rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); + return ret; + } + + quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data); + quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data); + return 0; +} + +static int dle_dfi_qempty(struct rtw89_dev *rtwdev, + struct rtw89_mac_dle_dfi_qempty *qempty) +{ + struct rtw89_mac_dle_dfi_ctrl ctrl; + u32 ret; + + ctrl.type = qempty->dle_type; + ctrl.target = DLE_DFI_TYPE_QEMPTY; + ctrl.addr = qempty->grpsel; + ret = dle_dfi_ctrl(rtwdev, &ctrl); + if (ret) { + rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); + return ret; + } + + qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data); + return 0; +} + +static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev) +{ + rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); +} + +static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev) +{ + struct rtw89_mac_dle_dfi_qempty qempty; + struct rtw89_mac_dle_dfi_quota quota; + struct rtw89_mac_dle_dfi_ctrl ctrl; + u32 val, not_empty, i; + int ret; + + 
qempty.dle_type = DLE_CTRL_TYPE_PLE; + qempty.grpsel = 0; + ret = dle_dfi_qempty(rtwdev, &qempty); + if (ret) + rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); + else + rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty); + + for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) { + if (!(not_empty & BIT(0))) + continue; + ctrl.type = DLE_CTRL_TYPE_PLE; + ctrl.target = DLE_DFI_TYPE_QLNKTBL; + ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) | + FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i); + ret = dle_dfi_ctrl(rtwdev, &ctrl); + if (ret) + rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); + else + rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i, + FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK, + ctrl.out_data)); + } + + quota.dle_type = DLE_CTRL_TYPE_PLE; + quota.qtaid = 6; + ret = dle_dfi_quota(rtwdev, &quota); + if (ret) + rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); + else + rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n", + quota.rsv_pgnum, quota.use_pgnum); + + val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG); + rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n", + FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val)); + rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n", + FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val)); + + dump_err_status_dispatcher(rtwdev); +} + +static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev, + enum mac_ax_err_info err) +{ + u32 dbg, event; + + dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO); + event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg); + + switch (event) { + case MAC_AX_L0_TO_L1_RX_QTA_LOST: + rtw89_info(rtwdev, "quota lost!\n"); + rtw89_mac_dump_qta_lost(rtwdev); + break; + default: + break; + } +} + +static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, + enum mac_ax_err_info err) +{ + u32 dmac_err, cmac_err; + + if (err != MAC_AX_ERR_L1_ERR_DMAC && + err != MAC_AX_ERR_L0_PROMOTE_TO_L1) + return; + + rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); + rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); + + cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR); + rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err); + dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); + rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err); + + if (dmac_err) { + rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ", + rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG)); + rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG)); + } + + if (dmac_err & B_AX_WDRLS_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); + } + + if (dmac_err & B_AX_WSEC_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); + 
rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); + rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); + } + + if (dmac_err & B_AX_MPDU_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); + } + + if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n", + rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); + } + + if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); + dump_err_status_dispatcher(rtwdev); + } + + if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); + rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); + } + + if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); + rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); + rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); + rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); + rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); + rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); + rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); + rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); + rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); + rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); + rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); + rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); + dump_err_status_dispatcher(rtwdev); + } + + if (dmac_err & B_AX_PKTIN_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", + 
rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); + rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); + } + + if (dmac_err & B_AX_DISPATCH_ERR_FLAG) + dump_err_status_dispatcher(rtwdev); + + if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) { + rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR)); + } + + if (dmac_err & BIT(11)) { + rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); + } + + if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { + rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR)); + rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n", + rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR)); + } + + if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { + rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ", + rtw89_read32(rtwdev, R_AX_PTCL_IMR0)); + rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PTCL_ISR0)); + } + + if (cmac_err & B_AX_DMA_TOP_ERR_IND) { + rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DLE_CTRL)); + } + + if (cmac_err & B_AX_PHYINTF_ERR_IND) { + rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR)); + } + + if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { + rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ", + rtw89_read32(rtwdev, R_AX_TXPWR_IMR)); + rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPWR_ISR)); + } + + if (cmac_err & B_AX_WMAC_RX_ERR_IND) { + rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ", + rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); + rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR)); + } + + if (cmac_err & B_AX_WMAC_TX_ERR_IND) { + rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ", + rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR)); + rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); + } + + rtwdev->hci.ops->dump_err_status(rtwdev); + + if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1) + rtw89_mac_dump_l0_to_l1(rtwdev, err); + + rtw89_info(rtwdev, "<---\n"); +} + +u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) +{ + u32 err; + int ret; + + ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000, + false, rtwdev, R_AX_HALT_C2H_CTRL); + if (ret) { + rtw89_warn(rtwdev, "Polling FW err status fail\n"); + return ret; + } + + err = rtw89_read32(rtwdev, R_AX_HALT_C2H); + rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); + + rtw89_fw_st_dbg_dump(rtwdev); + rtw89_mac_dump_err_status(rtwdev, err); + + return err; +} +EXPORT_SYMBOL(rtw89_mac_get_err_status); + +int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err) +{ + u32 halt; + int ret = 0; + + if (err > MAC_AX_SET_ERR_MAX) { + rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err); + return -EINVAL; + } + + ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000, + 100000, false, rtwdev, R_AX_HALT_H2C_CTRL); + if (ret) { + rtw89_err(rtwdev, "FW doesn't receive previous msg\n"); + return -EFAULT; + } + + rtw89_write32(rtwdev, R_AX_HALT_H2C, err); + rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER); + + return 0; +} +EXPORT_SYMBOL(rtw89_mac_set_err_status); + +const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = { + 2, 40, 0, 0, 1, 
0, 0, 0 +}; + +static int hfc_reset_param(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + struct rtw89_hfc_param_ini param_ini = {NULL}; + u8 qta_mode = rtwdev->mac.dle_info.qta_mode; + + switch (rtwdev->hci.type) { + case RTW89_HCI_TYPE_PCIE: + param_ini = rtwdev->chip->hfc_param_ini[qta_mode]; + param->en = 0; + break; + default: + return -EINVAL; + } + + if (param_ini.pub_cfg) + param->pub_cfg = *param_ini.pub_cfg; + + if (param_ini.prec_cfg) { + param->prec_cfg = *param_ini.prec_cfg; + rtwdev->hal.sw_amsdu_max_size = + param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT; + } + + if (param_ini.ch_cfg) + param->ch_cfg = param_ini.ch_cfg; + + memset(&param->ch_info, 0, sizeof(param->ch_info)); + memset(&param->pub_info, 0, sizeof(param->pub_info)); + param->mode = param_ini.mode; + + return 0; +} + +static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg; + const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg; + const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg; + + if (ch >= RTW89_DMA_CH_NUM) + return -EINVAL; + + if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) || + ch_cfg[ch].max > pub_cfg->pub_max) + return -EINVAL; + if (ch_cfg[ch].grp >= grp_num) + return -EINVAL; + + return 0; +} + +static int hfc_pub_info_chk(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg; + struct rtw89_hfc_pub_info *info = &param->pub_info; + + if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) { + if (rtwdev->chip->chip_id == RTL8852A) + return 0; + else + return -EFAULT; + } + + return 0; +} + +static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg; + + if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max) + return -EFAULT; + + return 0; +} + +static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; + int ret = 0; + u32 val = 0; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + ret = hfc_ch_cfg_chk(rtwdev, ch); + if (ret) + return ret; + + if (ch > RTW89_DMA_B1HI) + return -EINVAL; + + val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) | + u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) | + (cfg[ch].grp ? 
B_AX_GRP : 0); + rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val); + + return 0; +} + +static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + struct rtw89_hfc_ch_info *info = param->ch_info; + const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; + u32 val; + u32 ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + if (ch > RTW89_DMA_H2C) + return -EINVAL; + + val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4); + info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK); + if (ch < RTW89_DMA_H2C) + info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK); + else + info[ch].used = cfg[ch].min - info[ch].aval; + + return 0; +} + +static int hfc_pub_ctrl(struct rtw89_dev *rtwdev) +{ + const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg; + u32 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + ret = hfc_pub_cfg_chk(rtwdev); + if (ret) + return ret; + + val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) | + u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK); + rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val); + + val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK); + rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val); + + return 0; +} + +static int hfc_upd_mix_info(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg; + struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg; + struct rtw89_hfc_pub_info *info = &param->pub_info; + u32 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1); + info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK); + info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK); + val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3); + info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK); + info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK); + info->pub_aval = + u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2), + B_AX_PUB_AVAL_PG_MASK); + info->wp_aval = + u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1), + B_AX_WP_AVAL_PG_MASK); + + val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); + param->en = val & B_AX_HCI_FC_EN ? 1 : 0; + param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 
1 : 0; + param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK); + prec_cfg->ch011_full_cond = + u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK); + prec_cfg->h2c_full_cond = + u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK); + prec_cfg->wp_ch07_full_cond = + u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); + prec_cfg->wp_ch811_full_cond = + u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); + + val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL); + prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK); + prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK); + + val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2); + pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK); + + val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1); + prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK); + prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK); + + val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2); + pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK); + + val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1); + pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK); + pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK); + + ret = hfc_pub_info_chk(rtwdev); + if (param->en && ret) + return ret; + + return 0; +} + +static void hfc_h2c_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg; + u32 val; + + val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); + rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); + + rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL, + B_AX_HCI_FC_CH12_FULL_COND_MASK, + prec_cfg->h2c_full_cond); +} + +static void hfc_mix_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg; + const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg; + u32 val; + + val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) | + u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); + rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); + + val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK); + rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val); + + val = u32_encode_bits(prec_cfg->wp_ch07_prec, + B_AX_PREC_PAGE_WP_CH07_MASK) | + u32_encode_bits(prec_cfg->wp_ch811_prec, + B_AX_PREC_PAGE_WP_CH811_MASK); + rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val); + + val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL), + param->mode, B_AX_HCI_FC_MODE_MASK); + val = u32_replace_bits(val, prec_cfg->ch011_full_cond, + B_AX_HCI_FC_WD_FULL_COND_MASK); + val = u32_replace_bits(val, prec_cfg->h2c_full_cond, + B_AX_HCI_FC_CH12_FULL_COND_MASK); + val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond, + B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); + val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond, + B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); + rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); +} + +static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en) +{ + struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; + u32 val; + + val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); + param->en = en; + param->h2c_en = h2c_en; + val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN); + val = h2c_en ? 
(val | B_AX_HCI_FC_CH12_EN) : + (val & ~B_AX_HCI_FC_CH12_EN); + rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); +} + +static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en) +{ + u8 ch; + u32 ret = 0; + + if (reset) + ret = hfc_reset_param(rtwdev); + if (ret) + return ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + hfc_func_en(rtwdev, false, false); + + if (!en && h2c_en) { + hfc_h2c_cfg(rtwdev); + hfc_func_en(rtwdev, en, h2c_en); + return ret; + } + + for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { + ret = hfc_ch_ctrl(rtwdev, ch); + if (ret) + return ret; + } + + ret = hfc_pub_ctrl(rtwdev); + if (ret) + return ret; + + hfc_mix_cfg(rtwdev); + if (en || h2c_en) { + hfc_func_en(rtwdev, en, h2c_en); + udelay(10); + } + for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { + ret = hfc_upd_ch_info(rtwdev, ch); + if (ret) + return ret; + } + ret = hfc_upd_mix_info(rtwdev); + + return ret; +} + +#define PWR_POLL_CNT 2000 +static int pwr_cmd_poll(struct rtw89_dev *rtwdev, + const struct rtw89_pwr_cfg *cfg) +{ + u8 val = 0; + int ret; + u32 addr = cfg->base == PWR_INTF_MSK_SDIO ? + cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr; + + ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk), + 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr); + + if (!ret) + return 0; + + rtw89_warn(rtwdev, "[ERR] Polling timeout\n"); + rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr); + rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val); + + return -EBUSY; +} + +static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk, + u8 intf_msk, const struct rtw89_pwr_cfg *cfg) +{ + const struct rtw89_pwr_cfg *cur_cfg; + u32 addr; + u8 val; + + for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) { + if (!(cur_cfg->intf_msk & intf_msk) || + !(cur_cfg->cv_msk & cv_msk)) + continue; + + switch (cur_cfg->cmd) { + case PWR_CMD_WRITE: + addr = cur_cfg->addr; + + if (cur_cfg->base == PWR_BASE_SDIO) + addr |= SDIO_LOCAL_BASE_ADDR; + + val = rtw89_read8(rtwdev, addr); + val &= ~(cur_cfg->msk); + val |= (cur_cfg->val & cur_cfg->msk); + + rtw89_write8(rtwdev, addr, val); + break; + case PWR_CMD_POLL: + if (pwr_cmd_poll(rtwdev, cur_cfg)) + return -EBUSY; + break; + case PWR_CMD_DELAY: + if (cur_cfg->val == PWR_DELAY_US) + udelay(cur_cfg->addr); + else + fsleep(cur_cfg->addr * 1000); + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev, + const struct rtw89_pwr_cfg * const *cfg_seq) +{ + int ret; + + for (; *cfg_seq; cfg_seq++) { + ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv), + PWR_INTF_MSK_PCIE, *cfg_seq); + if (ret) + return -EBUSY; + } + + return 0; +} + +static enum rtw89_rpwm_req_pwr_state +rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev) +{ + enum rtw89_rpwm_req_pwr_state state; + + switch (rtwdev->ps_mode) { + case RTW89_PS_MODE_RFOFF: + state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF; + break; + case RTW89_PS_MODE_CLK_GATED: + state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED; + break; + case RTW89_PS_MODE_PWR_GATED: + state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED; + break; + default: + state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; + break; + } + return state; +} + +static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev, + enum rtw89_rpwm_req_pwr_state req_pwr_state) +{ + u16 request; + + request = rtw89_read16(rtwdev, R_AX_RPWM); + request ^= request | PS_RPWM_TOGGLE; + + rtwdev->mac.rpwm_seq_num = 
(rtwdev->mac.rpwm_seq_num + 1) & + RPWM_SEQ_NUM_MAX; + request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num); + + request |= req_pwr_state; + + if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) + request |= PS_RPWM_ACK; + + rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request); +} + +static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, + enum rtw89_rpwm_req_pwr_state req_pwr_state) +{ + bool request_deep_mode; + bool in_deep_mode; + u8 rpwm_req_num; + u8 cpwm_rsp_seq; + u8 cpwm_seq; + u8 cpwm_status; + + if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) + request_deep_mode = true; + else + request_deep_mode = false; + + if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K)) + in_deep_mode = true; + else + in_deep_mode = false; + + if (request_deep_mode != in_deep_mode) + return -EPERM; + + if (request_deep_mode) + return 0; + + rpwm_req_num = rtwdev->mac.rpwm_seq_num; + cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, + PS_CPWM_RSP_SEQ_NUM); + + if (rpwm_req_num != cpwm_rsp_seq) + return -EPERM; + + rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & + CPWM_SEQ_NUM_MAX; + + cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM); + if (cpwm_seq != rtwdev->mac.cpwm_seq_num) + return -EPERM; + + cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE); + if (cpwm_status != req_pwr_state) + return -EPERM; + + return 0; +} + +void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) +{ + enum rtw89_rpwm_req_pwr_state state; + int ret; + + if (enter) + state = rtw89_mac_get_req_pwr_state(rtwdev); + else + state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; + + rtw89_mac_send_rpwm(rtwdev, state); + ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret, + 1000, 15000, false, rtwdev, state); + if (ret) + rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", + enter ? 
"entering" : "leaving"); +} + +static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) +{ +#define PWR_ACT 1 + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_pwr_cfg * const *cfg_seq; + struct rtw89_hal *hal = &rtwdev->hal; + int ret; + u8 val; + + if (on) + cfg_seq = chip->pwr_on_seq; + else + cfg_seq = chip->pwr_off_seq; + + if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) + __rtw89_leave_ps_mode(rtwdev); + + val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK); + if (on && val == PWR_ACT) { + rtw89_err(rtwdev, "MAC has already powered on\n"); + return -EBUSY; + } + + ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq); + if (ret) + return ret; + + if (on) { + set_bit(RTW89_FLAG_POWERON, rtwdev->flags); + rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR); + } else { + clear_bit(RTW89_FLAG_POWERON, rtwdev->flags); + clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); + rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR); + hal->current_channel = 0; + } + + return 0; +#undef PWR_ACT +} + +void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) +{ + rtw89_mac_power_switch(rtwdev, false); +} + +static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) +{ + u32 func_en = 0; + u32 ck_en = 0; + u32 c1pc_en = 0; + u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1}; + u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1}; + + func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | + B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | + B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN; + ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN | + B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN | + B_AX_RMAC_CKEN; + c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN | + B_AX_R_SYM_WLCMAC1_P1_PC_EN | + B_AX_R_SYM_WLCMAC1_P2_PC_EN | + B_AX_R_SYM_WLCMAC1_P3_PC_EN | + B_AX_R_SYM_WLCMAC1_P4_PC_EN; + + if (en) { + if (mac_idx == RTW89_MAC_1) { + rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en); + rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, + B_AX_R_SYM_ISO_CMAC12PP); + rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, + B_AX_CMAC1_FEN); + } + rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en); + rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en); + } else { + rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en); + rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en); + if (mac_idx == RTW89_MAC_1) { + rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, + B_AX_CMAC1_FEN); + rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, + B_AX_R_SYM_ISO_CMAC12PP); + rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en); + } + } + + return 0; +} + +static int dmac_func_en(struct rtw89_dev *rtwdev) +{ + u32 val32; + u32 ret = 0; + + val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN | + B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | + B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN | + B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN); + rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32); + + val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN | + B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN | + B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN | + B_AX_WD_RLS_CLK_EN); + rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); + + return ret; +} + +static int chip_func_en(struct rtw89_dev *rtwdev) +{ + rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK); + + return 0; +} + +static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = 
dmac_func_en(rtwdev); + if (ret) + return ret; + + ret = cmac_func_en(rtwdev, 0, true); + if (ret) + return ret; + + ret = chip_func_en(rtwdev); + if (ret) + return ret; + + return ret; +} + +/* PCIE 64 */ +const struct rtw89_dle_size wde_size0 = { + RTW89_WDE_PG_64, 4095, 1, +}; + +/* DLFW */ +const struct rtw89_dle_size wde_size4 = { + RTW89_WDE_PG_64, 0, 4096, +}; + +/* PCIE */ +const struct rtw89_dle_size ple_size0 = { + RTW89_PLE_PG_128, 1520, 16, +}; + +/* DLFW */ +const struct rtw89_dle_size ple_size4 = { + RTW89_PLE_PG_128, 64, 1472, +}; + +/* PCIE 64 */ +const struct rtw89_wde_quota wde_qt0 = { + 3792, 196, 0, 107, +}; + +/* DLFW */ +const struct rtw89_wde_quota wde_qt4 = { + 0, 0, 0, 0, +}; + +/* PCIE SCC */ +const struct rtw89_ple_quota ple_qt4 = { + 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8, +}; + +/* PCIE SCC */ +const struct rtw89_ple_quota ple_qt5 = { + 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120, +}; + +/* DLFW */ +const struct rtw89_ple_quota ple_qt13 = { + 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0 +}; + +static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, + enum rtw89_qta_mode mode) +{ + struct rtw89_mac_info *mac = &rtwdev->mac; + const struct rtw89_dle_mem *cfg; + + cfg = &rtwdev->chip->dle_mem[mode]; + if (!cfg) + return NULL; + + if (cfg->mode != mode) { + rtw89_warn(rtwdev, "qta mode unmatch!\n"); + return NULL; + } + + mac->dle_info.wde_pg_size = cfg->wde_size->pge_size; + mac->dle_info.ple_pg_size = cfg->ple_size->pge_size; + mac->dle_info.qta_mode = mode; + mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma; + mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma; + + return cfg; +} + +static inline u32 dle_used_size(const struct rtw89_dle_size *wde, + const struct rtw89_dle_size *ple) +{ + return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) + + ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num); +} + +static void dle_func_en(struct rtw89_dev *rtwdev, bool enable) +{ + if (enable) + rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, + B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); + else + rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN, + B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); +} + +static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable) +{ + if (enable) + rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN, + B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); + else + rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN, + B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN); +} + +static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg) +{ + const struct rtw89_dle_size *size_cfg; + u32 val; + u8 bound = 0; + + val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG); + size_cfg = cfg->wde_size; + + switch (size_cfg->pge_size) { + default: + case RTW89_WDE_PG_64: + val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64, + B_AX_WDE_PAGE_SEL_MASK); + break; + case RTW89_WDE_PG_128: + val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128, + B_AX_WDE_PAGE_SEL_MASK); + break; + case RTW89_WDE_PG_256: + rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n"); + return -EINVAL; + } + + val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK); + val = u32_replace_bits(val, size_cfg->lnk_pge_num, + B_AX_WDE_FREE_PAGE_NUM_MASK); + rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val); + + val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG); + bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num) + * size_cfg->pge_size / DLE_BOUND_UNIT; + size_cfg = cfg->ple_size; + + switch (size_cfg->pge_size) { + default: + case RTW89_PLE_PG_64: + rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 
64 byte!\n"); + return -EINVAL; + case RTW89_PLE_PG_128: + val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128, + B_AX_PLE_PAGE_SEL_MASK); + break; + case RTW89_PLE_PG_256: + val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256, + B_AX_PLE_PAGE_SEL_MASK); + break; + } + + val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK); + val = u32_replace_bits(val, size_cfg->lnk_pge_num, + B_AX_PLE_FREE_PAGE_NUM_MASK); + rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val); + + return 0; +} + +#define INVALID_QT_WCPU U16_MAX +#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \ + do { \ + val = ((_min_x) & \ + B_AX_ ## _module ## _MIN_SIZE_MASK) | \ + (((_max_x) << 16) & \ + B_AX_ ## _module ## _MAX_SIZE_MASK); \ + rtw89_write32(rtwdev, \ + R_AX_ ## _module ## _QTA ## _idx ## _CFG, \ + val); \ + } while (0) +#define SET_QUOTA(_x, _module, _idx) \ + SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx) + +static void wde_quota_cfg(struct rtw89_dev *rtwdev, + const struct rtw89_wde_quota *min_cfg, + const struct rtw89_wde_quota *max_cfg, + u16 ext_wde_min_qt_wcpu) +{ + u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ? + ext_wde_min_qt_wcpu : min_cfg->wcpu; + u32 val; + + SET_QUOTA(hif, WDE, 0); + SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1); + SET_QUOTA(pkt_in, WDE, 3); + SET_QUOTA(cpu_io, WDE, 4); +} + +static void ple_quota_cfg(struct rtw89_dev *rtwdev, + const struct rtw89_ple_quota *min_cfg, + const struct rtw89_ple_quota *max_cfg) +{ + u32 val; + + SET_QUOTA(cma0_tx, PLE, 0); + SET_QUOTA(cma1_tx, PLE, 1); + SET_QUOTA(c2h, PLE, 2); + SET_QUOTA(h2c, PLE, 3); + SET_QUOTA(wcpu, PLE, 4); + SET_QUOTA(mpdu_proc, PLE, 5); + SET_QUOTA(cma0_dma, PLE, 6); + SET_QUOTA(cma1_dma, PLE, 7); + SET_QUOTA(bb_rpt, PLE, 8); + SET_QUOTA(wd_rel, PLE, 9); + SET_QUOTA(cpu_io, PLE, 10); +} + +#undef SET_QUOTA + +static void dle_quota_cfg(struct rtw89_dev *rtwdev, + const struct rtw89_dle_mem *cfg, + u16 ext_wde_min_qt_wcpu) +{ + wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu); + ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt); +} + +static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, + enum rtw89_qta_mode ext_mode) +{ + const struct rtw89_dle_mem *cfg, *ext_cfg; + u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU; + int ret = 0; + u32 ini; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + cfg = get_dle_mem_cfg(rtwdev, mode); + if (!cfg) { + rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); + ret = -EINVAL; + goto error; + } + + if (mode == RTW89_QTA_DLFW) { + ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode); + if (!ext_cfg) { + rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n", + ext_mode); + ret = -EINVAL; + goto error; + } + ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu; + } + + if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { + rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); + ret = -EINVAL; + goto error; + } + + dle_func_en(rtwdev, false); + dle_clk_en(rtwdev, true); + + ret = dle_mix_cfg(rtwdev, cfg); + if (ret) { + rtw89_err(rtwdev, "[ERR] dle mix cfg\n"); + goto error; + } + dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu); + + dle_func_en(rtwdev, true); + + ret = read_poll_timeout(rtw89_read32, ini, + (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, + 2000, false, rtwdev, R_AX_WDE_INI_STATUS); + if (ret) { + rtw89_err(rtwdev, "[ERR]WDE cfg ready\n"); + return ret; + } + + ret = read_poll_timeout(rtw89_read32, ini, + (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, + 2000, false, rtwdev, 
R_AX_PLE_INI_STATUS); + if (ret) { + rtw89_err(rtwdev, "[ERR]PLE cfg ready\n"); + return ret; + } + + return 0; +error: + dle_func_en(rtwdev, false); + rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n", + rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS)); + rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n", + rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS)); + + return ret; +} + +static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) +{ + u32 msk32; + u32 val32; + + msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH | + B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 | + B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS | + B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C | + B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | + B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | + B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | + B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | + B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | + B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX | + B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | + B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | + B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU; + val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); + + if ((val32 & msk32) == msk32) + return true; + + return false; +} + +static int sta_sch_init(struct rtw89_dev *rtwdev) +{ + u32 p_val; + u8 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + val = rtw89_read8(rtwdev, R_AX_SS_CTRL); + val |= B_AX_SS_EN; + rtw89_write8(rtwdev, R_AX_SS_CTRL, val); + + ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1, + 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL); + if (ret) { + rtw89_err(rtwdev, "[ERR]STA scheduler init\n"); + return ret; + } + + rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); + + return 0; +} + +static int mpdu_proc_init(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); + rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); + rtw89_write32_set(rtwdev, R_AX_MPDU_PROC, + B_AX_APPEND_FCS | B_AX_A_ICV_ERR); + rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL); + + return 0; +} + +static int sec_eng_init(struct rtw89_dev *rtwdev) +{ + u32 val = 0; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) + return ret; + + val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL); + /* init clock */ + val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP); + /* init TX encryption */ + val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); + val |= (B_AX_MC_DEC | B_AX_BC_DEC); + val &= ~B_AX_TX_PARTIAL_MODE; + rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); + + /* init MIC ICV append */ + val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); + val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); + + /* option init */ + rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); + + return 0; +} + +static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + int ret; + + ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); + if (ret) { + rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); + return ret; + } + + ret = hfc_init(rtwdev, true, true, true); + if (ret) { + rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); + return ret; + } + + ret = sta_sch_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); + return ret; + } + + ret = mpdu_proc_init(rtwdev); + if 
(ret) { + rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); + return ret; + } + + ret = sec_eng_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); + return ret; + } + + return ret; +} + +static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 val, reg; + u16 p_val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); + + val = rtw89_read32(rtwdev, reg); + val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | + B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; + rtw89_write32(rtwdev, reg, val); + + ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), + 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR); + if (ret) { + rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); + return ret; + } + + return 0; +} + +static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 ret; + u32 reg; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US); + + return 0; +} + +static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, + enum rtw89_machdr_frame_type type, + enum rtw89_mac_fwd_target fwd_target, + u8 mac_idx) +{ + u32 reg; + u32 val; + + switch (fwd_target) { + case RTW89_FWD_DONT_CARE: + val = RX_FLTR_FRAME_DROP; + break; + case RTW89_FWD_TO_HOST: + val = RX_FLTR_FRAME_TO_HOST; + break; + case RTW89_FWD_TO_WLAN_CPU: + val = RX_FLTR_FRAME_TO_WLCPU; + break; + default: + rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); + return -EINVAL; + } + + switch (type) { + case RTW89_MGNT: + reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); + break; + case RTW89_CTRL: + reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); + break; + case RTW89_DATA: + reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); + break; + default: + rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); + return -EINVAL; + } + rtw89_write32(rtwdev, reg, val); + + return 0; +} + +static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + int ret, i; + u32 mac_ftlr, plcp_ftlr; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { + ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST, + mac_idx); + if (ret) + return ret; + } + mac_ftlr = rtwdev->hal.rx_fltr; + plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | + B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | + B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | + B_AX_HE_SIGB_CRC_CHK; + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx), + mac_ftlr); + rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx), + plcp_ftlr); + + return 0; +} + +static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg, val32; + u32 b_rsp_chk_nav, b_rsp_chk_cca; + + b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV | + B_AX_RSP_CHK_BASIC_NAV; + b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 | + B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA | + B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA; + + switch (rtwdev->chip->chip_id) { + case RTL8852A: + case RTL8852B: + reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; + rtw89_write32(rtwdev, reg, val32); + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + 
val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; + rtw89_write32(rtwdev, reg, val32); + break; + default: + reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; + rtw89_write32(rtwdev, reg, val32); + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; + rtw89_write32(rtwdev, reg, val32); + break; + } +} + +static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 val, reg; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx); + val = rtw89_read32(rtwdev, reg); + val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | + B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | + B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 | + B_AX_CTN_CHK_INTRA_NAV | + B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | + B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | + B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | + B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA); + val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | + B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | + B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | + B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV); + + rtw89_write32(rtwdev, reg, val); + + _patch_dis_resp_chk(rtwdev, mac_idx); + + return 0; +} + +static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx); + rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); + + return 0; +} + +static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); + rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); + + return 0; +} + +static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg, val, sifs; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + val = rtw89_read32(rtwdev, reg); + val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; + val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); + + switch (rtwdev->chip->chip_id) { + case RTL8852A: + sifs = WMAC_SPEC_SIFS_OFDM_52A; + break; + case RTL8852B: + sifs = WMAC_SPEC_SIFS_OFDM_52B; + break; + default: + sifs = WMAC_SPEC_SIFS_OFDM_52C; + break; + } + val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK; + val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); + rtw89_write32(rtwdev, reg, val); + + reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); + + return 0; +} + +static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ +#define TRXCFG_RMAC_CCA_TO 32 +#define TRXCFG_RMAC_DATA_TO 15 +#define RX_MAX_LEN_UNIT 512 +#define PLD_RLS_MAX_PG 127 + int ret; + u32 reg, rx_max_len, rx_qta; + u16 val; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); + rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); + + reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx); + val = rtw89_read16(rtwdev, reg); + val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, + B_AX_RX_DLK_DATA_TIME_MASK); + val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO, + 
B_AX_RX_DLK_CCA_TIME_MASK); + rtw89_write16(rtwdev, reg, val); + + reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx); + rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); + + reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx); + if (mac_idx == RTW89_MAC_0) + rx_qta = rtwdev->mac.dle_info.c0_rx_qta; + else + rx_qta = rtwdev->mac.dle_info.c1_rx_qta; + rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta; + rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size / + RX_MAX_LEN_UNIT; + rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ? + B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len; + rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); + + if (rtwdev->chip->chip_id == RTL8852A && + rtwdev->hal.cv == CHIP_CBV) { + rtw89_write16_mask(rtwdev, + rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx), + B_AX_RX_DLK_CCA_TIME_MASK, 0); + rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx), + BIT(12)); + } + + reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx); + rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); + + return ret; +} + +static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 val, reg; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + val = rtw89_read32(rtwdev, reg); + val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); + val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); + val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); + rtw89_write32(rtwdev, reg, val); + + return 0; +} + +static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) +{ + const struct rtw89_dle_mem *cfg; + + cfg = get_dle_mem_cfg(rtwdev, mode); + if (!cfg) { + rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); + return false; + } + + return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma); +} + +static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 val, reg; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { + reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); + val = rtw89_read32(rtwdev, reg); + val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, + B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); + val |= B_AX_HW_CTS2SELF_EN; + rtw89_write32(rtwdev, reg, val); + + reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); + val = rtw89_read32(rtwdev, reg); + val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); + val &= ~B_AX_PTCL_TX_ARB_TO_MODE; + rtw89_write32(rtwdev, reg, val); + } + + reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); + val = rtw89_read32(rtwdev, reg); + val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); + val |= B_AX_HW_CTS2SELF_EN; + rtw89_write32(rtwdev, reg, val); + + return 0; +} + +static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + int ret; + + ret = scheduler_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); + return ret; + } + + ret = addr_cam_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, + ret); + return ret; + } + + ret = rx_fltr_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, + ret); + return ret; + } + + ret = cca_ctrl_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, + ret); + return ret; + } + + 
ret = spatial_reuse_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", + mac_idx, ret); + return ret; + } + + ret = tmac_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); + return ret; + } + + ret = trxptcl_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); + return ret; + } + + ret = rmac_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); + return ret; + } + + ret = cmac_com_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); + return ret; + } + + ret = ptcl_init(rtwdev, mac_idx); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); + return ret; + } + + return ret; +} + +static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, + struct rtw89_mac_c2h_info *c2h_info) +{ + struct rtw89_mac_h2c_info h2c_info = {0}; + u32 ret; + + h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; + h2c_info.content_len = 0; + + ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); + if (ret) + return ret; + + if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP) + return -EINVAL; + + return 0; +} + +int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_mac_c2h_info c2h_info = {0}; + struct rtw89_c2h_phy_cap *cap = + (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0]; + u32 ret; + + ret = rtw89_mac_read_phycap(rtwdev, &c2h_info); + if (ret) + return ret; + + hal->tx_nss = cap->tx_nss ? + min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss; + hal->rx_nss = cap->rx_nss ? + min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss; + + rtw89_debug(rtwdev, RTW89_DBG_FW, + "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", + hal->tx_nss, cap->tx_nss, chip->tx_nss, + hal->rx_nss, cap->rx_nss, chip->rx_nss); + + return 0; +} + +static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, + u16 tx_en_u16, u16 mask_u16) +{ + u32 ret; + struct rtw89_mac_c2h_info c2h_info = {0}; + struct rtw89_mac_h2c_info h2c_info = {0}; + struct rtw89_h2creg_sch_tx_en *h2creg = + (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg; + + h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; + h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN; + h2creg->tx_en = tx_en_u16; + h2creg->mask = mask_u16; + h2creg->band = band; + + ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); + if (ret) + return ret; + + if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) + return -EINVAL; + + return 0; +} + +static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, + u16 tx_en, u16 tx_en_mask) +{ + u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); + u16 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) + return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, + tx_en, tx_en_mask); + + val = rtw89_read16(rtwdev, reg); + val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); + rtw89_write16(rtwdev, reg, val); + + return 0; +} + +int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, + u16 *tx_en, enum rtw89_sch_tx_sel sel) +{ + int ret; + + *tx_en = rtw89_read16(rtwdev, + rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); + + switch (sel) { + case RTW89_SCH_TX_SEL_ALL: + ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); + if 
(ret) + return ret; + break; + case RTW89_SCH_TX_SEL_HIQ: + ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, + 0, B_AX_CTN_TXEN_HGQ); + if (ret) + return ret; + break; + case RTW89_SCH_TX_SEL_MG0: + ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, + 0, B_AX_CTN_TXEN_MGQ); + if (ret) + return ret; + break; + case RTW89_SCH_TX_SEL_MACID: + ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); + if (ret) + return ret; + break; + default: + return 0; + } + + return 0; +} + +int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en) +{ + int ret; + + ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff); + if (ret) + return ret; + + return 0; +} + +static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, + bool wd) +{ + u32 val, reg; + int ret; + + reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; + val = buf_len; + val |= B_AX_WD_BUF_REQ_EXEC; + rtw89_write32(rtwdev, reg, val); + + reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; + + ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, + 1, 2000, false, rtwdev, reg); + if (ret) + return 0xffff; + + return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); +} + +static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev, + struct rtw89_cpuio_ctrl *ctrl_para, + bool wd) +{ + u32 val, cmd_type, reg; + int ret; + + cmd_type = ctrl_para->cmd_type; + + reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; + val = 0; + val = u32_replace_bits(val, ctrl_para->start_pktid, + B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); + val = u32_replace_bits(val, ctrl_para->end_pktid, + B_AX_WD_CPUQ_OP_END_PKTID_MASK); + rtw89_write32(rtwdev, reg, val); + + reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; + val = 0; + val = u32_replace_bits(val, ctrl_para->src_pid, + B_AX_CPUQ_OP_SRC_PID_MASK); + val = u32_replace_bits(val, ctrl_para->src_qid, + B_AX_CPUQ_OP_SRC_QID_MASK); + val = u32_replace_bits(val, ctrl_para->dst_pid, + B_AX_CPUQ_OP_DST_PID_MASK); + val = u32_replace_bits(val, ctrl_para->dst_qid, + B_AX_CPUQ_OP_DST_QID_MASK); + rtw89_write32(rtwdev, reg, val); + + reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; + val = 0; + val = u32_replace_bits(val, cmd_type, + B_AX_CPUQ_OP_CMD_TYPE_MASK); + val = u32_replace_bits(val, ctrl_para->macid, + B_AX_CPUQ_OP_MACID_MASK); + val = u32_replace_bits(val, ctrl_para->pkt_num, + B_AX_CPUQ_OP_PKTNUM_MASK); + val |= B_AX_WD_CPUQ_OP_EXEC; + rtw89_write32(rtwdev, reg, val); + + reg = wd ? 
R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; + + ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, + 1, 2000, false, rtwdev, reg); + if (ret) + return ret; + + if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || + cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) + ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); + + return 0; +} + +static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) +{ + const struct rtw89_dle_mem *cfg; + struct rtw89_cpuio_ctrl ctrl_para = {0}; + u16 pkt_id; + int ret; + + cfg = get_dle_mem_cfg(rtwdev, mode); + if (!cfg) { + rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); + return -EINVAL; + } + + if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { + rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); + return -EINVAL; + } + + dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); + + pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true); + if (pkt_id == 0xffff) { + rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); + return -ENOMEM; + } + + ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; + ctrl_para.start_pktid = pkt_id; + ctrl_para.end_pktid = pkt_id; + ctrl_para.pkt_num = 0; + ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; + ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; + ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true); + if (ret) { + rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); + return -EFAULT; + } + + pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false); + if (pkt_id == 0xffff) { + rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); + return -ENOMEM; + } + + ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; + ctrl_para.start_pktid = pkt_id; + ctrl_para.end_pktid = pkt_id; + ctrl_para.pkt_num = 0; + ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS; + ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; + ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false); + if (ret) { + rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); + return -EFAULT; + } + + return 0; +} + +static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + int ret; + u32 reg; + u8 val; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); + + ret = read_poll_timeout(rtw89_read8, val, + (val & B_AX_PTCL_TX_ON_STAT) == 0, + SW_CVR_DUR_US, + SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, + false, rtwdev, reg); + if (ret) + return ret; + + return 0; +} + +static int band1_enable(struct rtw89_dev *rtwdev) +{ + int ret, i; + u32 sleep_bak[4] = {0}; + u32 pause_bak[4] = {0}; + u16 tx_en; + + ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); + if (ret) { + rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); + return ret; + } + + for (i = 0; i < 4; i++) { + sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); + pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); + rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); + rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); + } + + ret = band_idle_ck_b(rtwdev, 0); + if (ret) { + rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); + return ret; + } + + ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode); + if (ret) { + rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); + return ret; + } + + for (i = 0; i < 4; i++) { + rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); + rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); + } + + ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", 
ret); + return ret; + } + + ret = cmac_func_en(rtwdev, 1, true); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); + return ret; + } + + ret = cmac_init(rtwdev, 1); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); + return ret; + } + + rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, + B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); + + return 0; +} + +static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, + enum rtw89_mac_hwmod_sel sel) +{ + u32 reg, val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); + if (ret) { + rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", + sel, mac_idx); + return ret; + } + + if (sel == RTW89_DMAC_SEL) { + rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, + B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | + B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | + B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1, + B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | + B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, + B_AX_HDT_PKT_FAIL_DBG_INT_EN | + B_AX_HDT_OFFSET_UNMATCH_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, + B_AX_CPU_SHIFT_EN_ERR_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, + B_AX_PLE_GETNPG_STRPG_ERR_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, + B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN); + rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN); + rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, + B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN); + } else if (sel == RTW89_CMAC_SEL) { + reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); + rtw89_write32_clr(rtwdev, reg, + B_AX_SORT_NON_IDLE_ERR_INT_EN); + + reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx); + rtw89_write32_clr(rtwdev, reg, + B_AX_NO_RESERVE_PAGE_ERR_IMR | + B_AX_RXDATA_FSM_HANG_ERROR_IMR); + + reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); + val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | + B_AX_TX_RECORD_PKTID_ERR_INT_EN | + B_AX_FSM_TIMEOUT_ERR_INT_EN; + rtw89_write32(rtwdev, reg, val); + + reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx); + rtw89_write32_set(rtwdev, reg, + B_AX_PHY_TXON_TIMEOUT_INT_EN | + B_AX_CCK_CCA_TIMEOUT_INT_EN | + B_AX_OFDM_CCA_TIMEOUT_INT_EN | + B_AX_DATA_ON_TIMEOUT_INT_EN | + B_AX_STS_ON_TIMEOUT_INT_EN | + B_AX_CSI_ON_TIMEOUT_INT_EN); + + reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx); + val = rtw89_read32(rtwdev, reg); + val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN | + B_AX_RMAC_RX_TIMEOUT_INT_EN | + B_AX_RMAC_CSI_TIMEOUT_INT_EN); + val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | + B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | + B_AX_RMAC_CCA_TIMEOUT_INT_EN | + B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN); + rtw89_write32(rtwdev, reg, val); + } else { + return -EINVAL; + } + + return 0; +} + +static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable) +{ + int ret = 0; + + if (enable) { + ret = band1_enable(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret); + return ret; + } + + ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL); + if (ret) { + rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret); + return ret; + } + } else { + rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented yet\n"); + return -EINVAL; + } + + return 0; +} + +static int set_host_rpr(struct rtw89_dev *rtwdev) +{ + if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { + rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, + B_AX_WDRLS_MODE_MASK, 
RTW89_RPR_MODE_POH); + rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0, + B_AX_RLSRPT0_FLTR_MAP_MASK); + } else { + rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, + B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF); + rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0, + B_AX_RLSRPT0_FLTR_MAP_MASK); + } + + rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30); + rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255); + + return 0; +} + +static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) +{ + enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; + int ret; + + ret = dmac_init(rtwdev, 0); + if (ret) { + rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret); + return ret; + } + + ret = cmac_init(rtwdev, 0); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret); + return ret; + } + + if (is_qta_dbcc(rtwdev, qta_mode)) { + ret = rtw89_mac_dbcc_enable(rtwdev, true); + if (ret) { + rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret); + return ret; + } + } + + ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); + if (ret) { + rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret); + return ret; + } + + ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); + if (ret) { + rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret); + return ret; + } + + ret = set_host_rpr(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret); + return ret; + } + + return 0; +} + +static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) +{ + clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); + + rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); + rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); +} + +static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, + bool dlfw) +{ + u32 val; + int ret; + + if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN) + return -EFAULT; + + rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); + rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); + + rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); + + val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); + val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY); + val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE, + B_AX_WCPU_FWDL_STS_MASK); + + if (dlfw) + val |= B_AX_WCPU_FWDL_EN; + + rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val); + rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK, + boot_reason); + rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); + + if (!dlfw) { + mdelay(5); + + ret = rtw89_fw_check_rdy(rtwdev); + if (ret) + return ret; + } + + return 0; +} + +static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev) +{ + u32 val; + int ret; + + val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | + B_AX_PKT_BUF_EN; + rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val); + + val = B_AX_DISPATCHER_CLK_EN; + rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); + + ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); + if (ret) { + rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); + return ret; + } + + ret = hfc_init(rtwdev, true, false, true); + if (ret) { + rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret); + return ret; + } + + return ret; +} + +static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev) +{ + rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, + B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN); +} + +void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + 
rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, + B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | + B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); + rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); +} + +void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, + B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | + B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); + rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); +} + +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) { + rtw89_mac_power_switch(rtwdev, false); + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) + return ret; + } + + rtw89_mac_hci_func_en(rtwdev); + + if (rtwdev->hci.ops->mac_pre_init) { + ret = rtwdev->hci.ops->mac_pre_init(rtwdev); + if (ret) + return ret; + } + + ret = rtw89_mac_fw_dl_pre_init(rtwdev); + if (ret) + return ret; + + rtw89_mac_disable_cpu(rtwdev); + ret = rtw89_mac_enable_cpu(rtwdev, 0, true); + if (ret) + return ret; + + ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); + if (ret) + return ret; + + return 0; +} + +int rtw89_mac_init(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_partial_init(rtwdev); + if (ret) + goto fail; + + rtw89_mac_enable_bb_rf(rtwdev); + + ret = rtw89_mac_sys_init(rtwdev); + if (ret) + goto fail; + + ret = rtw89_mac_trx_init(rtwdev); + if (ret) + goto fail; + + if (rtwdev->hci.ops->mac_post_init) { + ret = rtwdev->hci.ops->mac_post_init(rtwdev); + if (ret) + goto fail; + } + + rtw89_fw_send_all_early_h2c(rtwdev); + rtw89_fw_h2c_set_ofld_cfg(rtwdev); + + return ret; +fail: + rtw89_mac_power_switch(rtwdev, false); + + return ret; +} + +static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) +{ + u8 i; + + for (i = 0; i < 4; i++) { + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, + DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); + } +} + +static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) +{ + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, + CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); +} + +static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) +{ + u8 sh = FIELD_GET(GENMASK(4, 0), macid); + u8 grp = macid >> 5; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); + if (ret) + return ret; + + rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); + + return 0; +} + +static const struct rtw89_port_reg rtw_port_base = { + .port_cfg = R_AX_PORT_CFG_P0, + .tbtt_prohib = R_AX_TBTT_PROHIB_P0, + .bcn_area = R_AX_BCN_AREA_P0, + .bcn_early = R_AX_BCNERLYINT_CFG_P0, + .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, + .tbtt_agg = R_AX_TBTT_AGG_P0, + .bcn_space = R_AX_BCN_SPACE_CFG_P0, + .bcn_forcetx = R_AX_BCN_FORCETX_P0, + .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, + .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, + .dtim_ctrl = R_AX_DTIM_CTRL_P0, + .tbtt_shift = R_AX_TBTT_SHIFT_P0, + .bcn_cnt_tmr = 
R_AX_BCN_CNT_TMR_P0, + .tsftr_l = R_AX_TSFTR_LOW_P0, + .tsftr_h = R_AX_TSFTR_HIGH_P0 +}; + +#define BCN_INTERVAL 100 +#define BCN_ERLY_DEF 160 +#define BCN_SETUP_DEF 2 +#define BCN_HOLD_DEF 200 +#define BCN_MASK_DEF 0 +#define TBTT_ERLY_DEF 5 +#define BCN_SET_UNIT 32 +#define BCN_ERLY_SET_DLY (10 * 2) + +static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + const struct rtw89_port_reg *p = &rtw_port_base; + + if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) + return; + + rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); + rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); + rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); + + msleep(vif->bss_conf.beacon_int + 1); + + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | + B_AX_BRK_SETUP); + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST); + rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); +} + +static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool en) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); +} + +static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool en) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); +} + +static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, + rtwvif->net_type); +} + +static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; + u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits); +} + +static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || + rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; + u32 bit = B_AX_RX_BSSID_FIT_EN; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit); +} + +static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || + rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); +} + +static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || + 
rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; + + if (en) + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); + else + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); +} + +static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + const struct rtw89_port_reg *p = &rtw_port_base; + u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, + bcn_int); +} + +static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, + B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); +} + +static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, + B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); +} + +static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, + B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); +} + +static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, + B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); +} + +static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + static const u32 masks[RTW89_PORT_NUM] = { + B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, + B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, + B_AX_BSS_COLOB_AX_PORT_4_MASK, + }; + u8 port = rtwvif->port; + u32 reg_base; + u32 reg; + u8 bss_color; + + bss_color = vif->bss_conf.he_bss_color.color; + reg_base = port >= 4 ? 
R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; + reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); +} + +static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + u8 port = rtwvif->port; + u32 reg; + + if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) + return; + + if (port == 0) { + reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); + rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); + } +} + +static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + u8 port = rtwvif->port; + u32 reg; + u32 val; + + reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); + val = rtw89_read32(rtwdev, reg); + val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); + if (port == 0) + val &= ~BIT(0); + rtw89_write32(rtwdev, reg, val); +} + +static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN); +} + +static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + const struct rtw89_port_reg *p = &rtw_port_base; + + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, + BCN_ERLY_DEF); +} + +int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + int ret; + + ret = rtw89_mac_port_update(rtwdev, rtwvif); + if (ret) + return ret; + + rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id); + rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id); + + ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false); + if (ret) + return ret; + + ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE); + if (ret) + return ret; + + ret = rtw89_cam_init(rtwdev, rtwvif); + if (ret) + return ret; + + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + if (ret) + return ret; + + ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id); + if (ret) + return ret; + + return 0; +} + +int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + int ret; + + ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE); + if (ret) + return ret; + + rtw89_cam_deinit(rtwdev, rtwvif); + + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + if (ret) + return ret; + + return 0; +} + +int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + u8 port = rtwvif->port; + + if (port >= RTW89_PORT_NUM) + return -EINVAL; + + rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); + rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false); + rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false); + rtw89_mac_port_cfg_net_type(rtwdev, rtwvif); + rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); + rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); + rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); + rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); + rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); + rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif); + rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif); + rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif); + rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif); + rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif); + rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif); + rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif); + rtw89_mac_port_cfg_func_en(rtwdev, rtwvif); + fsleep(BCN_ERLY_SET_DLY); + rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif); + + return 0; +} + +int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + int ret; + + 
rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, + RTW89_MAX_MAC_ID_NUM); + if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) + return -ENOSPC; + + ret = rtw89_mac_vif_init(rtwdev, rtwvif); + if (ret) + goto release_mac_id; + + return 0; + +release_mac_id: + rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); + + return ret; +} + +int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + int ret; + + ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); + rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); + + return ret; +} + +static void +rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ +} + +static void +rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + rtw89_debug(rtwdev, RTW89_DBG_FW, + "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", + RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), + RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), + RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), + RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); +} + +static void +rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + rtw89_debug(rtwdev, RTW89_DBG_FW, + "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", + RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data), + RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data), + RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data), + RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data), + RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data)); +} + +static void +rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len), + RTW89_GET_C2H_LOG_SRT_PRT(c2h->data)); +} + +static +void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, + [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, + [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL, + [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, + [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, +}; + +static +void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, + [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, + [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, +}; + +void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func) +{ + void (*handler)(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = NULL; + + switch (class) { + case RTW89_MAC_C2H_CLASS_INFO: + if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) + handler = rtw89_mac_c2h_info_handler[func]; + break; + case RTW89_MAC_C2H_CLASS_OFLD: + if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) + handler = rtw89_mac_c2h_ofld_handler[func]; + break; + case RTW89_MAC_C2H_CLASS_FWDBG: + return; + default: + rtw89_info(rtwdev, "c2h class %d not supported\n", class); + return; + } + if (!handler) { + rtw89_info(rtwdev, "c2h class %d func %d not supported\n", class, + func); + return; + } + handler(rtwdev, skb, len); +} + +bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr) +{ + const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; + enum rtw89_qta_mode mode = dle_mem->mode; + u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); + + if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { + rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", + addr); + goto error; + } + + if (addr >= CMAC1_START_ADDR && 
addr <= CMAC1_END_ADDR) + if (mode == RTW89_QTA_SCC) { + rtw89_err(rtwdev, + "[TXPWR] addr=0x%x but hw not enabled\n", + addr); + goto error; + } + + *cr = addr; + return true; + +error: + rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", + addr, phy_idx); + + return false; +} + +int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) +{ + u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); + int ret = 0; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (!enable) { + rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); + return ret; + } + + rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | + B_AX_APP_MAC_INFO_RPT | + B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | + B_AX_PPDU_STAT_RPT_CRC32); + rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, + RTW89_PRPT_DEST_HOST); + + return ret; +} + +void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) +{ +#define MAC_AX_TIME_TH_SH 5 +#define MAC_AX_LEN_TH_SH 4 +#define MAC_AX_TIME_TH_MAX 255 +#define MAC_AX_LEN_TH_MAX 255 +#define MAC_AX_TIME_TH_DEF 88 +#define MAC_AX_LEN_TH_DEF 4080 + struct ieee80211_hw *hw = rtwdev->hw; + u32 rts_threshold = hw->wiphy->rts_threshold; + u32 time_th, len_th; + u32 reg; + + if (rts_threshold == (u32)-1) { + time_th = MAC_AX_TIME_TH_DEF; + len_th = MAC_AX_LEN_TH_DEF; + } else { + time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; + len_th = rts_threshold; + } + + time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); + len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); + + reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); + rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); + rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); +} + +void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) +{ + bool empty; + int ret; + + if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) + return; + + ret = read_poll_timeout(dle_is_txq_empty, empty, empty, + 10000, 200000, false, rtwdev); + if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) + rtw89_info(rtwdev, "timed out flushing queues\n"); +} + +int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) +{ + u8 val; + u16 val16; + u32 val32; + int ret; + + rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); + rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); + rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); + rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); + rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); + rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); + + val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); + val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN; + rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); + + ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); + if (ret) { + rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n"); + return ret; + } + val32 = val32 & B_AX_WL_RX_CTRL; + ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); + if (ret) { + rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n"); + return ret; + } + + switch (coex->pta_mode) { + case RTW89_MAC_AX_COEX_RTK_MODE: + val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); + val &= ~B_AX_BTMODE_MASK; + val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); + rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); + + val = 
rtw89_read8(rtwdev, R_AX_TDMA_MODE); + rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); + + val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); + val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; + val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); + rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); + break; + case RTW89_MAC_AX_COEX_CSR_MODE: + val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); + val &= ~B_AX_BTMODE_MASK; + val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); + rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); + + val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); + val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; + val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); + val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; + val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); + val16 &= ~B_AX_BT_STAT_DELAY_MASK; + val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); + val16 |= B_AX_ENHANCED_BT; + rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); + + rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); + break; + default: + return -EINVAL; + } + + switch (coex->direction) { + case RTW89_MAC_AX_COEX_INNER: + val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); + val = (val & ~BIT(2)) | BIT(1); + rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); + break; + case RTW89_MAC_AX_COEX_OUTPUT: + val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); + val = val | BIT(1) | BIT(0); + rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); + break; + case RTW89_MAC_AX_COEX_INPUT: + val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); + val = val & ~(BIT(2) | BIT(1)); + rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); + break; + default: + return -EINVAL; + } + + return 0; +} + +int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, + const struct rtw89_mac_ax_coex_gnt *gnt_cfg) +{ + u32 val, ret; + + ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); + if (ret) { + rtw89_err(rtwdev, "Read LTE fail!\n"); + return ret; + } + val = (gnt_cfg->band[0].gnt_bt ? + B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) | + (gnt_cfg->band[0].gnt_bt_sw_en ? + B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) | + (gnt_cfg->band[0].gnt_wl ? + B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) | + (gnt_cfg->band[0].gnt_wl_sw_en ? + B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) | + (gnt_cfg->band[1].gnt_bt ? + B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) | + (gnt_cfg->band[1].gnt_bt_sw_en ? + B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) | + (gnt_cfg->band[1].gnt_wl ? + B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) | + (gnt_cfg->band[1].gnt_wl_sw_en ? + B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0); + ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); + if (ret) { + rtw89_err(rtwdev, "Write LTE fail!\n"); + return ret; + } + + return 0; +} + +int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) +{ + u32 reg; + u8 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); + val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | + (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | + (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | + (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | + (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | + (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? 
B_AX_RX_PLT_GNT_BT_TX : 0) | + (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | + (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0); + rtw89_write8(rtwdev, reg, val); + + return 0; +} + +void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) +{ + u32 fw_sb; + + fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); + fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); + fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; + if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) + fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; + else + fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; + val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); + val = B_AX_TOGGLE | + FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | + FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); + rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); + fsleep(1000); /* avoid BT FW loss information */ +} + +u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) +{ + return rtw89_read32(rtwdev, R_AX_SCOREBOARD); +} + +int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) +{ + u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); + + val = wl ? val | BIT(2) : val & ~BIT(2); + rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); + + return 0; +} + +bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) +{ + u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); + + return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val); +} + +static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) +{ + u32 reg; + u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | + B_AX_BFMEE_HE_NDPA_EN; + + rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); + reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + if (en) { + set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); + rtw89_write32_set(rtwdev, reg, mask); + } else { + clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); + rtw89_write32_clr(rtwdev, reg, mask); + } +} + +static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + u32 val32; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + /* AP mode set tx gid to 63 */ + /* STA mode set tx gid to 0(default) */ + reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); + rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); + + reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER); + val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); + rtw89_write32(rtwdev, reg, val32); + rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | + B_AX_BFMEE_USE_NSTS | + B_AX_BFMEE_CSI_GID_SEL | + B_AX_BFMEE_CSI_FORCE_RETE_EN); + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); + rtw89_write32(rtwdev, reg, + u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); + + return 0; +} + +static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u8 mac_idx = rtwvif->mac_idx; + u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; + u8 port_sel = rtwvif->port; + u8 
sound_dim = 3, t; + u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info; + u32 reg; + u16 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || + (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { + ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); + stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); + t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, + phy_cap[5]); + sound_dim = min(sound_dim, t); + } + if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { + ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); + t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, + sta->vht_cap.cap); + sound_dim = min(sound_dim, t); + } + nc = min(nc, sound_dim); + nr = min(nr, sound_dim); + + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); + + val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) | + FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); + + if (port_sel == 0) + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + else + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); + + rtw89_write16(rtwdev, reg, val); + + return 0; +} + +static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); + u32 reg; + u8 mac_idx = rtwvif->mac_idx; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (sta->he_cap.has_he) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); + } + if (sta->vht_cap.vht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); + } + if (sta->ht_cap.ht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); + } + reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); + rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); + rtw89_write32(rtwdev, + rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), + rrsc); + + return 0; +} + +void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + if (rtw89_sta_has_beamformer_cap(sta)) { + rtw89_debug(rtwdev, RTW89_DBG_BF, + "initialize bfee for new association\n"); + rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); + rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); + rtw89_mac_csi_rrsc(rtwdev, vif, sta); + } +} + +void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + rtw89_mac_bfee_ctrl(rtwdev, 
rtwvif->mac_idx, false); +} + +void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u8 mac_idx = rtwvif->mac_idx; + __le32 *p; + + rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); + + p = (__le32 *)conf->mu_group.membership; + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), + le32_to_cpu(p[0])); + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), + le32_to_cpu(p[1])); + + p = (__le32 *)conf->mu_group.position; + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), + le32_to_cpu(p[0])); + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), + le32_to_cpu(p[1])); + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), + le32_to_cpu(p[2])); + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx), + le32_to_cpu(p[3])); +} + +struct rtw89_mac_bf_monitor_iter_data { + struct rtw89_dev *rtwdev; + struct ieee80211_sta *down_sta; + int count; +}; + +static +void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_mac_bf_monitor_iter_data *iter_data = + (struct rtw89_mac_bf_monitor_iter_data *)data; + struct ieee80211_sta *down_sta = iter_data->down_sta; + int *count = &iter_data->count; + + if (down_sta == sta) + return; + + if (rtw89_sta_has_beamformer_cap(sta)) + (*count)++; +} + +void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, + struct ieee80211_sta *sta, bool disconnect) +{ + struct rtw89_mac_bf_monitor_iter_data data; + + data.rtwdev = rtwdev; + data.down_sta = disconnect ? sta : NULL; + data.count = 0; + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_mac_bf_monitor_calc_iter, + &data); + + rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count); + if (data.count) + set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); + else + clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); +} + +void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_traffic_stats *stats = &rtwdev->stats; + struct rtw89_vif *rtwvif; + bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? false : true; + bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); + + if (en == old) + return; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en); +} + +static int +__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + u32 tx_time) +{ +#define MAC_AX_DFLT_TX_TIME 5280 + u8 mac_idx = rtwsta->rtwvif->mac_idx; + u32 max_tx_time = tx_time == 0 ? 
MAC_AX_DFLT_TX_TIME : tx_time; + u32 reg; + int ret = 0; + + if (rtwsta->cctl_tx_time) { + rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9; + ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); + } else { + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) { + rtw89_warn(rtwdev, "failed to check cmac in set txtime\n"); + return ret; + } + + reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, + max_tx_time >> 5); + } + + return ret; +} + +int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + bool resume, u32 tx_time) +{ + int ret = 0; + + if (!resume) { + rtwsta->cctl_tx_time = true; + ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); + } else { + ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); + rtwsta->cctl_tx_time = false; + } + + return ret; +} + +int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + u32 *tx_time) +{ + u8 mac_idx = rtwsta->rtwvif->mac_idx; + u32 reg; + int ret = 0; + + if (rtwsta->cctl_tx_time) { + *tx_time = (rtwsta->ampdu_max_time + 1) << 9; + } else { + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) { + rtw89_warn(rtwdev, "failed to check cmac in tx_time\n"); + return ret; + } + + reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; + } + + return ret; +} + +int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta, + bool resume, u8 tx_retry) +{ + int ret = 0; + + rtwsta->data_tx_cnt_lmt = tx_retry; + + if (!resume) { + rtwsta->cctl_tx_retry_limit = true; + ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); + } else { + ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); + rtwsta->cctl_tx_retry_limit = false; + } + + return ret; +} + +int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta, u8 *tx_retry) +{ + u8 mac_idx = rtwsta->rtwvif->mac_idx; + u32 reg; + int ret = 0; + + if (rtwsta->cctl_tx_retry_limit) { + *tx_retry = rtwsta->data_tx_cnt_lmt; + } else { + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) { + rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n"); + return ret; + } + + reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); + *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); + } + + return ret; +} + +int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool en) +{ + u8 mac_idx = rtwvif->mac_idx; + u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0; + u32 reg; + u32 ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); + if (en) + rtw89_write16_set(rtwdev, reg, set); + else + rtw89_write16_clr(rtwdev, reg, set); + + return 0; +} diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h new file mode 100644 index 00000000000000..6f3db8a2a9c2a0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -0,0 +1,860 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_MAC_H__ +#define __RTW89_MAC_H__ + +#include "core.h" + +#define MAC_MEM_DUMP_PAGE_SIZE 0x40000 +#define ADDR_CAM_ENT_SIZE 0x40 +#define BSSID_CAM_ENT_SIZE 0x08 +#define HFC_PAGE_UNIT 64 + +enum rtw89_mac_hwmod_sel { + RTW89_DMAC_SEL = 0, + RTW89_CMAC_SEL = 1, + + 
RTW89_MAC_INVALID, +}; + +enum rtw89_mac_fwd_target { + RTW89_FWD_DONT_CARE = 0, + RTW89_FWD_TO_HOST = 1, + RTW89_FWD_TO_WLAN_CPU = 2 +}; + +enum rtw89_mac_wd_dma_intvl { + RTW89_MAC_WD_DMA_INTVL_0S, + RTW89_MAC_WD_DMA_INTVL_256NS, + RTW89_MAC_WD_DMA_INTVL_512NS, + RTW89_MAC_WD_DMA_INTVL_768NS, + RTW89_MAC_WD_DMA_INTVL_1US, + RTW89_MAC_WD_DMA_INTVL_1_5US, + RTW89_MAC_WD_DMA_INTVL_2US, + RTW89_MAC_WD_DMA_INTVL_4US, + RTW89_MAC_WD_DMA_INTVL_8US, + RTW89_MAC_WD_DMA_INTVL_16US, + RTW89_MAC_WD_DMA_INTVL_DEF = 0xFE +}; + +enum rtw89_mac_multi_tag_num { + RTW89_MAC_TAG_NUM_1, + RTW89_MAC_TAG_NUM_2, + RTW89_MAC_TAG_NUM_3, + RTW89_MAC_TAG_NUM_4, + RTW89_MAC_TAG_NUM_5, + RTW89_MAC_TAG_NUM_6, + RTW89_MAC_TAG_NUM_7, + RTW89_MAC_TAG_NUM_8, + RTW89_MAC_TAG_NUM_DEF = 0xFE +}; + +enum rtw89_mac_lbc_tmr { + RTW89_MAC_LBC_TMR_8US = 0, + RTW89_MAC_LBC_TMR_16US, + RTW89_MAC_LBC_TMR_32US, + RTW89_MAC_LBC_TMR_64US, + RTW89_MAC_LBC_TMR_128US, + RTW89_MAC_LBC_TMR_256US, + RTW89_MAC_LBC_TMR_512US, + RTW89_MAC_LBC_TMR_1MS, + RTW89_MAC_LBC_TMR_2MS, + RTW89_MAC_LBC_TMR_4MS, + RTW89_MAC_LBC_TMR_8MS, + RTW89_MAC_LBC_TMR_DEF = 0xFE +}; + +enum rtw89_mac_cpuio_op_cmd_type { + CPUIO_OP_CMD_GET_1ST_PID = 0, + CPUIO_OP_CMD_GET_NEXT_PID = 1, + CPUIO_OP_CMD_ENQ_TO_TAIL = 4, + CPUIO_OP_CMD_ENQ_TO_HEAD = 5, + CPUIO_OP_CMD_DEQ = 8, + CPUIO_OP_CMD_DEQ_ENQ_ALL = 9, + CPUIO_OP_CMD_DEQ_ENQ_TO_TAIL = 12 +}; + +enum rtw89_mac_wde_dle_port_id { + WDE_DLE_PORT_ID_DISPATCH = 0, + WDE_DLE_PORT_ID_PKTIN = 1, + WDE_DLE_PORT_ID_CMAC0 = 3, + WDE_DLE_PORT_ID_CMAC1 = 4, + WDE_DLE_PORT_ID_CPU_IO = 6, + WDE_DLE_PORT_ID_WDRLS = 7, + WDE_DLE_PORT_ID_END = 8 +}; + +enum rtw89_mac_wde_dle_queid_wdrls { + WDE_DLE_QUEID_TXOK = 0, + WDE_DLE_QUEID_DROP_RETRY_LIMIT = 1, + WDE_DLE_QUEID_DROP_LIFETIME_TO = 2, + WDE_DLE_QUEID_DROP_MACID_DROP = 3, + WDE_DLE_QUEID_NO_REPORT = 4 +}; + +enum rtw89_mac_ple_dle_port_id { + PLE_DLE_PORT_ID_DISPATCH = 0, + PLE_DLE_PORT_ID_MPDU = 1, + PLE_DLE_PORT_ID_SEC = 2, + PLE_DLE_PORT_ID_CMAC0 = 3, + PLE_DLE_PORT_ID_CMAC1 = 4, + PLE_DLE_PORT_ID_WDRLS = 5, + PLE_DLE_PORT_ID_CPU_IO = 6, + PLE_DLE_PORT_ID_PLRLS = 7, + PLE_DLE_PORT_ID_END = 8 +}; + +enum rtw89_mac_ple_dle_queid_plrls { + PLE_DLE_QUEID_NO_REPORT = 0x0 +}; + +enum rtw89_machdr_frame_type { + RTW89_MGNT = 0, + RTW89_CTRL = 1, + RTW89_DATA = 2, +}; + +enum rtw89_mac_dle_dfi_type { + DLE_DFI_TYPE_FREEPG = 0, + DLE_DFI_TYPE_QUOTA = 1, + DLE_DFI_TYPE_PAGELLT = 2, + DLE_DFI_TYPE_PKTINFO = 3, + DLE_DFI_TYPE_PREPKTLLT = 4, + DLE_DFI_TYPE_NXTPKTLLT = 5, + DLE_DFI_TYPE_QLNKTBL = 6, + DLE_DFI_TYPE_QEMPTY = 7, +}; + +enum rtw89_mac_dle_wde_quota_id { + WDE_QTAID_HOST_IF = 0, + WDE_QTAID_WLAN_CPU = 1, + WDE_QTAID_DATA_CPU = 2, + WDE_QTAID_PKTIN = 3, + WDE_QTAID_CPUIO = 4, +}; + +enum rtw89_mac_dle_ple_quota_id { + PLE_QTAID_B0_TXPL = 0, + PLE_QTAID_B1_TXPL = 1, + PLE_QTAID_C2H = 2, + PLE_QTAID_H2C = 3, + PLE_QTAID_WLAN_CPU = 4, + PLE_QTAID_MPDU = 5, + PLE_QTAID_CMAC0_RX = 6, + PLE_QTAID_CMAC1_RX = 7, + PLE_QTAID_CMAC1_BBRPT = 8, + PLE_QTAID_WDRLS = 9, + PLE_QTAID_CPUIO = 10, +}; + +enum rtw89_mac_dle_ctrl_type { + DLE_CTRL_TYPE_WDE = 0, + DLE_CTRL_TYPE_PLE = 1, + DLE_CTRL_TYPE_NUM = 2, +}; + +enum rtw89_mac_ax_l0_to_l1_event { + MAC_AX_L0_TO_L1_CHIF_IDLE = 0, + MAC_AX_L0_TO_L1_CMAC_DMA_IDLE = 1, + MAC_AX_L0_TO_L1_RLS_PKID = 2, + MAC_AX_L0_TO_L1_PTCL_IDLE = 3, + MAC_AX_L0_TO_L1_RX_QTA_LOST = 4, + MAC_AX_L0_TO_L1_DLE_STAT_HANG = 5, + MAC_AX_L0_TO_L1_PCIE_STUCK = 6, + MAC_AX_L0_TO_L1_EVENT_MAX = 15, +}; + +enum rtw89_mac_dbg_port_sel { + /* CMAC 0 related */ + 
RTW89_DBG_PORT_SEL_PTCL_C0 = 0, + RTW89_DBG_PORT_SEL_SCH_C0, + RTW89_DBG_PORT_SEL_TMAC_C0, + RTW89_DBG_PORT_SEL_RMAC_C0, + RTW89_DBG_PORT_SEL_RMACST_C0, + RTW89_DBG_PORT_SEL_RMAC_PLCP_C0, + RTW89_DBG_PORT_SEL_TRXPTCL_C0, + RTW89_DBG_PORT_SEL_TX_INFOL_C0, + RTW89_DBG_PORT_SEL_TX_INFOH_C0, + RTW89_DBG_PORT_SEL_TXTF_INFOL_C0, + RTW89_DBG_PORT_SEL_TXTF_INFOH_C0, + /* CMAC 1 related */ + RTW89_DBG_PORT_SEL_PTCL_C1, + RTW89_DBG_PORT_SEL_SCH_C1, + RTW89_DBG_PORT_SEL_TMAC_C1, + RTW89_DBG_PORT_SEL_RMAC_C1, + RTW89_DBG_PORT_SEL_RMACST_C1, + RTW89_DBG_PORT_SEL_RMAC_PLCP_C1, + RTW89_DBG_PORT_SEL_TRXPTCL_C1, + RTW89_DBG_PORT_SEL_TX_INFOL_C1, + RTW89_DBG_PORT_SEL_TX_INFOH_C1, + RTW89_DBG_PORT_SEL_TXTF_INFOL_C1, + RTW89_DBG_PORT_SEL_TXTF_INFOH_C1, + /* DLE related */ + RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG, + RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA, + RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT, + RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO, + RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT, + RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT, + RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL, + RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY, + RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG, + RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA, + RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT, + RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO, + RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT, + RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT, + RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL, + RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY, + RTW89_DBG_PORT_SEL_PKTINFO, + /* PCIE related */ + RTW89_DBG_PORT_SEL_PCIE_TXDMA, + RTW89_DBG_PORT_SEL_PCIE_RXDMA, + RTW89_DBG_PORT_SEL_PCIE_CVT, + RTW89_DBG_PORT_SEL_PCIE_CXPL, + RTW89_DBG_PORT_SEL_PCIE_IO, + RTW89_DBG_PORT_SEL_PCIE_MISC, + RTW89_DBG_PORT_SEL_PCIE_MISC2, + + /* keep last */ + RTW89_DBG_PORT_SEL_LAST, + RTW89_DBG_PORT_SEL_MAX = RTW89_DBG_PORT_SEL_LAST, + RTW89_DBG_PORT_SEL_INVALID = RTW89_DBG_PORT_SEL_LAST, +}; + +/* SRAM mem dump */ +#define R_AX_INDIR_ACCESS_ENTRY 0x40000 + +#define STA_SCHED_BASE_ADDR 0x18808000 +#define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000 +#define SECURITY_CAM_BASE_ADDR 0x18814000 +#define WOW_CAM_BASE_ADDR 0x18815000 +#define CMAC_TBL_BASE_ADDR 0x18840000 +#define ADDR_CAM_BASE_ADDR 0x18850000 +#define BSSID_CAM_BASE_ADDR 0x18853000 +#define BA_CAM_BASE_ADDR 0x18854000 +#define BCN_IE_CAM0_BASE_ADDR 0x18855000 +#define SHARED_BUF_BASE_ADDR 0x18700000 +#define DMAC_TBL_BASE_ADDR 0x18800000 +#define SHCUT_MACHDR_BASE_ADDR 0x18800800 +#define BCN_IE_CAM1_BASE_ADDR 0x188A0000 + +#define CCTL_INFO_SIZE 32 + +enum rtw89_mac_mem_sel { + RTW89_MAC_MEM_SHARED_BUF, + RTW89_MAC_MEM_DMAC_TBL, + RTW89_MAC_MEM_SHCUT_MACHDR, + RTW89_MAC_MEM_STA_SCHED, + RTW89_MAC_MEM_RXPLD_FLTR_CAM, + RTW89_MAC_MEM_SECURITY_CAM, + RTW89_MAC_MEM_WOW_CAM, + RTW89_MAC_MEM_CMAC_TBL, + RTW89_MAC_MEM_ADDR_CAM, + RTW89_MAC_MEM_BA_CAM, + RTW89_MAC_MEM_BCN_IE_CAM0, + RTW89_MAC_MEM_BCN_IE_CAM1, + + /* keep last */ + RTW89_MAC_MEM_LAST, + RTW89_MAC_MEM_MAX = RTW89_MAC_MEM_LAST, + RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST, +}; + +enum rtw89_rpwm_req_pwr_state { + RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0, + RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1, + RTW89_MAC_RPWM_REQ_PWR_STATE_BAND1_RFON = 2, + RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF = 3, + RTW89_MAC_RPWM_REQ_PWR_STATE_BAND1_RFOFF = 4, + RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED = 5, + RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED = 6, + RTW89_MAC_RPWM_REQ_PWR_STATE_HIOE_PWR_GATED = 7, + RTW89_MAC_RPWM_REQ_PWR_STATE_MAX, +}; + +struct rtw89_pwr_cfg { + u16 addr; + u8 cv_msk; + u8 intf_msk; + u8 base:4; + u8 cmd:4; + u8 msk; + u8 val; +}; + +enum 
rtw89_mac_c2h_ofld_func { + RTW89_MAC_C2H_FUNC_EFUSE_DUMP, + RTW89_MAC_C2H_FUNC_READ_RSP, + RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP, + RTW89_MAC_C2H_FUNC_BCN_RESEND, + RTW89_MAC_C2H_FUNC_MACID_PAUSE, + RTW89_MAC_C2H_FUNC_OFLD_MAX, +}; + +enum rtw89_mac_c2h_info_func { + RTW89_MAC_C2H_FUNC_REC_ACK, + RTW89_MAC_C2H_FUNC_DONE_ACK, + RTW89_MAC_C2H_FUNC_C2H_LOG, + RTW89_MAC_C2H_FUNC_INFO_MAX, +}; + +enum rtw89_mac_c2h_class { + RTW89_MAC_C2H_CLASS_INFO, + RTW89_MAC_C2H_CLASS_OFLD, + RTW89_MAC_C2H_CLASS_TWT, + RTW89_MAC_C2H_CLASS_WOW, + RTW89_MAC_C2H_CLASS_MCC, + RTW89_MAC_C2H_CLASS_FWDBG, + RTW89_MAC_C2H_CLASS_MAX, +}; + +struct rtw89_mac_ax_coex { +#define RTW89_MAC_AX_COEX_RTK_MODE 0 +#define RTW89_MAC_AX_COEX_CSR_MODE 1 + u8 pta_mode; +#define RTW89_MAC_AX_COEX_INNER 0 +#define RTW89_MAC_AX_COEX_OUTPUT 1 +#define RTW89_MAC_AX_COEX_INPUT 2 + u8 direction; +}; + +struct rtw89_mac_ax_plt { +#define RTW89_MAC_AX_PLT_LTE_RX BIT(0) +#define RTW89_MAC_AX_PLT_GNT_BT_TX BIT(1) +#define RTW89_MAC_AX_PLT_GNT_BT_RX BIT(2) +#define RTW89_MAC_AX_PLT_GNT_WL BIT(3) + u8 band; + u8 tx; + u8 rx; +}; + +enum rtw89_mac_bf_rrsc_rate { + RTW89_MAC_BF_RRSC_6M = 0, + RTW89_MAC_BF_RRSC_9M = 1, + RTW89_MAC_BF_RRSC_12M, + RTW89_MAC_BF_RRSC_18M, + RTW89_MAC_BF_RRSC_24M, + RTW89_MAC_BF_RRSC_36M, + RTW89_MAC_BF_RRSC_48M, + RTW89_MAC_BF_RRSC_54M, + RTW89_MAC_BF_RRSC_HT_MSC0, + RTW89_MAC_BF_RRSC_HT_MSC1, + RTW89_MAC_BF_RRSC_HT_MSC2, + RTW89_MAC_BF_RRSC_HT_MSC3, + RTW89_MAC_BF_RRSC_HT_MSC4, + RTW89_MAC_BF_RRSC_HT_MSC5, + RTW89_MAC_BF_RRSC_HT_MSC6, + RTW89_MAC_BF_RRSC_HT_MSC7, + RTW89_MAC_BF_RRSC_VHT_MSC0, + RTW89_MAC_BF_RRSC_VHT_MSC1, + RTW89_MAC_BF_RRSC_VHT_MSC2, + RTW89_MAC_BF_RRSC_VHT_MSC3, + RTW89_MAC_BF_RRSC_VHT_MSC4, + RTW89_MAC_BF_RRSC_VHT_MSC5, + RTW89_MAC_BF_RRSC_VHT_MSC6, + RTW89_MAC_BF_RRSC_VHT_MSC7, + RTW89_MAC_BF_RRSC_HE_MSC0, + RTW89_MAC_BF_RRSC_HE_MSC1, + RTW89_MAC_BF_RRSC_HE_MSC2, + RTW89_MAC_BF_RRSC_HE_MSC3, + RTW89_MAC_BF_RRSC_HE_MSC4, + RTW89_MAC_BF_RRSC_HE_MSC5, + RTW89_MAC_BF_RRSC_HE_MSC6, + RTW89_MAC_BF_RRSC_HE_MSC7 = 31, + RTW89_MAC_BF_RRSC_MAX = 32 +}; + +#define RTW89_R32_EA 0xEAEAEAEA +#define RTW89_R32_DEAD 0xDEADBEEF +#define MAC_REG_POOL_COUNT 10 +#define ACCESS_CMAC(_addr) \ + ({typeof(_addr) __addr = (_addr); \ + __addr >= R_AX_CMAC_REG_START && __addr <= R_AX_CMAC_REG_END; }) + +#define PTCL_IDLE_POLL_CNT 10000 +#define SW_CVR_DUR_US 8 +#define SW_CVR_CNT 8 + +#define DLE_BOUND_UNIT (8 * 1024) +#define DLE_WAIT_CNT 2000 +#define TRXCFG_WAIT_CNT 2000 + +#define RTW89_WDE_PG_64 64 +#define RTW89_WDE_PG_128 128 +#define RTW89_WDE_PG_256 256 + +#define S_AX_WDE_PAGE_SEL_64 0 +#define S_AX_WDE_PAGE_SEL_128 1 +#define S_AX_WDE_PAGE_SEL_256 2 + +#define RTW89_PLE_PG_64 64 +#define RTW89_PLE_PG_128 128 +#define RTW89_PLE_PG_256 256 + +#define S_AX_PLE_PAGE_SEL_64 0 +#define S_AX_PLE_PAGE_SEL_128 1 +#define S_AX_PLE_PAGE_SEL_256 2 + +#define SDIO_LOCAL_BASE_ADDR 0x80000000 + +#define PWR_CMD_WRITE 0 +#define PWR_CMD_POLL 1 +#define PWR_CMD_DELAY 2 +#define PWR_CMD_END 3 + +#define PWR_INTF_MSK_SDIO BIT(0) +#define PWR_INTF_MSK_USB BIT(1) +#define PWR_INTF_MSK_PCIE BIT(2) +#define PWR_INTF_MSK_ALL 0x7 + +#define PWR_BASE_MAC 0 +#define PWR_BASE_USB 1 +#define PWR_BASE_PCIE 2 +#define PWR_BASE_SDIO 3 + +#define PWR_CV_MSK_A BIT(0) +#define PWR_CV_MSK_B BIT(1) +#define PWR_CV_MSK_C BIT(2) +#define PWR_CV_MSK_D BIT(3) +#define PWR_CV_MSK_E BIT(4) +#define PWR_CV_MSK_F BIT(5) +#define PWR_CV_MSK_G BIT(6) +#define PWR_CV_MSK_TEST BIT(7) +#define PWR_CV_MSK_ALL 0xFF + +#define PWR_DELAY_US 0 +#define PWR_DELAY_MS 1 
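+
+/* Editor's note (summary derived from the declarations in this header): the
+ * PWR_* values above encode the fields of a struct rtw89_pwr_cfg entry in the
+ * chip's power on/off sequence tables -- 'cmd' takes PWR_CMD_*, 'base' takes
+ * PWR_BASE_*, 'intf_msk' takes PWR_INTF_MSK_*, 'cv_msk' takes PWR_CV_MSK_*,
+ * and PWR_CMD_DELAY steps express their unit as PWR_DELAY_US or PWR_DELAY_MS.
+ */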
+ +/* STA scheduler */ +#define SS_MACID_SH 8 +#define SS_TX_LEN_MSK 0x1FFFFF +#define SS_CTRL1_R_TX_LEN 5 +#define SS_CTRL1_R_NEXT_LINK 20 +#define SS_LINK_SIZE 256 + +/* MAC debug port */ +#define TMAC_DBG_SEL_C0 0xA5 +#define RMAC_DBG_SEL_C0 0xA6 +#define TRXPTCL_DBG_SEL_C0 0xA7 +#define TMAC_DBG_SEL_C1 0xB5 +#define RMAC_DBG_SEL_C1 0xB6 +#define TRXPTCL_DBG_SEL_C1 0xB7 +#define FW_PROG_CNTR_DBG_SEL 0xF2 +#define PCIE_TXDMA_DBG_SEL 0x30 +#define PCIE_RXDMA_DBG_SEL 0x31 +#define PCIE_CVT_DBG_SEL 0x32 +#define PCIE_CXPL_DBG_SEL 0x33 +#define PCIE_IO_DBG_SEL 0x37 +#define PCIE_MISC_DBG_SEL 0x38 +#define PCIE_MISC2_DBG_SEL 0x00 +#define MAC_DBG_SEL 1 +#define RMAC_CMAC_DBG_SEL 1 + +/* TRXPTCL dbg port sel */ +#define TRXPTRL_DBG_SEL_TMAC 0 +#define TRXPTRL_DBG_SEL_RMAC 1 + +struct rtw89_cpuio_ctrl { + u16 pkt_num; + u16 start_pktid; + u16 end_pktid; + u8 cmd_type; + u8 macid; + u8 src_pid; + u8 src_qid; + u8 dst_pid; + u8 dst_qid; + u16 pktid; +}; + +struct rtw89_mac_dbg_port_info { + u32 sel_addr; + u8 sel_byte; + u32 sel_msk; + u32 srt; + u32 end; + u32 rd_addr; + u8 rd_byte; + u32 rd_msk; +}; + +#define QLNKTBL_ADDR_INFO_SEL BIT(0) +#define QLNKTBL_ADDR_INFO_SEL_0 0 +#define QLNKTBL_ADDR_INFO_SEL_1 1 +#define QLNKTBL_ADDR_TBL_IDX_MASK GENMASK(10, 1) +#define QLNKTBL_DATA_SEL1_PKT_CNT_MASK GENMASK(11, 0) + +struct rtw89_mac_dle_dfi_ctrl { + enum rtw89_mac_dle_ctrl_type type; + u32 target; + u32 addr; + u32 out_data; +}; + +struct rtw89_mac_dle_dfi_quota { + enum rtw89_mac_dle_ctrl_type dle_type; + u32 qtaid; + u16 rsv_pgnum; + u16 use_pgnum; +}; + +struct rtw89_mac_dle_dfi_qempty { + enum rtw89_mac_dle_ctrl_type dle_type; + u32 grpsel; + u32 qempty; +}; + +/* Define DBG and recovery enum */ +enum mac_ax_err_info { + /* Get error info */ + + /* L0 */ + MAC_AX_ERR_L0_ERR_CMAC0 = 0x0001, + MAC_AX_ERR_L0_ERR_CMAC1 = 0x0002, + MAC_AX_ERR_L0_RESET_DONE = 0x0003, + MAC_AX_ERR_L0_PROMOTE_TO_L1 = 0x0010, + + /* L1 */ + MAC_AX_ERR_L1_ERR_DMAC = 0x1000, + MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE = 0x1001, + MAC_AX_ERR_L1_RESET_RECOVERY_DONE = 0x1002, + MAC_AX_ERR_L1_PROMOTE_TO_L2 = 0x1010, + MAC_AX_ERR_L1_RCVY_STOP_DONE = 0x1011, + + /* L2 */ + /* address hole (master) */ + MAC_AX_ERR_L2_ERR_AH_DMA = 0x2000, + MAC_AX_ERR_L2_ERR_AH_HCI = 0x2010, + MAC_AX_ERR_L2_ERR_AH_RLX4081 = 0x2020, + MAC_AX_ERR_L2_ERR_AH_IDDMA = 0x2030, + MAC_AX_ERR_L2_ERR_AH_HIOE = 0x2040, + MAC_AX_ERR_L2_ERR_AH_IPSEC = 0x2050, + MAC_AX_ERR_L2_ERR_AH_RX4281 = 0x2060, + MAC_AX_ERR_L2_ERR_AH_OTHERS = 0x2070, + + /* AHB bridge timeout (master) */ + MAC_AX_ERR_L2_ERR_AHB_TO_DMA = 0x2100, + MAC_AX_ERR_L2_ERR_AHB_TO_HCI = 0x2110, + MAC_AX_ERR_L2_ERR_AHB_TO_RLX4081 = 0x2120, + MAC_AX_ERR_L2_ERR_AHB_TO_IDDMA = 0x2130, + MAC_AX_ERR_L2_ERR_AHB_TO_HIOE = 0x2140, + MAC_AX_ERR_L2_ERR_AHB_TO_IPSEC = 0x2150, + MAC_AX_ERR_L2_ERR_AHB_TO_RX4281 = 0x2160, + MAC_AX_ERR_L2_ERR_AHB_TO_OTHERS = 0x2170, + + /* APB_SA bridge timeout (master + slave) */ + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WVA = 0x2200, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_UART = 0x2201, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_CPULOCAL = 0x2202, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_AXIDMA = 0x2203, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_HIOE = 0x2204, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_IDDMA = 0x2205, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_IPSEC = 0x2206, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WON = 0x2207, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WDMAC = 0x2208, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WCMAC = 0x2209, + MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_OTHERS = 0x220A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WVA = 0x2210, + 
MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_UART = 0x2211, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_CPULOCAL = 0x2212, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_AXIDMA = 0x2213, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_HIOE = 0x2214, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_IDDMA = 0x2215, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_IPSEC = 0x2216, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WDMAC = 0x2218, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WCMAC = 0x2219, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_OTHERS = 0x221A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WVA = 0x2220, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_UART = 0x2221, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_CPULOCAL = 0x2222, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_AXIDMA = 0x2223, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_HIOE = 0x2224, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_IDDMA = 0x2225, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_IPSEC = 0x2226, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WON = 0x2227, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WDMAC = 0x2228, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WCMAC = 0x2229, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_OTHERS = 0x222A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WVA = 0x2230, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_UART = 0x2231, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_CPULOCAL = 0x2232, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_AXIDMA = 0x2233, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_HIOE = 0x2234, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_IDDMA = 0x2235, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_IPSEC = 0x2236, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WON = 0x2237, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WDMAC = 0x2238, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WCMAC = 0x2239, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_OTHERS = 0x223A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WVA = 0x2240, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_UART = 0x2241, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_CPULOCAL = 0x2242, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_AXIDMA = 0x2243, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_HIOE = 0x2244, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_IDDMA = 0x2245, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_IPSEC = 0x2246, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WON = 0x2247, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WDMAC = 0x2248, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WCMAC = 0x2249, + MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_OTHERS = 0x224A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WVA = 0x2250, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_UART = 0x2251, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_CPULOCAL = 0x2252, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_AXIDMA = 0x2253, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_HIOE = 0x2254, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_IDDMA = 0x2255, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_IPSEC = 0x2256, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WON = 0x2257, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WDMAC = 0x2258, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WCMAC = 0x2259, + MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_OTHERS = 0x225A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WVA = 0x2260, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_UART = 0x2261, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_CPULOCAL = 0x2262, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_AXIDMA = 0x2263, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_HIOE = 0x2264, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_IDDMA = 0x2265, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_IPSEC = 0x2266, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WON = 0x2267, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WDMAC = 0x2268, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WCMAC = 0x2269, + MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_OTHERS = 0x226A, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WVA = 0x2270, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_UART = 0x2271, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_CPULOCAL = 0x2272, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_AXIDMA = 0x2273, + 
MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_HIOE = 0x2274, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_IDDMA = 0x2275, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_IPSEC = 0x2276, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WON = 0x2277, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WDMAC = 0x2278, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WCMAC = 0x2279, + MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_OTHERS = 0x227A, + + /* APB_BBRF bridge timeout (master) */ + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_DMA = 0x2300, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_HCI = 0x2310, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RLX4081 = 0x2320, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_IDDMA = 0x2330, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_HIOE = 0x2340, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_IPSEC = 0x2350, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RX4281 = 0x2360, + MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370, + MAC_AX_ERR_L2_RESET_DONE = 0x2400, + MAC_AX_ERR_CPU_EXCEPTION = 0x3000, + MAC_AX_GET_ERR_MAX, + MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000, + + /* set error info */ + MAC_AX_ERR_L1_DISABLE_EN = 0x0001, + MAC_AX_ERR_L1_RCVY_EN = 0x0002, + MAC_AX_ERR_L1_RCVY_STOP_REQ = 0x0003, + MAC_AX_ERR_L1_RCVY_START_REQ = 0x0004, + MAC_AX_ERR_L0_CFG_NOTIFY = 0x0010, + MAC_AX_ERR_L0_CFG_DIS_NOTIFY = 0x0011, + MAC_AX_ERR_L0_CFG_HANDSHAKE = 0x0012, + MAC_AX_ERR_L0_RCVY_EN = 0x0013, + MAC_AX_SET_ERR_MAX, +}; + +extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie; +extern const struct rtw89_dle_size wde_size0; +extern const struct rtw89_dle_size wde_size4; +extern const struct rtw89_dle_size ple_size0; +extern const struct rtw89_dle_size ple_size4; +extern const struct rtw89_wde_quota wde_qt0; +extern const struct rtw89_wde_quota wde_qt4; +extern const struct rtw89_ple_quota ple_qt4; +extern const struct rtw89_ple_quota ple_qt5; +extern const struct rtw89_ple_quota ple_qt13; + +static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band) +{ + return band == 0 ? 
reg_base : (reg_base + 0x2000); +} + +static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx) +{ + return rtw89_mac_reg_by_idx(base + port * 0x40, mac_idx); +} + +static inline u32 +rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u32 mask) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + return rtw89_read32_mask(rtwdev, reg, mask); +} + +static inline void +rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base, + u32 data) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write32(rtwdev, reg, data); +} + +static inline void +rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u32 mask, u32 data) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, mask, data); +} + +static inline void +rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u32 mask, u16 data) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write16_mask(rtwdev, reg, mask, data); +} + +static inline void +rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u32 bit) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write32_clr(rtwdev, reg, bit); +} + +static inline void +rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u16 bit) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write16_clr(rtwdev, reg, bit); +} + +static inline void +rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + u32 base, u32 bit) +{ + u32 reg; + + reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + rtw89_write32_set(rtwdev, reg, bit); +} + +void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev); +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev); +int rtw89_mac_init(struct rtw89_dev *rtwdev); +int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band, + enum rtw89_mac_hwmod_sel sel); +int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val); +int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val); +int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); +int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); +int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); +void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev); +void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev); +u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev); +int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err); +void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func); +int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev); +int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, + u16 *tx_en, enum rtw89_sch_tx_sel sel); +int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en); +int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable); +void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx); +void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop); +int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex); +int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, + const struct 
rtw89_mac_ax_coex_gnt *gnt_cfg); +int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt); +void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val); +u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev); +bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev); +int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl); +bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr); +void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter); +void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf); +void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, + struct ieee80211_sta *sta, bool disconnect); +void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev); +int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); +int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); +int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool en); + +static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) +{ + if (!test_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags)) + return; + + _rtw89_mac_bf_monitor_track(rtwdev); +} + +static inline int rtw89_mac_txpwr_read32(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *val) +{ + u32 cr; + + if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + return -EINVAL; + + *val = rtw89_read32(rtwdev, cr); + return 0; +} + +static inline int rtw89_mac_txpwr_write32(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 val) +{ + u32 cr; + + if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + return -EINVAL; + + rtw89_write32(rtwdev, cr, val); + return 0; +} + +static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 mask, u32 val) +{ + u32 cr; + + if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + return -EINVAL; + + rtw89_write32_mask(rtwdev, cr, mask, val); + return 0; +} + +int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + bool resume, u32 tx_time); +int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + u32 *tx_time); +int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta, + bool resume, u8 tx_retry); +int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, + struct rtw89_sta *rtwsta, u8 *tx_retry); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c new file mode 100644 index 00000000000000..16dc6fb7dbb0b7 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "cam.h" +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "ps.h" +#include "reg.h" +#include "sar.h" +#include "ser.h" + +static void rtw89_ops_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = 
info->control.vif; + struct ieee80211_sta *sta = control->sta; + int ret, qsel; + + ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); + if (ret) { + rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret); + ieee80211_free_txskb(hw, skb); + } + rtw89_core_tx_kick_off(rtwdev, qsel); +} + +static void rtw89_ops_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct rtw89_dev *rtwdev = hw->priv; + + ieee80211_schedule_txq(hw, txq); + queue_work(rtwdev->txq_wq, &rtwdev->txq_work); +} + +static int rtw89_ops_start(struct ieee80211_hw *hw) +{ + struct rtw89_dev *rtwdev = hw->priv; + int ret; + + mutex_lock(&rtwdev->mutex); + ret = rtw89_core_start(rtwdev); + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static void rtw89_ops_stop(struct ieee80211_hw *hw) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_core_stop(rtwdev); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + !(hw->conf.flags & IEEE80211_CONF_IDLE)) + rtw89_leave_ips(rtwdev); + + if (changed & IEEE80211_CONF_CHANGE_PS) { + if (hw->conf.flags & IEEE80211_CONF_PS) { + rtwdev->lps_enabled = true; + } else { + rtw89_leave_lps(rtwdev); + rtwdev->lps_enabled = false; + } + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) + rtw89_set_channel(rtwdev); + + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + (hw->conf.flags & IEEE80211_CONF_IDLE)) + rtw89_enter_ips(rtwdev); + + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static int rtw89_ops_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + rtw89_leave_ps_mode(rtwdev); + + rtw89_traffic_stats_init(rtwdev, &rtwvif->stats); + rtw89_vif_type_mapping(vif, false); + rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port, + RTW89_MAX_HW_PORT_NUM); + if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) { + ret = -ENOSPC; + goto out; + } + + rtwvif->bcn_hit_cond = 0; + rtwvif->mac_idx = RTW89_MAC_0; + rtwvif->phy_idx = RTW89_PHY_0; + rtwvif->hit_rule = 0; + ether_addr_copy(rtwvif->mac_addr, vif->addr); + + ret = rtw89_mac_add_vif(rtwdev, rtwvif); + if (ret) { + rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); + goto out; + } + + rtw89_core_txq_init(rtwdev, vif->txq); + + rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START); +out: + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static void rtw89_ops_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP); + rtw89_mac_remove_vif(rtwdev, rtwvif); + rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); + list_del_init(&rtwvif->list); + mutex_unlock(&rtwdev->mutex); +} + +static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL | + 
FIF_BCN_PRBRESP_PROMISC; + + if (changed_flags & FIF_ALLMULTI) { + if (*new_flags & FIF_ALLMULTI) + rtwdev->hal.rx_fltr &= ~B_AX_A_MC; + else + rtwdev->hal.rx_fltr |= B_AX_A_MC; + } + if (changed_flags & FIF_FCSFAIL) { + if (*new_flags & FIF_FCSFAIL) + rtwdev->hal.rx_fltr |= B_AX_A_CRC32_ERR; + else + rtwdev->hal.rx_fltr &= ~B_AX_A_CRC32_ERR; + } + if (changed_flags & FIF_OTHER_BSS) { + if (*new_flags & FIF_OTHER_BSS) + rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH; + else + rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH; + } + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*new_flags & FIF_BCN_PRBRESP_PROMISC) { + rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN; + rtwdev->hal.rx_fltr &= ~B_AX_A_BC; + rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH; + } else { + rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN; + rtwdev->hal.rx_fltr |= B_AX_A_BC; + rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH; + } + } + + rtw89_write32_mask(rtwdev, + rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + B_AX_RX_FLTR_CFG_MASK, + rtwdev->hal.rx_fltr); + if (!rtwdev->dbcc_en) + goto out; + rtw89_write32_mask(rtwdev, + rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_1), + B_AX_RX_FLTR_CFG_MASK, + rtwdev->hal.rx_fltr); + +out: + mutex_unlock(&rtwdev->mutex); +} + +static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = { + [IEEE80211_AC_VO] = 3, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, +}; + +static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u8 aifsn) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u8 slot_time; + u8 sifs; + + slot_time = vif->bss_conf.use_short_slot ? 9 : 20; + sifs = rtwdev->hal.current_band_type == RTW89_BAND_5G ? 16 : 10; + + return aifsn * slot_time + sifs; +} + +static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u16 ac) +{ + struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; + u32 val; + u8 ecw_max, ecw_min; + u8 aifs; + + /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */ + ecw_max = ilog2(params->cw_max + 1); + ecw_min = ilog2(params->cw_min + 1); + aifs = rtw89_aifsn_to_aifs(rtwdev, rtwvif, params->aifs); + val = FIELD_PREP(FW_EDCA_PARAM_TXOPLMT_MSK, params->txop) | + FIELD_PREP(FW_EDCA_PARAM_CWMAX_MSK, ecw_max) | + FIELD_PREP(FW_EDCA_PARAM_CWMIN_MSK, ecw_min) | + FIELD_PREP(FW_EDCA_PARAM_AIFS_MSK, aifs); + rtw89_fw_h2c_set_edca(rtwdev, rtwvif, ac_to_fw_idx[ac], val); +} + +static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS] = { + [IEEE80211_AC_VO] = R_AX_MUEDCA_VO_PARAM_0, + [IEEE80211_AC_VI] = R_AX_MUEDCA_VI_PARAM_0, + [IEEE80211_AC_BE] = R_AX_MUEDCA_BE_PARAM_0, + [IEEE80211_AC_BK] = R_AX_MUEDCA_BK_PARAM_0, +}; + +static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u16 ac) +{ + struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; + struct ieee80211_he_mu_edca_param_ac_rec *mu_edca; + u8 aifs, aifsn; + u16 timer_32us; + u32 reg; + u32 val; + + if (!params->mu_edca) + return; + + mu_edca = &params->mu_edca_param_rec; + aifsn = FIELD_GET(GENMASK(3, 0), mu_edca->aifsn); + aifs = aifsn ?
rtw89_aifsn_to_aifs(rtwdev, rtwvif, aifsn) : 0; + timer_32us = mu_edca->mu_edca_timer << 8; + + val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) | + FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) | + FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs); + reg = rtw89_mac_reg_by_idx(ac_to_mu_edca_param[ac], rtwvif->mac_idx); + rtw89_write32(rtwdev, reg, val); + + rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true); +} + +static void __rtw89_conf_tx(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u16 ac) +{ + ____rtw89_conf_tx_edca(rtwdev, rtwvif, ac); + ____rtw89_conf_tx_mu_edca(rtwdev, rtwvif, ac); +} + +static void rtw89_conf_tx(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + u16 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + __rtw89_conf_tx(rtwdev, rtwvif, ac); +} + +static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf) +{ + struct ieee80211_sta *sta; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + sta = ieee80211_find_sta(vif, conf->bssid); + if (!sta) { + rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n"); + return; + } + rtw89_core_sta_assoc(rtwdev, vif, sta); +} + +static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + if (changed & BSS_CHANGED_ASSOC) { + if (conf->assoc) { + rtw89_station_mode_sta_assoc(rtwdev, vif, conf); + rtw89_phy_set_bss_color(rtwdev, vif); + rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif); + rtw89_mac_port_update(rtwdev, rtwvif); + } + } + + if (changed & BSS_CHANGED_BSSID) { + ether_addr_copy(rtwvif->bssid, conf->bssid); + rtw89_cam_bssid_changed(rtwdev, rtwvif); + rtw89_fw_h2c_cam(rtwdev, rtwvif); + } + + if (changed & BSS_CHANGED_ERP_SLOT) + rtw89_conf_tx(rtwdev, rtwvif); + + if (changed & BSS_CHANGED_HE_BSS_COLOR) + rtw89_phy_set_bss_color(rtwdev, vif); + + if (changed & BSS_CHANGED_MU_GROUPS) + rtw89_mac_bf_set_gid_table(rtwdev, vif, conf); + + mutex_unlock(&rtwdev->mutex); +} + +static int rtw89_ops_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + rtwvif->tx_params[ac] = *params; + __rtw89_conf_tx(rtwdev, rtwvif, ac); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static int __rtw89_ops_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct rtw89_dev *rtwdev = hw->priv; + + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) + return rtw89_core_sta_add(rtwdev, vif, sta); + + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + if (vif->type == NL80211_IFTYPE_STATION) + return 0; /* defer to bss_info_changed to have vif info */ + return rtw89_core_sta_assoc(rtwdev, vif, sta); + } + + if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) + return rtw89_core_sta_disassoc(rtwdev, vif, sta); + + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_NONE) + return rtw89_core_sta_disconnect(rtwdev, vif, 
sta); + + if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) + return rtw89_core_sta_remove(rtwdev, vif, sta); + + return 0; +} + +static int rtw89_ops_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct rtw89_dev *rtwdev = hw->priv; + int ret; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + ret = __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state); + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct rtw89_dev *rtwdev = hw->priv; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + switch (cmd) { + case SET_KEY: + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL_END); + ret = rtw89_cam_sec_key_add(rtwdev, vif, sta, key); + if (ret && ret != -EOPNOTSUPP) { + rtw89_err(rtwdev, "failed to add key to sec cam\n"); + goto out; + } + break; + case DISABLE_KEY: + rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, + false); + rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, false); + ret = rtw89_cam_sec_key_del(rtwdev, vif, sta, key, true); + if (ret) { + rtw89_err(rtwdev, "failed to remove key from sec cam\n"); + goto out; + } + break; + } + +out: + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct ieee80211_sta *sta = params->sta; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + u16 tid = params->tid; + struct ieee80211_txq *txq = sta->txq[tid]; + struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv; + + switch (params->action) { + case IEEE80211_AMPDU_TX_START: + return IEEE80211_AMPDU_TX_START_IMMEDIATE; + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mutex_lock(&rtwdev->mutex); + clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags); + rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params); + mutex_unlock(&rtwdev->mutex); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mutex_lock(&rtwdev->mutex); + set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags); + rtwsta->ampdu_params[tid].agg_num = params->buf_size; + rtwsta->ampdu_params[tid].amsdu = params->amsdu; + rtw89_leave_ps_mode(rtwdev); + rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params); + mutex_unlock(&rtwdev->mutex); + break; + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + break; + default: + WARN_ON(1); + return -ENOTSUPP; + } + + return 0; +} + +static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) + rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + + sinfo->txrate = rtwsta->ra_report.txrate; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); +} 
+ +static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_lps(rtwdev); + rtw89_hci_flush_queues(rtwdev, queues, drop); + rtw89_mac_flush_txq(rtwdev, queues, drop); + mutex_unlock(&rtwdev->mutex); +} + +struct rtw89_iter_bitrate_mask_data { + struct rtw89_dev *rtwdev; + struct ieee80211_vif *vif; + const struct cfg80211_bitrate_mask *mask; +}; + +static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_iter_bitrate_mask_data *br_data = data; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif); + + if (vif != br_data->vif) + return; + + rtwsta->use_cfg_mask = true; + rtwsta->mask = *br_data->mask; + rtw89_phy_ra_updata_sta(br_data->rtwdev, sta); +} + +static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct rtw89_iter_bitrate_mask_data br_data = { .rtwdev = rtwdev, + .vif = vif, + .mask = mask}; + + ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_ra_mask_info_update_iter, + &br_data); +} + +static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_phy_rate_pattern_vif(rtwdev, vif, mask); + rtw89_ra_mask_info_update(rtwdev, vif, mask); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static +int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_hal *hal = &rtwdev->hal; + + if (rx_ant != hw->wiphy->available_antennas_rx) + return -EINVAL; + + mutex_lock(&rtwdev->mutex); + hal->antenna_tx = tx_ant; + hal->antenna_rx = rx_ant; + mutex_unlock(&rtwdev->mutex); + + return 0; +} + +static +int rtw89_ops_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_hal *hal = &rtwdev->hal; + + *tx_ant = hal->antenna_tx; + *rx_ant = hal->antenna_rx; + + return 0; +} + +static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_hal *hal = &rtwdev->hal; + + mutex_lock(&rtwdev->mutex); + rtwdev->scanning = true; + rtw89_leave_lps(rtwdev); + rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type); + rtw89_chip_rfk_scan(rtwdev, true); + rtw89_hci_recalc_int_mit(rtwdev); + mutex_unlock(&rtwdev->mutex); +} + +static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_chip_rfk_scan(rtwdev, false); + rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0); + rtwdev->scanning = false; + rtwdev->dig.bypass_dig = true; + mutex_unlock(&rtwdev->mutex); +} + +static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct rtw89_dev *rtwdev = hw->priv; + + if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) + rtw89_ser_recfg_done(rtwdev); +} + +const struct ieee80211_ops rtw89_ops = { + .tx = rtw89_ops_tx, + .wake_tx_queue = rtw89_ops_wake_tx_queue, + .start = rtw89_ops_start, + .stop = rtw89_ops_stop, + .config = rtw89_ops_config, + .add_interface = rtw89_ops_add_interface, + 
.remove_interface = rtw89_ops_remove_interface, + .configure_filter = rtw89_ops_configure_filter, + .bss_info_changed = rtw89_ops_bss_info_changed, + .conf_tx = rtw89_ops_conf_tx, + .sta_state = rtw89_ops_sta_state, + .set_key = rtw89_ops_set_key, + .ampdu_action = rtw89_ops_ampdu_action, + .set_rts_threshold = rtw89_ops_set_rts_threshold, + .sta_statistics = rtw89_ops_sta_statistics, + .flush = rtw89_ops_flush, + .set_bitrate_mask = rtw89_ops_set_bitrate_mask, + .set_antenna = rtw89_ops_set_antenna, + .get_antenna = rtw89_ops_get_antenna, + .sw_scan_start = rtw89_ops_sw_scan_start, + .sw_scan_complete = rtw89_ops_sw_scan_complete, + .reconfig_complete = rtw89_ops_reconfig_complete, + .set_sar_specs = rtw89_ops_set_sar_specs, +}; +EXPORT_SYMBOL(rtw89_ops); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c new file mode 100644 index 00000000000000..2c94762e4f9309 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -0,0 +1,3060 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2020 Realtek Corporation + */ + +#include <linux/pci.h> + +#include "mac.h" +#include "pci.h" +#include "reg.h" +#include "ser.h" + +static bool rtw89_pci_disable_clkreq; +static bool rtw89_pci_disable_aspm_l1; +static bool rtw89_pci_disable_l1ss; +module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); +module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); +module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); +MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); +MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); +MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); + +static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev) +{ + u32 val; + int ret; + + rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, + rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM); + + ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), + 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, + rtwdev, R_AX_PCIE_INIT_CFG1); + + if (ret) + return -EBUSY; + + return 0; +} + +static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, + struct rtw89_pci_dma_ring *bd_ring, + u32 cur_idx, bool tx) +{ + u32 cnt, cur_rp, wp, rp, len; + + rp = bd_ring->rp; + wp = bd_ring->wp; + len = bd_ring->len; + + cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); + if (tx) + cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); + else + cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); + + bd_ring->rp = cur_rp; + + return cnt; +} + +static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; + u32 addr_idx = bd_ring->addr_idx; + u32 cnt, idx; + + idx = rtw89_read32(rtwdev, addr_idx); + cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); + + return cnt; +} + +static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + u32 cnt, bool release_all) +{ + struct rtw89_pci_tx_data *tx_data; + struct sk_buff *skb; + u32 qlen; + + while (cnt--) { + skb = skb_dequeue(&rtwpci->h2c_queue); + if (!skb) { + rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); + return; + } + skb_queue_tail(&rtwpci->h2c_release_queue, skb); + } + + qlen = skb_queue_len(&rtwpci->h2c_release_queue); + if (!release_all) + qlen = qlen > RTW89_PCI_MULTITAG ?
qlen - RTW89_PCI_MULTITAG : 0; + + while (qlen--) { + skb = skb_dequeue(&rtwpci->h2c_release_queue); + if (!skb) { + rtw89_err(rtwdev, "failed to release fwcmd\n"); + return; + } + tx_data = RTW89_PCI_TX_SKB_CB(skb); + dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci) +{ + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; + u32 cnt; + + cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); + if (!cnt) + return; + rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); +} + +static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, + struct rtw89_pci_rx_ring *rx_ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + u32 addr_idx = bd_ring->addr_idx; + u32 cnt, idx; + + idx = rtw89_read32(rtwdev, addr_idx); + cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); + + return cnt; +} + +static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, + struct sk_buff *skb) +{ + struct rtw89_pci_rx_info *rx_info; + dma_addr_t dma; + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + dma = rx_info->dma; + dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, + DMA_FROM_DEVICE); +} + +static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, + struct sk_buff *skb) +{ + struct rtw89_pci_rx_info *rx_info; + dma_addr_t dma; + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + dma = rx_info->dma; + dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, + DMA_FROM_DEVICE); +} + +static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, + struct sk_buff *skb) +{ + struct rtw89_pci_rxbd_info *rxbd_info; + struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); + + rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; + rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); + rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); + rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); + rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); + + return 0; +} + +static bool +rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, + struct sk_buff *new, + const struct sk_buff *skb, u32 offset, + const struct rtw89_pci_rx_info *rx_info, + const struct rtw89_rx_desc_info *desc_info) +{ + u32 copy_len = rx_info->len - offset; + + if (unlikely(skb_tailroom(new) < copy_len)) { + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", + rx_info->len, desc_info->pkt_size, offset, fs, ls); + rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", + skb->data, rx_info->len); + /* length of a single segment skb is desc_info->pkt_size */ + if (fs && ls) { + copy_len = desc_info->pkt_size; + } else { + rtw89_info(rtwdev, "drop rx data due to invalid length\n"); + return false; + } + } + + skb_put_data(new, skb->data + offset, copy_len); + + return true; +} + +static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, + struct rtw89_pci_rx_ring *rx_ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + struct rtw89_pci_rx_info *rx_info; + struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; + struct sk_buff *new = rx_ring->diliver_skb; + struct sk_buff *skb; + u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); + u32 offset; + u32 cnt = 1; + bool fs, ls; + int ret; + + skb = rx_ring->buf[bd_ring->wp]; + rtw89_pci_sync_skb_for_cpu(rtwdev, skb); + + ret = 
rtw89_pci_rxbd_info_update(rtwdev, skb); + if (ret) { + rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", + bd_ring->wp, ret); + goto err_sync_device; + } + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + fs = rx_info->fs; + ls = rx_info->ls; + + if (fs) { + if (new) { + rtw89_err(rtwdev, "skb should not be ready before first segment start\n"); + goto err_sync_device; + } + if (desc_info->ready) { + rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); + goto err_sync_device; + } + + rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); + + new = dev_alloc_skb(desc_info->pkt_size); + if (!new) + goto err_sync_device; + + rx_ring->diliver_skb = new; + + /* first segment has RX desc */ + offset = desc_info->offset; + offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : + sizeof(struct rtw89_rxdesc_short); + } else { + offset = sizeof(struct rtw89_pci_rxbd_info); + if (!new) { + rtw89_warn(rtwdev, "no last skb\n"); + goto err_sync_device; + } + } + if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) + goto err_sync_device; + rtw89_pci_sync_skb_for_device(rtwdev, skb); + rtw89_pci_rxbd_increase(rx_ring, 1); + + if (!desc_info->ready) { + rtw89_warn(rtwdev, "no rx desc information\n"); + goto err_free_resource; + } + if (ls) { + rtw89_core_rx(rtwdev, desc_info, new); + rx_ring->diliver_skb = NULL; + desc_info->ready = false; + } + + return cnt; + +err_sync_device: + rtw89_pci_sync_skb_for_device(rtwdev, skb); + rtw89_pci_rxbd_increase(rx_ring, 1); +err_free_resource: + if (new) + dev_kfree_skb_any(new); + rx_ring->diliver_skb = NULL; + desc_info->ready = false; + + return cnt; +} + +static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, + struct rtw89_pci_rx_ring *rx_ring, + u32 cnt) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + u32 rx_cnt; + + while (cnt && rtwdev->napi_budget_countdown > 0) { + rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); + if (!rx_cnt) { + rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); + + /* skip the rest RXBD bufs */ + rtw89_pci_rxbd_increase(rx_ring, cnt); + break; + } + + cnt -= rx_cnt; + } + + rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp); +} + +static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, int budget) +{ + struct rtw89_pci_rx_ring *rx_ring; + int countdown = rtwdev->napi_budget_countdown; + u32 cnt; + + rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; + + cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); + if (!cnt) + return 0; + + cnt = min_t(u32, budget, cnt); + + rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); + + /* In case of flushing pending SKBs, the countdown may exceed. 
*/ + if (rtwdev->napi_budget_countdown <= 0) + return budget; + + return budget - countdown; +} + +static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring, + struct sk_buff *skb, u8 tx_status) +{ + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + if (tx_status == RTW89_TX_DONE) { + info->flags |= IEEE80211_TX_STAT_ACK; + tx_ring->tx_acked++; + } else { + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) + rtw89_debug(rtwdev, RTW89_DBG_FW, + "failed to TX of status %x\n", tx_status); + switch (tx_status) { + case RTW89_TX_RETRY_LIMIT: + tx_ring->tx_retry_lmt++; + break; + case RTW89_TX_LIFE_TIME: + tx_ring->tx_life_time++; + break; + case RTW89_TX_MACID_DROP: + tx_ring->tx_mac_id_drop++; + break; + default: + rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); + break; + } + } + + ieee80211_tx_status_ni(rtwdev->hw, skb); +} + +static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_tx_wd *txwd; + u32 cnt; + + cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); + while (cnt--) { + txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); + if (!txwd) { + rtw89_warn(rtwdev, "No busy txwd pages available\n"); + break; + } + + list_del_init(&txwd->list); + } +} + +static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + struct rtw89_pci_tx_wd *txwd; + int i; + + for (i = 0; i < wd_ring->page_num; i++) { + txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); + if (!txwd) + break; + + list_del_init(&txwd->list); + } +} + +static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring, + struct rtw89_pci_tx_wd *txwd, u16 seq, + u8 tx_status) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_data *tx_data; + struct sk_buff *skb, *tmp; + u8 txch = tx_ring->txch; + + if (!list_empty(&txwd->list)) { + rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", + txch, seq); + return; + } + + /* currently, support for only one frame */ + if (skb_queue_len(&txwd->queue) != 1) { + rtw89_warn(rtwdev, "empty pending queue %d page %d\n", + txch, seq); + return; + } + + skb_queue_walk_safe(&txwd->queue, skb, tmp) { + skb_unlink(skb, &txwd->queue); + + tx_data = RTW89_PCI_TX_SKB_CB(skb); + dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, + DMA_TO_DEVICE); + + rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); + } + + rtw89_pci_enqueue_txwd(tx_ring, txwd); +} + +static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, + struct rtw89_pci_rpp_fmt *rpp) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + struct rtw89_pci_tx_wd_ring *wd_ring; + struct rtw89_pci_tx_wd *txwd; + u16 seq; + u8 qsel, tx_status, txch; + + seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); + qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); + tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); + txch = rtw89_core_get_ch_dma(rtwdev, qsel); + + if (txch == RTW89_TXCH_CH12) { + rtw89_warn(rtwdev, "should no fwcmd release report\n"); + return; + } + + tx_ring = &rtwpci->tx_rings[txch]; + rtw89_pci_reclaim_txbd(rtwdev, tx_ring); + wd_ring = &tx_ring->wd_ring; + txwd = 
&wd_ring->pages[seq]; + + rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); +} + +static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + struct rtw89_pci_tx_wd *txwd; + int i; + + for (i = 0; i < wd_ring->page_num; i++) { + txwd = &wd_ring->pages[i]; + + if (!list_empty(&txwd->list)) + continue; + + rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); + } +} + +static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, + struct rtw89_pci_rx_ring *rx_ring, + u32 max_cnt) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + struct rtw89_pci_rx_info *rx_info; + struct rtw89_pci_rpp_fmt *rpp; + struct rtw89_rx_desc_info desc_info = {}; + struct sk_buff *skb; + u32 cnt = 0; + u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); + u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); + u32 offset; + int ret; + + skb = rx_ring->buf[bd_ring->wp]; + rtw89_pci_sync_skb_for_cpu(rtwdev, skb); + + ret = rtw89_pci_rxbd_info_update(rtwdev, skb); + if (ret) { + rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", + bd_ring->wp, ret); + goto err_sync_device; + } + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + if (!rx_info->fs || !rx_info->ls) { + rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); + return cnt; + } + + rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); + + /* first segment has RX desc */ + offset = desc_info.offset; + offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : + sizeof(struct rtw89_rxdesc_short); + for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { + rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); + rtw89_pci_release_rpp(rtwdev, rpp); + } + + rtw89_pci_sync_skb_for_device(rtwdev, skb); + rtw89_pci_rxbd_increase(rx_ring, 1); + cnt++; + + return cnt; + +err_sync_device: + rtw89_pci_sync_skb_for_device(rtwdev, skb); + return 0; +} + +static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, + struct rtw89_pci_rx_ring *rx_ring, + u32 cnt) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + u32 release_cnt; + + while (cnt) { + release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); + if (!release_cnt) { + rtw89_err(rtwdev, "failed to release TX skbs\n"); + + /* skip the rest RXBD bufs */ + rtw89_pci_rxbd_increase(rx_ring, cnt); + break; + } + + cnt -= release_cnt; + } + + rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp); +} + +static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, int budget) +{ + struct rtw89_pci_rx_ring *rx_ring; + u32 cnt; + int work_done; + + rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; + + spin_lock_bh(&rtwpci->trx_lock); + + cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); + if (cnt == 0) + goto out_unlock; + + rtw89_pci_release_tx(rtwdev, rx_ring, cnt); + +out_unlock: + spin_unlock_bh(&rtwpci->trx_lock); + + /* always release all RPQ */ + work_done = min_t(int, cnt, budget); + rtwdev->napi_budget_countdown -= work_done; + + return work_done; +} + +static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci) +{ + struct rtw89_pci_rx_ring *rx_ring; + struct rtw89_pci_dma_ring *bd_ring; + u32 reg_idx; + u16 hw_idx, hw_idx_next, host_idx; + int i; + + for (i = 0; i < RTW89_RXCH_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + bd_ring = &rx_ring->bd_ring; + + reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx); + hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, 
reg_idx); + host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); + hw_idx_next = (hw_idx + 1) % bd_ring->len; + + if (hw_idx_next == host_idx) + rtw89_warn(rtwdev, "%d RXD unavailable\n", i); + + rtw89_debug(rtwdev, RTW89_DBG_TXRX, + "%d RXD unavailable, idx=0x%08x, len=%d\n", + i, reg_idx, bd_ring->len); + } +} + +static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs) +{ + isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; + isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; + isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; + + rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); + rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); + rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); +} + +static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) +{ + /* write 1 clear */ + rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00); +} + +static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci) +{ + rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); + rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); + rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); +} + +static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci) +{ + rtw89_write32(rtwdev, R_AX_HIMR0, 0); + rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); + rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); +} + +static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) +{ + struct rtw89_dev *rtwdev = dev; + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_isrs isrs; + unsigned long flags; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + + if (unlikely(isrs.isrs[0] & B_AX_RDU_INT)) + rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); + + if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN)) + rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); + + if (likely(rtwpci->running)) { + local_bh_disable(); + napi_schedule(&rtwdev->napi); + local_bh_enable(); + } + + return IRQ_HANDLED; +} + +static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) +{ + struct rtw89_dev *rtwdev = dev; + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long flags; + irqreturn_t irqret = IRQ_WAKE_THREAD; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + + /* If interrupt event is on the road, it is still trigger interrupt + * even we have done pci_stop() to turn off IMR. 
+ */ + if (unlikely(!rtwpci->running)) { + irqret = IRQ_HANDLED; + goto exit; + } + + rtw89_pci_disable_intr(rtwdev, rtwpci); +exit: + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + + return irqret; +} + +#define case_TXCHADDRS(txch) \ + case RTW89_TXCH_##txch: \ + *addr_num = R_AX_##txch##_TXBD_NUM; \ + *addr_idx = R_AX_##txch##_TXBD_IDX; \ + *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \ + *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \ + *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \ + break + +static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch, + u32 *addr_num, + u32 *addr_idx, + u32 *addr_bdram, + u32 *addr_desa_l, + u32 *addr_desa_h) +{ + switch (txch) { + case_TXCHADDRS(ACH0); + case_TXCHADDRS(ACH1); + case_TXCHADDRS(ACH2); + case_TXCHADDRS(ACH3); + case_TXCHADDRS(ACH4); + case_TXCHADDRS(ACH5); + case_TXCHADDRS(ACH6); + case_TXCHADDRS(ACH7); + case_TXCHADDRS(CH8); + case_TXCHADDRS(CH9); + case_TXCHADDRS(CH10); + case_TXCHADDRS(CH11); + case_TXCHADDRS(CH12); + default: + return -EINVAL; + } + + return 0; +} + +#undef case_TXCHADDRS + +#define case_RXCHADDRS(rxch) \ + case RTW89_RXCH_##rxch: \ + *addr_num = R_AX_##rxch##_RXBD_NUM; \ + *addr_idx = R_AX_##rxch##_RXBD_IDX; \ + *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \ + *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \ + break + +static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch, + u32 *addr_num, + u32 *addr_idx, + u32 *addr_desa_l, + u32 *addr_desa_h) +{ + switch (rxch) { + case_RXCHADDRS(RXQ); + case_RXCHADDRS(RPQ); + default: + return -EINVAL; + } + + return 0; +} + +#undef case_RXCHADDRS + +static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; + + /* reserved 1 desc check ring is full or not */ + if (bd_ring->rp > bd_ring->wp) + return bd_ring->rp - bd_ring->wp - 1; + + return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; +} + +static +u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; + u32 cnt; + + spin_lock_bh(&rtwpci->trx_lock); + rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); + cnt = rtw89_pci_get_avail_txbd_num(tx_ring); + spin_unlock_bh(&rtwpci->trx_lock); + + return cnt; +} + +static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, + u8 txch) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + u32 bd_cnt, wd_cnt, min_cnt = 0; + struct rtw89_pci_rx_ring *rx_ring; + u32 cnt; + + rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; + + spin_lock_bh(&rtwpci->trx_lock); + bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); + wd_cnt = wd_ring->curr_num; + + if (wd_cnt == 0 || bd_cnt == 0) { + cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); + if (!cnt) + goto out_unlock; + rtw89_pci_release_tx(rtwdev, rx_ring, cnt); + } + + bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); + wd_cnt = wd_ring->curr_num; + min_cnt = min(bd_cnt, wd_cnt); + if (min_cnt == 0) + rtw89_warn(rtwdev, "still no tx resource after reclaim\n"); + +out_unlock: + spin_unlock_bh(&rtwpci->trx_lock); + + return min_cnt; +} + +static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, + u8 txch) +{ + if (txch == RTW89_TXCH_CH12) + return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); + + return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, 
txch); +} + +static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; + u32 host_idx, addr; + + addr = bd_ring->addr_idx; + host_idx = bd_ring->wp; + rtw89_write16(rtwdev, addr, host_idx); +} + +static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, + int n_txbd) +{ + struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; + u32 host_idx, len; + + len = bd_ring->len; + host_idx = bd_ring->wp + n_txbd; + host_idx = host_idx < len ? host_idx : host_idx - len; + + bd_ring->wp = host_idx; +} + +static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; + + spin_lock_bh(&rtwpci->trx_lock); + __rtw89_pci_tx_kick_off(rtwdev, tx_ring); + spin_unlock_bh(&rtwpci->trx_lock); +} + +static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; + struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; + u32 cur_idx, cur_rp; + u8 i; + + /* Because the time taked by the I/O is a bit dynamic, it's hard to + * define a reasonable fixed total timeout to use read_poll_timeout* + * helper. Instead, we can ensure a reasonable polling times, so we + * just use for loop with udelay here. + */ + for (i = 0; i < 60; i++) { + cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx); + cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); + if (cur_rp == bd_ring->wp) + return; + + udelay(1); + } + + if (!drop) + rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); +} + +static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, + bool drop) +{ + u8 i; + + for (i = 0; i < RTW89_TXCH_NUM; i++) { + /* It may be unnecessary to flush FWCMD queue. */ + if (i == RTW89_TXCH_CH12) + continue; + + if (txchs & BIT(i)) + __pci_flush_txch(rtwdev, i, drop); + } +} + +static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, + bool drop) +{ + __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); +} + +static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring, + struct rtw89_pci_tx_wd *txwd, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct rtw89_txwd_body *txwd_body; + struct rtw89_txwd_info *txwd_info; + struct rtw89_pci_tx_wp_info *txwp_info; + struct rtw89_pci_tx_addr_info_32 *txaddr_info; + struct pci_dev *pdev = rtwpci->pdev; + struct sk_buff *skb = tx_req->skb; + struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); + bool en_wd_info = desc_info->en_wd_info; + u32 txwd_len; + u32 txwp_len; + u32 txaddr_info_len; + dma_addr_t dma; + int ret; + + rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr); + + dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma)) { + rtw89_err(rtwdev, "failed to map skb dma data\n"); + ret = -EBUSY; + goto err; + } + + tx_data->dma = dma; + + txaddr_info_len = sizeof(*txaddr_info); + txwp_len = sizeof(*txwp_info); + txwd_len = sizeof(*txwd_body); + txwd_len += en_wd_info ? 
sizeof(*txwd_info) : 0; + + txwp_info = txwd->vaddr + txwd_len; + txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); + txwp_info->seq1 = 0; + txwp_info->seq2 = 0; + txwp_info->seq3 = 0; + + tx_ring->tx_cnt++; + txaddr_info = txwd->vaddr + txwd_len + txwp_len; + txaddr_info->length = cpu_to_le16(skb->len); + txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | + RTW89_PCI_ADDR_NUM(1)); + txaddr_info->dma = cpu_to_le32(dma); + + txwd->len = txwd_len + txwp_len + txaddr_info_len; + + skb_queue_tail(&txwd->queue, skb); + + return 0; + +err: + return ret; +} + +static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring, + struct rtw89_pci_tx_bd_32 *txbd, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct rtw89_txwd_body *txwd_body; + struct pci_dev *pdev = rtwpci->pdev; + struct sk_buff *skb = tx_req->skb; + struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); + dma_addr_t dma; + + txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body)); + memset(txwd_body, 0, sizeof(*txwd_body)); + rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body); + + dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma)) { + rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); + return -EBUSY; + } + + tx_data->dma = dma; + txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + txbd->length = cpu_to_le16(skb->len); + txbd->dma = cpu_to_le32(tx_data->dma); + skb_queue_tail(&rtwpci->h2c_queue, skb); + + rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); + + return 0; +} + +static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring, + struct rtw89_pci_tx_bd_32 *txbd, + struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_pci_tx_wd *txwd; + int ret; + + /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD + * buffer with WD BODY only. So here we don't need to check the free + * pages of the wd ring. 
+ */ + if (tx_ring->txch == RTW89_TXCH_CH12) + return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); + + txwd = rtw89_pci_dequeue_txwd(tx_ring); + if (!txwd) { + rtw89_err(rtwdev, "no available TXWD\n"); + ret = -ENOSPC; + goto err; + } + + ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); + if (ret) { + rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); + goto err_enqueue_wd; + } + + list_add_tail(&txwd->list, &tx_ring->busy_pages); + + txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + txbd->length = cpu_to_le16(txwd->len); + txbd->dma = cpu_to_le32(txwd->paddr); + + rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); + + return 0; + +err_enqueue_wd: + rtw89_pci_enqueue_txwd(tx_ring, txwd); +err: + return ret; +} + +static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, + u8 txch) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + struct rtw89_pci_tx_bd_32 *txbd; + u32 n_avail_txbd; + int ret = 0; + + /* check the tx type and dma channel for fw cmd queue */ + if ((txch == RTW89_TXCH_CH12 || + tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && + (txch != RTW89_TXCH_CH12 || + tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { + rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); + return -EINVAL; + } + + tx_ring = &rtwpci->tx_rings[txch]; + spin_lock_bh(&rtwpci->trx_lock); + + n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); + if (n_avail_txbd == 0) { + rtw89_err(rtwdev, "no available TXBD\n"); + ret = -ENOSPC; + goto err_unlock; + } + + txbd = rtw89_pci_get_next_txbd(tx_ring); + ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); + if (ret) { + rtw89_err(rtwdev, "failed to submit TXBD\n"); + goto err_unlock; + } + + spin_unlock_bh(&rtwpci->trx_lock); + return 0; + +err_unlock: + spin_unlock_bh(&rtwpci->trx_lock); + return ret; +} + +static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + int ret; + + ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); + if (ret) { + rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); + return ret; + } + + return 0; +} + +static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = { + [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, + [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, + [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, + [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, + [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, + [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, +}; + +static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + struct rtw89_pci_rx_ring *rx_ring; + struct rtw89_pci_dma_ring *bd_ring; + const struct rtw89_pci_bd_ram *bd_ram; + u32 addr_num; + u32 addr_bdram; + u32 
addr_desa_l; + u32 val32; + int i; + + for (i = 0; i < RTW89_TXCH_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + bd_ring = &tx_ring->bd_ring; + bd_ram = &bd_ram_table[i]; + addr_num = bd_ring->addr_num; + addr_bdram = bd_ring->addr_bdram; + addr_desa_l = bd_ring->addr_desa_l; + bd_ring->wp = 0; + bd_ring->rp = 0; + + val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | + FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | + FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); + + rtw89_write16(rtwdev, addr_num, bd_ring->len); + rtw89_write32(rtwdev, addr_bdram, val32); + rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + } + + for (i = 0; i < RTW89_RXCH_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + bd_ring = &rx_ring->bd_ring; + addr_num = bd_ring->addr_num; + addr_desa_l = bd_ring->addr_desa_l; + bd_ring->wp = 0; + bd_ring->rp = 0; + rx_ring->diliver_skb = NULL; + rx_ring->diliver_desc.ready = false; + + rtw89_write16(rtwdev, addr_num, bd_ring->len); + rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + } +} + +static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + rtw89_pci_release_busy_txwd(rtwdev, tx_ring); + rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); +} + +static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + int txch; + + rtw89_pci_reset_trx_rings(rtwdev); + + spin_lock_bh(&rtwpci->trx_lock); + for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { + if (txch == RTW89_TXCH_CH12) { + rtw89_pci_release_fwcmd(rtwdev, rtwpci, + skb_queue_len(&rtwpci->h2c_queue), true); + continue; + } + rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); + } + spin_unlock_bh(&rtwpci->trx_lock); +} + +static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long flags; + + rtw89_core_napi_start(rtwdev); + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtwpci->running = true; + rtw89_pci_enable_intr(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + + return 0; +} + +static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + unsigned long flags; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtwpci->running = false; + rtw89_pci_disable_intr(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + + synchronize_irq(pdev->irq); + rtw89_core_napi_stop(rtwdev); +} + +static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); + +static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + u32 val = readl(rtwpci->mmap + addr); + int count; + + for (count = 0; ; count++) { + if (val != RTW89_R32_DEAD) + return val; + if (count >= MAC_REG_POOL_COUNT) { + rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); + return RTW89_R32_DEAD; + } + rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); + val = readl(rtwpci->mmap + addr); + } + + return val; +} + +static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + u32 addr32, val32, shift; + + if (!ACCESS_CMAC(addr)) + return readb(rtwpci->mmap + addr); + + addr32 = addr & ~0x3; + shift = (addr & 0x3) * 8; + val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); + return val32 >> shift; +} + +static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, 
u32 addr) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + u32 addr32, val32, shift; + + if (!ACCESS_CMAC(addr)) + return readw(rtwpci->mmap + addr); + + addr32 = addr & ~0x3; + shift = (addr & 0x3) * 8; + val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); + return val32 >> shift; +} + +static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + if (!ACCESS_CMAC(addr)) + return readl(rtwpci->mmap + addr); + + return rtw89_pci_ops_read32_cmac(rtwdev, addr); +} + +static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + writeb(data, rtwpci->mmap + addr); +} + +static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + writew(data, rtwpci->mmap + addr); +} + +static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + writel(data, rtwpci->mmap + addr); +} + +static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) +{ + if (enable) { + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_TXHCI_EN | B_AX_RXHCI_EN); + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, + B_AX_STOP_PCIEIO); + } else { + rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, + B_AX_STOP_PCIEIO); + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_TXHCI_EN | B_AX_RXHCI_EN); + } +} + +static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) +{ + u16 val; + + rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); + + val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); + switch (speed) { + case PCIE_PHY_GEN1: + if (addr < 0x20) + val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); + else + val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); + break; + case PCIE_PHY_GEN2: + if (addr < 0x20) + val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); + else + val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); + break; + default: + rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); + return -EINVAL; + } + rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); + rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); + + return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, + false, rtwdev, R_AX_MDIO_CFG); +} + +static int +rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) +{ + int ret; + + ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); + if (ret) { + rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); + return ret; + } + *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); + + return 0; +} + +static int +rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) +{ + int ret; + + rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); + ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); + if (ret) { + rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); + return ret; + } + + return 0; +} + +static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) +{ + int ret; + u16 val; + + ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); + if (ret) + return ret; + ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); + if (ret) + return ret; + + return 0; +} + +static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) +{ + int 
ret; + u16 val; + + ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); + if (ret) + return ret; + ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); + if (ret) + return ret; + + return 0; +} + +static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) +{ + u16 write_addr; + u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK); + u8 flag; + int ret; + + write_addr = addr & B_AX_DBI_ADDR_MSK; + write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK); + rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data); + rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); + rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); + + ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, + 10 * RTW89_PCI_WR_RETRY_CNT, false, + rtwdev, R_AX_DBI_FLAG + 2); + if (ret) + WARN(flag, "failed to write to DBI register, addr=0x%04x\n", + addr); + + return ret; +} + +static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) +{ + u16 read_addr = addr & B_AX_DBI_ADDR_MSK; + u8 flag; + int ret; + + rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); + rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); + + ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, + 10 * RTW89_PCI_WR_RETRY_CNT, false, + rtwdev, R_AX_DBI_FLAG + 2); + + if (!ret) { + read_addr = R_AX_DBI_RDATA + (addr & 3); + *value = rtw89_read8(rtwdev, read_addr); + } else { + WARN(1, "failed to read DBI register, addr=0x%04x\n", addr); + ret = -EIO; + } + + return ret; +} + +static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit) +{ + u8 value; + int ret; + + ret = rtw89_dbi_read8(rtwdev, addr, &value); + if (ret) + return ret; + + value |= bit; + ret = rtw89_dbi_write8(rtwdev, addr, value); + + return ret; +} + +static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit) +{ + u8 value; + int ret; + + ret = rtw89_dbi_read8(rtwdev, addr, &value); + if (ret) + return ret; + + value &= ~bit; + ret = rtw89_dbi_write8(rtwdev, addr, value); + + return ret; +} + +static int +__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) +{ + u16 val, tar; + int ret; + + /* Enable counter */ + ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); + if (ret) + return ret; + ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, + phy_rate); + if (ret) + return ret; + ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, + phy_rate); + if (ret) + return ret; + + fsleep(300); + + ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); + if (ret) + return ret; + ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, + phy_rate); + if (ret) + return ret; + + tar = tar & 0x0FFF; + if (tar == 0 || tar == 0x0FFF) { + rtw89_err(rtwdev, "[ERR]Get target failed.\n"); + return -EINVAL; + } + + *target = tar; + + return 0; +} + +static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) +{ + enum rtw89_pcie_phy phy_rate; + u16 val16, mgn_set, div_set, tar; + u8 val8, bdr_ori; + bool l1_flag = false; + int ret = 0; + + if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) || + rtwdev->chip->chip_id == RTL8852C) + return 0; + + ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8); + if (ret) { + rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE); + return ret; + } + + if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { + phy_rate = PCIE_PHY_GEN1; + } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, 
val8) == 0x2) { + phy_rate = PCIE_PHY_GEN2; + } else { + rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); + return -EOPNOTSUPP; + } + /* Disable L1BD */ + ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); + if (ret) { + rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL); + return ret; + } + + if (bdr_ori & RTW89_PCIE_BIT_L1) { + ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, + bdr_ori & ~RTW89_PCIE_BIT_L1); + if (ret) { + rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL); + return ret; + } + l1_flag = true; + } + + ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + + if (val16 & B_AX_CALIB_EN) { + ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, + val16 & ~B_AX_CALIB_EN, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + } + + if (!autook_en) + goto end; + /* Set div */ + ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + + /* Obtain div and margin */ + ret = __get_target(rtwdev, &tar, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); + goto end; + } + + mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; + + if (mgn_set >= 128) { + div_set = 0x0003; + mgn_set = 0x000F; + } else if (mgn_set >= 64) { + div_set = 0x0003; + mgn_set >>= 3; + } else if (mgn_set >= 32) { + div_set = 0x0002; + mgn_set >>= 2; + } else if (mgn_set >= 16) { + div_set = 0x0001; + mgn_set >>= 1; + } else if (mgn_set == 0) { + rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); + goto end; + } else { + div_set = 0x0000; + } + + ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + + val16 |= u16_encode_bits(div_set, B_AX_DIV); + + ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + + ret = __get_target(rtwdev, &tar, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); + goto end; + } + + rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", + tar, div_set, mgn_set); + ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, + (tar & 0x0FFF) | (mgn_set << 12), phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); + goto end; + } + + /* Enable function */ + ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); + if (ret) { + rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); + goto end; + } + + /* CLK delay = 0 */ + ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0); + +end: + /* Set L1BD to ori */ + if (l1_flag) { + ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori); + if (ret) { + rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL); + return ret; + } + } + + return ret; +} + +static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) +{ + int ret; + + if (rtwdev->chip->chip_id != RTL8852A) + return 0; + + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, + PCIE_PHY_GEN1); + if (ret) + return ret; + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, + PCIE_PHY_GEN2); + if (ret) + return ret; + + return 0; +} + 
+static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) +{ + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); +} + +static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852C) + return; + + rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); +} + +static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) +{ + int ret; + + if (rtwdev->chip->chip_id == RTL8852C) + return 0; + + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, + PCIE_PHY_GEN1); + if (ret) + return ret; + + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, + PCIE_PHY_GEN2); + if (ret) + return ret; + + return 0; +} + +static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852A) + return; + + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); +} + +static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852A) + return; + + rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_WLSUS_AFT_PDN); +} + +static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852C) + return; + + rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, + B_AX_SIC_EN_FORCE_CLKREQ); +} + +static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852C) + return; + + rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, + B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); + + if (rtwdev->chip->chip_id == RTL8852A) + rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, + B_AX_EN_CHKDSC_NO_RX_STUCK); +} + +static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) +{ + u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | + B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | + B_AX_CLR_CH12_IDX; + + if (rtwdev->chip->chip_id == RTL8852A) + val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | + B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; + /* clear DMA indexes */ + rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); + if (rtwdev->chip->chip_id == RTL8852A) + rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2, + B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); + rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR, + B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); +} + +static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852A) { + /* ltr sw trigger */ + rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); + } + rtw89_pci_ctrl_dma_all(rtwdev, false); + rtw89_pci_clr_idx_all(rtwdev); + + return 0; +} + +static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) +{ + u32 dma_busy; + u32 check; + u32 lbc; + int ret; + + rtw89_pci_rxdma_prefth(rtwdev); + rtw89_pci_l1off_pwroff(rtwdev); + rtw89_pci_deglitch_setting(rtwdev); + ret = rtw89_pci_l2_rxen_lat(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); + return ret; + } + + rtw89_pci_aphy_pwrcut(rtwdev); + rtw89_pci_hci_ldo(rtwdev); + + ret = rtw89_pci_auto_refclk_cal(rtwdev, false); + if (ret) { + rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); + return ret; + } + + rtw89_pci_set_sic(rtwdev); + rtw89_pci_set_dbg(rtwdev); + + if (rtwdev->chip->chip_id == RTL8852A) + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_AUXCLK_GATE); + + lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); + lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER); + lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; + 
rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); + + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); + rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA); + + /* stop DMA activities */ + rtw89_pci_ctrl_dma_all(rtwdev, false); + + /* check PCI at idle state */ + check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; + ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, + 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1); + if (ret) { + rtw89_err(rtwdev, "failed to poll io busy\n"); + return ret; + } + + rtw89_pci_clr_idx_all(rtwdev); + + /* configure TX/RX op modes */ + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE | + B_AX_RX_TRUNC_MODE); + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3); + /* multi-tag mode */ + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL); + rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM, + RTW89_MAC_TAG_NUM_8); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, + RTW89_MAC_WD_DMA_INTVL_256NS); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, + RTW89_MAC_WD_DMA_INTVL_256NS); + + /* fill TRX BD indexes */ + rtw89_pci_ops_reset(rtwdev); + + ret = rtw89_pci_rst_bdram_pcie(rtwdev); + if (ret) { + rtw89_warn(rtwdev, "reset bdram busy\n"); + return ret; + } + + /* enable FW CMD queue to download firmware */ + rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12); + rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); + + /* start DMA activities */ + rtw89_pci_ctrl_dma_all(rtwdev, true); + + return 0; +} + +static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev) +{ + u32 val; + + val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); + if (rtw89_pci_ltr_is_err_reg_val(val)) + return -EINVAL; + val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); + if (rtw89_pci_ltr_is_err_reg_val(val)) + return -EINVAL; + val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); + if (rtw89_pci_ltr_is_err_reg_val(val)) + return -EINVAL; + val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); + if (rtw89_pci_ltr_is_err_reg_val(val)) + return -EINVAL; + + rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); + rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, + PCI_LTR_SPC_500US); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, + PCI_LTR_IDLE_TIMER_800US); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); + rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); + rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); + + return 0; +} + +static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_pci_ltr_set(rtwdev); + if (ret) { + rtw89_err(rtwdev, "pci ltr set fail\n"); + return ret; + } + if (rtwdev->chip->chip_id == RTL8852A) { + /* ltr sw trigger */ + rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); + } + /* ADDR info 8-byte mode */ + rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, + B_AX_HOST_ADDR_INFO_8B_SEL); + rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); + 
+ /* enable DMA for all queues */ + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); + + /* Release PCI IO */ + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, + B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); + + return 0; +} + +static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + rtw89_err(rtwdev, "failed to enable pci device\n"); + return ret; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, rtwdev->hw); + + rtwpci->pdev = pdev; + + return 0; +} + +static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long resource_len; + u8 bar_id = 2; + int ret; + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + rtw89_err(rtwdev, "failed to request pci regions\n"); + goto err; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); + goto err_release_regions; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); + goto err_release_regions; + } + + resource_len = pci_resource_len(pdev, bar_id); + rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); + if (!rtwpci->mmap) { + rtw89_err(rtwdev, "failed to map pci io\n"); + ret = -EIO; + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_release_regions(pdev); +err: + return ret; +} + +static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + if (rtwpci->mmap) { + pci_iounmap(pdev, rtwpci->mmap); + pci_release_regions(pdev); + } +} + +static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + u8 *head = wd_ring->head; + dma_addr_t dma = wd_ring->dma; + u32 page_size = wd_ring->page_size; + u32 page_num = wd_ring->page_num; + u32 ring_sz = page_size * page_num; + + dma_free_coherent(&pdev->dev, ring_sz, head, dma); + wd_ring->head = NULL; +} + +static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_tx_ring *tx_ring) +{ + int ring_sz; + u8 *head; + dma_addr_t dma; + + head = tx_ring->bd_ring.head; + dma = tx_ring->bd_ring.dma; + ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; + dma_free_coherent(&pdev->dev, ring_sz, head, dma); + + tx_ring->bd_ring.head = NULL; +} + +static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + int i; + + for (i = 0; i < RTW89_TXCH_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); + rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); + } +} + +static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_rx_ring *rx_ring) +{ + struct rtw89_pci_rx_info *rx_info; + struct sk_buff *skb; + dma_addr_t dma; + u32 buf_sz; + u8 *head; + int 
ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; + int i; + + buf_sz = rx_ring->buf_sz; + for (i = 0; i < rx_ring->bd_ring.len; i++) { + skb = rx_ring->buf[i]; + if (!skb) + continue; + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + dma = rx_info->dma; + dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_ring->buf[i] = NULL; + } + + head = rx_ring->bd_ring.head; + dma = rx_ring->bd_ring.dma; + dma_free_coherent(&pdev->dev, ring_sz, head, dma); + + rx_ring->bd_ring.head = NULL; +} + +static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_rx_ring *rx_ring; + int i; + + for (i = 0; i < RTW89_RXCH_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); + } +} + +static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + rtw89_pci_free_rx_rings(rtwdev, pdev); + rtw89_pci_free_tx_rings(rtwdev, pdev); +} + +static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, + struct rtw89_pci_rx_ring *rx_ring, + struct sk_buff *skb, int buf_sz, u32 idx) +{ + struct rtw89_pci_rx_info *rx_info; + struct rtw89_pci_rx_bd_32 *rx_bd; + dma_addr_t dma; + + if (!skb) + return -EINVAL; + + dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, dma)) + return -EBUSY; + + rx_info = RTW89_PCI_RX_SKB_CB(skb); + rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); + + memset(rx_bd, 0, sizeof(*rx_bd)); + rx_bd->buf_size = cpu_to_le16(buf_sz); + rx_bd->dma = cpu_to_le32(dma); + rx_info->dma = dma; + + return 0; +} + +static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_tx_ring *tx_ring, + enum rtw89_tx_channel txch) +{ + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + struct rtw89_pci_tx_wd *txwd; + dma_addr_t dma; + dma_addr_t cur_paddr; + u8 *head; + u8 *cur_vaddr; + u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; + u32 page_num = RTW89_PCI_TXWD_NUM_MAX; + u32 ring_sz = page_size * page_num; + u32 page_offset; + int i; + + /* FWCMD queue doesn't use txwd as pages */ + if (txch == RTW89_TXCH_CH12) + return 0; + + head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); + if (!head) + return -ENOMEM; + + INIT_LIST_HEAD(&wd_ring->free_pages); + wd_ring->head = head; + wd_ring->dma = dma; + wd_ring->page_size = page_size; + wd_ring->page_num = page_num; + + page_offset = 0; + for (i = 0; i < page_num; i++) { + txwd = &wd_ring->pages[i]; + cur_paddr = dma + page_offset; + cur_vaddr = head + page_offset; + + skb_queue_head_init(&txwd->queue); + INIT_LIST_HEAD(&txwd->list); + txwd->paddr = cur_paddr; + txwd->vaddr = cur_vaddr; + txwd->len = page_size; + txwd->seq = i; + rtw89_pci_enqueue_txwd(tx_ring, txwd); + + page_offset += page_size; + } + + return 0; +} + +static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_tx_ring *tx_ring, + u32 desc_size, u32 len, + enum rtw89_tx_channel txch) +{ + int ring_sz = desc_size * len; + u8 *head; + dma_addr_t dma; + u32 addr_num; + u32 addr_idx; + u32 addr_bdram; + u32 addr_desa_l; + u32 addr_desa_h; + int ret; + + ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); + if (ret) { + rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); + goto err; + } + + ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram, + &addr_desa_l, &addr_desa_h); + if (ret) { 
+ rtw89_err(rtwdev, "failed to get address of txch %d", txch); + goto err_free_wd_ring; + } + + head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); + if (!head) { + ret = -ENOMEM; + goto err_free_wd_ring; + } + + INIT_LIST_HEAD(&tx_ring->busy_pages); + tx_ring->bd_ring.head = head; + tx_ring->bd_ring.dma = dma; + tx_ring->bd_ring.len = len; + tx_ring->bd_ring.desc_size = desc_size; + tx_ring->bd_ring.addr_num = addr_num; + tx_ring->bd_ring.addr_idx = addr_idx; + tx_ring->bd_ring.addr_bdram = addr_bdram; + tx_ring->bd_ring.addr_desa_l = addr_desa_l; + tx_ring->bd_ring.addr_desa_h = addr_desa_h; + tx_ring->bd_ring.wp = 0; + tx_ring->bd_ring.rp = 0; + tx_ring->txch = txch; + + return 0; + +err_free_wd_ring: + rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); +err: + return ret; +} + +static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + u32 desc_size; + u32 len; + u32 i, tx_allocated; + int ret; + + for (i = 0; i < RTW89_TXCH_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + desc_size = sizeof(struct rtw89_pci_tx_bd_32); + len = RTW89_PCI_TXBD_NUM_MAX; + ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, + desc_size, len, i); + if (ret) { + rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); + goto err_free; + } + } + + return 0; + +err_free: + tx_allocated = i; + for (i = 0; i < tx_allocated; i++) { + tx_ring = &rtwpci->tx_rings[i]; + rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); + } + + return ret; +} + +static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + struct rtw89_pci_rx_ring *rx_ring, + u32 desc_size, u32 len, u32 rxch) +{ + struct sk_buff *skb; + u8 *head; + dma_addr_t dma; + u32 addr_num; + u32 addr_idx; + u32 addr_desa_l; + u32 addr_desa_h; + int ring_sz = desc_size * len; + int buf_sz = RTW89_PCI_RX_BUF_SIZE; + int i, allocated; + int ret; + + ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx, + &addr_desa_l, &addr_desa_h); + if (ret) { + rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); + return ret; + } + + head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); + if (!head) { + ret = -ENOMEM; + goto err; + } + + rx_ring->bd_ring.head = head; + rx_ring->bd_ring.dma = dma; + rx_ring->bd_ring.len = len; + rx_ring->bd_ring.desc_size = desc_size; + rx_ring->bd_ring.addr_num = addr_num; + rx_ring->bd_ring.addr_idx = addr_idx; + rx_ring->bd_ring.addr_desa_l = addr_desa_l; + rx_ring->bd_ring.addr_desa_h = addr_desa_h; + rx_ring->bd_ring.wp = 0; + rx_ring->bd_ring.rp = 0; + rx_ring->buf_sz = buf_sz; + rx_ring->diliver_skb = NULL; + rx_ring->diliver_desc.ready = false; + + for (i = 0; i < len; i++) { + skb = dev_alloc_skb(buf_sz); + if (!skb) { + ret = -ENOMEM; + goto err_free; + } + + memset(skb->data, 0, buf_sz); + rx_ring->buf[i] = skb; + ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, + buf_sz, i); + if (ret) { + rtw89_err(rtwdev, "failed to init rx buf %d\n", i); + dev_kfree_skb_any(skb); + rx_ring->buf[i] = NULL; + goto err_free; + } + } + + return 0; + +err_free: + allocated = i; + for (i = 0; i < allocated; i++) { + skb = rx_ring->buf[i]; + if (!skb) + continue; + dma = *((dma_addr_t *)skb->cb); + dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_ring->buf[i] = NULL; + } + + head = rx_ring->bd_ring.head; + dma = rx_ring->bd_ring.dma; + dma_free_coherent(&pdev->dev, ring_sz, head, dma); + + rx_ring->bd_ring.head = NULL; 
+err: + return ret; +} + +static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_rx_ring *rx_ring; + u32 desc_size; + u32 len; + int i, rx_allocated; + int ret; + + for (i = 0; i < RTW89_RXCH_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + desc_size = sizeof(struct rtw89_pci_rx_bd_32); + len = RTW89_PCI_RXBD_NUM_MAX; + ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, + desc_size, len, i); + if (ret) { + rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); + goto err_free; + } + } + + return 0; + +err_free: + rx_allocated = i; + for (i = 0; i < rx_allocated; i++) { + rx_ring = &rtwpci->rx_rings[i]; + rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); + } + + return ret; +} + +static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + int ret; + + ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); + goto err; + } + + ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); + goto err_free_tx_rings; + } + + return 0; + +err_free_tx_rings: + rtw89_pci_free_tx_rings(rtwdev, pdev); +err: + return ret; +} + +static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci) +{ + skb_queue_head_init(&rtwpci->h2c_queue); + skb_queue_head_init(&rtwpci->h2c_release_queue); +} + +static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + int ret; + + ret = rtw89_pci_setup_mapping(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to setup pci mapping\n"); + goto err; + } + + ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); + goto err_pci_unmap; + } + + rtw89_pci_h2c_init(rtwdev, rtwpci); + + spin_lock_init(&rtwpci->irq_lock); + spin_lock_init(&rtwpci->trx_lock); + + return 0; + +err_pci_unmap: + rtw89_pci_clear_mapping(rtwdev, pdev); +err: + return ret; +} + +static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + rtw89_pci_free_trx_rings(rtwdev, pdev); + rtw89_pci_clear_mapping(rtwdev, pdev); + rtw89_pci_release_fwcmd(rtwdev, rtwpci, + skb_queue_len(&rtwpci->h2c_queue), true); +} + +static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; + rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | + B_AX_RXDMA_INT_EN | + B_AX_RXP1DMA_INT_EN | + B_AX_RPQDMA_INT_EN | + B_AX_RXDMA_STUCK_INT_EN | + B_AX_RDU_INT_EN | + B_AX_RPQBD_FULL_INT_EN | + B_AX_HS0ISR_IND_INT_EN; + + rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; +} + +static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + unsigned long flags = 0; + int ret; + + flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; + ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); + if (ret < 0) { + rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); + goto err; + } + + ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, + rtw89_pci_interrupt_handler, + rtw89_pci_interrupt_threadfn, + IRQF_SHARED, KBUILD_MODNAME, rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to request threaded irq\n"); + goto err_free_vector; + } + + rtw89_pci_default_intr_mask(rtwdev); + + return 0; + 
+err_free_vector: + pci_free_irq_vectors(pdev); +err: + return ret; +} + +static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, + struct pci_dev *pdev) +{ + devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); + pci_free_irq_vectors(pdev); +} + +static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) +{ + int ret; + + if (rtw89_pci_disable_clkreq) + return; + + ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, + PCIE_CLKDLY_HW_30US); + if (ret) + rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); + + if (enable) + ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL, + RTW89_PCIE_BIT_CLK); + else + ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL, + RTW89_PCIE_BIT_CLK); + if (ret) + rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", + enable ? "set" : "unset", ret); +} + +static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) +{ + u8 value = 0; + int ret; + + if (rtw89_pci_disable_aspm_l1) + return; + + ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); + if (ret) + rtw89_err(rtwdev, "failed to read ASPM Delay\n"); + + value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); + value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | + FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); + + ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value); + if (ret) + rtw89_err(rtwdev, "failed to set ASPM Delay\n"); + + if (enable) + ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL, + RTW89_PCIE_BIT_L1); + else + ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL, + RTW89_PCIE_BIT_L1); + if (ret) + rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", + enable ? "set" : "unset", ret); +} + +static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) +{ + struct rtw89_traffic_stats *stats = &rtwdev->stats; + enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; + enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; + u32 val = 0; + + if (!rtwdev->scanning && + (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) + val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | + FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | + FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | + FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); + + rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); +} + +static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + u16 link_ctrl; + int ret; + + /* Although there is a standard PCIE configuration space in which to + * set the link control register, by Realtek's design the driver must + * check whether the host supports CLKREQ/ASPM before enabling the HW + * module. + * + * These functions are implemented by two associated HW modules: one + * is responsible for accessing the PCIE configuration space to follow + * the host settings, and the other carries out the CLKREQ/ASPM + * mechanisms themselves; the latter is disabled by default. The host + * may not support them, and wrong settings (e.g. CLKREQ# not + * bi-directional) could lead to loss of the device if the HW + * misbehaves on the link. + * + * Hence the driver should first check that the PCIE configuration + * space is synced and enabled, and only then turn on the module that + * actually implements the mechanism. 
+ */ + ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); + if (ret) { + rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); + return; + } + + if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) + rtw89_pci_clkreq_set(rtwdev, true); + + if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) + rtw89_pci_aspm_set(rtwdev, true); +} + +static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) +{ + int ret; + + if (enable) + ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL, + RTW89_PCIE_BIT_L1SUB); + else + ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL, + RTW89_PCIE_BIT_L1SUB); + if (ret) + rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", + enable ? "set" : "unset", ret); +} + +static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + u32 l1ss_cap_ptr, l1ss_ctrl; + + if (rtw89_pci_disable_l1ss) + return; + + l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss_cap_ptr) + return; + + pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); + + if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) + rtw89_pci_l1ss_set(rtwdev, true); +} + +static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) +{ + u32 val32; + + if (en == MAC_AX_FUNC_EN) { + val32 = B_AX_STOP_PCIEIO; + rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32); + + val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); + } else { + val32 = B_AX_STOP_PCIEIO; + rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32); + + val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); + } +} + +static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) +{ + int ret = 0; + u32 sts; + u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; + + ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, + 10, 1000, false, rtwdev, + R_AX_PCIE_DMA_BUSY1); + if (ret) { + rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", + rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); + return -EINVAL; + } + return ret; +} + +static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) +{ + u32 val, dma_rst = 0; + int ret; + + rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); + ret = rtw89_pci_poll_io_idle(rtwdev); + if (ret) { + val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); + rtw89_debug(rtwdev, RTW89_DBG_HCI, + "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", + R_AX_DBG_ERR_FLAG, val); + if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) + dma_rst |= B_AX_HCI_TXDMA_EN; + if (val & B_AX_RX_STUCK) + dma_rst |= B_AX_HCI_RXDMA_EN; + val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); + rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); + rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); + ret = rtw89_pci_poll_io_idle(rtwdev); + val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); + rtw89_debug(rtwdev, RTW89_DBG_HCI, + "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", + R_AX_DBG_ERR_FLAG, val); + } + + return ret; +} + +static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) +{ + u32 val32; + + if (en == MAC_AX_FUNC_EN) { + val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; + rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); + } else { + val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; + rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); + } +} + +static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) +{ + int ret = 0; + u32 val32, sts; + + val32 = B_AX_RST_BDRAM; + 
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); + + ret = read_poll_timeout_atomic(rtw89_read32, sts, + (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, + true, rtwdev, R_AX_PCIE_INIT_CFG1); + return ret; +} + +static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) +{ + u32 ret; + + rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); + rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); + rtw89_pci_clr_idx_all(rtwdev); + + ret = rtw89_pci_rst_bdram(rtwdev); + if (ret) + return ret; + + rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); + return ret; +} + +static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, + enum rtw89_lv1_rcvy_step step) +{ + int ret; + + switch (step) { + case RTW89_LV1_RCVY_STEP_1: + ret = rtw89_pci_lv1rst_stop_dma(rtwdev); + if (ret) + rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); + + break; + + case RTW89_LV1_RCVY_STEP_2: + ret = rtw89_pci_lv1rst_start_dma(rtwdev); + if (ret) + rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); + break; + + default: + return -EINVAL; + } + + return ret; +} + +static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) +{ + rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); + rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); + rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); +} + +static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) +{ + struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long flags; + int work_done; + + rtwdev->napi_budget_countdown = budget; + + rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); + work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); + if (work_done == budget) + return budget; + + rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); + work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&rtwpci->irq_lock, flags); + if (likely(rtwpci->running)) + rtw89_pci_enable_intr(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + } + + return work_done; +} + +static int __maybe_unused rtw89_pci_suspend(struct device *dev) +{ + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw89_dev *rtwdev = hw->priv; + + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); + rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); + rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); + rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); + + return 0; +} + +static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852C) + return; + + /* Hardware requires the register to be written twice for the setting to take effect */ + rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE, + RTW89_PCIE_BIT_CFG_RST_MSTATE); + rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE, + RTW89_PCIE_BIT_CFG_RST_MSTATE); +} + +static int __maybe_unused rtw89_pci_resume(struct device *dev) +{ + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw89_dev *rtwdev = hw->priv; + + rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); + rtw89_write32_set(rtwdev, 
R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); + rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); + rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); + rtw89_pci_l2_hci_ldo(rtwdev); + rtw89_pci_link_cfg(rtwdev); + rtw89_pci_l1ss_cfg(rtwdev); + + return 0; +} + +SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); +EXPORT_SYMBOL(rtw89_pm_ops); + +static const struct rtw89_hci_ops rtw89_pci_ops = { + .tx_write = rtw89_pci_ops_tx_write, + .tx_kick_off = rtw89_pci_ops_tx_kick_off, + .flush_queues = rtw89_pci_ops_flush_queues, + .reset = rtw89_pci_ops_reset, + .start = rtw89_pci_ops_start, + .stop = rtw89_pci_ops_stop, + .recalc_int_mit = rtw89_pci_recalc_int_mit, + + .read8 = rtw89_pci_ops_read8, + .read16 = rtw89_pci_ops_read16, + .read32 = rtw89_pci_ops_read32, + .write8 = rtw89_pci_ops_write8, + .write16 = rtw89_pci_ops_write16, + .write32 = rtw89_pci_ops_write32, + + .mac_pre_init = rtw89_pci_ops_mac_pre_init, + .mac_post_init = rtw89_pci_ops_mac_post_init, + .deinit = rtw89_pci_ops_deinit, + + .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, + .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, + .dump_err_status = rtw89_pci_ops_dump_err_status, + .napi_poll = rtw89_pci_napi_poll, +}; + +static int rtw89_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct ieee80211_hw *hw; + struct rtw89_dev *rtwdev; + int driver_data_size; + int ret; + + driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci); + hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops); + if (!hw) { + dev_err(&pdev->dev, "failed to allocate hw\n"); + return -ENOMEM; + } + + rtwdev = hw->priv; + rtwdev->hw = hw; + rtwdev->dev = &pdev->dev; + rtwdev->hci.ops = &rtw89_pci_ops; + rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; + rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM; + rtwdev->hci.cpwm_addr = R_AX_CPWM; + + SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); + + switch (id->driver_data) { + case RTL8852A: + rtwdev->chip = &rtw8852a_chip_info; + break; + default: + return -ENOENT; + } + + ret = rtw89_core_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to initialise core\n"); + goto err_release_hw; + } + + ret = rtw89_pci_claim_device(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to claim pci device\n"); + goto err_core_deinit; + } + + ret = rtw89_pci_setup_resource(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to setup pci resource\n"); + goto err_declaim_pci; + } + + ret = rtw89_chip_info_setup(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to setup chip information\n"); + goto err_clear_resource; + } + + rtw89_pci_link_cfg(rtwdev); + rtw89_pci_l1ss_cfg(rtwdev); + + ret = rtw89_core_register(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to register core\n"); + goto err_clear_resource; + } + + rtw89_core_napi_init(rtwdev); + + ret = rtw89_pci_request_irq(rtwdev, pdev); + if (ret) { + rtw89_err(rtwdev, "failed to request pci irq\n"); + goto err_unregister; + } + + return 0; + +err_unregister: + rtw89_core_napi_deinit(rtwdev); + rtw89_core_unregister(rtwdev); +err_clear_resource: + rtw89_pci_clear_resource(rtwdev, pdev); +err_declaim_pci: + rtw89_pci_declaim_device(rtwdev, pdev); +err_core_deinit: + rtw89_core_deinit(rtwdev); +err_release_hw: + ieee80211_free_hw(hw); + + return ret; +} + +static void rtw89_pci_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtw89_dev *rtwdev; + 
+ rtwdev = hw->priv; + + rtw89_pci_free_irq(rtwdev, pdev); + rtw89_core_napi_deinit(rtwdev); + rtw89_core_unregister(rtwdev); + rtw89_pci_clear_resource(rtwdev, pdev); + rtw89_pci_declaim_device(rtwdev, pdev); + rtw89_core_deinit(rtwdev); + ieee80211_free_hw(hw); +} + +static const struct pci_device_id rtw89_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A }, + {}, +}; +MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table); + +static struct pci_driver rtw89_pci_driver = { + .name = "rtw89_pci", + .id_table = rtw89_pci_id_table, + .probe = rtw89_pci_probe, + .remove = rtw89_pci_remove, + .driver.pm = &rtw89_pm_ops, +}; +module_pci_driver(rtw89_pci_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h new file mode 100644 index 00000000000000..20e6767ea5c4b9 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -0,0 +1,630 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2020 Realtek Corporation + */ + +#ifndef __RTW89_PCI_H__ +#define __RTW89_PCI_H__ + +#include "txrx.h" + +#define MDIO_PG0_G1 0 +#define MDIO_PG1_G1 1 +#define MDIO_PG0_G2 2 +#define MDIO_PG1_G2 3 +#define RAC_ANA10 0x10 +#define RAC_ANA19 0x19 +#define RAC_ANA1F 0x1F +#define RAC_ANA24 0x24 +#define B_AX_DEGLITCH GENMASK(11, 8) +#define RAC_ANA26 0x26 +#define B_AX_RXEN GENMASK(15, 14) +#define RAC_CTRL_PPR_V1 0x30 +#define B_AX_CLK_CALIB_EN BIT(12) +#define B_AX_CALIB_EN BIT(13) +#define B_AX_DIV GENMASK(15, 14) +#define RAC_SET_PPR_V1 0x31 + +#define R_AX_DBI_FLAG 0x1090 +#define B_AX_DBI_RFLAG BIT(17) +#define B_AX_DBI_WFLAG BIT(16) +#define B_AX_DBI_WREN_MSK GENMASK(15, 12) +#define B_AX_DBI_ADDR_MSK GENMASK(11, 2) +#define R_AX_DBI_WDATA 0x1094 +#define R_AX_DBI_RDATA 0x1098 + +#define R_AX_MDIO_WDATA 0x10A4 +#define R_AX_MDIO_RDATA 0x10A6 + +#define RTW89_PCI_WR_RETRY_CNT 20 + +/* Interrupts */ +#define R_AX_HIMR0 0x01A0 +#define B_AX_HALT_C2H_INT_EN BIT(21) +#define R_AX_HISR0 0x01A4 + +#define R_AX_MDIO_CFG 0x10A0 +#define B_AX_MDIO_PHY_ADDR_MASK GENMASK(13, 12) +#define B_AX_MDIO_RFLAG BIT(9) +#define B_AX_MDIO_WFLAG BIT(8) +#define B_AX_MDIO_ADDR_MASK GENMASK(4, 0) + +#define R_AX_PCIE_HIMR00 0x10B0 +#define B_AX_HC00ISR_IND_INT_EN BIT(27) +#define B_AX_HD1ISR_IND_INT_EN BIT(26) +#define B_AX_HD0ISR_IND_INT_EN BIT(25) +#define B_AX_HS0ISR_IND_INT_EN BIT(24) +#define B_AX_RETRAIN_INT_EN BIT(21) +#define B_AX_RPQBD_FULL_INT_EN BIT(20) +#define B_AX_RDU_INT_EN BIT(19) +#define B_AX_RXDMA_STUCK_INT_EN BIT(18) +#define B_AX_TXDMA_STUCK_INT_EN BIT(17) +#define B_AX_PCIE_HOTRST_INT_EN BIT(16) +#define B_AX_PCIE_FLR_INT_EN BIT(15) +#define B_AX_PCIE_PERST_INT_EN BIT(14) +#define B_AX_TXDMA_CH12_INT_EN BIT(13) +#define B_AX_TXDMA_CH9_INT_EN BIT(12) +#define B_AX_TXDMA_CH8_INT_EN BIT(11) +#define B_AX_TXDMA_ACH7_INT_EN BIT(10) +#define B_AX_TXDMA_ACH6_INT_EN BIT(9) +#define B_AX_TXDMA_ACH5_INT_EN BIT(8) +#define B_AX_TXDMA_ACH4_INT_EN BIT(7) +#define B_AX_TXDMA_ACH3_INT_EN BIT(6) +#define B_AX_TXDMA_ACH2_INT_EN BIT(5) +#define B_AX_TXDMA_ACH1_INT_EN BIT(4) +#define B_AX_TXDMA_ACH0_INT_EN BIT(3) +#define B_AX_RPQDMA_INT_EN BIT(2) +#define B_AX_RXP1DMA_INT_EN BIT(1) +#define B_AX_RXDMA_INT_EN BIT(0) + +#define R_AX_PCIE_HISR00 0x10B4 +#define B_AX_HC00ISR_IND_INT BIT(27) +#define B_AX_HD1ISR_IND_INT 
BIT(26) +#define B_AX_HD0ISR_IND_INT BIT(25) +#define B_AX_HS0ISR_IND_INT BIT(24) +#define B_AX_RETRAIN_INT BIT(21) +#define B_AX_RPQBD_FULL_INT BIT(20) +#define B_AX_RDU_INT BIT(19) +#define B_AX_RXDMA_STUCK_INT BIT(18) +#define B_AX_TXDMA_STUCK_INT BIT(17) +#define B_AX_PCIE_HOTRST_INT BIT(16) +#define B_AX_PCIE_FLR_INT BIT(15) +#define B_AX_PCIE_PERST_INT BIT(14) +#define B_AX_TXDMA_CH12_INT BIT(13) +#define B_AX_TXDMA_CH9_INT BIT(12) +#define B_AX_TXDMA_CH8_INT BIT(11) +#define B_AX_TXDMA_ACH7_INT BIT(10) +#define B_AX_TXDMA_ACH6_INT BIT(9) +#define B_AX_TXDMA_ACH5_INT BIT(8) +#define B_AX_TXDMA_ACH4_INT BIT(7) +#define B_AX_TXDMA_ACH3_INT BIT(6) +#define B_AX_TXDMA_ACH2_INT BIT(5) +#define B_AX_TXDMA_ACH1_INT BIT(4) +#define B_AX_TXDMA_ACH0_INT BIT(3) +#define B_AX_RPQDMA_INT BIT(2) +#define B_AX_RXP1DMA_INT BIT(1) +#define B_AX_RXDMA_INT BIT(0) + +#define R_AX_PCIE_HIMR10 0x13B0 +#define B_AX_HC10ISR_IND_INT_EN BIT(28) +#define B_AX_TXDMA_CH11_INT_EN BIT(12) +#define B_AX_TXDMA_CH10_INT_EN BIT(11) + +#define R_AX_PCIE_HISR10 0x13B4 +#define B_AX_HC10ISR_IND_INT BIT(28) +#define B_AX_TXDMA_CH11_INT BIT(12) +#define B_AX_TXDMA_CH10_INT BIT(11) + +/* TX/RX */ +#define R_AX_RXQ_RXBD_IDX 0x1050 +#define R_AX_RPQ_RXBD_IDX 0x1054 +#define R_AX_ACH0_TXBD_IDX 0x1058 +#define R_AX_ACH1_TXBD_IDX 0x105C +#define R_AX_ACH2_TXBD_IDX 0x1060 +#define R_AX_ACH3_TXBD_IDX 0x1064 +#define R_AX_ACH4_TXBD_IDX 0x1068 +#define R_AX_ACH5_TXBD_IDX 0x106C +#define R_AX_ACH6_TXBD_IDX 0x1070 +#define R_AX_ACH7_TXBD_IDX 0x1074 +#define R_AX_CH8_TXBD_IDX 0x1078 /* Management Queue band 0 */ +#define R_AX_CH9_TXBD_IDX 0x107C /* HI Queue band 0 */ +#define R_AX_CH10_TXBD_IDX 0x137C /* Management Queue band 1 */ +#define R_AX_CH11_TXBD_IDX 0x1380 /* HI Queue band 1 */ +#define R_AX_CH12_TXBD_IDX 0x1080 /* FWCMD Queue */ +#define TXBD_HW_IDX_MASK GENMASK(27, 16) +#define TXBD_HOST_IDX_MASK GENMASK(11, 0) + +#define R_AX_ACH0_TXBD_DESA_L 0x1110 +#define R_AX_ACH0_TXBD_DESA_H 0x1114 +#define R_AX_ACH1_TXBD_DESA_L 0x1118 +#define R_AX_ACH1_TXBD_DESA_H 0x111C +#define R_AX_ACH2_TXBD_DESA_L 0x1120 +#define R_AX_ACH2_TXBD_DESA_H 0x1124 +#define R_AX_ACH3_TXBD_DESA_L 0x1128 +#define R_AX_ACH3_TXBD_DESA_H 0x112C +#define R_AX_ACH4_TXBD_DESA_L 0x1130 +#define R_AX_ACH4_TXBD_DESA_H 0x1134 +#define R_AX_ACH5_TXBD_DESA_L 0x1138 +#define R_AX_ACH5_TXBD_DESA_H 0x113C +#define R_AX_ACH6_TXBD_DESA_L 0x1140 +#define R_AX_ACH6_TXBD_DESA_H 0x1144 +#define R_AX_ACH7_TXBD_DESA_L 0x1148 +#define R_AX_ACH7_TXBD_DESA_H 0x114C +#define R_AX_CH8_TXBD_DESA_L 0x1150 +#define R_AX_CH8_TXBD_DESA_H 0x1154 +#define R_AX_CH9_TXBD_DESA_L 0x1158 +#define R_AX_CH9_TXBD_DESA_H 0x115C +#define R_AX_CH10_TXBD_DESA_L 0x1358 +#define R_AX_CH10_TXBD_DESA_H 0x135C +#define R_AX_CH11_TXBD_DESA_L 0x1360 +#define R_AX_CH11_TXBD_DESA_H 0x1364 +#define R_AX_CH12_TXBD_DESA_L 0x1160 +#define R_AX_CH12_TXBD_DESA_H 0x1164 +#define R_AX_RXQ_RXBD_DESA_L 0x1100 +#define R_AX_RXQ_RXBD_DESA_H 0x1104 +#define R_AX_RPQ_RXBD_DESA_L 0x1108 +#define R_AX_RPQ_RXBD_DESA_H 0x110C +#define B_AX_DESC_NUM_MSK GENMASK(11, 0) + +#define R_AX_RXQ_RXBD_NUM 0x1020 +#define R_AX_RPQ_RXBD_NUM 0x1022 +#define R_AX_ACH0_TXBD_NUM 0x1024 +#define R_AX_ACH1_TXBD_NUM 0x1026 +#define R_AX_ACH2_TXBD_NUM 0x1028 +#define R_AX_ACH3_TXBD_NUM 0x102A +#define R_AX_ACH4_TXBD_NUM 0x102C +#define R_AX_ACH5_TXBD_NUM 0x102E +#define R_AX_ACH6_TXBD_NUM 0x1030 +#define R_AX_ACH7_TXBD_NUM 0x1032 +#define R_AX_CH8_TXBD_NUM 0x1034 +#define R_AX_CH9_TXBD_NUM 0x1036 +#define R_AX_CH10_TXBD_NUM 0x1338 +#define 
R_AX_CH11_TXBD_NUM 0x133A +#define R_AX_CH12_TXBD_NUM 0x1038 + +#define R_AX_ACH0_BDRAM_CTRL 0x1200 +#define R_AX_ACH1_BDRAM_CTRL 0x1204 +#define R_AX_ACH2_BDRAM_CTRL 0x1208 +#define R_AX_ACH3_BDRAM_CTRL 0x120C +#define R_AX_ACH4_BDRAM_CTRL 0x1210 +#define R_AX_ACH5_BDRAM_CTRL 0x1214 +#define R_AX_ACH6_BDRAM_CTRL 0x1218 +#define R_AX_ACH7_BDRAM_CTRL 0x121C +#define R_AX_CH8_BDRAM_CTRL 0x1220 +#define R_AX_CH9_BDRAM_CTRL 0x1224 +#define R_AX_CH10_BDRAM_CTRL 0x1320 +#define R_AX_CH11_BDRAM_CTRL 0x1324 +#define R_AX_CH12_BDRAM_CTRL 0x1228 +#define BDRAM_SIDX_MASK GENMASK(7, 0) +#define BDRAM_MAX_MASK GENMASK(15, 8) +#define BDRAM_MIN_MASK GENMASK(23, 16) + +#define R_AX_PCIE_INIT_CFG1 0x1000 +#define B_AX_PCIE_RXRST_KEEP_REG BIT(23) +#define B_AX_PCIE_TXRST_KEEP_REG BIT(22) +#define B_AX_PCIE_PERST_KEEP_REG BIT(21) +#define B_AX_PCIE_FLR_KEEP_REG BIT(20) +#define B_AX_PCIE_TRAIN_KEEP_REG BIT(19) +#define B_AX_RXBD_MODE BIT(18) +#define B_AX_PCIE_MAX_RXDMA_MASK GENMASK(16, 14) +#define B_AX_RXHCI_EN BIT(13) +#define B_AX_LATENCY_CONTROL BIT(12) +#define B_AX_TXHCI_EN BIT(11) +#define B_AX_PCIE_MAX_TXDMA_MASK GENMASK(10, 8) +#define B_AX_TX_TRUNC_MODE BIT(5) +#define B_AX_RX_TRUNC_MODE BIT(4) +#define B_AX_RST_BDRAM BIT(3) +#define B_AX_DIS_RXDMA_PRE BIT(2) + +#define R_AX_TXDMA_ADDR_H 0x10F0 +#define R_AX_RXDMA_ADDR_H 0x10F4 + +#define R_AX_PCIE_DMA_STOP1 0x1010 +#define B_AX_STOP_PCIEIO BIT(20) +#define B_AX_STOP_WPDMA BIT(19) +#define B_AX_STOP_CH12 BIT(18) +#define B_AX_STOP_CH9 BIT(17) +#define B_AX_STOP_CH8 BIT(16) +#define B_AX_STOP_ACH7 BIT(15) +#define B_AX_STOP_ACH6 BIT(14) +#define B_AX_STOP_ACH5 BIT(13) +#define B_AX_STOP_ACH4 BIT(12) +#define B_AX_STOP_ACH3 BIT(11) +#define B_AX_STOP_ACH2 BIT(10) +#define B_AX_STOP_ACH1 BIT(9) +#define B_AX_STOP_ACH0 BIT(8) +#define B_AX_STOP_RPQ BIT(1) +#define B_AX_STOP_RXQ BIT(0) +#define B_AX_TX_STOP1_ALL GENMASK(18, 8) + +#define R_AX_PCIE_DMA_STOP2 0x1310 +#define B_AX_STOP_CH11 BIT(1) +#define B_AX_STOP_CH10 BIT(0) +#define B_AX_TX_STOP2_ALL GENMASK(1, 0) + +#define R_AX_TXBD_RWPTR_CLR1 0x1014 +#define B_AX_CLR_CH12_IDX BIT(10) +#define B_AX_CLR_CH9_IDX BIT(9) +#define B_AX_CLR_CH8_IDX BIT(8) +#define B_AX_CLR_ACH7_IDX BIT(7) +#define B_AX_CLR_ACH6_IDX BIT(6) +#define B_AX_CLR_ACH5_IDX BIT(5) +#define B_AX_CLR_ACH4_IDX BIT(4) +#define B_AX_CLR_ACH3_IDX BIT(3) +#define B_AX_CLR_ACH2_IDX BIT(2) +#define B_AX_CLR_ACH1_IDX BIT(1) +#define B_AX_CLR_ACH0_IDX BIT(0) +#define B_AX_TXBD_CLR1_ALL GENMASK(10, 0) + +#define R_AX_RXBD_RWPTR_CLR 0x1018 +#define B_AX_CLR_RPQ_IDX BIT(1) +#define B_AX_CLR_RXQ_IDX BIT(0) +#define B_AX_RXBD_CLR_ALL GENMASK(1, 0) + +#define R_AX_TXBD_RWPTR_CLR2 0x1314 +#define B_AX_CLR_CH11_IDX BIT(1) +#define B_AX_CLR_CH10_IDX BIT(0) +#define B_AX_TXBD_CLR2_ALL GENMASK(1, 0) + +#define R_AX_PCIE_DMA_BUSY1 0x101C +#define B_AX_PCIEIO_RX_BUSY BIT(22) +#define B_AX_PCIEIO_TX_BUSY BIT(21) +#define B_AX_PCIEIO_BUSY BIT(20) +#define B_AX_WPDMA_BUSY BIT(19) + +#define R_AX_PCIE_DMA_BUSY2 0x131C +#define B_AX_CH11_BUSY BIT(1) +#define B_AX_CH10_BUSY BIT(0) + +/* Configure */ +#define R_AX_PCIE_INIT_CFG2 0x1004 +#define B_AX_WD_ITVL_IDLE GENMASK(27, 24) +#define B_AX_WD_ITVL_ACT GENMASK(19, 16) + +#define R_AX_PCIE_PS_CTRL 0x1008 +#define B_AX_L1OFF_PWR_OFF_EN BIT(5) + +#define R_AX_INT_MIT_RX 0x10D4 +#define B_AX_RXMIT_RXP2_SEL BIT(19) +#define B_AX_RXMIT_RXP1_SEL BIT(18) +#define B_AX_RXTIMER_UNIT_MASK GENMASK(17, 16) +#define AX_RXTIMER_UNIT_64US 0 +#define AX_RXTIMER_UNIT_128US 1 +#define AX_RXTIMER_UNIT_256US 2 +#define 
AX_RXTIMER_UNIT_512US 3 +#define B_AX_RXCOUNTER_MATCH_MASK GENMASK(15, 8) +#define B_AX_RXTIMER_MATCH_MASK GENMASK(7, 0) + +#define R_AX_DBG_ERR_FLAG 0x11C4 +#define B_AX_PCIE_RPQ_FULL BIT(29) +#define B_AX_PCIE_RXQ_FULL BIT(28) +#define B_AX_CPL_STATUS_MASK GENMASK(27, 25) +#define B_AX_RX_STUCK BIT(22) +#define B_AX_TX_STUCK BIT(21) +#define B_AX_PCIEDBG_TXERR0 BIT(16) +#define B_AX_PCIE_RXP1_ERR0 BIT(4) +#define B_AX_PCIE_TXBD_LEN0 BIT(1) +#define B_AX_PCIE_TXBD_4KBOUD_LENERR BIT(0) + +#define R_AX_LBC_WATCHDOG 0x11D8 +#define B_AX_LBC_TIMER GENMASK(7, 4) +#define B_AX_LBC_FLAG BIT(1) +#define B_AX_LBC_EN BIT(0) + +#define R_AX_PCIE_EXP_CTRL 0x13F0 +#define B_AX_EN_CHKDSC_NO_RX_STUCK BIT(20) +#define B_AX_MAX_TAG_NUM GENMASK(18, 16) +#define B_AX_SIC_EN_FORCE_CLKREQ BIT(4) + +#define R_AX_PCIE_RX_PREF_ADV 0x13F4 +#define B_AX_RXDMA_PREF_ADV_EN BIT(0) + +#define RTW89_PCI_TXBD_NUM_MAX 256 +#define RTW89_PCI_RXBD_NUM_MAX 256 +#define RTW89_PCI_TXWD_NUM_MAX 512 +#define RTW89_PCI_TXWD_PAGE_SIZE 128 +#define RTW89_PCI_ADDRINFO_MAX 4 +#define RTW89_PCI_RX_BUF_SIZE 11460 + +#define RTW89_PCI_POLL_BDRAM_RST_CNT 100 +#define RTW89_PCI_MULTITAG 8 + +/* PCIE CFG register */ +#define RTW89_PCIE_ASPM_CTRL 0x070F +#define RTW89_L1DLY_MASK GENMASK(5, 3) +#define RTW89_L0DLY_MASK GENMASK(2, 0) +#define RTW89_PCIE_TIMER_CTRL 0x0718 +#define RTW89_PCIE_BIT_L1SUB BIT(5) +#define RTW89_PCIE_L1_CTRL 0x0719 +#define RTW89_PCIE_BIT_CLK BIT(4) +#define RTW89_PCIE_BIT_L1 BIT(3) +#define RTW89_PCIE_CLK_CTRL 0x0725 +#define RTW89_PCIE_RST_MSTATE 0x0B48 +#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0) +#define RTW89_PCIE_PHY_RATE 0x82 +#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0) +#define INTF_INTGRA_MINREF_V1 90 +#define INTF_INTGRA_HOSTREF_V1 100 + +enum rtw89_pcie_phy { + PCIE_PHY_GEN1, + PCIE_PHY_GEN2, + PCIE_PHY_GEN1_UNDEFINE = 0x7F, +}; + +enum mac_ax_func_sw { + MAC_AX_FUNC_DIS, + MAC_AX_FUNC_EN, +}; + +enum rtw89_pcie_l0sdly { + PCIE_L0SDLY_1US = 0, + PCIE_L0SDLY_2US = 1, + PCIE_L0SDLY_3US = 2, + PCIE_L0SDLY_4US = 3, + PCIE_L0SDLY_5US = 4, + PCIE_L0SDLY_6US = 5, + PCIE_L0SDLY_7US = 6, +}; + +enum rtw89_pcie_l1dly { + PCIE_L1DLY_16US = 4, + PCIE_L1DLY_32US = 5, + PCIE_L1DLY_64US = 6, + PCIE_L1DLY_HW_INFI = 7, +}; + +enum rtw89_pcie_clkdly_hw { + PCIE_CLKDLY_HW_0 = 0, + PCIE_CLKDLY_HW_30US = 0x1, + PCIE_CLKDLY_HW_50US = 0x2, + PCIE_CLKDLY_HW_100US = 0x3, + PCIE_CLKDLY_HW_150US = 0x4, + PCIE_CLKDLY_HW_200US = 0x5, +}; + +struct rtw89_pci_bd_ram { + u8 start_idx; + u8 max_num; + u8 min_num; +}; + +struct rtw89_pci_tx_data { + dma_addr_t dma; +}; + +struct rtw89_pci_rx_info { + dma_addr_t dma; + u32 fs:1, ls:1, tag:11, len:14; +}; + +#define RTW89_PCI_TXBD_OPTION_LS BIT(14) + +struct rtw89_pci_tx_bd_32 { + __le16 length; + __le16 option; + __le32 dma; +} __packed; + +#define RTW89_PCI_TXWP_VALID BIT(15) + +struct rtw89_pci_tx_wp_info { + __le16 seq0; + __le16 seq1; + __le16 seq2; + __le16 seq3; +} __packed; + +#define RTW89_PCI_ADDR_MSDU_LS BIT(15) +#define RTW89_PCI_ADDR_LS BIT(14) +#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6)) +#define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0)) + +struct rtw89_pci_tx_addr_info_32 { + __le16 length; + __le16 option; + __le32 dma; +} __packed; + +#define RTW89_PCI_RPP_POLLUTED BIT(31) +#define RTW89_PCI_RPP_SEQ GENMASK(30, 16) +#define RTW89_PCI_RPP_TX_STATUS GENMASK(15, 13) +#define RTW89_TX_DONE 0x0 +#define RTW89_TX_RETRY_LIMIT 0x1 +#define RTW89_TX_LIFE_TIME 0x2 +#define RTW89_TX_MACID_DROP 0x3 +#define RTW89_PCI_RPP_QSEL GENMASK(12, 8) +#define 
RTW89_PCI_RPP_MACID GENMASK(7, 0) + +struct rtw89_pci_rpp_fmt { + __le32 dword; +} __packed; + +struct rtw89_pci_rx_bd_32 { + __le16 buf_size; + __le16 rsvd; + __le32 dma; +} __packed; + +#define RTW89_PCI_RXBD_FS BIT(15) +#define RTW89_PCI_RXBD_LS BIT(14) +#define RTW89_PCI_RXBD_WRITE_SIZE GENMASK(13, 0) +#define RTW89_PCI_RXBD_TAG GENMASK(28, 16) + +struct rtw89_pci_rxbd_info { + __le32 dword; +}; + +struct rtw89_pci_tx_wd { + struct list_head list; + struct sk_buff_head queue; + + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 seq; +}; + +struct rtw89_pci_dma_ring { + void *head; + u8 desc_size; + dma_addr_t dma; + + u32 addr_num; + u32 addr_idx; + u32 addr_bdram; + u32 addr_desa_l; + u32 addr_desa_h; + + u32 len; + u32 wp; /* host idx */ + u32 rp; /* hw idx */ +}; + +struct rtw89_pci_tx_wd_ring { + void *head; + dma_addr_t dma; + + struct rtw89_pci_tx_wd pages[RTW89_PCI_TXWD_NUM_MAX]; + struct list_head free_pages; + + u32 page_size; + u32 page_num; + u32 curr_num; +}; + +#define RTW89_RX_TAG_MAX 0x1fff + +struct rtw89_pci_tx_ring { + struct rtw89_pci_tx_wd_ring wd_ring; + struct rtw89_pci_dma_ring bd_ring; + struct list_head busy_pages; + u8 txch; + bool dma_enabled; + u16 tag; /* range from 0x0001 ~ 0x1fff */ + + u64 tx_cnt; + u64 tx_acked; + u64 tx_retry_lmt; + u64 tx_life_time; + u64 tx_mac_id_drop; +}; + +struct rtw89_pci_rx_ring { + struct rtw89_pci_dma_ring bd_ring; + struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX]; + u32 buf_sz; + struct sk_buff *diliver_skb; + struct rtw89_rx_desc_info diliver_desc; +}; + +struct rtw89_pci_isrs { + u32 halt_c2h_isrs; + u32 isrs[2]; +}; + +struct rtw89_pci { + struct pci_dev *pdev; + + /* protect HW irq related registers */ + spinlock_t irq_lock; + /* protect TRX resources (exclude RXQ) */ + spinlock_t trx_lock; + bool running; + struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM]; + struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; + struct sk_buff_head h2c_queue; + struct sk_buff_head h2c_release_queue; + + u32 halt_c2h_intrs; + u32 intrs[2]; + void __iomem *mmap; +}; + +static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) > + sizeof(info->status.status_driver_data)); + + return (struct rtw89_pci_rx_info *)skb->cb; +} + +static inline struct rtw89_pci_rx_bd_32 * +RTW89_PCI_RX_BD(struct rtw89_pci_rx_ring *rx_ring, u32 idx) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + u8 *head = bd_ring->head; + u32 desc_size = bd_ring->desc_size; + u32 offset = idx * desc_size; + + return (struct rtw89_pci_rx_bd_32 *)(head + offset); +} + +static inline void +rtw89_pci_rxbd_increase(struct rtw89_pci_rx_ring *rx_ring, u32 cnt) +{ + struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; + + bd_ring->wp += cnt; + + if (bd_ring->wp >= bd_ring->len) + bd_ring->wp -= bd_ring->len; +} + +static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + return (struct rtw89_pci_tx_data *)info->status.status_driver_data; +} + +static inline struct rtw89_pci_tx_bd_32 * +rtw89_pci_get_next_txbd(struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; + struct rtw89_pci_tx_bd_32 *tx_bd, *head; + + head = bd_ring->head; + tx_bd = head + bd_ring->wp; + + return tx_bd; +} + +static inline struct rtw89_pci_tx_wd * +rtw89_pci_dequeue_txwd(struct rtw89_pci_tx_ring *tx_ring) +{ + struct rtw89_pci_tx_wd_ring 
*wd_ring = &tx_ring->wd_ring; + struct rtw89_pci_tx_wd *txwd; + + txwd = list_first_entry_or_null(&wd_ring->free_pages, + struct rtw89_pci_tx_wd, list); + if (!txwd) + return NULL; + + list_del_init(&txwd->list); + txwd->len = 0; + wd_ring->curr_num--; + + return txwd; +} + +static inline void +rtw89_pci_enqueue_txwd(struct rtw89_pci_tx_ring *tx_ring, + struct rtw89_pci_tx_wd *txwd) +{ + struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; + + memset(txwd->vaddr, 0, wd_ring->page_size); + list_add_tail(&txwd->list, &wd_ring->free_pages); + wd_ring->curr_num++; +} + +static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val) +{ + return val == 0xffffffff || val == 0xeaeaeaea; +} + +extern const struct dev_pm_ops rtw89_pm_ops; + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c new file mode 100644 index 00000000000000..ab134856baac73 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -0,0 +1,2868 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "debug.h" +#include "fw.h" +#include "phy.h" +#include "ps.h" +#include "reg.h" +#include "sar.h" +#include "coex.h" + +static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev, + const struct rtw89_ra_report *report) +{ + const struct rate_info *txrate = &report->txrate; + u32 bit_rate = report->bit_rate; + u8 mcs; + + /* lower than ofdm, do not aggregate */ + if (bit_rate < 550) + return 1; + + /* prevent hardware rate fallback to G mode rate */ + if (txrate->flags & RATE_INFO_FLAGS_MCS) + mcs = txrate->mcs & 0x07; + else if (txrate->flags & (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS)) + mcs = txrate->mcs; + else + mcs = 0; + + if (mcs <= 2) + return 1; + + /* lower than 20M vht 2ss mcs8, make it small */ + if (bit_rate < 1800) + return 1200; + + /* lower than 40M vht 2ss mcs9, make it medium */ + if (bit_rate < 4000) + return 2600; + + /* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */ + if (bit_rate < 7000) + return 3500; + + return rtwdev->chip->max_amsdu_limit; +} + +static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap) +{ + u64 ra_mask = 0; + u8 mcs_cap; + int i, nss; + + for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) { + mcs_cap = mcs_map & 0x3; + switch (mcs_cap) { + case 2: + ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss; + break; + case 1: + ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss; + break; + case 0: + ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss; + break; + default: + break; + } + } + + return ra_mask; +} + +static u64 get_he_ra_mask(struct ieee80211_sta *sta) +{ + struct ieee80211_sta_he_cap cap = sta->he_cap; + u16 mcs_map; + + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + if (cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80); + else + mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160); + break; + default: + mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80); + } + + /* MCS11, MCS9, MCS7 */ + return get_mcs_ra_mask(mcs_map, 11, 2); +} + +#define RA_FLOOR_TABLE_SIZE 7 +#define RA_FLOOR_UP_GAP 3 +static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi, + u8 ratr_state) +{ + u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100}; + u8 rssi_lv = 0; + u8 i; + + rssi >>= 1; + for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) { + if (i >= ratr_state) + rssi_lv_t[i] += RA_FLOOR_UP_GAP; + if (rssi < 
rssi_lv_t[i]) { + rssi_lv = i; + break; + } + } + if (rssi_lv == 0) + return 0xffffffffffffffffULL; + else if (rssi_lv == 1) + return 0xfffffffffffffff0ULL; + else if (rssi_lv == 2) + return 0xffffffffffffffe0ULL; + else if (rssi_lv == 3) + return 0xffffffffffffffc0ULL; + else if (rssi_lv == 4) + return 0xffffffffffffff80ULL; + else if (rssi_lv >= 5) + return 0xffffffffffffff00ULL; + + return 0xffffffffffffffffULL; +} + +static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); + struct cfg80211_bitrate_mask *mask = &rtwsta->mask; + enum nl80211_band band; + u64 cfg_mask; + + if (!rtwsta->use_cfg_mask) + return -1; + + switch (hal->current_band_type) { + case RTW89_BAND_2G: + band = NL80211_BAND_2GHZ; + cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy, + RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES); + break; + case RTW89_BAND_5G: + band = NL80211_BAND_5GHZ; + cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy, + RA_MASK_OFDM_RATES); + break; + default: + rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type); + return -1; + } + + if (sta->he_cap.has_he) { + cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0], + RA_MASK_HE_1SS_RATES); + cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1], + RA_MASK_HE_2SS_RATES); + } else if (sta->vht_cap.vht_supported) { + cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], + RA_MASK_VHT_1SS_RATES); + cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], + RA_MASK_VHT_2SS_RATES); + } else if (sta->ht_cap.ht_supported) { + cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], + RA_MASK_HT_1SS_RATES); + cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], + RA_MASK_HT_2SS_RATES); + } + + return cfg_mask; +} + +static const u64 +rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES, + RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES}; +static const u64 +rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES, + RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES}; +static const u64 +rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES, + RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES}; + +static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, + struct ieee80211_sta *sta, bool csi) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; + struct rtw89_ra_info *ra = &rtwsta->ra; + const u64 *high_rate_masks = rtw89_ra_mask_ht_rates; + u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi); + u64 high_rate_mask = 0; + u64 ra_mask = 0; + u8 mode = 0; + u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY; + u8 bw_mode = 0; + u8 stbc_en = 0; + u8 ldpc_en = 0; + u8 i; + bool sgi = false; + + memset(ra, 0, sizeof(*ra)); + /* Set the ra mask from sta's capability */ + if (sta->he_cap.has_he) { + mode |= RTW89_RA_MODE_HE; + csi_mode = RTW89_RA_RPT_MODE_HE; + ra_mask |= get_he_ra_mask(sta); + high_rate_masks = rtw89_ra_mask_he_rates; + if (sta->he_cap.he_cap_elem.phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + stbc_en = 1; + if (sta->he_cap.he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) + ldpc_en = 1; + } else if (sta->vht_cap.vht_supported) { + u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + + mode |= RTW89_RA_MODE_VHT; + csi_mode = RTW89_RA_RPT_MODE_VHT; + /* 
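+                 * each 2-bit entry in the VHT rx_mcs_map gives the highest
+                 * usable MCS per spatial stream; support values 2/1/0 map to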
MCS9, MCS8, MCS7 */ + ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1); + high_rate_masks = rtw89_ra_mask_vht_rates; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + stbc_en = 1; + if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) + ldpc_en = 1; + } else if (sta->ht_cap.ht_supported) { + mode |= RTW89_RA_MODE_HT; + csi_mode = RTW89_RA_RPT_MODE_HT; + ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) | + ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) | + (sta->ht_cap.mcs.rx_mask[1] << 24) | + (sta->ht_cap.mcs.rx_mask[0] << 12); + high_rate_masks = rtw89_ra_mask_ht_rates; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + stbc_en = 1; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + ldpc_en = 1; + } + + if (rtwdev->hal.current_band_type == RTW89_BAND_2G) { + if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf) + mode |= RTW89_RA_MODE_CCK; + else + mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM; + } else { + mode |= RTW89_RA_MODE_OFDM; + } + + if (mode >= RTW89_RA_MODE_HT) { + for (i = 0; i < rtwdev->hal.tx_nss; i++) + high_rate_mask |= high_rate_masks[i]; + ra_mask &= high_rate_mask; + if (mode & RTW89_RA_MODE_OFDM) + ra_mask |= RA_MASK_SUBOFDM_RATES; + if (mode & RTW89_RA_MODE_CCK) + ra_mask |= RA_MASK_SUBCCK_RATES; + } else if (mode & RTW89_RA_MODE_OFDM) { + if (mode & RTW89_RA_MODE_CCK) + ra_mask |= RA_MASK_SUBCCK_RATES; + ra_mask |= RA_MASK_OFDM_RATES; + } else { + ra_mask = RA_MASK_CCK_RATES; + } + + if (mode != RTW89_RA_MODE_CCK) { + ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0); + ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + } + + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_80: + bw_mode = RTW89_CHANNEL_WIDTH_80; + sgi = sta->vht_cap.vht_supported && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + break; + case IEEE80211_STA_RX_BW_40: + bw_mode = RTW89_CHANNEL_WIDTH_40; + sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + break; + default: + bw_mode = RTW89_CHANNEL_WIDTH_20; + sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + break; + } + + if (sta->he_cap.he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM) + ra->dcm_cap = 1; + + if (rate_pattern->enable) { + ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + ra_mask &= rate_pattern->ra_mask; + mode = rate_pattern->ra_mode; + } + + ra->bw_cap = bw_mode; + ra->mode_ctrl = mode; + ra->macid = rtwsta->mac_id; + ra->stbc_cap = stbc_en; + ra->ldpc_cap = ldpc_en; + ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1; + ra->en_sgi = sgi; + ra->ra_mask = ra_mask; + + if (!csi) + return; + + ra->fixed_csi_rate_en = false; + ra->ra_csi_rate_en = true; + ra->cr_tbl_sel = false; + ra->band_num = rtwvif->phy_idx; + ra->csi_bw = bw_mode; + ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32; + ra->csi_mcs_ss_idx = 5; + ra->csi_mode = csi_mode; +} + +void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_ra_info *ra = &rtwsta->ra; + + rtw89_phy_ra_sta_update(rtwdev, sta, false); + ra->upd_mask = 1; + rtw89_debug(rtwdev, RTW89_DBG_RA, + "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d", + ra->macid, + ra->bw_cap, + ra->ss_num, + ra->en_sgi, + ra->giltf); + + rtw89_fw_h2c_ra(rtwdev, ra, false); +} + +static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next, + u16 rate_base, u64 ra_mask, u8 ra_mode, + u32 rate_ctrl, u32 ctrl_skip, bool force) +{ + u8 n, c; + + if (rate_ctrl == ctrl_skip) + return true; + + n = 
hweight32(rate_ctrl);
+        if (n == 0)
+                return true;
+
+        if (force && n != 1)
+                return false;
+
+        if (next->enable)
+                return false;
+
+        c = __fls(rate_ctrl);
+        next->rate = rate_base + c;
+        next->ra_mode = ra_mode;
+        next->ra_mask = ra_mask;
+        next->enable = true;
+
+        return true;
+}
+
+void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+                                struct ieee80211_vif *vif,
+                                const struct cfg80211_bitrate_mask *mask)
+{
+        struct ieee80211_supported_band *sband;
+        struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+        struct rtw89_phy_rate_pattern next_pattern = {0};
+        static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0,
+                                         RTW89_HW_RATE_HE_NSS2_MCS0,
+                                         RTW89_HW_RATE_HE_NSS3_MCS0,
+                                         RTW89_HW_RATE_HE_NSS4_MCS0};
+        static const u16 hw_rate_vht[] = {RTW89_HW_RATE_VHT_NSS1_MCS0,
+                                          RTW89_HW_RATE_VHT_NSS2_MCS0,
+                                          RTW89_HW_RATE_VHT_NSS3_MCS0,
+                                          RTW89_HW_RATE_VHT_NSS4_MCS0};
+        static const u16 hw_rate_ht[] = {RTW89_HW_RATE_MCS0,
+                                         RTW89_HW_RATE_MCS8,
+                                         RTW89_HW_RATE_MCS16,
+                                         RTW89_HW_RATE_MCS24};
+        u8 band = rtwdev->hal.current_band_type;
+        u8 tx_nss = rtwdev->hal.tx_nss;
+        u8 i;
+
+        for (i = 0; i < tx_nss; i++)
+                if (!__check_rate_pattern(&next_pattern, hw_rate_he[i],
+                                          RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
+                                          mask->control[band].he_mcs[i],
+                                          0, true))
+                        goto out;
+
+        for (i = 0; i < tx_nss; i++)
+                if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i],
+                                          RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
+                                          mask->control[band].vht_mcs[i],
+                                          0, true))
+                        goto out;
+
+        for (i = 0; i < tx_nss; i++)
+                if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i],
+                                          RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
+                                          mask->control[band].ht_mcs[i],
+                                          0, true))
+                        goto out;
+
+        /* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
+         * requires at least one basic rate for ieee80211_set_bitrate_mask,
+         * so the decision just depends on whether all bitrates are set.
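+         * For example, if the sband registers 12 legacy bitrates, ctrl_skip
+         * is BIT(12) - 1 = 0xfff, so a legacy mask with every bit set is
+         * treated as "no restriction" and no fixed rate pattern is programmed.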
+ */ + sband = rtwdev->hw->wiphy->bands[band]; + if (band == RTW89_BAND_2G) { + if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1, + RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES, + RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM, + mask->control[band].legacy, + BIT(sband->n_bitrates) - 1, false)) + goto out; + } else { + if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6, + RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM, + mask->control[band].legacy, + BIT(sband->n_bitrates) - 1, false)) + goto out; + } + + if (!next_pattern.enable) + goto out; + + rtwvif->rate_pattern = next_pattern; + rtw89_debug(rtwdev, RTW89_DBG_RA, + "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n", + next_pattern.rate, + next_pattern.ra_mask, + next_pattern.ra_mode); + return; + +out: + rtwvif->rate_pattern.enable = false; + rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n"); +} + +static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; + + rtw89_phy_ra_updata_sta(rtwdev, sta); +} + +void rtw89_phy_ra_update(struct rtw89_dev *rtwdev) +{ + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_phy_ra_updata_sta_iter, + rtwdev); +} + +void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_ra_info *ra = &rtwsta->ra; + u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR; + bool csi = rtw89_sta_has_beamformer_cap(sta); + + rtw89_phy_ra_sta_update(rtwdev, sta, csi); + + if (rssi > 40) + ra->init_rate_lv = 1; + else if (rssi > 20) + ra->init_rate_lv = 2; + else if (rssi > 1) + ra->init_rate_lv = 3; + else + ra->init_rate_lv = 0; + ra->upd_all = 1; + rtw89_debug(rtwdev, RTW89_DBG_RA, + "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d", + ra->macid, + ra->mode_ctrl, + ra->bw_cap, + ra->ss_num, + ra->init_rate_lv); + rtw89_debug(rtwdev, RTW89_DBG_RA, + "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d", + ra->dcm_cap, + ra->er_cap, + ra->ldpc_cap, + ra->stbc_cap, + ra->en_sgi, + ra->giltf); + + rtw89_fw_h2c_ra(rtwdev, ra, csi); +} + +u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_bandwidth dbw) +{ + enum rtw89_bandwidth cbw = param->bandwidth; + u8 pri_ch = param->primary_chan; + u8 central_ch = param->center_chan; + u8 txsc_idx = 0; + u8 tmp = 0; + + if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20) + return txsc_idx; + + switch (cbw) { + case RTW89_CHANNEL_WIDTH_40: + txsc_idx = pri_ch > central_ch ? 1 : 2; + break; + case RTW89_CHANNEL_WIDTH_80: + if (dbw == RTW89_CHANNEL_WIDTH_20) { + if (pri_ch > central_ch) + txsc_idx = (pri_ch - central_ch) >> 1; + else + txsc_idx = ((central_ch - pri_ch) >> 1) + 1; + } else { + txsc_idx = pri_ch > central_ch ? 9 : 10; + } + break; + case RTW89_CHANNEL_WIDTH_160: + if (pri_ch > central_ch) + tmp = (pri_ch - central_ch) >> 1; + else + tmp = ((central_ch - pri_ch) >> 1) + 1; + + if (dbw == RTW89_CHANNEL_WIDTH_20) { + txsc_idx = tmp; + } else if (dbw == RTW89_CHANNEL_WIDTH_40) { + if (tmp == 1 || tmp == 3) + txsc_idx = 9; + else if (tmp == 5 || tmp == 7) + txsc_idx = 11; + else if (tmp == 2 || tmp == 4) + txsc_idx = 10; + else if (tmp == 6 || tmp == 8) + txsc_idx = 12; + else + return 0xff; + } else { + txsc_idx = pri_ch > central_ch ? 
13 : 14; + } + break; + case RTW89_CHANNEL_WIDTH_80_80: + if (dbw == RTW89_CHANNEL_WIDTH_20) { + if (pri_ch > central_ch) + txsc_idx = (10 - (pri_ch - central_ch)) >> 1; + else + txsc_idx = ((central_ch - pri_ch) >> 1) + 5; + } else if (dbw == RTW89_CHANNEL_WIDTH_40) { + txsc_idx = pri_ch > central_ch ? 10 : 12; + } else { + txsc_idx = 14; + } + break; + default: + break; + } + + return txsc_idx; +} + +u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const u32 *base_addr = chip->rf_base_addr; + u32 val, direct_addr; + + if (rf_path >= rtwdev->chip->rf_path_num) { + rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); + return INV_RF_DATA; + } + + addr &= 0xff; + direct_addr = base_addr[rf_path] + (addr << 2); + mask &= RFREG_MASK; + + val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask); + + return val; +} +EXPORT_SYMBOL(rtw89_phy_read_rf); + +bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask, u32 data) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const u32 *base_addr = chip->rf_base_addr; + u32 direct_addr; + + if (rf_path >= rtwdev->chip->rf_path_num) { + rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); + return false; + } + + addr &= 0xff; + direct_addr = base_addr[rf_path] + (addr << 2); + mask &= RFREG_MASK; + + rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data); + + /* delay to ensure writing properly */ + udelay(1); + + return true; +} +EXPORT_SYMBOL(rtw89_phy_write_rf); + +static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + chip->ops->bb_reset(rtwdev, phy_idx); +} + +static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev, + const struct rtw89_reg2_def *reg, + enum rtw89_rf_path rf_path, + void *extra_data) +{ + if (reg->addr == 0xfe) + mdelay(50); + else if (reg->addr == 0xfd) + mdelay(5); + else if (reg->addr == 0xfc) + mdelay(1); + else if (reg->addr == 0xfb) + udelay(50); + else if (reg->addr == 0xfa) + udelay(5); + else if (reg->addr == 0xf9) + udelay(1); + else + rtw89_phy_write32(rtwdev, reg->addr, reg->data); +} + +static void +rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, + const struct rtw89_reg2_def *reg, + enum rtw89_rf_path rf_path, + struct rtw89_fw_h2c_rf_reg_info *info) +{ + u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE; + u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; + + info->rtw89_phy_config_rf_h2c[page][idx] = + cpu_to_le32((reg->addr << 20) | reg->data); + info->curr_idx++; +} + +static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev, + struct rtw89_fw_h2c_rf_reg_info *info) +{ + u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; + u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4; + u8 i; + int ret = 0; + + if (page > RTW89_H2C_RF_PAGE_NUM) { + rtw89_warn(rtwdev, + "rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n", + page, RTW89_H2C_RF_PAGE_NUM); + return -EINVAL; + } + + for (i = 0; i < page; i++) { + ret = rtw89_fw_h2c_rf_reg(rtwdev, info, + RTW89_H2C_RF_PAGE_SIZE * 4, i); + if (ret) + return ret; + } + ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i); + if (ret) + return ret; + info->curr_idx = 0; + + return 0; +} + +static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev, + const struct rtw89_reg2_def *reg, + enum rtw89_rf_path rf_path, + void *extra_data) +{ + if (reg->addr == 0xfe) { + mdelay(50); + } else if 
(reg->addr == 0xfd) { + mdelay(5); + } else if (reg->addr == 0xfc) { + mdelay(1); + } else if (reg->addr == 0xfb) { + udelay(50); + } else if (reg->addr == 0xfa) { + udelay(5); + } else if (reg->addr == 0xf9) { + udelay(1); + } else { + rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data); + rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, + (struct rtw89_fw_h2c_rf_reg_info *)extra_data); + } +} + +static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev, + const struct rtw89_phy_table *table, + u32 *headline_size, u32 *headline_idx, + u8 rfe, u8 cv) +{ + const struct rtw89_reg2_def *reg; + u32 headline; + u32 compare, target; + u8 rfe_para, cv_para; + u8 cv_max = 0; + bool case_matched = false; + u32 i; + + for (i = 0; i < table->n_regs; i++) { + reg = &table->regs[i]; + headline = get_phy_headline(reg->addr); + if (headline != PHY_HEADLINE_VALID) + break; + } + *headline_size = i; + if (*headline_size == 0) + return 0; + + /* case 1: RFE match, CV match */ + compare = get_phy_compare(rfe, cv); + for (i = 0; i < *headline_size; i++) { + reg = &table->regs[i]; + target = get_phy_target(reg->addr); + if (target == compare) { + *headline_idx = i; + return 0; + } + } + + /* case 2: RFE match, CV don't care */ + compare = get_phy_compare(rfe, PHY_COND_DONT_CARE); + for (i = 0; i < *headline_size; i++) { + reg = &table->regs[i]; + target = get_phy_target(reg->addr); + if (target == compare) { + *headline_idx = i; + return 0; + } + } + + /* case 3: RFE match, CV max in table */ + for (i = 0; i < *headline_size; i++) { + reg = &table->regs[i]; + rfe_para = get_phy_cond_rfe(reg->addr); + cv_para = get_phy_cond_cv(reg->addr); + if (rfe_para == rfe) { + if (cv_para >= cv_max) { + cv_max = cv_para; + *headline_idx = i; + case_matched = true; + } + } + } + + if (case_matched) + return 0; + + /* case 4: RFE don't care, CV max in table */ + for (i = 0; i < *headline_size; i++) { + reg = &table->regs[i]; + rfe_para = get_phy_cond_rfe(reg->addr); + cv_para = get_phy_cond_cv(reg->addr); + if (rfe_para == PHY_COND_DONT_CARE) { + if (cv_para >= cv_max) { + cv_max = cv_para; + *headline_idx = i; + case_matched = true; + } + } + } + + if (case_matched) + return 0; + + return -EINVAL; +} + +static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev, + const struct rtw89_phy_table *table, + void (*config)(struct rtw89_dev *rtwdev, + const struct rtw89_reg2_def *reg, + enum rtw89_rf_path rf_path, + void *data), + void *extra_data) +{ + const struct rtw89_reg2_def *reg; + enum rtw89_rf_path rf_path = table->rf_path; + u8 rfe = rtwdev->efuse.rfe_type; + u8 cv = rtwdev->hal.cv; + u32 i; + u32 headline_size = 0, headline_idx = 0; + u32 target = 0, cfg_target; + u8 cond; + bool is_matched = true; + bool target_found = false; + int ret; + + ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size, + &headline_idx, rfe, cv); + if (ret) { + rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv); + return; + } + + cfg_target = get_phy_target(table->regs[headline_idx].addr); + for (i = headline_size; i < table->n_regs; i++) { + reg = &table->regs[i]; + cond = get_phy_cond(reg->addr); + switch (cond) { + case PHY_COND_BRANCH_IF: + case PHY_COND_BRANCH_ELIF: + target = get_phy_target(reg->addr); + break; + case PHY_COND_BRANCH_ELSE: + is_matched = false; + if (!target_found) { + rtw89_warn(rtwdev, "failed to load CR %x/%x\n", + reg->addr, reg->data); + return; + } + break; + case PHY_COND_BRANCH_END: + is_matched = true; + target_found = false; + break; + case PHY_COND_CHECK: + if (target_found) { + 
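+                /* an earlier CHECK in this branch chain already matched;
+                 * stay unmatched until PHY_COND_BRANCH_END resets the state
+                 */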
is_matched = false; + break; + } + + if (target == cfg_target) { + is_matched = true; + target_found = true; + } else { + is_matched = false; + target_found = false; + } + break; + default: + if (is_matched) + config(rtwdev, reg, rf_path, extra_data); + break; + } + } +} + +void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_phy_table *bb_table = chip->bb_table; + + rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL); + rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0); + rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0); +} + +static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32(rtwdev, 0x8080, 0x4); + udelay(1); + return rtw89_phy_read32(rtwdev, 0x8080); +} + +void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_phy_table *rf_table; + struct rtw89_fw_h2c_rf_reg_info *rf_reg_info; + u8 path; + + rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL); + if (!rf_reg_info) + return; + + for (path = RF_PATH_A; path < chip->rf_path_num; path++) { + rf_reg_info->rf_path = path; + rf_table = chip->rf_table[path]; + rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg, + (void *)rf_reg_info); + if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info)) + rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n", + path); + } + kfree(rf_reg_info); +} + +static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_phy_table *nctl_table; + u32 val; + int ret; + + /* IQK/DPK clock & reset */ + rtw89_phy_write32_set(rtwdev, 0x0c60, 0x3); + rtw89_phy_write32_set(rtwdev, 0x0c6c, 0x1); + rtw89_phy_write32_set(rtwdev, 0x58ac, 0x8000000); + rtw89_phy_write32_set(rtwdev, 0x78ac, 0x8000000); + + /* check 0x8080 */ + rtw89_phy_write32(rtwdev, 0x8000, 0x8); + + ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10, + 1000, false, rtwdev); + if (ret) + rtw89_err(rtwdev, "failed to poll nctl block\n"); + + nctl_table = chip->nctl_table; + rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL); +} + +static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr) +{ + u32 phy_page = addr >> 8; + u32 ofst = 0; + + switch (phy_page) { + case 0x6: + case 0x7: + case 0x8: + case 0x9: + case 0xa: + case 0xb: + case 0xc: + case 0xd: + case 0x19: + case 0x1a: + case 0x1b: + ofst = 0x2000; + break; + default: + /* warning case */ + ofst = 0; + break; + } + + if (phy_page >= 0x40 && phy_page <= 0x4f) + ofst = 0x2000; + + return ofst; +} + +void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, + u32 data, enum rtw89_phy_idx phy_idx) +{ + if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1) + addr += rtw89_phy0_phy1_offset(rtwdev, addr); + rtw89_phy_write32_mask(rtwdev, addr, mask, data); +} + +void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask, + u32 val) +{ + rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0); + + if (!rtwdev->dbcc_en) + return; + + rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1); +} + +void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev, + const struct rtw89_phy_reg3_tbl *tbl) +{ + const struct rtw89_reg3_def *reg3; + int i; + + for (i = 0; i < tbl->size; i++) { + reg3 = &tbl->reg3[i]; + rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data); + } +} + +const u8 rtw89_rs_idx_max[] = { + [RTW89_RS_CCK] = RTW89_RATE_CCK_MAX, + [RTW89_RS_OFDM] = 
RTW89_RATE_OFDM_MAX, + [RTW89_RS_MCS] = RTW89_RATE_MCS_MAX, + [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX, + [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX, +}; + +const u8 rtw89_rs_nss_max[] = { + [RTW89_RS_CCK] = 1, + [RTW89_RS_OFDM] = 1, + [RTW89_RS_MCS] = RTW89_NSS_MAX, + [RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX, + [RTW89_RS_OFFSET] = 1, +}; + +static const u8 _byr_of_rs[] = { + [RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck), + [RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm), + [RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs), + [RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm), + [RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset), +}; + +#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs]) +#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_max[rs] + (idx)) +#define _byr_chk(rs, nss, idx) \ + ((nss) < rtw89_rs_nss_max[rs] && (idx) < rtw89_rs_idx_max[rs]) + +void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl) +{ + const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data; + const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size; + s8 *byr; + u32 data; + u8 i, idx; + + for (; cfg < end; cfg++) { + byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]); + data = cfg->data; + + for (i = 0; i < cfg->len; i++, data >>= 8) { + idx = _byr_idx(cfg->rs, cfg->nss, (cfg->shf + i)); + byr[idx] = (s8)(data & 0xff); + } + } +} + +#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \ +({ \ + const struct rtw89_chip_info *__c = (rtwdev)->chip; \ + (txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \ +}) + +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_rate_desc *rate_desc) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + s8 *byr; + u8 idx; + + if (rate_desc->rs == RTW89_RS_CCK) + band = RTW89_BAND_2G; + + if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) { + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n", + rate_desc->rs, rate_desc->nss, rate_desc->idx); + + return 0; + } + + byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]); + idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx); + + return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]); +} + +static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel) +{ + switch (channel) { + case 1 ... 14: + return channel - 1; + case 36 ... 64: + return (channel - 36) / 2; + case 100 ... 144: + return ((channel - 100) / 2) + 15; + case 149 ... 
177: + return ((channel - 149) / 2) + 38; + default: + rtw89_warn(rtwdev, "unknown channel: %d\n", channel); + return 0; + } +} + +s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, + u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch); + u8 band = rtwdev->hal.current_band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + s8 lmt = 0, sar; + + switch (band) { + case RTW89_BAND_2G: + lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx]; + break; + case RTW89_BAND_5G: + lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx]; + break; + default: + rtw89_warn(rtwdev, "unknown band type: %d\n", band); + return 0; + } + + lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt); + sar = rtw89_query_sar(rtwdev); + + return min(lmt, sar); +} + +#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \ + do { \ + u8 __i; \ + for (__i = 0; __i < RTW89_BF_NUM; __i++) \ + ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \ + bw, ntx, \ + rs, __i, \ + (ch)); \ + } while (0) + +static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit *lmt, + u8 ntx, u8 ch) +{ + __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_CCK, ch); + __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_CCK, ch); + __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_OFDM, ch); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch); +} + +static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit *lmt, + u8 ntx, u8 ch) +{ + __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_CCK, ch - 2); + __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_CCK, ch); + __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_OFDM, ch - 2); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch - 2); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch + 2); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_MCS, ch); +} + +static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit *lmt, + u8 ntx, u8 ch) +{ + s8 val_0p5_n[RTW89_BF_NUM]; + s8 val_0p5_p[RTW89_BF_NUM]; + u8 i; + + __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_OFDM, ch - 6); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch - 6); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch - 2); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch + 2); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20, + ntx, RTW89_RS_MCS, ch + 6); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_MCS, ch - 4); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_MCS, ch + 4); + __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80, + ntx, RTW89_RS_MCS, ch); + + __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_MCS, ch - 4); + __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40, + ntx, RTW89_RS_MCS, ch + 4); + + for (i = 0; i < RTW89_BF_NUM; i++) + lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], 
val_0p5_p[i]); +} + +void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit *lmt, + u8 ntx) +{ + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + + memset(lmt, 0, sizeof(*lmt)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch); + break; + } +} + +static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, + u8 ru, u8 ntx, u8 ch) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch); + u8 band = rtwdev->hal.current_band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + s8 lmt_ru = 0, sar; + + switch (band) { + case RTW89_BAND_2G: + lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx]; + break; + case RTW89_BAND_5G: + lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx]; + break; + default: + rtw89_warn(rtwdev, "unknown band type: %d\n", band); + return 0; + } + + lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru); + sar = rtw89_query_sar(rtwdev); + + return min(lmt_ru, sar); +} + +static void +rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru *lmt_ru, + u8 ntx, u8 ch) +{ + lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch); + lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch); + lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch); +} + +static void +rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru *lmt_ru, + u8 ntx, u8 ch) +{ + lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch - 2); + lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch + 2); + lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch - 2); + lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch + 2); + lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch - 2); + lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch + 2); +} + +static void +rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru *lmt_ru, + u8 ntx, u8 ch) +{ + lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch - 6); + lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch - 2); + lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch + 2); + lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26, + ntx, ch + 6); + lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch - 6); + lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch - 2); + lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch + 2); + lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52, + ntx, ch + 6); + lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch - 6); + lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch - 2); + lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch + 2); + lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106, + ntx, ch + 6); +} + +void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, + struct 
rtw89_txpwr_limit_ru *lmt_ru, + u8 ntx) +{ + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + + memset(lmt_ru, 0, sizeof(*lmt_ru)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch); + break; + } +} + +struct rtw89_phy_iter_ra_data { + struct rtw89_dev *rtwdev; + struct sk_buff *c2h; +}; + +static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data; + struct rtw89_dev *rtwdev = ra_data->rtwdev; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_ra_report *ra_report = &rtwsta->ra_report; + struct sk_buff *c2h = ra_data->c2h; + u8 mode, rate, bw, giltf, mac_id; + + mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data); + if (mac_id != rtwsta->mac_id) + return; + + memset(ra_report, 0, sizeof(*ra_report)); + + rate = RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h->data); + bw = RTW89_GET_PHY_C2H_RA_RPT_BW(c2h->data); + giltf = RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h->data); + mode = RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h->data); + + switch (mode) { + case RTW89_RA_RPT_MODE_LEGACY: + ra_report->txrate.legacy = rtw89_ra_report_to_bitrate(rtwdev, rate); + break; + case RTW89_RA_RPT_MODE_HT: + ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS; + if (rtwdev->fw.old_ht_ra_format) + rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate), + FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate)); + else + rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate); + ra_report->txrate.mcs = rate; + if (giltf) + ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case RTW89_RA_RPT_MODE_VHT: + ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; + ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate); + ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1; + if (giltf) + ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case RTW89_RA_RPT_MODE_HE: + ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS; + ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate); + ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1; + if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) + ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) + ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + if (bw == RTW89_CHANNEL_WIDTH_80) + ra_report->txrate.bw = RATE_INFO_BW_80; + else if (bw == RTW89_CHANNEL_WIDTH_40) + ra_report->txrate.bw = RATE_INFO_BW_40; + else + ra_report->txrate.bw = RATE_INFO_BW_20; + + ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate); + ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) | + FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate); + sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report); + rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1; +} + +static void +rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + struct rtw89_phy_iter_ra_data ra_data; + + ra_data.rtwdev = rtwdev; + ra_data.c2h = c2h; + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_phy_c2h_ra_rpt_iter, + &ra_data); +} + +static +void (* const 
rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt, + [RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL, + [RTW89_PHY_C2H_FUNC_TXSTS] = NULL, +}; + +void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func) +{ + void (*handler)(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = NULL; + + switch (class) { + case RTW89_PHY_C2H_CLASS_RA: + if (func < RTW89_PHY_C2H_FUNC_RA_MAX) + handler = rtw89_phy_c2h_ra_handler[func]; + break; + default: + rtw89_info(rtwdev, "c2h class %d not support\n", class); + return; + } + if (!handler) { + rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, + func); + return; + } + handler(rtwdev, skb, len); +} + +static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo) +{ + u32 reg_mask; + + if (sc_xo) + reg_mask = B_AX_XTAL_SC_XO_MASK; + else + reg_mask = B_AX_XTAL_SC_XI_MASK; + + return (u8)rtw89_read32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask); +} + +static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo, + u8 val) +{ + u32 reg_mask; + + if (sc_xo) + reg_mask = B_AX_XTAL_SC_XO_MASK; + else + reg_mask = B_AX_XTAL_SC_XI_MASK; + + rtw89_write32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask, val); +} + +static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev, + u8 crystal_cap, bool force) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + u8 sc_xi_val, sc_xo_val; + + if (!force && cfo->crystal_cap == crystal_cap) + return; + crystal_cap = clamp_t(u8, crystal_cap, 0, 127); + rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap); + rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap); + sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true); + sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false); + cfo->crystal_cap = sc_xi_val; + cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap); + + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n", + cfo->x_cap_ofst); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n"); +} + +static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + u8 cap; + + cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK; + cfo->is_adjust = false; + if (cfo->crystal_cap == cfo->def_x_cap) + return; + cap = cfo->crystal_cap; + cap += (cap > cfo->def_x_cap ? -1 : 1); + rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap, + cfo->def_x_cap); +} + +static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) +{ + bool is_linked = rtwdev->total_sta_assoc > 0; + s32 cfo_avg_312; + s32 dcfo_comp; + int sign; + + if (!is_linked) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n", + is_linked); + return; + } + rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo); + if (curr_cfo == 0) + return; + dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO); + sign = curr_cfo > 0 ? 
1 : -1; + cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp; + rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312); + if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) + cfo_avg_312 = -cfo_avg_312; + rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK, + cfo_avg_312); +} + +static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev) +{ + rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1); + rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8); + rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK); +} + +static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + struct rtw89_efuse *efuse = &rtwdev->efuse; + + cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK; + cfo->crystal_cap = cfo->crystal_cap_default; + cfo->def_x_cap = cfo->crystal_cap; + cfo->is_adjust = false; + cfo->x_cap_ofst = 0; + cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE; + cfo->apply_compensation = false; + cfo->residual_cfo_acc = 0; + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n", + cfo->crystal_cap_default); + rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true); + rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); + rtw89_dcfo_comp_init(rtwdev); + cfo->cfo_timer_ms = 2000; + cfo->cfo_trig_by_timer_en = false; + cfo->phy_cfo_trk_cnt = 0; + cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; +} + +static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev, + s32 curr_cfo) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + s8 crystal_cap = cfo->crystal_cap; + s32 cfo_abs = abs(curr_cfo); + int sign; + + if (!cfo->is_adjust) { + if (cfo_abs > CFO_TRK_ENABLE_TH) + cfo->is_adjust = true; + } else { + if (cfo_abs < CFO_TRK_STOP_TH) + cfo->is_adjust = false; + } + if (!cfo->is_adjust) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n"); + return; + } + sign = curr_cfo > 0 ? 
1 : -1; + if (cfo_abs > CFO_TRK_STOP_TH_4) + crystal_cap += 7 * sign; + else if (cfo_abs > CFO_TRK_STOP_TH_3) + crystal_cap += 5 * sign; + else if (cfo_abs > CFO_TRK_STOP_TH_2) + crystal_cap += 3 * sign; + else if (cfo_abs > CFO_TRK_STOP_TH_1) + crystal_cap += 1 * sign; + else + return; + rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "X_cap{Curr,Default}={0x%x,0x%x}\n", + cfo->crystal_cap, cfo->def_x_cap); +} + +static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + s32 cfo_khz_all = 0; + s32 cfo_cnt_all = 0; + s32 cfo_all_avg = 0; + u8 i; + + if (rtwdev->total_sta_assoc != 1) + return 0; + rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n"); + for (i = 0; i < CFO_TRACK_MAX_USER; i++) { + if (cfo->cfo_cnt[i] == 0) + continue; + cfo_khz_all += cfo->cfo_tail[i]; + cfo_cnt_all += cfo->cfo_cnt[i]; + cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all); + cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; + } + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "CFO track for macid = %d\n", i); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n", + cfo_khz_all, cfo_cnt_all, cfo_all_avg); + return cfo_all_avg; +} + +static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + struct rtw89_traffic_stats *stats = &rtwdev->stats; + s32 target_cfo = 0; + s32 cfo_khz_all = 0; + s32 cfo_khz_all_tp_wgt = 0; + s32 cfo_avg = 0; + s32 max_cfo_lb = BIT(31); + s32 min_cfo_ub = GENMASK(30, 0); + u16 cfo_cnt_all = 0; + u8 active_entry_cnt = 0; + u8 sta_cnt = 0; + u32 tp_all = 0; + u8 i; + u8 cfo_tol = 0; + + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n"); + if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n"); + for (i = 0; i < CFO_TRACK_MAX_USER; i++) { + if (cfo->cfo_cnt[i] == 0) + continue; + cfo_khz_all += cfo->cfo_tail[i]; + cfo_cnt_all += cfo->cfo_cnt[i]; + cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n", + cfo_khz_all, cfo_cnt_all, cfo_avg); + target_cfo = cfo_avg; + } + } else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n"); + for (i = 0; i < CFO_TRACK_MAX_USER; i++) { + if (cfo->cfo_cnt[i] == 0) + continue; + cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], + (s32)cfo->cfo_cnt[i]); + cfo_khz_all += cfo->cfo_avg[i]; + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "Macid=%d, cfo_avg=%d\n", i, + cfo->cfo_avg[i]); + } + sta_cnt = rtwdev->total_sta_assoc; + cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n", + cfo_khz_all, sta_cnt, cfo_avg); + target_cfo = cfo_avg; + } else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n"); + cfo_tol = cfo->sta_cfo_tolerance; + for (i = 0; i < CFO_TRACK_MAX_USER; i++) { + sta_cnt++; + if (cfo->cfo_cnt[i] != 0) { + cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], + (s32)cfo->cfo_cnt[i]); + active_entry_cnt++; + } else { + cfo->cfo_avg[i] = cfo->pre_cfo_avg[i]; + } + max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb); + min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub); + cfo_khz_all += cfo->cfo_avg[i]; + /* need tp for each entry */ + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "[%d] cfo_avg=%d, 
tp=tbd\n", + i, cfo->cfo_avg[i]); + if (sta_cnt >= rtwdev->total_sta_assoc) + break; + } + tp_all = stats->rx_throughput; /* need tp for each entry */ + cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all); + + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n", + sta_cnt); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n", + active_entry_cnt); + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "Msta cfo with tp_wgt=%d, avg_cfo=%d\n", + cfo_khz_all_tp_wgt, cfo_avg); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n", + max_cfo_lb, min_cfo_ub); + if (max_cfo_lb <= min_cfo_ub) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "cfo win_size=%d\n", + min_cfo_ub - max_cfo_lb); + target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub); + } else { + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "No intersection of cfo tolerance windows\n"); + target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt); + } + for (i = 0; i < CFO_TRACK_MAX_USER; i++) + cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; + } + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo); + return target_cfo; +} + +static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + + memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail)); + memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt)); + cfo->packet_count = 0; + cfo->packet_count_pre = 0; + cfo->cfo_avg_pre = 0; +} + +static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + s32 new_cfo = 0; + bool x_cap_update = false; + u8 pre_x_cap = cfo->crystal_cap; + + rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n", + rtwdev->total_sta_assoc); + if (rtwdev->total_sta_assoc == 0) { + rtw89_phy_cfo_reset(rtwdev); + return; + } + if (cfo->packet_count == 0) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n"); + return; + } + if (cfo->packet_count == cfo->packet_count_pre) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n"); + return; + } + if (rtwdev->total_sta_assoc == 1) + new_cfo = rtw89_phy_average_cfo_calc(rtwdev); + else + new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); + if (new_cfo == 0) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); + return; + } + rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo); + cfo->cfo_avg_pre = new_cfo; + x_cap_update = cfo->crystal_cap == pre_x_cap ? 
false : true; + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update); + rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n", + cfo->def_x_cap, pre_x_cap, cfo->crystal_cap, + cfo->x_cap_ofst); + if (x_cap_update) { + if (new_cfo > 0) + new_cfo -= CFO_SW_COMP_FINE_TUNE; + else + new_cfo += CFO_SW_COMP_FINE_TUNE; + } + rtw89_dcfo_comp(rtwdev, new_cfo); + rtw89_phy_cfo_statistics_reset(rtwdev); +} + +void rtw89_phy_cfo_track_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + cfo_track_work.work); + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + + mutex_lock(&rtwdev->mutex); + if (!cfo->cfo_trig_by_timer_en) + goto out; + rtw89_leave_ps_mode(rtwdev); + rtw89_phy_cfo_dm(rtwdev); + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, + msecs_to_jiffies(cfo->cfo_timer_ms)); +out: + mutex_unlock(&rtwdev->mutex); +} + +static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, + msecs_to_jiffies(cfo->cfo_timer_ms)); +} + +void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + struct rtw89_traffic_stats *stats = &rtwdev->stats; + + switch (cfo->phy_cfo_status) { + case RTW89_PHY_DCFO_STATE_NORMAL: + if (stats->tx_throughput >= CFO_TP_UPPER) { + cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE; + cfo->cfo_trig_by_timer_en = true; + cfo->cfo_timer_ms = CFO_COMP_PERIOD; + rtw89_phy_cfo_start_work(rtwdev); + } + break; + case RTW89_PHY_DCFO_STATE_ENHANCE: + if (cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) { + cfo->phy_cfo_trk_cnt = 0; + cfo->cfo_trig_by_timer_en = false; + } + if (cfo->cfo_trig_by_timer_en == 1) + cfo->phy_cfo_trk_cnt++; + if (stats->tx_throughput <= CFO_TP_LOWER) { + cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; + cfo->phy_cfo_trk_cnt = 0; + cfo->cfo_trig_by_timer_en = false; + } + break; + default: + cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; + cfo->phy_cfo_trk_cnt = 0; + break; + } + rtw89_debug(rtwdev, RTW89_DBG_CFO, + "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n", + stats->tx_throughput, cfo->phy_cfo_status, + cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt, + ewma_thermal_read(&rtwdev->phystat.avg_thermal[0])); + if (cfo->cfo_trig_by_timer_en) + return; + rtw89_phy_cfo_dm(rtwdev); +} + +void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; + u8 macid = phy_ppdu->mac_id; + + cfo->cfo_tail[macid] += cfo_val; + cfo->cfo_cnt[macid]++; + cfo->packet_count++; +} + +static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_stat *phystat = &rtwdev->phystat; + int i; + u8 th; + + for (i = 0; i < rtwdev->chip->rf_path_num; i++) { + th = rtw89_chip_get_thermal(rtwdev, i); + if (th) + ewma_thermal_add(&phystat->avg_thermal[i], th); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "path(%d) thermal cur=%u avg=%ld", i, th, + ewma_thermal_read(&phystat->avg_thermal[i])); + } +} + +struct rtw89_phy_iter_rssi_data { + struct rtw89_dev *rtwdev; + struct rtw89_phy_ch_info *ch_info; + bool rssi_changed; +}; + +static void rtw89_phy_stat_rssi_update_iter(void *data, + struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_phy_iter_rssi_data *rssi_data = + (struct 
rtw89_phy_iter_rssi_data *)data; + struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info; + unsigned long rssi_curr; + + rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi); + + if (rssi_curr < ch_info->rssi_min) { + ch_info->rssi_min = rssi_curr; + ch_info->rssi_min_macid = rtwsta->mac_id; + } + + if (rtwsta->prev_rssi == 0) { + rtwsta->prev_rssi = rssi_curr; + } else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) { + rtwsta->prev_rssi = rssi_curr; + rssi_data->rssi_changed = true; + } +} + +static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_iter_rssi_data rssi_data = {0}; + + rssi_data.rtwdev = rtwdev; + rssi_data.ch_info = &rtwdev->ch_info; + rssi_data.ch_info->rssi_min = U8_MAX; + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_phy_stat_rssi_update_iter, + &rssi_data); + if (rssi_data.rssi_changed) + rtw89_btc_ntfy_wl_sta(rtwdev); +} + +static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_stat *phystat = &rtwdev->phystat; + int i; + + for (i = 0; i < rtwdev->chip->rf_path_num; i++) + ewma_thermal_init(&phystat->avg_thermal[i]); + + rtw89_phy_stat_thermal_update(rtwdev); + + memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); + memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat)); +} + +void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_stat *phystat = &rtwdev->phystat; + + rtw89_phy_stat_thermal_update(rtwdev); + rtw89_phy_stat_rssi_update(rtwdev); + + phystat->last_pkt_stat = phystat->cur_pkt_stat; + memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); +} + +static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + + return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); +} + +static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + + return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); +} + +static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + + env->ccx_manual_ctrl = false; + env->ccx_ongoing = false; + env->ccx_rac_lv = RTW89_RAC_RELEASE; + env->ccx_rpt_stamp = 0; + env->ccx_period = 0; + env->ccx_unit_idx = RTW89_CCX_32_US; + env->ccx_trigger_time = 0; + env->ccx_edcca_opt_bw_idx = RTW89_CCX_EDCCA_BW20_0; + + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EN_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_TRIG_OPT_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EDCCA_OPT_MSK, + RTW89_CCX_EDCCA_BW20_0); +} + +static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report, + u16 score) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + u32 numer = 0; + u16 ret = 0; + + numer = report * score + (env->ccx_period >> 1); + if (env->ccx_period) + ret = numer / env->ccx_period; + + return ret >= score ? 
score - 1 : ret; +} + +static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev, + u16 time_ms, u32 *period, + u32 *unit_idx) +{ + u32 idx; + u8 quotient; + + if (time_ms >= CCX_MAX_PERIOD) + time_ms = CCX_MAX_PERIOD; + + quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD; + + if (quotient < 4) + idx = RTW89_CCX_4_US; + else if (quotient < 8) + idx = RTW89_CCX_8_US; + else if (quotient < 16) + idx = RTW89_CCX_16_US; + else + idx = RTW89_CCX_32_US; + + *unit_idx = idx; + *period = (time_ms * MS_TO_4US_RATIO) >> idx; + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "[Trigger Time] period:%d, unit_idx:%d\n", + *period, *unit_idx); +} + +static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "lv:(%d)->(0)\n", env->ccx_rac_lv); + + env->ccx_ongoing = false; + env->ccx_rac_lv = RTW89_RAC_RELEASE; + env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; +} + +static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, + struct rtw89_ccx_para_info *para) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + bool is_update = env->ifs_clm_app != para->ifs_clm_app; + u8 i = 0; + u16 *ifs_th_l = env->ifs_clm_th_l; + u16 *ifs_th_h = env->ifs_clm_th_h; + u32 ifs_th0_us = 0, ifs_th_times = 0; + u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0}; + + if (!is_update) + goto ifs_update_finished; + + switch (para->ifs_clm_app) { + case RTW89_IFS_CLM_INIT: + case RTW89_IFS_CLM_BACKGROUND: + case RTW89_IFS_CLM_ACS: + case RTW89_IFS_CLM_DBG: + case RTW89_IFS_CLM_DIG: + case RTW89_IFS_CLM_TDMA_DIG: + ifs_th0_us = IFS_CLM_TH0_UPPER; + ifs_th_times = IFS_CLM_TH_MUL; + break; + case RTW89_IFS_CLM_DBG_MANUAL: + ifs_th0_us = para->ifs_clm_manual_th0; + ifs_th_times = para->ifs_clm_manual_th_times; + break; + default: + break; + } + + /* Set sampling threshold for 4 different regions, unit in idx_cnt. 
+ * low[i] = high[i-1] + 1 + * high[i] = high[i-1] * ifs_th_times + */ + ifs_th_l[IFS_CLM_TH_START_IDX] = 0; + ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us; + ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, + ifs_th0_us); + for (i = 1; i < RTW89_IFS_CLM_NUM; i++) { + ifs_th_l[i] = ifs_th_h[i - 1] + 1; + ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times; + ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]); + } + +ifs_update_finished: + if (!is_update) + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "No need to update IFS_TH\n"); + + return is_update; +} + +static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + u8 i = 0; + + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_LOW_MSK, + env->ifs_clm_th_l[0]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_LOW_MSK, + env->ifs_clm_th_l[1]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_LOW_MSK, + env->ifs_clm_th_l[2]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_LOW_MSK, + env->ifs_clm_th_l[3]); + + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_HIGH_MSK, + env->ifs_clm_th_h[0]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_HIGH_MSK, + env->ifs_clm_th_h[1]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_HIGH_MSK, + env->ifs_clm_th_h[2]); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_HIGH_MSK, + env->ifs_clm_th_h[3]); + + for (i = 0; i < RTW89_IFS_CLM_NUM; i++) + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "Update IFS_T%d_th{low, high} : {%d, %d}\n", + i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]); +} + +static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_ccx_para_info para = {0}; + + env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; + env->ifs_clm_mntr_time = 0; + + para.ifs_clm_app = RTW89_IFS_CLM_INIT; + if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para)) + rtw89_phy_ifs_clm_set_th_reg(rtwdev); + + rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COLLECT_EN, + true); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_EN_MSK, true); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_EN_MSK, true); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_EN_MSK, true); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_EN_MSK, true); +} + +static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, + enum rtw89_env_racing_lv level) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + int ret = 0; + + if (level >= RTW89_RAC_MAX_NUM) { + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "[WARNING] Wrong LV=%d\n", level); + return -EINVAL; + } + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing, + env->ccx_rac_lv, level); + + if (env->ccx_ongoing) { + if (level <= env->ccx_rac_lv) + ret = -EINVAL; + else + env->ccx_ongoing = false; + } + + if (ret == 0) + env->ccx_rac_lv = level; + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n", + !ret); + + return ret; +} + +static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + + rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 0); + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 0); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); + + env->ccx_rpt_stamp++; + env->ccx_ongoing 
= true; +} + +static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + u8 i = 0; + u32 res = 0; + + env->ifs_clm_tx_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT); + env->ifs_clm_edcca_excl_cca_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca, + PERCENT); + env->ifs_clm_cck_fa_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT); + env->ifs_clm_ofdm_fa_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT); + env->ifs_clm_cck_cca_excl_fa_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa, + PERCENT); + env->ifs_clm_ofdm_cca_excl_fa_ratio = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa, + PERCENT); + env->ifs_clm_cck_fa_permil = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL); + env->ifs_clm_ofdm_fa_permil = + rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL); + + for (i = 0; i < RTW89_IFS_CLM_NUM; i++) { + if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) { + env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD; + } else { + env->ifs_clm_ifs_avg[i] = + rtw89_phy_ccx_idx_to_us(rtwdev, + env->ifs_clm_avg[i]); + } + + res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]); + res += env->ifs_clm_his[i] >> 1; + if (env->ifs_clm_his[i]) + res /= env->ifs_clm_his[i]; + else + res = 0; + env->ifs_clm_cca_avg[i] = res; + } + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n", + env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n", + env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n", + env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n", + env->ifs_clm_cck_cca_excl_fa_ratio, + env->ifs_clm_ofdm_cca_excl_fa_ratio); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "Time:[his, ifs_avg(us), cca_avg(us)]\n"); + for (i = 0; i < RTW89_IFS_CLM_NUM; i++) + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n", + i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i], + env->ifs_clm_cca_avg[i]); +} + +static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + u8 i = 0; + + if (rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_DONE_MSK) == 0) { + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "Get IFS_CLM report Fail\n"); + return false; + } + + env->ifs_clm_tx = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, + B_IFS_CLM_TX_CNT_MSK); + env->ifs_clm_edcca_excl_cca = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, + B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK); + env->ifs_clm_cckcca_excl_fa = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, + B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK); + env->ifs_clm_ofdmcca_excl_fa = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, + B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK); + env->ifs_clm_cckfa = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, + B_IFS_CLM_CCK_FA_MSK); + env->ifs_clm_ofdmfa = + rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, + B_IFS_CLM_OFDM_FA_MSK); + + env->ifs_clm_his[0] = + rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T1_HIS_MSK); + env->ifs_clm_his[1] = + rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T2_HIS_MSK); + env->ifs_clm_his[2] = + 
rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T3_HIS_MSK); + env->ifs_clm_his[3] = + rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T4_HIS_MSK); + + env->ifs_clm_avg[0] = + rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T1_AVG_MSK); + env->ifs_clm_avg[1] = + rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T2_AVG_MSK); + env->ifs_clm_avg[2] = + rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T3_AVG_MSK); + env->ifs_clm_avg[3] = + rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T4_AVG_MSK); + + env->ifs_clm_cca[0] = + rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T1_CCA_MSK); + env->ifs_clm_cca[1] = + rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T2_CCA_MSK); + env->ifs_clm_cca[2] = + rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T3_CCA_MSK); + env->ifs_clm_cca[3] = + rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T4_CCA_MSK); + + env->ifs_clm_total_ifs = + rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_TOTAL_CNT_MSK); + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", + env->ifs_clm_total_ifs); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "{Tx, EDCCA_exclu_cca} = {%d, %d}\n", + env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n", + env->ifs_clm_cckfa, env->ifs_clm_ofdmfa); + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n", + env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa); + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n"); + for (i = 0; i < RTW89_IFS_CLM_NUM; i++) + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i], + env->ifs_clm_avg[i], env->ifs_clm_cca[i]); + + rtw89_phy_ifs_clm_get_utility(rtwdev); + + return true; +} + +static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, + struct rtw89_ccx_para_info *para) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + u32 period = 0; + u32 unit_idx = 0; + + if (para->mntr_time == 0) { + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "[WARN] MNTR_TIME is 0\n"); + return -EINVAL; + } + + if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv)) + return -EINVAL; + + if (para->mntr_time != env->ifs_clm_mntr_time) { + rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, + &period, &unit_idx); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, + B_IFS_CLM_PERIOD_MSK, period); + rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, + B_IFS_CLM_COUNTER_UNIT_MSK, unit_idx); + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "Update IFS-CLM time ((%d)) -> ((%d))\n", + env->ifs_clm_mntr_time, para->mntr_time); + + env->ifs_clm_mntr_time = para->mntr_time; + env->ccx_period = (u16)period; + env->ccx_unit_idx = (u8)unit_idx; + } + + if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) { + env->ifs_clm_app = para->ifs_clm_app; + rtw89_phy_ifs_clm_set_th_reg(rtwdev); + } + + return 0; +} + +void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_ccx_para_info para = {0}; + u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL; + + env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL; + if (env->ccx_manual_ctrl) { + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "CCX in manual ctrl\n"); + return; + } + + /* only ifs_clm for now */ + if (rtw89_phy_ifs_clm_get_result(rtwdev)) + env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM; + + rtw89_phy_ccx_racing_release(rtwdev); + para.mntr_time = 1900; + para.rac_lv = RTW89_RAC_LV_1; + para.ifs_clm_app = 
RTW89_IFS_CLM_BACKGROUND; + + if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0) + chk_result |= RTW89_PHY_ENV_MON_IFS_CLM; + if (chk_result) + rtw89_phy_ccx_trigger(rtwdev); + + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "get_result=0x%x, chk_result:0x%x\n", + env->ccx_watchdog_result, chk_result); +} + +static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_dig_info *dig = &rtwdev->dig; + const struct rtw89_phy_dig_gain_cfg *cfg; + const char *msg; + u8 i; + s8 gain_base; + s8 *gain_arr; + u32 tmp; + + switch (type) { + case RTW89_DIG_GAIN_LNA_G: + gain_arr = dig->lna_gain_g; + gain_base = LNA0_GAIN; + cfg = chip->dig_table->cfg_lna_g; + msg = "lna_gain_g"; + break; + case RTW89_DIG_GAIN_TIA_G: + gain_arr = dig->tia_gain_g; + gain_base = TIA0_GAIN_G; + cfg = chip->dig_table->cfg_tia_g; + msg = "tia_gain_g"; + break; + case RTW89_DIG_GAIN_LNA_A: + gain_arr = dig->lna_gain_a; + gain_base = LNA0_GAIN; + cfg = chip->dig_table->cfg_lna_a; + msg = "lna_gain_a"; + break; + case RTW89_DIG_GAIN_TIA_A: + gain_arr = dig->tia_gain_a; + gain_base = TIA0_GAIN_A; + cfg = chip->dig_table->cfg_tia_a; + msg = "tia_gain_a"; + break; + default: + return; + } + + for (i = 0; i < cfg->size; i++) { + tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr, + cfg->table[i].mask); + tmp >>= DIG_GAIN_SHIFT; + gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base; + gain_base += DIG_GAIN; + + rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n", + msg, i, gain_arr[i]); + } +} + +static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + u32 tmp; + u8 i; + + tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW, + B_PATH0_IB_PKPW_MSK); + dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT); + dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK, + B_PATH0_IB_PBK_MSK); + rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n", + dig->ib_pkpwr, dig->ib_pbk); + + for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++) + rtw89_phy_dig_read_gain_table(rtwdev, i); +} + +static const u8 rssi_nolink = 22; +static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104}; +static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88}; +static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16}; +static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528}; + +static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info; + struct rtw89_dig_info *dig = &rtwdev->dig; + bool is_linked = rtwdev->total_sta_assoc > 0; + + if (is_linked) { + dig->igi_rssi = ch_info->rssi_min >> 1; + } else { + rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n"); + dig->igi_rssi = rssi_nolink; + } +} + +static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + bool is_linked = rtwdev->total_sta_assoc > 0; + const u16 *fa_th_src = NULL; + + switch (rtwdev->hal.current_band_type) { + case RTW89_BAND_2G: + dig->lna_gain = dig->lna_gain_g; + dig->tia_gain = dig->tia_gain_g; + fa_th_src = is_linked ? fa_th_2g : fa_th_nolink; + dig->force_gaincode_idx_en = false; + dig->dyn_pd_th_en = true; + break; + case RTW89_BAND_5G: + default: + dig->lna_gain = dig->lna_gain_a; + dig->tia_gain = dig->tia_gain_a; + fa_th_src = is_linked ? 
fa_th_5g : fa_th_nolink; + dig->force_gaincode_idx_en = true; + dig->dyn_pd_th_en = true; + break; + } + memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th)); + memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th)); +} + +static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20; +static const u8 igi_max_performance_mode = 0x5a; +static const u8 dynamic_pd_threshold_max; + +static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + + dig->cur_gaincode.lna_idx = LNA_IDX_MAX; + dig->cur_gaincode.tia_idx = TIA_IDX_MAX; + dig->cur_gaincode.rxb_idx = RXB_IDX_MAX; + dig->force_gaincode.lna_idx = LNA_IDX_MAX; + dig->force_gaincode.tia_idx = TIA_IDX_MAX; + dig->force_gaincode.rxb_idx = RXB_IDX_MAX; + + dig->dyn_igi_max = igi_max_performance_mode; + dig->dyn_igi_min = dynamic_igi_min; + dig->dyn_pd_th_max = dynamic_pd_threshold_max; + dig->pd_low_th_ofst = pd_low_th_offset; + dig->is_linked_pre = false; +} + +static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev) +{ + rtw89_phy_dig_update_gain_para(rtwdev); + rtw89_phy_dig_reset(rtwdev); +} + +static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + u8 lna_idx; + + if (rssi < dig->igi_rssi_th[0]) + lna_idx = RTW89_DIG_GAIN_LNA_IDX6; + else if (rssi < dig->igi_rssi_th[1]) + lna_idx = RTW89_DIG_GAIN_LNA_IDX5; + else if (rssi < dig->igi_rssi_th[2]) + lna_idx = RTW89_DIG_GAIN_LNA_IDX4; + else if (rssi < dig->igi_rssi_th[3]) + lna_idx = RTW89_DIG_GAIN_LNA_IDX3; + else if (rssi < dig->igi_rssi_th[4]) + lna_idx = RTW89_DIG_GAIN_LNA_IDX2; + else + lna_idx = RTW89_DIG_GAIN_LNA_IDX1; + + return lna_idx; +} + +static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + u8 tia_idx; + + if (rssi < dig->igi_rssi_th[0]) + tia_idx = RTW89_DIG_GAIN_TIA_IDX1; + else + tia_idx = RTW89_DIG_GAIN_TIA_IDX0; + + return tia_idx; +} + +#define IB_PBK_BASE 110 +#define WB_RSSI_BASE 10 +static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, + struct rtw89_agc_gaincode_set *set) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + s8 lna_gain = dig->lna_gain[set->lna_idx]; + s8 tia_gain = dig->tia_gain[set->tia_idx]; + s32 wb_rssi = rssi + lna_gain + tia_gain; + s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE; + u8 rxb_idx; + + rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi; + rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n", + wb_rssi, rxb_idx_tmp); + + return rxb_idx; +} + +static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, + struct rtw89_agc_gaincode_set *set) +{ + set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi); + set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi); + set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n", + rssi, set->lna_idx, set->tia_idx, set->rxb_idx); +} + +#define IGI_OFFSET_MAX 25 +#define IGI_OFFSET_MUL 2 +static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + enum rtw89_dig_noisy_level noisy_lv; + u8 igi_offset = dig->fa_rssi_ofst; + u16 fa_ratio = 0; + + fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil; + + if (fa_ratio < dig->fa_th[0]) + noisy_lv = 
RTW89_DIG_NOISY_LEVEL0; + else if (fa_ratio < dig->fa_th[1]) + noisy_lv = RTW89_DIG_NOISY_LEVEL1; + else if (fa_ratio < dig->fa_th[2]) + noisy_lv = RTW89_DIG_NOISY_LEVEL2; + else if (fa_ratio < dig->fa_th[3]) + noisy_lv = RTW89_DIG_NOISY_LEVEL3; + else + noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX; + + if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2) + igi_offset = 0; + else + igi_offset += noisy_lv * IGI_OFFSET_MUL; + + igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX); + dig->fa_rssi_ofst = igi_offset; + + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n", + dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n", + env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil, + env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil, + noisy_lv, igi_offset); +} + +static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx) +{ + rtw89_phy_write32_mask(rtwdev, R_PATH0_LNA_INIT, + B_PATH0_LNA_INIT_IDX_MSK, lna_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH1_LNA_INIT, + B_PATH1_LNA_INIT_IDX_MSK, lna_idx); +} + +static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx) +{ + rtw89_phy_write32_mask(rtwdev, R_PATH0_TIA_INIT, + B_PATH0_TIA_INIT_IDX_MSK, tia_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH1_TIA_INIT, + B_PATH1_TIA_INIT_IDX_MSK, tia_idx); +} + +static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx) +{ + rtw89_phy_write32_mask(rtwdev, R_PATH0_RXB_INIT, + B_PATH0_RXB_INIT_IDX_MSK, rxb_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH1_RXB_INIT, + B_PATH1_RXB_INIT_IDX_MSK, rxb_idx); +} + +static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev, + const struct rtw89_agc_gaincode_set set) +{ + rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx); + rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx); + rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n", + set.lna_idx, set.tia_idx, set.rxb_idx); +} + +static const struct rtw89_reg_def sdagc_config[4] = { + {R_PATH0_P20_FOLLOW_BY_PAGCUGC, B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK}, + {R_PATH0_S20_FOLLOW_BY_PAGCUGC, B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, + {R_PATH1_P20_FOLLOW_BY_PAGCUGC, B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK}, + {R_PATH1_S20_FOLLOW_BY_PAGCUGC, B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, +}; + +static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev, + bool enable) +{ + u8 i = 0; + + for (i = 0; i < ARRAY_SIZE(sdagc_config); i++) + rtw89_phy_write32_mask(rtwdev, sdagc_config[i].addr, + sdagc_config[i].mask, enable); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable); +} + +static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, + bool enable) +{ + enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width; + struct rtw89_dig_info *dig = &rtwdev->dig; + u8 final_rssi = 0, under_region = dig->pd_low_th_ofst; + u32 val = 0; + + under_region += PD_TH_SB_FLTR_CMP_VAL; + + switch (cbw) { + case RTW89_CHANNEL_WIDTH_40: + under_region += PD_TH_BW40_CMP_VAL; + break; + case RTW89_CHANNEL_WIDTH_80: + under_region += PD_TH_BW80_CMP_VAL; + break; + case RTW89_CHANNEL_WIDTH_20: + fallthrough; + default: + under_region += PD_TH_BW20_CMP_VAL; + break; + } + + dig->dyn_pd_th_max = dig->igi_rssi; + + final_rssi = min_t(u8, rssi, dig->igi_rssi); + final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region, + PD_TH_MAX_RSSI + under_region); + 
+ if (enable) { + val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1; + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n", + dig->igi_rssi, final_rssi, under_region, val); + } else { + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "Dynamic PD th disabled, Set PD_low_bd=0\n"); + } + + rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK, + val); + rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, + B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable); +} + +void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + + dig->bypass_dig = false; + rtw89_phy_dig_para_reset(rtwdev); + rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); + rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false); + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); + rtw89_phy_dig_update_para(rtwdev); +} + +#define IGI_RSSI_MIN 10 +void rtw89_phy_dig(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + bool is_linked = rtwdev->total_sta_assoc > 0; + + if (unlikely(dig->bypass_dig)) { + dig->bypass_dig = false; + return; + } + + if (!dig->is_linked_pre && is_linked) { + rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n"); + rtw89_phy_dig_update_para(rtwdev); + } else if (dig->is_linked_pre && !is_linked) { + rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n"); + rtw89_phy_dig_update_para(rtwdev); + } + dig->is_linked_pre = is_linked; + + rtw89_phy_dig_igi_offset_by_env(rtwdev); + rtw89_phy_dig_update_rssi_info(rtwdev); + + dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ? + dig->igi_rssi - IGI_RSSI_MIN : 0; + dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX; + dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst; + + dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min, + dig->dyn_igi_max); + + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n", + dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, + dig->igi_fa_rssi); + + if (dig->force_gaincode_idx_en) { + rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "Force gaincode index enabled.\n"); + } else { + rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi, + &dig->cur_gaincode); + rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode); + } + + rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); + + if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max) + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true); + else + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); +} + +static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev) +{ + rtw89_phy_ccx_top_setting_init(rtwdev); + rtw89_phy_ifs_clm_setting_init(rtwdev); +} + +void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + rtw89_phy_stat_init(rtwdev); + + rtw89_chip_bb_sethw(rtwdev); + + rtw89_phy_env_monitor_init(rtwdev); + rtw89_phy_dig_init(rtwdev); + rtw89_phy_cfo_init(rtwdev); + + rtw89_phy_init_rf_nctl(rtwdev); + rtw89_chip_rfk_init(rtwdev); + rtw89_load_txpwr_table(rtwdev, chip->byr_table); + rtw89_chip_set_txpwr_ctrl(rtwdev); + rtw89_chip_power_trim(rtwdev); +} + +void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) +{ + enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + u8 bss_color; + + if (!vif->bss_conf.he_support || !vif->bss_conf.assoc) + return; + + bss_color = vif->bss_conf.he_bss_color.color; + + rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0, 0x1, + phy_idx); + 
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID, + vif->bss_conf.aid, phy_idx); +} diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h new file mode 100644 index 00000000000000..370129345e0f75 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_PHY_H__ +#define __RTW89_PHY_H__ + +#include "core.h" + +#define RTW89_PHY_ADDR_OFFSET 0x10000 + +#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr) +#define PHY_HEADLINE_VALID 0xf +#define get_phy_target(addr) FIELD_GET(GENMASK(27, 0), addr) +#define get_phy_compare(rfe, cv) (FIELD_PREP(GENMASK(23, 16), rfe) | \ + FIELD_PREP(GENMASK(7, 0), cv)) + +#define get_phy_cond(addr) FIELD_GET(GENMASK(31, 28), addr) +#define get_phy_cond_rfe(addr) FIELD_GET(GENMASK(23, 16), addr) +#define get_phy_cond_pkg(addr) FIELD_GET(GENMASK(15, 8), addr) +#define get_phy_cond_cv(addr) FIELD_GET(GENMASK(7, 0), addr) +#define phy_div(a, b) ({typeof(b) _b = (b); (_b) ? ((a) / (_b)) : 0; }) +#define PHY_COND_BRANCH_IF 0x8 +#define PHY_COND_BRANCH_ELIF 0x9 +#define PHY_COND_BRANCH_ELSE 0xa +#define PHY_COND_BRANCH_END 0xb +#define PHY_COND_CHECK 0x4 +#define PHY_COND_DONT_CARE 0xff + +#define RA_MASK_CCK_RATES GENMASK_ULL(3, 0) +#define RA_MASK_OFDM_RATES GENMASK_ULL(11, 4) +#define RA_MASK_SUBCCK_RATES 0x5ULL +#define RA_MASK_SUBOFDM_RATES 0x10ULL +#define RA_MASK_HT_1SS_RATES GENMASK_ULL(19, 12) +#define RA_MASK_HT_2SS_RATES GENMASK_ULL(31, 24) +#define RA_MASK_HT_3SS_RATES GENMASK_ULL(43, 36) +#define RA_MASK_HT_4SS_RATES GENMASK_ULL(55, 48) +#define RA_MASK_HT_RATES GENMASK_ULL(55, 12) +#define RA_MASK_VHT_1SS_RATES GENMASK_ULL(21, 12) +#define RA_MASK_VHT_2SS_RATES GENMASK_ULL(33, 24) +#define RA_MASK_VHT_3SS_RATES GENMASK_ULL(45, 36) +#define RA_MASK_VHT_4SS_RATES GENMASK_ULL(57, 48) +#define RA_MASK_VHT_RATES GENMASK_ULL(57, 12) +#define RA_MASK_HE_1SS_RATES GENMASK_ULL(23, 12) +#define RA_MASK_HE_2SS_RATES GENMASK_ULL(35, 24) +#define RA_MASK_HE_3SS_RATES GENMASK_ULL(47, 36) +#define RA_MASK_HE_4SS_RATES GENMASK_ULL(59, 48) +#define RA_MASK_HE_RATES GENMASK_ULL(59, 12) + +#define CFO_TRK_ENABLE_TH (2 << 2) +#define CFO_TRK_STOP_TH_4 (30 << 2) +#define CFO_TRK_STOP_TH_3 (20 << 2) +#define CFO_TRK_STOP_TH_2 (10 << 2) +#define CFO_TRK_STOP_TH_1 (00 << 2) +#define CFO_TRK_STOP_TH (2 << 2) +#define CFO_SW_COMP_FINE_TUNE (2 << 2) +#define CFO_PERIOD_CNT 15 +#define CFO_TP_UPPER 100 +#define CFO_TP_LOWER 50 +#define CFO_COMP_PERIOD 250 +#define CFO_COMP_WEIGHT 8 +#define MAX_CFO_TOLERANCE 30 + +#define CCX_MAX_PERIOD 2097 +#define CCX_MAX_PERIOD_UNIT 32 +#define MS_TO_4US_RATIO 250 +#define ENV_MNTR_FAIL_DWORD 0xffffffff +#define ENV_MNTR_IFSCLM_HIS_MAX 127 +#define PERMIL 1000 +#define PERCENT 100 +#define IFS_CLM_TH0_UPPER 64 +#define IFS_CLM_TH_MUL 4 +#define IFS_CLM_TH_START_IDX 0 + +#define TIA0_GAIN_A 12 +#define TIA0_GAIN_G 16 +#define LNA0_GAIN (-24) +#define U4_MAX_BIT 3 +#define U8_MAX_BIT 7 +#define DIG_GAIN_SHIFT 2 +#define DIG_GAIN 8 + +#define LNA_IDX_MAX 6 +#define LNA_IDX_MIN 0 +#define TIA_IDX_MAX 1 +#define TIA_IDX_MIN 0 +#define RXB_IDX_MAX 31 +#define RXB_IDX_MIN 0 + +#define PD_TH_MAX_RSSI 70 +#define PD_TH_MIN_RSSI 8 +#define PD_TH_BW80_CMP_VAL 6 +#define PD_TH_BW40_CMP_VAL 3 +#define PD_TH_BW20_CMP_VAL 0 +#define PD_TH_CMP_VAL 3 +#define 
PD_TH_SB_FLTR_CMP_VAL 7 + +#define PHYSTS_MGNT BIT(RTW89_RX_TYPE_MGNT) +#define PHYSTS_CTRL BIT(RTW89_RX_TYPE_CTRL) +#define PHYSTS_DATA BIT(RTW89_RX_TYPE_DATA) +#define PHYSTS_RSVD BIT(RTW89_RX_TYPE_RSVD) +#define PPDU_FILTER_BITMAP (PHYSTS_MGNT | PHYSTS_DATA) + +enum rtw89_phy_c2h_ra_func { + RTW89_PHY_C2H_FUNC_STS_RPT, + RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT, + RTW89_PHY_C2H_FUNC_TXSTS, + RTW89_PHY_C2H_FUNC_RA_MAX, +}; + +enum rtw89_phy_c2h_class { + RTW89_PHY_C2H_CLASS_RUA, + RTW89_PHY_C2H_CLASS_RA, + RTW89_PHY_C2H_CLASS_DM, + RTW89_PHY_C2H_CLASS_BTC_MIN = 0x10, + RTW89_PHY_C2H_CLASS_BTC_MAX = 0x17, + RTW89_PHY_C2H_CLASS_MAX, +}; + +enum rtw89_env_monitor_result_level { + RTW89_PHY_ENV_MON_CCX_FAIL = 0, + RTW89_PHY_ENV_MON_NHM = BIT(0), + RTW89_PHY_ENV_MON_CLM = BIT(1), + RTW89_PHY_ENV_MON_FAHM = BIT(2), + RTW89_PHY_ENV_MON_IFS_CLM = BIT(3), + RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4), +}; + +#define CCX_US_BASE_RATIO 4 +enum rtw89_ccx_unit { + RTW89_CCX_4_US = 0, + RTW89_CCX_8_US = 1, + RTW89_CCX_16_US = 2, + RTW89_CCX_32_US = 3 +}; + +enum rtw89_dig_gain_type { + RTW89_DIG_GAIN_LNA_G = 0, + RTW89_DIG_GAIN_TIA_G = 1, + RTW89_DIG_GAIN_LNA_A = 2, + RTW89_DIG_GAIN_TIA_A = 3, + RTW89_DIG_GAIN_MAX = 4 +}; + +enum rtw89_dig_gain_lna_idx { + RTW89_DIG_GAIN_LNA_IDX1 = 1, + RTW89_DIG_GAIN_LNA_IDX2 = 2, + RTW89_DIG_GAIN_LNA_IDX3 = 3, + RTW89_DIG_GAIN_LNA_IDX4 = 4, + RTW89_DIG_GAIN_LNA_IDX5 = 5, + RTW89_DIG_GAIN_LNA_IDX6 = 6 +}; + +enum rtw89_dig_gain_tia_idx { + RTW89_DIG_GAIN_TIA_IDX0 = 0, + RTW89_DIG_GAIN_TIA_IDX1 = 1 +}; + +struct rtw89_txpwr_byrate_cfg { + enum rtw89_band band; + enum rtw89_nss nss; + enum rtw89_rate_section rs; + u8 shf; + u8 len; + u32 data; +}; + +#define DELTA_SWINGIDX_SIZE 30 + +struct rtw89_txpwr_track_cfg { + const u8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE]; + const u8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE]; + const u8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE]; + const u8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE]; + const u8 *delta_swingidx_2gb_n; + const u8 *delta_swingidx_2gb_p; + const u8 *delta_swingidx_2ga_n; + const u8 *delta_swingidx_2ga_p; + const u8 *delta_swingidx_2g_cck_b_n; + const u8 *delta_swingidx_2g_cck_b_p; + const u8 *delta_swingidx_2g_cck_a_n; + const u8 *delta_swingidx_2g_cck_a_p; +}; + +struct rtw89_phy_dig_gain_cfg { + const struct rtw89_reg_def *table; + u8 size; +}; + +struct rtw89_phy_dig_gain_table { + const struct rtw89_phy_dig_gain_cfg *cfg_lna_g; + const struct rtw89_phy_dig_gain_cfg *cfg_tia_g; + const struct rtw89_phy_dig_gain_cfg *cfg_lna_a; + const struct rtw89_phy_dig_gain_cfg *cfg_tia_a; +}; + +struct rtw89_phy_reg3_tbl { + const struct rtw89_reg3_def *reg3; + int size; +}; + +#define DECLARE_PHY_REG3_TBL(_name) \ +const struct rtw89_phy_reg3_tbl _name ## _tbl = { \ + .reg3 = _name, \ + .size = ARRAY_SIZE(_name), \ +} + +static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev, + u32 addr, u8 data) +{ + rtw89_write8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); +} + +static inline void rtw89_phy_write16(struct rtw89_dev *rtwdev, + u32 addr, u16 data) +{ + rtw89_write16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); +} + +static inline void rtw89_phy_write32(struct rtw89_dev *rtwdev, + u32 addr, u32 data) +{ + rtw89_write32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); +} + +static inline void rtw89_phy_write32_set(struct rtw89_dev *rtwdev, + u32 addr, u32 bits) +{ + rtw89_write32_set(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); +} + +static inline void rtw89_phy_write32_clr(struct rtw89_dev *rtwdev, + u32 addr, u32 bits) +{ + 
rtw89_write32_clr(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); +} + +static inline void rtw89_phy_write32_mask(struct rtw89_dev *rtwdev, + u32 addr, u32 mask, u32 data) +{ + rtw89_write32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask, data); +} + +static inline u8 rtw89_phy_read8(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtw89_read8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); +} + +static inline u16 rtw89_phy_read16(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtw89_read16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); +} + +static inline u32 rtw89_phy_read32(struct rtw89_dev *rtwdev, u32 addr) +{ + return rtw89_read32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); +} + +static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev, + u32 addr, u32 mask) +{ + return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask); +} + +void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev, + const struct rtw89_phy_reg3_tbl *tbl); +u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_bandwidth dbw); +u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask); +bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, + u32 addr, u32 mask, u32 data); +void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev); +void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev); +void rtw89_phy_dm_init(struct rtw89_dev *rtwdev); +void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, + u32 data, enum rtw89_phy_idx phy_idx); +void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl); +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_rate_desc *rate_desc); +void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit *lmt, + u8 ntx); +void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru *lmt_ru, + u8 ntx); +s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, + u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch); +void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); +void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); +void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); +void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask); +void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, + u32 len, u8 class, u8 func); +void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev); +void rtw89_phy_cfo_track_work(struct work_struct *work); +void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, + struct rtw89_rx_phy_ppdu *phy_ppdu); +void rtw89_phy_stat_track(struct rtw89_dev *rtwdev); +void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev); +void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask, + u32 val); +void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev); +void rtw89_phy_dig(struct rtw89_dev *rtwdev); +void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c new file mode 100644 index 00000000000000..7eaa01e41ef24a --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "coex.h" +#include "core.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" 
+#include "ps.h" +#include "reg.h" +#include "util.h" + +static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid) +{ + u32 pwr_en_bit = 0xE; + u32 chk_msk = pwr_en_bit << (4 * macid); + u32 polling; + int ret; + + ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling, + 1000, 50000, false, rtwdev, + R_AX_PPWRBIT_SETTING, chk_msk); + if (ret) { + rtw89_info(rtwdev, "rtw89: failed to leave lps state\n"); + return -EBUSY; + } + + return 0; +} + +static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev) +{ + if (!rtwdev->ps_mode) + return; + + if (test_and_set_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) + return; + + rtw89_mac_power_mode_change(rtwdev, true); +} + +void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) +{ + if (!rtwdev->ps_mode) + return; + + if (test_and_clear_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) + rtw89_mac_power_mode_change(rtwdev, false); +} + +static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id) +{ + struct rtw89_lps_parm lps_param = { + .macid = mac_id, + .psmode = RTW89_MAC_AX_PS_MODE_LEGACY, + .lastrpwm = RTW89_LAST_RPWM_PS, + }; + + rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL); + rtw89_fw_h2c_lps_parm(rtwdev, &lps_param); +} + +static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id) +{ + struct rtw89_lps_parm lps_param = { + .macid = mac_id, + .psmode = RTW89_MAC_AX_PS_MODE_ACTIVE, + .lastrpwm = RTW89_LAST_RPWM_ACTIVE, + }; + + rtw89_fw_h2c_lps_parm(rtwdev, &lps_param); + rtw89_fw_leave_lps_check(rtwdev, 0); + rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON); +} + +void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) +{ + lockdep_assert_held(&rtwdev->mutex); + + __rtw89_leave_ps_mode(rtwdev); +} + +void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id) +{ + lockdep_assert_held(&rtwdev->mutex); + + if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) + return; + + __rtw89_enter_lps(rtwdev, mac_id); + __rtw89_enter_ps_mode(rtwdev); +} + +static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION) + return; + + __rtw89_leave_ps_mode(rtwdev); + __rtw89_leave_lps(rtwdev, rtwvif->mac_id); +} + +void rtw89_leave_lps(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + + lockdep_assert_held(&rtwdev->mutex); + + if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) + return; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_leave_lps_vif(rtwdev, rtwvif); +} + +void rtw89_enter_ips(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + + set_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags); + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_mac_vif_deinit(rtwdev, rtwvif); + + rtw89_core_stop(rtwdev); +} + +void rtw89_leave_ips(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + int ret; + + ret = rtw89_core_start(rtwdev); + if (ret) + rtw89_err(rtwdev, "failed to leave idle state\n"); + + rtw89_set_channel(rtwdev); + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_mac_vif_init(rtwdev, rtwvif); + + clear_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags); +} + +void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl) +{ + if (btc_ctrl) + rtw89_leave_lps(rtwdev); +} diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h new file mode 100644 index 00000000000000..a184b68994aa42 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/ps.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek 
Corporation + */ + +#ifndef __RTW89_PS_H_ +#define __RTW89_PS_H_ + +void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id); +void rtw89_leave_lps(struct rtw89_dev *rtwdev); +void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev); +void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev); +void rtw89_enter_ips(struct rtw89_dev *rtwdev); +void rtw89_leave_ips(struct rtw89_dev *rtwdev); +void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h new file mode 100644 index 00000000000000..365d8c8ce57b9d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -0,0 +1,2159 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_REG_H__ +#define __RTW89_REG_H__ + +#define R_AX_SYS_WL_EFUSE_CTRL 0x000A +#define B_AX_AUTOLOAD_SUS BIT(5) + +#define R_AX_SYS_FUNC_EN 0x0002 +#define B_AX_FEN_BB_GLB_RSTN BIT(1) +#define B_AX_FEN_BBRSTB BIT(0) + +#define R_AX_SYS_PW_CTRL 0x0004 +#define B_AX_PSUS_OFF_CAPC_EN BIT(14) + +#define R_AX_SYS_CLK_CTRL 0x0008 +#define B_AX_CPU_CLK_EN BIT(14) + +#define R_AX_RSV_CTRL 0x001C +#define B_AX_R_DIS_PRST BIT(6) +#define B_AX_WLOCK_1C_BIT6 BIT(5) + +#define R_AX_EFUSE_CTRL_1 0x0038 +#define B_AX_EF_PGPD_MASK GENMASK(30, 28) +#define B_AX_EF_RDT BIT(27) +#define B_AX_EF_VDDQST_MASK GENMASK(26, 24) +#define B_AX_EF_PGTS_MASK GENMASK(23, 20) +#define B_AX_EF_PD_DIS BIT(11) +#define B_AX_EF_POR BIT(10) +#define B_AX_EF_CELL_SEL_MASK GENMASK(9, 8) + +#define R_AX_SPSLDO_ON_CTRL0 0x0200 +#define B_AX_OCP_L1_MASK GENMASK(15, 13) + +#define R_AX_EFUSE_CTRL 0x0030 +#define B_AX_EF_MODE_SEL_MASK GENMASK(31, 30) +#define B_AX_EF_RDY BIT(29) +#define B_AX_EF_COMP_RESULT BIT(28) +#define B_AX_EF_ADDR_MASK GENMASK(26, 16) +#define B_AX_EF_DATA_MASK GENMASK(15, 0) + +#define R_AX_GPIO_MUXCFG 0x0040 +#define B_AX_BOOT_MODE BIT(19) +#define B_AX_WL_EECS_EXT_32K_SEL BIT(18) +#define B_AX_WL_SEC_BONDING_OPT_STS BIT(17) +#define B_AX_SECSIC_SEL BIT(16) +#define B_AX_ENHTP BIT(14) +#define B_AX_BT_AOD_GPIO3 BIT(13) +#define B_AX_ENSIC BIT(12) +#define B_AX_SIC_SWRST BIT(11) +#define B_AX_PO_WIFI_PTA_PINS BIT(10) +#define B_AX_PO_BT_PTA_PINS BIT(9) +#define B_AX_ENUARTTX BIT(8) +#define B_AX_BTMODE_MASK GENMASK(7, 6) +#define MAC_AX_BT_MODE_0_3 0 +#define MAC_AX_BT_MODE_2 2 +#define B_AX_ENBT BIT(5) +#define B_AX_EROM_EN BIT(4) +#define B_AX_ENUARTRX BIT(2) +#define B_AX_GPIOSEL_MASK GENMASK(1, 0) + +#define R_AX_DBG_CTRL 0x0058 +#define B_AX_DBG_SEL1_4BIT GENMASK(31, 30) +#define B_AX_DBG_SEL1_16BIT BIT(27) +#define B_AX_DBG_SEL1 GENMASK(23, 16) +#define B_AX_DBG_SEL0_4BIT GENMASK(15, 14) +#define B_AX_DBG_SEL0_16BIT BIT(11) +#define B_AX_DBG_SEL0 GENMASK(7, 0) + +#define R_AX_SYS_SDIO_CTRL 0x0070 +#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15) +#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14) +#define B_AX_PCIE_AUXCLK_GATE BIT(11) +#define B_AX_LTE_MUX_CTRL_PATH BIT(26) + +#define R_AX_PLATFORM_ENABLE 0x0088 +#define B_AX_WCPU_EN BIT(1) + +#define R_AX_SCOREBOARD 0x00AC +#define B_AX_TOGGLE BIT(31) +#define B_MAC_AX_SB_FW_MASK GENMASK(30, 24) +#define B_MAC_AX_SB_DRV_MASK GENMASK(23, 0) +#define B_MAC_AX_BTGS1_NOTIFY BIT(0) +#define MAC_AX_NOTIFY_TP_MAJOR 0x81 +#define MAC_AX_NOTIFY_PWR_MAJOR 0x80 + +#define R_AX_DBG_PORT_SEL 0x00C0 +#define B_AX_DEBUG_ST_MASK GENMASK(31, 0) + +#define R_AX_SYS_CFG1 0x00F0 +#define B_AX_CHIP_VER_MASK GENMASK(15, 12) + +#define R_AX_SYS_STATUS1 0x00F4 +#define 
B_AX_SEL_0XC0_MASK GENMASK(17, 16) + +#define R_AX_HALT_H2C_CTRL 0x0160 +#define R_AX_HALT_H2C 0x0168 +#define B_AX_HALT_H2C_TRIGGER BIT(0) +#define R_AX_HALT_C2H_CTRL 0x0164 +#define R_AX_HALT_C2H 0x016C + +#define R_AX_WCPU_FW_CTRL 0x01E0 +#define B_AX_WCPU_FWDL_STS_MASK GENMASK(7, 5) +#define B_AX_FWDL_PATH_RDY BIT(2) +#define B_AX_H2C_PATH_RDY BIT(1) +#define B_AX_WCPU_FWDL_EN BIT(0) + +#define R_AX_RPWM 0x01E4 +#define R_AX_PCIE_HRPWM 0x10C0 +#define PS_RPWM_TOGGLE BIT(15) +#define PS_RPWM_ACK BIT(14) +#define PS_RPWM_SEQ_NUM GENMASK(13, 12) +#define PS_RPWM_STATE 0x7 +#define RPWM_SEQ_NUM_MAX 3 +#define PS_CPWM_SEQ_NUM GENMASK(13, 12) +#define PS_CPWM_RSP_SEQ_NUM GENMASK(9, 8) +#define PS_CPWM_STATE GENMASK(2, 0) +#define CPWM_SEQ_NUM_MAX 3 + +#define R_AX_BOOT_REASON 0x01E6 +#define B_AX_BOOT_REASON_MASK GENMASK(2, 0) + +#define R_AX_LDM 0x01E8 +#define B_AX_EN_32K BIT(31) + +#define R_AX_UDM0 0x01F0 +#define R_AX_UDM1 0x01F4 +#define R_AX_UDM2 0x01F8 +#define R_AX_UDM3 0x01FC + +#define R_AX_XTAL_ON_CTRL0 0x0280 +#define B_AX_XTAL_SC_LPS BIT(31) +#define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17) +#define B_AX_XTAL_SC_XI_MASK GENMASK(16, 10) +#define B_AX_XTAL_SC_MASK GENMASK(6, 0) + +#define R_AX_GPIO0_7_FUNC_SEL 0x02D0 + +#define R_AX_WLRF_CTRL 0x02F0 +#define B_AX_WLRF1_CTRL_7 BIT(15) +#define B_AX_WLRF1_CTRL_1 BIT(9) +#define B_AX_WLRF_CTRL_7 BIT(7) +#define B_AX_WLRF_CTRL_1 BIT(1) + +#define R_AX_IC_PWR_STATE 0x03F0 +#define B_AX_WHOLE_SYS_PWR_STE_MASK GENMASK(25, 16) +#define B_AX_WLMAC_PWR_STE_MASK GENMASK(9, 8) +#define B_AX_UART_HCISYS_PWR_STE_MASK GENMASK(7, 6) +#define B_AX_SDIO_HCISYS_PWR_STE_MASK GENMASK(5, 4) +#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2) +#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0) + +#define R_AX_FILTER_MODEL_ADDR 0x0C04 + +#define R_AX_PCIE_DBG_CTRL 0x11C0 +#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16) +#define B_AX_DBG_SEL_MASK GENMASK(15, 13) +#define B_AX_PCIE_DBG_SEL BIT(12) +#define B_AX_MRD_TIMEOUT_EN BIT(10) +#define B_AX_ASFF_FULL_NO_STK BIT(1) +#define B_AX_EN_STUCK_DBG BIT(0) + +#define R_AX_PHYREG_SET 0x8040 +#define PHYREG_SET_ALL_CYCLE 0x8 + +#define R_AX_HD0IMR 0x8110 +#define B_AX_WDT_PTFM_INT_EN BIT(5) +#define B_AX_CPWM_INT_EN BIT(2) +#define B_AX_GT3_INT_EN BIT(1) +#define B_AX_C2H_INT_EN BIT(0) +#define R_AX_HD0ISR 0x8114 +#define B_AX_C2H_INT BIT(0) + +#define R_AX_H2CREG_DATA0 0x8140 +#define R_AX_H2CREG_DATA1 0x8144 +#define R_AX_H2CREG_DATA2 0x8148 +#define R_AX_H2CREG_DATA3 0x814C +#define R_AX_C2HREG_DATA0 0x8150 +#define R_AX_C2HREG_DATA1 0x8154 +#define R_AX_C2HREG_DATA2 0x8158 +#define R_AX_C2HREG_DATA3 0x815C +#define R_AX_H2CREG_CTRL 0x8160 +#define B_AX_H2CREG_TRIGGER BIT(0) +#define R_AX_C2HREG_CTRL 0x8164 +#define B_AX_C2HREG_TRIGGER BIT(0) +#define R_AX_CPWM 0x8170 + +#define R_AX_HCI_FUNC_EN 0x8380 +#define B_AX_HCI_RXDMA_EN BIT(1) +#define B_AX_HCI_TXDMA_EN BIT(0) + +#define R_AX_BOOT_DBG 0x83F0 + +#define R_AX_DMAC_FUNC_EN 0x8400 +#define B_AX_MAC_FUNC_EN BIT(30) +#define B_AX_DMAC_FUNC_EN BIT(29) +#define B_AX_MPDU_PROC_EN BIT(28) +#define B_AX_WD_RLS_EN BIT(27) +#define B_AX_DLE_WDE_EN BIT(26) +#define B_AX_TXPKT_CTRL_EN BIT(25) +#define B_AX_STA_SCH_EN BIT(24) +#define B_AX_DLE_PLE_EN BIT(23) +#define B_AX_PKT_BUF_EN BIT(22) +#define B_AX_DMAC_TBL_EN BIT(21) +#define B_AX_PKT_IN_EN BIT(20) +#define B_AX_DLE_CPUIO_EN BIT(19) +#define B_AX_DISPATCHER_EN BIT(18) +#define B_AX_MAC_SEC_EN BIT(16) + +#define R_AX_DMAC_CLK_EN 0x8404 +#define B_AX_WD_RLS_CLK_EN BIT(27) +#define B_AX_DLE_WDE_CLK_EN BIT(26) 
+#define B_AX_TXPKT_CTRL_CLK_EN BIT(25) +#define B_AX_STA_SCH_CLK_EN BIT(24) +#define B_AX_DLE_PLE_CLK_EN BIT(23) +#define B_AX_PKT_IN_CLK_EN BIT(20) +#define B_AX_DLE_CPUIO_CLK_EN BIT(19) +#define B_AX_DISPATCHER_CLK_EN BIT(18) +#define B_AX_MAC_SEC_CLK_EN BIT(16) + +#define PCI_LTR_IDLE_TIMER_1US 0 +#define PCI_LTR_IDLE_TIMER_10US 1 +#define PCI_LTR_IDLE_TIMER_100US 2 +#define PCI_LTR_IDLE_TIMER_200US 3 +#define PCI_LTR_IDLE_TIMER_400US 4 +#define PCI_LTR_IDLE_TIMER_800US 5 +#define PCI_LTR_IDLE_TIMER_1_6MS 6 +#define PCI_LTR_IDLE_TIMER_3_2MS 7 +#define PCI_LTR_IDLE_TIMER_R_ERR 0xFD +#define PCI_LTR_IDLE_TIMER_DEF 0xFE +#define PCI_LTR_IDLE_TIMER_IGNORE 0xFF + +#define PCI_LTR_SPC_10US 0 +#define PCI_LTR_SPC_100US 1 +#define PCI_LTR_SPC_500US 2 +#define PCI_LTR_SPC_1MS 3 +#define PCI_LTR_SPC_R_ERR 0xFD +#define PCI_LTR_SPC_DEF 0xFE +#define PCI_LTR_SPC_IGNORE 0xFF + +#define R_AX_LTR_CTRL_0 0x8410 +#define B_AX_LTR_SPACE_IDX_MASK GENMASK(13, 12) +#define B_AX_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8) +#define B_AX_APP_LTR_ACT BIT(5) +#define B_AX_APP_LTR_IDLE BIT(4) +#define B_AX_LTR_EN BIT(1) +#define B_AX_LTR_HW_EN BIT(0) + +#define R_AX_LTR_CTRL_1 0x8414 +#define B_AX_LTR_RX1_TH_MASK GENMASK(27, 16) +#define B_AX_LTR_RX0_TH_MASK GENMASK(11, 0) + +#define R_AX_LTR_IDLE_LATENCY 0x8418 + +#define R_AX_LTR_ACTIVE_LATENCY 0x841C + +#define R_AX_SER_DBG_INFO 0x8424 +#define B_AX_L0_TO_L1_EVENT_MASK GENMASK(31, 28) + +#define R_AX_DLE_EMPTY0 0x8430 +#define B_AX_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26) +#define B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX BIT(25) +#define B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU BIT(24) +#define B_AX_PLE_EMPTY_QTA_DMAC_H2C BIT(23) +#define B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL BIT(22) +#define B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL BIT(21) +#define B_AX_WDE_EMPTY_QTA_DMAC_CPUIO BIT(20) +#define B_AX_WDE_EMPTY_QTA_DMAC_PKTIN BIT(19) +#define B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU BIT(18) +#define B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU BIT(17) +#define B_AX_WDE_EMPTY_QTA_DMAC_HIF BIT(16) +#define B_AX_WDE_EMPTY_QUE_DMAC_PKTIN BIT(10) +#define B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX BIT(9) +#define B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX BIT(8) +#define B_AX_WDE_EMPTY_QUE_OTHERS BIT(7) +#define B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 BIT(4) +#define B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 BIT(3) +#define B_AX_WDE_EMPTY_QUE_CMAC1_MBH BIT(2) +#define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1) +#define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0) + +#define R_AX_DMAC_ERR_ISR 0x8524 +#define B_AX_DLE_CPUIO_ERR_FLAG BIT(10) +#define B_AX_APB_BRIDGE_ERR_FLAG BIT(9) +#define B_AX_DISPATCH_ERR_FLAG BIT(8) +#define B_AX_PKTIN_ERR_FLAG BIT(7) +#define B_AX_PLE_DLE_ERR_FLAG BIT(6) +#define B_AX_TXPKTCTRL_ERR_FLAG BIT(5) +#define B_AX_WDE_DLE_ERR_FLAG BIT(4) +#define B_AX_STA_SCHEDULER_ERR_FLAG BIT(3) +#define B_AX_MPDU_ERR_FLAG BIT(2) +#define B_AX_WSEC_ERR_FLAG BIT(1) +#define B_AX_WDRLS_ERR_FLAG BIT(0) + +#define R_AX_DISPATCHER_GLOBAL_SETTING_0 0x8800 +#define B_AX_PL_PAGE_128B_SEL BIT(9) +#define B_AX_WD_PAGE_64B_SEL BIT(8) +#define R_AX_OTHER_DISPATCHER_ERR_ISR 0x8804 +#define R_AX_HOST_DISPATCHER_ERR_ISR 0x8808 +#define R_AX_CPU_DISPATCHER_ERR_ISR 0x880C +#define R_AX_TX_ADDRESS_INFO_MODE_SETTING 0x8810 +#define B_AX_HOST_ADDR_INFO_8B_SEL BIT(0) + +#define R_AX_HOST_DISPATCHER_ERR_IMR 0x8850 +#define B_AX_HDT_OFFSET_UNMATCH_INT_EN BIT(7) +#define B_AX_HDT_PKT_FAIL_DBG_INT_EN BIT(2) + +#define R_AX_CPU_DISPATCHER_ERR_IMR 0x8854 +#define B_AX_CPU_SHIFT_EN_ERR_INT_EN BIT(25) + +#define R_AX_OTHER_DISPATCHER_ERR_IMR 0x8858 + +#define R_AX_HCI_FC_CTRL 0x8A00 +#define 
B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10) +#define B_AX_HCI_FC_WP_CH811_FULL_COND_MASK GENMASK(9, 8) +#define B_AX_HCI_FC_WP_CH07_FULL_COND_MASK GENMASK(7, 6) +#define B_AX_HCI_FC_WD_FULL_COND_MASK GENMASK(5, 4) +#define B_AX_HCI_FC_CH12_EN BIT(3) +#define B_AX_HCI_FC_MODE_MASK GENMASK(2, 1) +#define B_AX_HCI_FC_EN BIT(0) + +#define R_AX_CH_PAGE_CTRL 0x8A04 +#define B_AX_PREC_PAGE_CH12_MASK GENMASK(24, 16) +#define B_AX_PREC_PAGE_CH011_MASK GENMASK(8, 0) + +#define B_AX_MAX_PG_MASK GENMASK(28, 16) +#define B_AX_MIN_PG_MASK GENMASK(12, 0) +#define B_AX_GRP BIT(31) +#define R_AX_ACH0_PAGE_CTRL 0x8A10 +#define R_AX_ACH1_PAGE_CTRL 0x8A14 +#define R_AX_ACH2_PAGE_CTRL 0x8A18 +#define R_AX_ACH3_PAGE_CTRL 0x8A1C +#define R_AX_ACH4_PAGE_CTRL 0x8A20 +#define R_AX_ACH5_PAGE_CTRL 0x8A24 +#define R_AX_ACH6_PAGE_CTRL 0x8A28 +#define R_AX_ACH7_PAGE_CTRL 0x8A2C +#define R_AX_CH8_PAGE_CTRL 0x8A30 +#define R_AX_CH9_PAGE_CTRL 0x8A34 +#define R_AX_CH10_PAGE_CTRL 0x8A38 +#define R_AX_CH11_PAGE_CTRL 0x8A3C + +#define B_AX_AVAL_PG_MASK GENMASK(27, 16) +#define B_AX_USE_PG_MASK GENMASK(12, 0) +#define R_AX_ACH0_PAGE_INFO 0x8A50 +#define R_AX_ACH1_PAGE_INFO 0x8A54 +#define R_AX_ACH2_PAGE_INFO 0x8A58 +#define R_AX_ACH3_PAGE_INFO 0x8A5C +#define R_AX_ACH4_PAGE_INFO 0x8A60 +#define R_AX_ACH5_PAGE_INFO 0x8A64 +#define R_AX_ACH6_PAGE_INFO 0x8A68 +#define R_AX_ACH7_PAGE_INFO 0x8A6C +#define R_AX_CH8_PAGE_INFO 0x8A70 +#define R_AX_CH9_PAGE_INFO 0x8A74 +#define R_AX_CH10_PAGE_INFO 0x8A78 +#define R_AX_CH11_PAGE_INFO 0x8A7C +#define R_AX_CH12_PAGE_INFO 0x8A80 + +#define R_AX_PUB_PAGE_INFO3 0x8A8C +#define B_AX_G1_AVAL_PG_MASK GENMASK(28, 16) +#define B_AX_G0_AVAL_PG_MASK GENMASK(12, 0) + +#define R_AX_PUB_PAGE_CTRL1 0x8A90 +#define B_AX_PUBPG_G1_MASK GENMASK(28, 16) +#define B_AX_PUBPG_G0_MASK GENMASK(12, 0) + +#define R_AX_PUB_PAGE_CTRL2 0x8A94 +#define B_AX_PUBPG_ALL_MASK GENMASK(12, 0) + +#define R_AX_PUB_PAGE_INFO1 0x8A98 +#define B_AX_G1_USE_PG_MASK GENMASK(28, 16) +#define B_AX_G0_USE_PG_MASK GENMASK(12, 0) + +#define R_AX_PUB_PAGE_INFO2 0x8A9C +#define B_AX_PUB_AVAL_PG_MASK GENMASK(12, 0) + +#define R_AX_WP_PAGE_CTRL1 0x8AA0 +#define B_AX_PREC_PAGE_WP_CH811_MASK GENMASK(24, 16) +#define B_AX_PREC_PAGE_WP_CH07_MASK GENMASK(8, 0) + +#define R_AX_WP_PAGE_CTRL2 0x8AA4 +#define B_AX_WP_THRD_MASK GENMASK(12, 0) + +#define R_AX_WP_PAGE_INFO1 0x8AA8 +#define B_AX_WP_AVAL_PG_MASK GENMASK(28, 16) + +#define R_AX_WDE_PKTBUF_CFG 0x8C08 +#define B_AX_WDE_START_BOUND_MASK GENMASK(13, 8) +#define B_AX_WDE_PAGE_SEL_MASK GENMASK(1, 0) +#define B_AX_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16) +#define R_AX_WDE_ERR_FLAG_CFG 0x8C34 +#define R_AX_WDE_ERR_IMR 0x8C38 +#define R_AX_WDE_ERR_ISR 0x8C3C + +#define B_AX_WDE_MAX_SIZE_MASK GENMASK(27, 16) +#define B_AX_WDE_MIN_SIZE_MASK GENMASK(11, 0) +#define R_AX_WDE_QTA0_CFG 0x8C40 +#define R_AX_WDE_QTA1_CFG 0x8C44 +#define R_AX_WDE_QTA2_CFG 0x8C48 +#define R_AX_WDE_QTA3_CFG 0x8C4C +#define R_AX_WDE_QTA4_CFG 0x8C50 + +#define B_AX_DLE_PUB_PGNUM GENMASK(12, 0) +#define B_AX_DLE_FREE_HEADPG GENMASK(11, 0) +#define B_AX_DLE_FREE_TAILPG GENMASK(27, 16) +#define B_AX_DLE_USE_PGNUM GENMASK(27, 16) +#define B_AX_DLE_RSV_PGNUM GENMASK(11, 0) +#define B_AX_DLE_QEMPTY_GRP GENMASK(31, 0) + +#define R_AX_WDE_INI_STATUS 0x8D00 +#define B_AX_WDE_Q_MGN_INI_RDY BIT(1) +#define B_AX_WDE_BUF_MGN_INI_RDY BIT(0) +#define WDE_MGN_INI_RDY (B_AX_WDE_Q_MGN_INI_RDY | B_AX_WDE_BUF_MGN_INI_RDY) +#define R_AX_WDE_DBG_FUN_INTF_CTL 0x8D10 +#define B_AX_WDE_DFI_ACTIVE BIT(31) +#define B_AX_WDE_DFI_TRGSEL_MASK GENMASK(19, 
16) +#define B_AX_WDE_DFI_ADDR_MASK GENMASK(15, 0) +#define R_AX_WDE_DBG_FUN_INTF_DATA 0x8D14 +#define B_AX_WDE_DFI_DATA_MASK GENMASK(31, 0) + +#define R_AX_PLE_PKTBUF_CFG 0x9008 +#define B_AX_PLE_START_BOUND_MASK GENMASK(13, 8) +#define B_AX_PLE_PAGE_SEL_MASK GENMASK(1, 0) +#define B_AX_PLE_FREE_PAGE_NUM_MASK GENMASK(28, 16) +#define R_AX_PLE_ERR_FLAG_CFG 0x9034 + +#define R_AX_PLE_ERR_IMR 0x9038 +#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN BIT(5) + +#define R_AX_PLE_ERR_FLAG_ISR 0x903C +#define B_AX_PLE_MAX_SIZE_MASK GENMASK(27, 16) +#define B_AX_PLE_MIN_SIZE_MASK GENMASK(11, 0) +#define R_AX_PLE_QTA0_CFG 0x9040 +#define R_AX_PLE_QTA1_CFG 0x9044 +#define R_AX_PLE_QTA2_CFG 0x9048 +#define R_AX_PLE_QTA3_CFG 0x904C +#define R_AX_PLE_QTA4_CFG 0x9050 +#define R_AX_PLE_QTA5_CFG 0x9054 +#define R_AX_PLE_QTA6_CFG 0x9058 +#define B_AX_PLE_Q6_MAX_SIZE_MASK GENMASK(27, 16) +#define B_AX_PLE_Q6_MIN_SIZE_MASK GENMASK(11, 0) +#define R_AX_PLE_QTA7_CFG 0x905C +#define R_AX_PLE_QTA8_CFG 0x9060 +#define R_AX_PLE_QTA9_CFG 0x9064 +#define R_AX_PLE_QTA10_CFG 0x9068 + +#define R_AX_PLE_INI_STATUS 0x9100 +#define B_AX_PLE_Q_MGN_INI_RDY BIT(1) +#define B_AX_PLE_BUF_MGN_INI_RDY BIT(0) +#define PLE_MGN_INI_RDY (B_AX_PLE_Q_MGN_INI_RDY | B_AX_PLE_BUF_MGN_INI_RDY) +#define R_AX_PLE_DBG_FUN_INTF_CTL 0x9110 +#define B_AX_PLE_DFI_ACTIVE BIT(31) +#define B_AX_PLE_DFI_TRGSEL_MASK GENMASK(19, 16) +#define B_AX_PLE_DFI_ADDR_MASK GENMASK(15, 0) +#define R_AX_PLE_DBG_FUN_INTF_DATA 0x9114 +#define B_AX_PLE_DFI_DATA_MASK GENMASK(31, 0) + +#define R_AX_WDRLS_CFG 0x9408 +#define B_AX_RLSRPT_BUFREQ_TO_MASK GENMASK(15, 8) +#define B_AX_WDRLS_MODE_MASK GENMASK(1, 0) + +#define R_AX_RLSRPT0_CFG0 0x9410 +#define B_AX_RLSRPT0_FLTR_MAP_MASK GENMASK(27, 24) +#define B_AX_RLSRPT0_PKTTYPE_MASK GENMASK(19, 16) +#define B_AX_RLSRPT0_PID_MASK GENMASK(10, 8) +#define B_AX_RLSRPT0_QID_MASK GENMASK(5, 0) + +#define R_AX_RLSRPT0_CFG1 0x9414 +#define B_AX_RLSRPT0_TO_MASK GENMASK(23, 16) +#define B_AX_RLSRPT0_AGGNUM_MASK GENMASK(7, 0) + +#define R_AX_WDRLS_ERR_IMR 0x9430 +#define B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN BIT(13) +#define B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN BIT(12) +#define B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN BIT(9) +#define B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN BIT(8) +#define B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN BIT(5) +#define B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN BIT(4) +#define B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2) +#define B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1) +#define B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0) +#define R_AX_WDRLS_ERR_ISR 0x9434 + +#define R_AX_BBRPT_COM_ERR_IMR_ISR 0x960C +#define R_AX_BBRPT_CHINFO_ERR_IMR_ISR 0x962C +#define R_AX_BBRPT_DFS_ERR_IMR_ISR 0x963C +#define R_AX_LA_ERRFLAG 0x966C + +#define R_AX_WD_BUF_REQ 0x9800 +#define R_AX_PL_BUF_REQ 0x9820 +#define B_AX_WD_BUF_REQ_EXEC BIT(31) +#define B_AX_WD_BUF_REQ_QUOTA_ID_MASK GENMASK(23, 16) +#define B_AX_WD_BUF_REQ_LEN_MASK GENMASK(15, 0) + +#define R_AX_WD_BUF_STATUS 0x9804 +#define R_AX_PL_BUF_STATUS 0x9824 +#define B_AX_WD_BUF_STAT_DONE BIT(31) +#define B_AX_WD_BUF_STAT_PKTID_MASK GENMASK(11, 0) + +#define R_AX_WD_CPUQ_OP_0 0x9810 +#define R_AX_PL_CPUQ_OP_0 0x9830 +#define B_AX_WD_CPUQ_OP_EXEC BIT(31) +#define B_AX_CPUQ_OP_CMD_TYPE_MASK GENMASK(27, 24) +#define B_AX_CPUQ_OP_MACID_MASK GENMASK(23, 16) +#define B_AX_CPUQ_OP_PKTNUM_MASK GENMASK(7, 0) + +#define R_AX_WD_CPUQ_OP_1 0x9814 +#define R_AX_PL_CPUQ_OP_1 0x9834 +#define B_AX_CPUQ_OP_SRC_PID_MASK GENMASK(24, 22) +#define B_AX_CPUQ_OP_SRC_QID_MASK GENMASK(21, 16) +#define B_AX_CPUQ_OP_DST_PID_MASK 
GENMASK(8, 6) +#define B_AX_CPUQ_OP_DST_QID_MASK GENMASK(5, 0) + +#define R_AX_WD_CPUQ_OP_2 0x9818 +#define R_AX_PL_CPUQ_OP_2 0x9838 +#define B_AX_WD_CPUQ_OP_STRT_PKTID_MASK GENMASK(27, 16) +#define B_AX_WD_CPUQ_OP_END_PKTID_MASK GENMASK(11, 0) + +#define R_AX_WD_CPUQ_OP_STATUS 0x981C +#define R_AX_PL_CPUQ_OP_STATUS 0x983C +#define B_AX_WD_CPUQ_OP_STAT_DONE BIT(31) +#define B_AX_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0) +#define R_AX_CPUIO_ERR_IMR 0x9840 +#define R_AX_CPUIO_ERR_ISR 0x9844 + +#define R_AX_SEC_ERR_IMR_ISR 0x991C + +#define R_AX_PKTIN_SETTING 0x9A00 +#define B_AX_WD_ADDR_INFO_LENGTH BIT(1) +#define R_AX_PKTIN_ERR_IMR 0x9A20 +#define R_AX_PKTIN_ERR_ISR 0x9A24 + +#define R_AX_MPDU_TX_ERR_ISR 0x9BF0 +#define R_AX_MPDU_TX_ERR_IMR 0x9BF4 + +#define R_AX_MPDU_PROC 0x9C00 +#define B_AX_A_ICV_ERR BIT(1) +#define B_AX_APPEND_FCS BIT(0) + +#define R_AX_ACTION_FWD0 0x9C04 +#define TRXCFG_MPDU_PROC_ACT_FRWD 0x02A95A95 + +#define R_AX_TF_FWD 0x9C14 +#define TRXCFG_MPDU_PROC_TF_FRWD 0x0000AA55 + +#define R_AX_HW_RPT_FWD 0x9C18 +#define B_AX_FWD_PPDU_STAT_MASK GENMASK(1, 0) +#define RTW89_PRPT_DEST_HOST 1 +#define RTW89_PRPT_DEST_WLCPU 2 + +#define R_AX_CUT_AMSDU_CTRL 0x9C40 +#define TRXCFG_MPDU_PROC_CUT_CTRL 0x010E05F0 + +#define R_AX_MPDU_RX_ERR_ISR 0x9CF0 +#define R_AX_MPDU_RX_ERR_IMR 0x9CF4 + +#define R_AX_SEC_ENG_CTRL 0x9D00 +#define B_AX_TX_PARTIAL_MODE BIT(11) +#define B_AX_CLK_EN_CGCMP BIT(10) +#define B_AX_CLK_EN_WAPI BIT(9) +#define B_AX_CLK_EN_WEP_TKIP BIT(8) +#define B_AX_BMC_MGNT_DEC BIT(5) +#define B_AX_UC_MGNT_DEC BIT(4) +#define B_AX_MC_DEC BIT(3) +#define B_AX_BC_DEC BIT(2) +#define B_AX_SEC_RX_DEC BIT(1) +#define B_AX_SEC_TX_ENC BIT(0) + +#define R_AX_SEC_MPDU_PROC 0x9D04 +#define B_AX_APPEND_ICV BIT(1) +#define B_AX_APPEND_MIC BIT(0) + +#define R_AX_SEC_CAM_ACCESS 0x9D10 +#define R_AX_SEC_CAM_RDATA 0x9D14 +#define R_AX_SEC_CAM_WDATA 0x9D18 +#define R_AX_SEC_DEBUG 0x9D1C +#define R_AX_SEC_TX_DEBUG 0x9D20 +#define R_AX_SEC_RX_DEBUG 0x9D24 +#define R_AX_SEC_TRX_PKT_CNT 0x9D28 +#define R_AX_SEC_TRX_BLK_CNT 0x9D2C + +#define R_AX_SS_CTRL 0x9E10 +#define B_AX_SS_INIT_DONE_1 BIT(31) +#define B_AX_SS_WARM_INIT_FLG BIT(29) +#define B_AX_SS_EN BIT(0) + +#define R_AX_SS_MACID_PAUSE_0 0x9EB0 +#define B_AX_SS_MACID31_0_PAUSE_SH 0 +#define B_AX_SS_MACID31_0_PAUSE_MASK GENMASK(31, 0) + +#define R_AX_SS_MACID_PAUSE_1 0x9EB4 +#define B_AX_SS_MACID63_32_PAUSE_SH 0 +#define B_AX_SS_MACID63_32_PAUSE_MASK GENMASK(31, 0) + +#define R_AX_SS_MACID_PAUSE_2 0x9EB8 +#define B_AX_SS_MACID95_64_PAUSE_SH 0 +#define B_AX_SS_MACID95_64_PAUSE_MASK GENMASK(31, 0) + +#define R_AX_SS_MACID_PAUSE_3 0x9EBC +#define B_AX_SS_MACID127_96_PAUSE_SH 0 +#define B_AX_SS_MACID127_96_PAUSE_MASK GENMASK(31, 0) + +#define R_AX_STA_SCHEDULER_ERR_IMR 0x9EF0 +#define R_AX_STA_SCHEDULER_ERR_ISR 0x9EF4 + +#define R_AX_TXPKTCTL_ERR_IMR_ISR 0x9F1C +#define R_AX_TXPKTCTL_ERR_IMR_ISR_B1 0x9F2C +#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9) +#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3) +#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN BIT(2) +#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN BIT(1) + +#define R_AX_DBG_FUN_INTF_CTL 0x9F30 +#define B_AX_DFI_ACTIVE BIT(31) +#define B_AX_DFI_TRGSEL_MASK GENMASK(19, 16) +#define B_AX_DFI_ADDR_MASK GENMASK(15, 0) +#define R_AX_DBG_FUN_INTF_DATA 0x9F34 +#define B_AX_DFI_DATA_MASK GENMASK(31, 0) + +#define R_AX_AFE_CTRL1 0x0024 + +#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4) +#define B_AX_R_SYM_WLCMAC1_P3_PC_EN BIT(3) +#define B_AX_R_SYM_WLCMAC1_P2_PC_EN BIT(2) +#define 
B_AX_R_SYM_WLCMAC1_P1_PC_EN BIT(1) +#define B_AX_R_SYM_WLCMAC1_PC_EN BIT(0) + +#define R_AX_SYS_ISO_CTRL_EXTEND 0x0080 +#define B_AX_CMAC1_FEN BIT(30) +#define B_AX_R_SYM_FEN_WLBBGLB_1 BIT(17) +#define B_AX_R_SYM_FEN_WLBBFUN_1 BIT(16) +#define B_AX_R_SYM_ISO_CMAC12PP BIT(5) + +#define R_AX_CMAC_REG_START 0xC000 + +#define R_AX_CMAC_FUNC_EN 0xC000 +#define R_AX_CMAC_FUNC_EN_C1 0xE000 +#define B_AX_CMAC_CRPRT BIT(31) +#define B_AX_CMAC_EN BIT(30) +#define B_AX_CMAC_TXEN BIT(29) +#define B_AX_CMAC_RXEN BIT(28) +#define B_AX_FORCE_CMACREG_GCKEN BIT(15) +#define B_AX_PHYINTF_EN BIT(5) +#define B_AX_CMAC_DMA_EN BIT(4) +#define B_AX_PTCLTOP_EN BIT(3) +#define B_AX_SCHEDULER_EN BIT(2) +#define B_AX_TMAC_EN BIT(1) +#define B_AX_RMAC_EN BIT(0) + +#define R_AX_CK_EN 0xC004 +#define R_AX_CK_EN_C1 0xE004 +#define B_AX_CMAC_ALLCKEN GENMASK(31, 0) +#define B_AX_CMAC_CKEN BIT(30) +#define B_AX_PHYINTF_CKEN BIT(5) +#define B_AX_CMAC_DMA_CKEN BIT(4) +#define B_AX_PTCLTOP_CKEN BIT(3) +#define B_AX_SCHEDULER_CKEN BIT(2) +#define B_AX_TMAC_CKEN BIT(1) +#define B_AX_RMAC_CKEN BIT(0) + +#define R_AX_WMAC_RFMOD 0xC010 +#define R_AX_WMAC_RFMOD_C1 0xE010 +#define B_AX_WMAC_RFMOD_MASK GENMASK(1, 0) + +#define R_AX_GID_POSITION0 0xC070 +#define R_AX_GID_POSITION0_C1 0xE070 +#define R_AX_GID_POSITION1 0xC074 +#define R_AX_GID_POSITION1_C1 0xE074 +#define R_AX_GID_POSITION2 0xC078 +#define R_AX_GID_POSITION2_C1 0xE078 +#define R_AX_GID_POSITION3 0xC07C +#define R_AX_GID_POSITION3_C1 0xE07C +#define R_AX_GID_POSITION_EN0 0xC080 +#define R_AX_GID_POSITION_EN0_C1 0xE080 +#define R_AX_GID_POSITION_EN1 0xC084 +#define R_AX_GID_POSITION_EN1_C1 0xE084 + +#define R_AX_TX_SUB_CARRIER_VALUE 0xC088 +#define R_AX_TX_SUB_CARRIER_VALUE_C1 0xE088 +#define B_AX_TXSC_80M_MASK GENMASK(11, 8) +#define B_AX_TXSC_40M_MASK GENMASK(7, 4) +#define B_AX_TXSC_20M_MASK GENMASK(3, 0) + +#define R_AX_CMAC_ERR_ISR 0xC164 +#define R_AX_CMAC_ERR_ISR_C1 0xE164 +#define B_AX_WMAC_TX_ERR_IND BIT(7) +#define B_AX_WMAC_RX_ERR_IND BIT(6) +#define B_AX_TXPWR_CTRL_ERR_IND BIT(5) +#define B_AX_PHYINTF_ERR_IND BIT(4) +#define B_AX_DMA_TOP_ERR_IND BIT(3) +#define B_AX_PTCL_TOP_ERR_IND BIT(1) +#define B_AX_SCHEDULE_TOP_ERR_IND BIT(0) + +#define R_AX_MACID_SLEEP_0 0xC2C0 +#define R_AX_MACID_SLEEP_0_C1 0xE2C0 +#define B_AX_MACID31_0_SLEEP_SH 0 +#define B_AX_MACID31_0_SLEEP_MASK GENMASK(31, 0) + +#define R_AX_MACID_SLEEP_1 0xC2C4 +#define R_AX_MACID_SLEEP_1_C1 0xE2C4 +#define B_AX_MACID63_32_SLEEP_SH 0 +#define B_AX_MACID63_32_SLEEP_MASK GENMASK(31, 0) + +#define R_AX_MACID_SLEEP_2 0xC2C8 +#define R_AX_MACID_SLEEP_2_C1 0xE2C8 +#define B_AX_MACID95_64_SLEEP_SH 0 +#define B_AX_MACID95_64_SLEEP_MASK GENMASK(31, 0) + +#define R_AX_MACID_SLEEP_3 0xC2CC +#define R_AX_MACID_SLEEP_3_C1 0xE2CC +#define B_AX_MACID127_96_SLEEP_SH 0 +#define B_AX_MACID127_96_SLEEP_MASK GENMASK(31, 0) + +#define SCH_PREBKF_24US 0x18 +#define R_AX_PREBKF_CFG_0 0xC338 +#define R_AX_PREBKF_CFG_0_C1 0xE338 +#define B_AX_PREBKF_TIME_MASK GENMASK(4, 0) + +#define R_AX_CCA_CFG_0 0xC340 +#define R_AX_CCA_CFG_0_C1 0xE340 +#define B_AX_BTCCA_BRK_TXOP_EN BIT(9) +#define B_AX_BTCCA_EN BIT(5) +#define B_AX_EDCCA_EN BIT(4) +#define B_AX_SEC80_EN BIT(3) +#define B_AX_SEC40_EN BIT(2) +#define B_AX_SEC20_EN BIT(1) +#define B_AX_CCA_EN BIT(0) + +#define R_AX_CTN_TXEN 0xC348 +#define R_AX_CTN_TXEN_C1 0xE348 +#define B_AX_CTN_TXEN_TWT_1 BIT(15) +#define B_AX_CTN_TXEN_TWT_0 BIT(14) +#define B_AX_CTN_TXEN_ULQ BIT(13) +#define B_AX_CTN_TXEN_BCNQ BIT(12) +#define B_AX_CTN_TXEN_HGQ BIT(11) +#define B_AX_CTN_TXEN_CPUMGQ 
BIT(10) +#define B_AX_CTN_TXEN_MGQ1 BIT(9) +#define B_AX_CTN_TXEN_MGQ BIT(8) +#define B_AX_CTN_TXEN_VO_1 BIT(7) +#define B_AX_CTN_TXEN_VI_1 BIT(6) +#define B_AX_CTN_TXEN_BK_1 BIT(5) +#define B_AX_CTN_TXEN_BE_1 BIT(4) +#define B_AX_CTN_TXEN_VO_0 BIT(3) +#define B_AX_CTN_TXEN_VI_0 BIT(2) +#define B_AX_CTN_TXEN_BK_0 BIT(1) +#define B_AX_CTN_TXEN_BE_0 BIT(0) + +#define R_AX_MUEDCA_BE_PARAM_0 0xC350 +#define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350 +#define B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK GENMASK(31, 16) +#define B_AX_MUEDCA_BE_PARAM_0_CW_MASK GENMASK(15, 8) +#define B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK GENMASK(7, 0) + +#define R_AX_MUEDCA_BK_PARAM_0 0xC354 +#define R_AX_MUEDCA_BK_PARAM_0_C1 0xE354 +#define R_AX_MUEDCA_VI_PARAM_0 0xC358 +#define R_AX_MUEDCA_VI_PARAM_0_C1 0xE358 +#define R_AX_MUEDCA_VO_PARAM_0 0xC35C +#define R_AX_MUEDCA_VO_PARAM_0_C1 0xE35C + +#define R_AX_MUEDCA_EN 0xC370 +#define R_AX_MUEDCA_EN_C1 0xE370 +#define B_AX_MUEDCA_WMM_SEL BIT(8) +#define B_AX_SET_MUEDCATIMER_TF_0 BIT(4) +#define B_AX_MUEDCA_EN_0 BIT(0) + +#define R_AX_CCA_CONTROL 0xC390 +#define R_AX_CCA_CONTROL_C1 0xE390 +#define B_AX_TB_CHK_TX_NAV BIT(31) +#define B_AX_TB_CHK_BASIC_NAV BIT(30) +#define B_AX_TB_CHK_BTCCA BIT(29) +#define B_AX_TB_CHK_EDCCA BIT(28) +#define B_AX_TB_CHK_CCA_S80 BIT(27) +#define B_AX_TB_CHK_CCA_S40 BIT(26) +#define B_AX_TB_CHK_CCA_S20 BIT(25) +#define B_AX_TB_CHK_CCA_P20 BIT(24) +#define B_AX_SIFS_CHK_BTCCA BIT(21) +#define B_AX_SIFS_CHK_EDCCA BIT(20) +#define B_AX_SIFS_CHK_CCA_S80 BIT(19) +#define B_AX_SIFS_CHK_CCA_S40 BIT(18) +#define B_AX_SIFS_CHK_CCA_S20 BIT(17) +#define B_AX_SIFS_CHK_CCA_P20 BIT(16) +#define B_AX_CTN_CHK_TXNAV BIT(8) +#define B_AX_CTN_CHK_INTRA_NAV BIT(7) +#define B_AX_CTN_CHK_BASIC_NAV BIT(6) +#define B_AX_CTN_CHK_BTCCA BIT(5) +#define B_AX_CTN_CHK_EDCCA BIT(4) +#define B_AX_CTN_CHK_CCA_S80 BIT(3) +#define B_AX_CTN_CHK_CCA_S40 BIT(2) +#define B_AX_CTN_CHK_CCA_S20 BIT(1) +#define B_AX_CTN_CHK_CCA_P20 BIT(0) + +#define R_AX_SCHEDULE_ERR_IMR 0xC3E8 +#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8 +#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1) +#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0) + +#define R_AX_SCHEDULE_ERR_ISR 0xC3EC +#define R_AX_SCHEDULE_ERR_ISR_C1 0xE3EC + +#define R_AX_SCH_DBG_SEL 0xC3F4 +#define R_AX_SCH_DBG_SEL_C1 0xE3F4 +#define B_AX_SCH_DBG_EN BIT(16) +#define B_AX_SCH_CFG_CMD_SEL GENMASK(15, 8) +#define B_AX_SCH_DBG_SEL_MASK GENMASK(7, 0) + +#define R_AX_SCH_DBG 0xC3F8 +#define R_AX_SCH_DBG_C1 0xE3F8 +#define B_AX_SCHEDULER_DBG_MASK GENMASK(31, 0) + +#define R_AX_PORT_CFG_P0 0xC400 +#define R_AX_PORT_CFG_P1 0xC440 +#define R_AX_PORT_CFG_P2 0xC480 +#define R_AX_PORT_CFG_P3 0xC4C0 +#define R_AX_PORT_CFG_P4 0xC500 +#define B_AX_BRK_SETUP BIT(16) +#define B_AX_TBTT_UPD_SHIFT_SEL BIT(15) +#define B_AX_BCN_DROP_ALLOW BIT(14) +#define B_AX_TBTT_PROHIB_EN BIT(13) +#define B_AX_BCNTX_EN BIT(12) +#define B_AX_NET_TYPE_MASK GENMASK(11, 10) +#define B_AX_BCN_FORCETX_EN BIT(9) +#define B_AX_TXBCN_BTCCA_EN BIT(8) +#define B_AX_BCNERR_CNT_EN BIT(7) +#define B_AX_BCN_AGRES BIT(6) +#define B_AX_TSFTR_RST BIT(5) +#define B_AX_RX_BSSID_FIT_EN BIT(4) +#define B_AX_TSF_UDT_EN BIT(3) +#define B_AX_PORT_FUNC_EN BIT(2) +#define B_AX_TXBCN_RPT_EN BIT(1) +#define B_AX_RXBCN_RPT_EN BIT(0) + +#define R_AX_TBTT_PROHIB_P0 0xC404 +#define R_AX_TBTT_PROHIB_P1 0xC444 +#define R_AX_TBTT_PROHIB_P2 0xC484 +#define R_AX_TBTT_PROHIB_P3 0xC4C4 +#define R_AX_TBTT_PROHIB_P4 0xC504 +#define B_AX_TBTT_HOLD_MASK GENMASK(27, 16) +#define B_AX_TBTT_SETUP_MASK GENMASK(7, 0) + +#define R_AX_BCN_AREA_P0 0xC408 
+#define R_AX_BCN_AREA_P1 0xC448 +#define R_AX_BCN_AREA_P2 0xC488 +#define R_AX_BCN_AREA_P3 0xC4C8 +#define R_AX_BCN_AREA_P4 0xC508 +#define B_AX_BCN_MSK_AREA_MASK GENMASK(27, 16) +#define B_AX_BCN_CTN_AREA_MASK GENMASK(11, 0) + +#define R_AX_BCNERLYINT_CFG_P0 0xC40C +#define R_AX_BCNERLYINT_CFG_P1 0xC44C +#define R_AX_BCNERLYINT_CFG_P2 0xC48C +#define R_AX_BCNERLYINT_CFG_P3 0xC4CC +#define R_AX_BCNERLYINT_CFG_P4 0xC50C +#define B_AX_BCNERLY_MASK GENMASK(11, 0) + +#define R_AX_TBTTERLYINT_CFG_P0 0xC40E +#define R_AX_TBTTERLYINT_CFG_P1 0xC44E +#define R_AX_TBTTERLYINT_CFG_P2 0xC48E +#define R_AX_TBTTERLYINT_CFG_P3 0xC4CE +#define R_AX_TBTTERLYINT_CFG_P4 0xC50E +#define B_AX_TBTTERLY_MASK GENMASK(11, 0) + +#define R_AX_TBTT_AGG_P0 0xC412 +#define R_AX_TBTT_AGG_P1 0xC452 +#define R_AX_TBTT_AGG_P2 0xC492 +#define R_AX_TBTT_AGG_P3 0xC4D2 +#define R_AX_TBTT_AGG_P4 0xC512 +#define B_AX_TBTT_AGG_NUM_MASK GENMASK(15, 8) + +#define R_AX_BCN_SPACE_CFG_P0 0xC414 +#define R_AX_BCN_SPACE_CFG_P1 0xC454 +#define R_AX_BCN_SPACE_CFG_P2 0xC494 +#define R_AX_BCN_SPACE_CFG_P3 0xC4D4 +#define R_AX_BCN_SPACE_CFG_P4 0xC514 +#define B_AX_SUB_BCN_SPACE_MASK GENMASK(23, 16) +#define B_AX_BCN_SPACE_MASK GENMASK(15, 0) + +#define R_AX_BCN_FORCETX_P0 0xC418 +#define R_AX_BCN_FORCETX_P1 0xC458 +#define R_AX_BCN_FORCETX_P2 0xC498 +#define R_AX_BCN_FORCETX_P3 0xC4D8 +#define R_AX_BCN_FORCETX_P4 0xC518 +#define B_AX_FORCE_BCN_CURRCNT_MASK GENMASK(23, 16) +#define B_AX_FORCE_BCN_NUM_MASK GENMASK(15, 0) +#define B_AX_BCN_MAX_ERR_MASK GENMASK(7, 0) + +#define R_AX_BCN_ERR_CNT_P0 0xC420 +#define R_AX_BCN_ERR_CNT_P1 0xC460 +#define R_AX_BCN_ERR_CNT_P2 0xC4A0 +#define R_AX_BCN_ERR_CNT_P3 0xC4E0 +#define R_AX_BCN_ERR_CNT_P4 0xC520 +#define B_AX_BCN_ERR_CNT_SUM_MASK GENMASK(31, 24) +#define B_AX_BCN_ERR_CNT_NAV_MASK GENMASK(23, 16) +#define B_AX_BCN_ERR_CNT_EDCCA_MASK GENMASK(15, 0) +#define B_AX_BCN_ERR_CNT_CCA_MASK GENMASK(7, 0) + +#define R_AX_BCN_ERR_FLAG_P0 0xC424 +#define R_AX_BCN_ERR_FLAG_P1 0xC464 +#define R_AX_BCN_ERR_FLAG_P2 0xC4A4 +#define R_AX_BCN_ERR_FLAG_P3 0xC4E4 +#define R_AX_BCN_ERR_FLAG_P4 0xC524 +#define B_AX_BCN_ERR_FLAG_OTHERS BIT(6) +#define B_AX_BCN_ERR_FLAG_MAC BIT(5) +#define B_AX_BCN_ERR_FLAG_TXON BIT(4) +#define B_AX_BCN_ERR_FLAG_SRCHEND BIT(3) +#define B_AX_BCN_ERR_FLAG_INVALID BIT(2) +#define B_AX_BCN_ERR_FLAG_CMP BIT(1) +#define B_AX_BCN_ERR_FLAG_LOCK BIT(0) + +#define R_AX_DTIM_CTRL_P0 0xC426 +#define R_AX_DTIM_CTRL_P1 0xC466 +#define R_AX_DTIM_CTRL_P2 0xC4A6 +#define R_AX_DTIM_CTRL_P3 0xC4E6 +#define R_AX_DTIM_CTRL_P4 0xC526 +#define B_AX_DTIM_NUM_MASK GENMASK(15, 0) +#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0) + +#define R_AX_TBTT_SHIFT_P0 0xC428 +#define R_AX_TBTT_SHIFT_P1 0xC468 +#define R_AX_TBTT_SHIFT_P2 0xC4A8 +#define R_AX_TBTT_SHIFT_P3 0xC4E8 +#define R_AX_TBTT_SHIFT_P4 0xC528 +#define B_AX_TBTT_SHIFT_OFST_MASK GENMASK(11, 0) + +#define R_AX_BCN_CNT_TMR_P0 0xC434 +#define R_AX_BCN_CNT_TMR_P1 0xC474 +#define R_AX_BCN_CNT_TMR_P2 0xC4B4 +#define R_AX_BCN_CNT_TMR_P3 0xC4F4 +#define R_AX_BCN_CNT_TMR_P4 0xC534 +#define B_AX_BCN_CNT_TMR_MASK GENMASK(31, 0) + +#define R_AX_TSFTR_LOW_P0 0xC438 +#define R_AX_TSFTR_LOW_P1 0xC478 +#define R_AX_TSFTR_LOW_P2 0xC4B8 +#define R_AX_TSFTR_LOW_P3 0xC4F8 +#define R_AX_TSFTR_LOW_P4 0xC538 +#define B_AX_TSFTR_LOW_MASK GENMASK(31, 0) + +#define R_AX_TSFTR_HIGH_P0 0xC43C +#define R_AX_TSFTR_HIGH_P1 0xC47C +#define R_AX_TSFTR_HIGH_P2 0xC4BC +#define R_AX_TSFTR_HIGH_P3 0xC4FC +#define R_AX_TSFTR_HIGH_P4 0xC53C +#define B_AX_TSFTR_HIGH_MASK GENMASK(31, 0) + +#define 
R_AX_MBSSID_CTRL 0xC568 +#define R_AX_MBSSID_CTRL_C1 0xE568 +#define B_AX_P0MB_ALL_MASK GENMASK(23, 1) +#define B_AX_P0MB_NUM_MASK GENMASK(19, 16) +#define B_AX_P0MB15_EN BIT(15) +#define B_AX_P0MB14_EN BIT(14) +#define B_AX_P0MB13_EN BIT(13) +#define B_AX_P0MB12_EN BIT(12) +#define B_AX_P0MB11_EN BIT(11) +#define B_AX_P0MB10_EN BIT(10) +#define B_AX_P0MB9_EN BIT(9) +#define B_AX_P0MB8_EN BIT(8) +#define B_AX_P0MB7_EN BIT(7) +#define B_AX_P0MB6_EN BIT(6) +#define B_AX_P0MB5_EN BIT(5) +#define B_AX_P0MB4_EN BIT(4) +#define B_AX_P0MB3_EN BIT(3) +#define B_AX_P0MB2_EN BIT(2) +#define B_AX_P0MB1_EN BIT(1) + +#define R_AX_AMPDU_AGG_LIMIT 0xC610 +#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24) +#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16) +#define B_AX_RTS_MAX_AGG_NUM_MASK GENMASK(15, 8) +#define B_AX_MAX_AGG_NUM_MASK GENMASK(7, 0) + +#define R_AX_AGG_LEN_HT_0 0xC614 +#define R_AX_AGG_LEN_HT_0_C1 0xE614 +#define B_AX_AMPDU_MAX_LEN_HT_MASK GENMASK(31, 16) +#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8) +#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0) + +#define S_AX_CTS2S_TH_SEC_256B 1 +#define R_AX_SIFS_SETTING 0xC624 +#define R_AX_SIFS_SETTING_C1 0xE624 +#define B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK GENMASK(31, 24) +#define B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK GENMASK(23, 18) +#define B_AX_HW_CTS2SELF_EN BIT(16) +#define B_AX_SPEC_SIFS_OFDM_PTCL_SH 8 +#define B_AX_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8) +#define B_AX_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0) +#define S_AX_CTS2S_TH_1K 4 + +#define R_AX_TXRATE_CHK 0xC628 +#define R_AX_TXRATE_CHK_C1 0xE628 +#define B_AX_DEFT_RATE_MASK GENMASK(15, 7) +#define B_AX_BAND_MODE BIT(4) +#define B_AX_MAX_TXNSS_MASK GENMASK(3, 2) +#define B_AX_RTS_LIMIT_IN_OFDM6 BIT(1) +#define B_AX_CHECK_CCK_EN BIT(0) + +#define R_AX_TXCNT 0xC62C +#define R_AX_TXCNT_C1 0xE62C +#define B_AX_ADD_TXCNT_BY BIT(31) +#define B_AX_S_TXCNT_LMT_MASK GENMASK(29, 24) +#define B_AX_L_TXCNT_LMT_MASK GENMASK(21, 16) + +#define R_AX_MBSSID_DROP_0 0xC63C +#define R_AX_MBSSID_DROP_0_C1 0xE63C +#define B_AX_GI_LTF_FB_SEL BIT(30) +#define B_AX_RATE_SEL_MASK GENMASK(29, 24) +#define B_AX_PORT_DROP_4_0_MASK GENMASK(20, 16) +#define B_AX_MBSSID_DROP_15_0_MASK GENMASK(15, 0) + +#define R_AX_BT_PLT 0xC67C +#define R_AX_BT_PLT_C1 0xE67C +#define B_AX_BT_PLT_PKT_CNT_MASK GENMASK(31, 16) +#define B_AX_BT_PLT_RST BIT(9) +#define B_AX_PLT_EN BIT(8) +#define B_AX_RX_PLT_GNT_LTE_RX BIT(7) +#define B_AX_RX_PLT_GNT_BT_RX BIT(6) +#define B_AX_RX_PLT_GNT_BT_TX BIT(5) +#define B_AX_RX_PLT_GNT_WL BIT(4) +#define B_AX_TX_PLT_GNT_LTE_RX BIT(3) +#define B_AX_TX_PLT_GNT_BT_RX BIT(2) +#define B_AX_TX_PLT_GNT_BT_TX BIT(1) +#define B_AX_TX_PLT_GNT_WL BIT(0) + +#define R_AX_PTCL_BSS_COLOR_0 0xC6A0 +#define R_AX_PTCL_BSS_COLOR_0_C1 0xE6A0 +#define B_AX_BSS_COLOB_AX_PORT_3_MASK GENMASK(29, 24) +#define B_AX_BSS_COLOB_AX_PORT_2_MASK GENMASK(21, 16) +#define B_AX_BSS_COLOB_AX_PORT_1_MASK GENMASK(13, 8) +#define B_AX_BSS_COLOB_AX_PORT_0_MASK GENMASK(5, 0) + +#define R_AX_PTCL_BSS_COLOR_1 0xC6A4 +#define R_AX_PTCL_BSS_COLOR_1_C1 0xE6A4 +#define B_AX_BSS_COLOB_AX_PORT_4_MASK GENMASK(5, 0) + +#define R_AX_PTCL_IMR0 0xC6C0 +#define R_AX_PTCL_IMR0_C1 0xE6C0 +#define B_AX_F2PCMD_USER_ALLC_ERR_INT_EN BIT(28) +#define B_AX_TX_RECORD_PKTID_ERR_INT_EN BIT(23) + +#define R_AX_PTCL_ISR0 0xC6C4 +#define R_AX_PTCL_ISR0_C1 0xE6C4 + +#define S_AX_PTCL_TO_2MS 0x3F +#define R_AX_PTCL_FSM_MON 0xC6E8 +#define R_AX_PTCL_FSM_MON_C1 0xE6E8 +#define B_AX_PTCL_TX_ARB_TO_MODE BIT(6) +#define B_AX_PTCL_TX_ARB_TO_THR_MASK GENMASK(5, 0) + +#define 
R_AX_PTCL_TX_CTN_SEL 0xC6EC +#define R_AX_PTCL_TX_CTN_SEL_C1 0xE6EC +#define B_AX_PTCL_TX_ON_STAT BIT(7) + +#define R_AX_PTCL_DBG_INFO 0xC6F0 +#define R_AX_PTCL_DBG_INFO_C1 0xE6F0 +#define B_AX_PTCL_DBG_INFO_MASK GENMASK(31, 0) +#define R_AX_PTCL_DBG 0xC6F4 +#define R_AX_PTCL_DBG_C1 0xE6F4 +#define B_AX_PTCL_DBG_EN BIT(8) +#define B_AX_PTCL_DBG_SEL_MASK GENMASK(7, 0) + +#define R_AX_DLE_CTRL 0xC800 +#define R_AX_DLE_CTRL_C1 0xE800 +#define B_AX_NO_RESERVE_PAGE_ERR_IMR BIT(23) +#define B_AX_RXDATA_FSM_HANG_ERROR_IMR BIT(15) +#define R_AX_RXDMA_PKT_INFO_0 0xC814 +#define R_AX_RXDMA_PKT_INFO_1 0xC818 +#define R_AX_RXDMA_PKT_INFO_2 0xC81C + +#define R_AX_TCR1 0xCA04 +#define R_AX_TCR1_C1 0xEA04 +#define B_AX_TXDFIFO_THRESHOLD GENMASK(31, 28) +#define B_AX_TCR_CCK_LOCK_CLK BIT(27) +#define B_AX_TCR_FORCE_READ_TXDFIFO BIT(26) +#define B_AX_TCR_USTIME GENMASK(23, 16) +#define B_AX_TCR_SMOOTH_VAL BIT(15) +#define B_AX_TCR_SMOOTH_CTRL BIT(14) +#define B_AX_CS_REQ_VAL BIT(13) +#define B_AX_CS_REQ_SEL BIT(12) +#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8) +#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0) + +#define R_AX_PPWRBIT_SETTING 0xCA0C +#define R_AX_PPWRBIT_SETTING_C1 0xEA0C + +#define R_AX_MACTX_DBG_SEL_CNT 0xCA20 +#define R_AX_MACTX_DBG_SEL_CNT_C1 0xEA20 +#define B_AX_MACTX_MPDU_CNT GENMASK(31, 24) +#define B_AX_MACTX_DMA_CNT GENMASK(23, 16) +#define B_AX_LENGTH_ERR_FLAG_U3 BIT(11) +#define B_AX_LENGTH_ERR_FLAG_U2 BIT(10) +#define B_AX_LENGTH_ERR_FLAG_U1 BIT(9) +#define B_AX_LENGTH_ERR_FLAG_U0 BIT(8) +#define B_AX_DBGSEL_MACTX_MASK GENMASK(5, 0) + +#define R_AX_WMAC_TX_CTRL_DEBUG 0xCAE4 +#define R_AX_WMAC_TX_CTRL_DEBUG_C1 0xEAE4 +#define B_AX_TX_CTRL_DEBUG_SEL_MASK GENMASK(3, 0) + +#define R_AX_WMAC_TX_INFO0_DEBUG 0xCAE8 +#define R_AX_WMAC_TX_INFO0_DEBUG_C1 0xEAE8 +#define B_AX_TX_CTRL_INFO_P0_MASK GENMASK(31, 0) + +#define R_AX_WMAC_TX_INFO1_DEBUG 0xCAEC +#define R_AX_WMAC_TX_INFO1_DEBUG_C1 0xEAEC +#define B_AX_TX_CTRL_INFO_P1_MASK GENMASK(31, 0) + +#define R_AX_RSP_CHK_SIG 0xCC00 +#define R_AX_RSP_CHK_SIG_C1 0xEC00 +#define B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN BIT(30) +#define B_AX_RSP_TBPPDU_CHK_PWR BIT(29) +#define B_AX_RSP_CHK_BASIC_NAV BIT(21) +#define B_AX_RSP_CHK_INTRA_NAV BIT(20) +#define B_AX_RSP_CHK_TXNAV BIT(19) +#define B_AX_TXDATA_END_PS_OPT BIT(18) +#define B_AX_CHECK_SOUNDING_SEQ BIT(17) +#define B_AX_RXBA_IGNOREA2 BIT(16) +#define B_AX_ACKTO_CCK_MASK GENMASK(15, 8) +#define B_AX_ACKTO_MASK GENMASK(7, 0) + +#define R_AX_TRXPTCL_RESP_0 0xCC04 +#define R_AX_TRXPTCL_RESP_0_C1 0xEC04 +#define B_AX_WMAC_RESP_STBC_EN BIT(31) +#define B_AX_WMAC_RXFTM_TXACK_SC BIT(30) +#define B_AX_WMAC_RXFTM_TXACKBWEQ BIT(29) +#define B_AX_RSP_CHK_SEC_CCA_80 BIT(28) +#define B_AX_RSP_CHK_SEC_CCA_40 BIT(27) +#define B_AX_RSP_CHK_SEC_CCA_20 BIT(26) +#define B_AX_RSP_CHK_BTCCA BIT(25) +#define B_AX_RSP_CHK_EDCCA BIT(24) +#define B_AX_RSP_CHK_CCA BIT(23) +#define B_AX_WMAC_LDPC_EN BIT(22) +#define B_AX_WMAC_SGIEN BIT(21) +#define B_AX_WMAC_SPLCPEN BIT(20) +#define B_AX_WMAC_BESP_EARLY_TXBA BIT(17) +#define B_AX_WMAC_SPEC_SIFS_OFDM_MASK GENMASK(15, 8) +#define B_AX_WMAC_SPEC_SIFS_CCK_MASK GENMASK(7, 0) +#define WMAC_SPEC_SIFS_OFDM_52A 0x15 +#define WMAC_SPEC_SIFS_OFDM_52B 0x11 +#define WMAC_SPEC_SIFS_OFDM_52C 0x11 +#define WMAC_SPEC_SIFS_CCK 0xA + +#define R_AX_MAC_LOOPBACK 0xCC20 +#define R_AX_MAC_LOOPBACK_C1 0xEC20 +#define B_AX_MACLBK_EN BIT(0) + +#define R_AX_RXTRIG_TEST_USER_2 0xCCB0 +#define R_AX_RXTRIG_TEST_USER_2_C1 0xECB0 +#define B_AX_RXTRIG_MACID_MASK GENMASK(31, 24) +#define 
B_AX_RXTRIG_RU26_DIS BIT(21) +#define B_AX_RXTRIG_FCSCHK_EN BIT(20) +#define B_AX_RXTRIG_PORT_SEL_MASK GENMASK(19, 17) +#define B_AX_RXTRIG_EN BIT(16) +#define B_AX_RXTRIG_USERINFO_2_MASK GENMASK(15, 0) + +#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0 +#define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0 +#define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0) + +#define R_AX_WMAC_TX_TF_INFO_1 0xCCD4 +#define R_AX_WMAC_TX_TF_INFO_1_C1 0xECD4 +#define B_AX_WMAC_TX_TF_INFO_P0_MASK GENMASK(31, 0) + +#define R_AX_WMAC_TX_TF_INFO_2 0xCCD8 +#define R_AX_WMAC_TX_TF_INFO_2_C1 0xECD8 +#define B_AX_WMAC_TX_TF_INFO_P1_MASK GENMASK(31, 0) + +#define R_AX_TMAC_ERR_IMR_ISR 0xCCEC +#define R_AX_TMAC_ERR_IMR_ISR_C1 0xECEC + +#define R_AX_DBGSEL_TRXPTCL 0xCCF4 +#define R_AX_DBGSEL_TRXPTCL_C1 0xECF4 +#define B_AX_DBGSEL_TRXPTCL_MASK GENMASK(7, 0) + +#define R_AX_PHYINFO_ERR_IMR 0xCCFC +#define R_AX_PHYINFO_ERR_IMR_C1 0xECFC +#define B_AX_CSI_ON_TIMEOUT BIT(29) +#define B_AX_STS_ON_TIMEOUT BIT(28) +#define B_AX_DATA_ON_TIMEOUT BIT(27) +#define B_AX_OFDM_CCA_TIMEOUT BIT(26) +#define B_AX_CCK_CCA_TIMEOUT BIT(25) +#define B_AXC_PHY_TXON_TIMEOUT BIT(24) +#define B_AX_CSI_ON_TIMEOUT_INT_EN BIT(21) +#define B_AX_STS_ON_TIMEOUT_INT_EN BIT(20) +#define B_AX_DATA_ON_TIMEOUT_INT_EN BIT(19) +#define B_AX_OFDM_CCA_TIMEOUT_INT_EN BIT(18) +#define B_AX_CCK_CCA_TIMEOUT_INT_EN BIT(17) +#define B_AX_PHY_TXON_TIMEOUT_INT_EN BIT(16) +#define B_AX_PHYINTF_TIMEOUT_THR_MSAK GENMASK(5, 0) + +#define R_AX_PHYINFO_ERR_ISR 0xCCFC +#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC + +#define R_AX_BFMER_CTRL_0 0xCD78 +#define R_AX_BFMER_CTRL_0_C1 0xED78 +#define B_AX_BFMER_HE_CSI_OFFSET_MASK GENMASK(31, 24) +#define B_AX_BFMER_VHT_CSI_OFFSET_MASK GENMASK(23, 16) +#define B_AX_BFMER_HT_CSI_OFFSET_MASK GENMASK(15, 8) +#define B_AX_BFMER_NDP_BFEN BIT(2) +#define B_AX_BFMER_VHT_BFPRT_CHK BIT(0) + +#define R_AX_BFMEE_RESP_OPTION 0xCD80 +#define R_AX_BFMEE_RESP_OPTION_C1 0xED80 +#define B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK GENMASK(31, 24) +#define B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK GENMASK(23, 20) +#define B_AX_MU_BFRPTSEG_SEL_MASK GENMASK(18, 17) +#define B_AX_BFMEE_NDP_RXSTDBY_SEL BIT(16) +#define BFRP_RX_STANDBY_TIMER 0x0 +#define NDP_RX_STANDBY_TIMER 0xFF +#define B_AX_BFMEE_HE_NDPA_EN BIT(2) +#define B_AX_BFMEE_VHT_NDPA_EN BIT(1) +#define B_AX_BFMEE_HT_NDPA_EN BIT(0) + +#define R_AX_TRXPTCL_RESP_CSI_CTRL_0 0xCD88 +#define R_AX_TRXPTCL_RESP_CSI_CTRL_0_C1 0xED88 +#define R_AX_TRXPTCL_RESP_CSI_CTRL_1 0xCD94 +#define R_AX_TRXPTCL_RESP_CSI_CTRL_1_C1 0xED94 +#define B_AX_BFMEE_CSISEQ_SEL BIT(29) +#define B_AX_BFMEE_BFPARAM_SEL BIT(28) +#define B_AX_BFMEE_OFDM_LEN_TH_MASK GENMASK(27, 24) +#define B_AX_BFMEE_BF_PORT_SEL BIT(23) +#define B_AX_BFMEE_USE_NSTS BIT(22) +#define B_AX_BFMEE_CSI_RATE_FB_EN BIT(21) +#define B_AX_BFMEE_CSI_GID_SEL BIT(20) +#define B_AX_BFMEE_CSI_RSC_MASK GENMASK(19, 18) +#define B_AX_BFMEE_CSI_FORCE_RETE_EN BIT(17) +#define B_AX_BFMEE_CSI_USE_NDPARATE BIT(16) +#define B_AX_BFMEE_CSI_WITHHTC_EN BIT(15) +#define B_AX_BFMEE_CSIINFO0_BF_EN BIT(14) +#define B_AX_BFMEE_CSIINFO0_STBC_EN BIT(13) +#define B_AX_BFMEE_CSIINFO0_LDPC_EN BIT(12) +#define B_AX_BFMEE_CSIINFO0_CS_MASK GENMASK(11, 10) +#define B_AX_BFMEE_CSIINFO0_CB_MASK GENMASK(9, 8) +#define B_AX_BFMEE_CSIINFO0_NG_MASK GENMASK(7, 6) +#define B_AX_BFMEE_CSIINFO0_NR_MASK GENMASK(5, 3) +#define B_AX_BFMEE_CSIINFO0_NC_MASK GENMASK(2, 0) + +#define R_AX_TRXPTCL_RESP_CSI_RRSC 0xCD8C +#define R_AX_TRXPTCL_RESP_CSI_RRSC_C1 0xED8C +#define CSI_RRSC_BMAP 0x29292911 + +#define R_AX_TRXPTCL_RESP_CSI_RATE 0xCD90 
+#define R_AX_TRXPTCL_RESP_CSI_RATE_C1 0xED90 +#define B_AX_BFMEE_HE_CSI_RATE_MASK GENMASK(22, 16) +#define B_AX_BFMEE_VHT_CSI_RATE_MASK GENMASK(14, 8) +#define B_AX_BFMEE_HT_CSI_RATE_MASK GENMASK(6, 0) +#define CSI_INIT_RATE_HE 0x3 +#define CSI_INIT_RATE_VHT 0x3 +#define CSI_INIT_RATE_HT 0x3 + +#define R_AX_RCR 0xCE00 +#define R_AX_RCR_C1 0xEE00 +#define B_AX_STOP_RX_IN BIT(11) +#define B_AX_DRV_INFO_SIZE_MASK GENMASK(10, 8) +#define B_AX_CH_EN_MASK GENMASK(3, 0) + +#define R_AX_DLK_PROTECT_CTL 0xCE02 +#define R_AX_DLK_PROTECT_CTL_C1 0xEE02 +#define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8) +#define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4) + +#define R_AX_PLCP_HDR_FLTR 0xCE04 +#define R_AX_PLCP_HDR_FLTR_C1 0xEE04 +#define B_AX_DIS_CHK_MIN_LEN BIT(8) +#define B_AX_HE_SIGB_CRC_CHK BIT(6) +#define B_AX_VHT_MU_SIGB_CRC_CHK BIT(5) +#define B_AX_VHT_SU_SIGB_CRC_CHK BIT(4) +#define B_AX_SIGA_CRC_CHK BIT(3) +#define B_AX_LSIG_PARITY_CHK_EN BIT(2) +#define B_AX_CCK_SIG_CHK BIT(1) +#define B_AX_CCK_CRC_CHK BIT(0) + +#define R_AX_RX_FLTR_OPT 0xCE20 +#define R_AX_RX_FLTR_OPT_C1 0xEE20 +#define B_AX_UID_FILTER_MASK GENMASK(31, 24) +#define B_AX_UNSPT_FILTER_SH 22 +#define B_AX_UNSPT_FILTER_MASK GENMASK(23, 22) +#define B_AX_RX_MPDU_MAX_LEN_MASK GENMASK(21, 16) +#define B_AX_RX_MPDU_MAX_LEN_SIZE 0x3f +#define B_AX_A_FTM_REQ BIT(14) +#define B_AX_A_ERR_PKT BIT(13) +#define B_AX_A_UNSUP_PKT BIT(12) +#define B_AX_A_CRC32_ERR BIT(11) +#define B_AX_A_PWR_MGNT BIT(10) +#define B_AX_A_BCN_CHK_RULE_MASK GENMASK(9, 8) +#define B_AX_A_BCN_CHK_EN BIT(7) +#define B_AX_A_MC_LIST_CAM_MATCH BIT(6) +#define B_AX_A_BC_CAM_MATCH BIT(5) +#define B_AX_A_UC_CAM_MATCH BIT(4) +#define B_AX_A_MC BIT(3) +#define B_AX_A_BC BIT(2) +#define B_AX_A_A1_MATCH BIT(1) +#define B_AX_SNIFFER_MODE BIT(0) +#define DEFAULT_AX_RX_FLTR (B_AX_A_A1_MATCH | B_AX_A_BC | B_AX_A_MC | \ + B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH | \ + B_AX_A_PWR_MGNT | B_AX_A_FTM_REQ | \ + u32_encode_bits(3, B_AX_UID_FILTER_MASK) | \ + B_AX_A_BCN_CHK_EN) +#define B_AX_RX_FLTR_CFG_MASK ((u32)~B_AX_RX_MPDU_MAX_LEN_MASK) + +#define R_AX_CTRL_FLTR 0xCE24 +#define R_AX_CTRL_FLTR_C1 0xEE24 +#define R_AX_MGNT_FLTR 0xCE28 +#define R_AX_MGNT_FLTR_C1 0xEE28 +#define R_AX_DATA_FLTR 0xCE2C +#define R_AX_DATA_FLTR_C1 0xEE2C +#define RX_FLTR_FRAME_DROP 0x00000000 +#define RX_FLTR_FRAME_TO_HOST 0x55555555 +#define RX_FLTR_FRAME_TO_WLCPU 0xAAAAAAAA + +#define R_AX_ADDR_CAM_CTRL 0xCE34 +#define R_AX_ADDR_CAM_CTRL_C1 0xEE34 +#define B_AX_ADDR_CAM_RANGE_MASK GENMASK(23, 16) +#define B_AX_ADDR_CAM_CMPLIMT_MASK GENMASK(15, 12) +#define B_AX_ADDR_CAM_CLR BIT(8) +#define B_AX_ADDR_CAM_A2_B0_CHK BIT(2) +#define B_AX_ADDR_CAM_SRCH_PERPKT BIT(1) +#define B_AX_ADDR_CAM_EN BIT(0) + +#define R_AX_RESPBA_CAM_CTRL 0xCE3C +#define R_AX_RESPBA_CAM_CTRL_C1 0xEE3C +#define B_AX_SSN_SEL BIT(2) + +#define R_AX_PPDU_STAT 0xCE40 +#define R_AX_PPDU_STAT_C1 0xEE40 +#define B_AX_PPDU_STAT_RPT_TRIG BIT(8) +#define B_AX_PPDU_STAT_RPT_CRC32 BIT(5) +#define B_AX_PPDU_STAT_RPT_A1M BIT(4) +#define B_AX_APP_PLCP_HDR_RPT BIT(3) +#define B_AX_APP_RX_CNT_RPT BIT(2) +#define B_AX_APP_MAC_INFO_RPT BIT(1) +#define B_AX_PPDU_STAT_RPT_EN BIT(0) + +#define R_AX_RX_SR_CTRL 0xCE4A +#define R_AX_RX_SR_CTRL_C1 0xEE4A +#define B_AX_SR_EN BIT(0) + +#define R_AX_RX_STATE_MONITOR 0xCEF0 +#define R_AX_RX_STATE_MONITOR_C1 0xEEF0 +#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0) +#define B_AX_STATE_CUR_MASK GENMASK(31, 16) +#define B_AX_STATE_NXT_MASK GENMASK(13, 8) +#define B_AX_STATE_UPD BIT(7) +#define B_AX_STATE_SEL_MASK 
GENMASK(4, 0) + +#define R_AX_RMAC_ERR_ISR 0xCEF4 +#define R_AX_RMAC_ERR_ISR_C1 0xEEF4 +#define B_AX_RXERR_INTPS_EN BIT(31) +#define B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN BIT(19) +#define B_AX_RMAC_RX_TIMEOUT_INT_EN BIT(18) +#define B_AX_RMAC_CSI_TIMEOUT_INT_EN BIT(17) +#define B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN BIT(16) +#define B_AX_RMAC_CCA_TIMEOUT_INT_EN BIT(15) +#define B_AX_RMAC_DMA_TIMEOUT_INT_EN BIT(14) +#define B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN BIT(13) +#define B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN BIT(12) +#define B_AX_RMAC_RX_CSI_TIMEOUT_FLAG BIT(7) +#define B_AX_RMAC_RX_TIMEOUT_FLAG BIT(6) +#define B_AX_BMAC_CSI_TIMEOUT_FLAG BIT(5) +#define B_AX_BMAC_DATA_ON_TIMEOUT_FLAG BIT(4) +#define B_AX_BMAC_CCA_TIMEOUT_FLAG BIT(3) +#define B_AX_BMAC_DMA_TIMEOUT_FLAG BIT(2) +#define B_AX_BMAC_DATA_ON_TO_IDLE_TIMEOUT_FLAG BIT(1) +#define B_AX_BMAC_CCA_TO_IDLE_TIMEOUT_FLAG BIT(0) + +#define R_AX_RMAC_PLCP_MON 0xCEF8 +#define R_AX_RMAC_PLCP_MON_C1 0xEEF8 +#define B_AX_RMAC_PLCP_MON_MASK GENMASK(31, 0) +#define B_AX_PCLP_MON_SEL_MASK GENMASK(31, 28) +#define B_AX_PCLP_MON_CONT_MASK GENMASK(27, 0) + +#define R_AX_RX_DEBUG_SELECT 0xCEFC +#define R_AX_RX_DEBUG_SELECT_C1 0xEEFC +#define B_AX_DEBUG_SEL_MASK GENMASK(7, 0) + +#define R_AX_PWR_RATE_CTRL 0xD200 +#define R_AX_PWR_RATE_CTRL_C1 0xF200 +#define B_AX_FORCE_PWR_BY_RATE_EN BIT(9) +#define B_AX_FORCE_PWR_BY_RATE_VALUE_MASK GENMASK(8, 0) + +#define R_AX_PWR_RATE_OFST_CTRL 0xD204 +#define R_AX_PWR_COEXT_CTRL 0xD220 +#define B_AX_TXAGC_BT_EN BIT(1) +#define B_AX_TXAGC_BT_MASK GENMASK(11, 3) + +#define R_AX_PWR_UL_CTRL0 0xD240 +#define R_AX_PWR_UL_CTRL2 0xD248 +#define B_AX_PWR_UL_CFO_MASK GENMASK(2, 0) +#define B_AX_PWR_UL_CTRL2_MASK 0x07700007 +#define R_AX_PWR_UL_TB_CTRL 0xD288 +#define B_AX_PWR_UL_TB_CTRL_EN BIT(31) +#define R_AX_PWR_UL_TB_1T 0xD28C +#define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0) +#define R_AX_PWR_UL_TB_2T 0xD290 +#define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0) +#define R_AX_PWR_BY_RATE_TABLE0 0xD2C0 +#define R_AX_PWR_BY_RATE_TABLE10 0xD2E8 +#define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0 +#define R_AX_PWR_BY_RATE_MAX R_AX_PWR_BY_RATE_TABLE10 +#define R_AX_PWR_LMT_TABLE0 0xD2EC +#define R_AX_PWR_LMT_TABLE19 0xD338 +#define R_AX_PWR_LMT R_AX_PWR_LMT_TABLE0 +#define R_AX_PWR_LMT_MAX R_AX_PWR_LMT_TABLE19 +#define R_AX_PWR_RU_LMT_TABLE0 0xD33C +#define R_AX_PWR_RU_LMT_TABLE11 0xD368 +#define R_AX_PWR_RU_LMT R_AX_PWR_RU_LMT_TABLE0 +#define R_AX_PWR_RU_LMT_MAX R_AX_PWR_RU_LMT_TABLE11 +#define R_AX_PWR_MACID_LMT_TABLE0 0xD36C +#define R_AX_PWR_MACID_LMT_TABLE127 0xD568 + +#define R_AX_TXPWR_IMR 0xD9E0 +#define R_AX_TXPWR_IMR_C1 0xF9E0 +#define R_AX_TXPWR_ISR 0xD9E4 +#define R_AX_TXPWR_ISR_C1 0xF9E4 + +#define R_AX_BTC_CFG 0xDA00 +#define B_AX_DIS_BTC_CLK_G BIT(2) + +#define R_AX_WL_PRI_MSK 0xDA10 +#define B_AX_PTA_WL_PRI_MASK_BCNQ BIT(8) + +#define R_AX_BTC_FUNC_EN 0xDA20 +#define R_AX_BTC_FUNC_EN_C1 0xFA20 +#define B_AX_PTA_WL_TX_EN BIT(1) +#define B_AX_PTA_EDCCA_EN BIT(0) + +#define R_BTC_BREAK_TABLE 0xDA2C +#define BTC_BREAK_PARAM 0xf0ffffff + +#define R_BTC_BT_COEX_MSK_TABLE 0xDA30 +#define B_BTC_PRI_MASK_TX_RESP_V1 BIT(3) + +#define R_AX_BT_COEX_CFG_2 0xDA34 +#define R_AX_BT_COEX_CFG_2_C1 0xFA34 +#define B_AX_GNT_BT_BYPASS_PRIORITY BIT(12) +#define B_AX_GNT_BT_POLARITY BIT(8) +#define B_AX_TIMER_MASK GENMASK(7, 0) +#define MAC_AX_CSR_RATE 80 + +#define R_AX_CSR_MODE 0xDA40 +#define R_AX_CSR_MODE_C1 0xFA40 +#define B_AX_BT_CNT_RST BIT(16) +#define B_AX_BT_STAT_DELAY_MASK GENMASK(15, 12) +#define MAC_AX_CSR_DELAY 0 +#define 
B_AX_BT_TRX_INIT_DETECT_MASK GENMASK(11, 8) +#define MAC_AX_CSR_TRX_TO 4 +#define B_AX_BT_PRI_DETECT_TO_MASK GENMASK(7, 4) +#define MAC_AX_CSR_PRI_TO 5 +#define B_AX_WL_ACT_MSK BIT(3) +#define B_AX_STATIS_BT_EN BIT(2) +#define B_AX_WL_ACT_MASK_ENABLE BIT(1) +#define B_AX_ENHANCED_BT BIT(0) + +#define R_AX_BT_STAST_HIGH 0xDA44 +#define B_AX_STATIS_BT_HI_RX_MASK GENMASK(31, 16) +#define B_AX_STATIS_BT_HI_TX_MASK GENMASK(15, 0) +#define R_AX_BT_STAST_LOW 0xDA48 +#define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0) +#define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16) + +#define R_AX_TDMA_MODE 0xDA4C +#define R_AX_TDMA_MODE_C1 0xFA4C +#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16) +#define B_AX_R_RPT_FROM_BT_MASK GENMASK(15, 8) +#define B_AX_BT_HID_ISR_SET_MASK GENMASK(7, 6) +#define B_AX_TDMA_BT_START_NOTIFY BIT(5) +#define B_AX_ENABLE_TDMA_FW_MODE BIT(4) +#define B_AX_ENABLE_PTA_TDMA_MODE BIT(3) +#define B_AX_ENABLE_COEXIST_TAB_IN_TDMA BIT(2) +#define B_AX_GPIO2_GPIO3_EXANGE_OR_NO_BT_CCA BIT(1) +#define B_AX_RTK_BT_ENABLE BIT(0) + +#define R_AX_BT_COEX_CFG_5 0xDA6C +#define R_AX_BT_COEX_CFG_5_C1 0xFA6C +#define B_AX_BT_TIME_MASK GENMASK(31, 6) +#define B_AX_BT_RPT_SAMPLE_RATE_MASK GENMASK(5, 0) +#define MAC_AX_RTK_RATE 5 + +#define R_AX_LTE_CTRL 0xDAF0 +#define R_AX_LTE_WDATA 0xDAF4 +#define R_AX_LTE_RDATA 0xDAF8 + +#define CMAC1_START_ADDR 0xE000 +#define CMAC1_END_ADDR 0xFFFF +#define R_AX_CMAC_REG_END 0xFFFF + +#define R_AX_LTE_SW_CFG_1 0x0038 +#define R_AX_LTE_SW_CFG_1_C1 0x2038 +#define B_AX_GNT_BT_RFC_S1_SW_VAL BIT(31) +#define B_AX_GNT_BT_RFC_S1_SW_CTRL BIT(30) +#define B_AX_GNT_WL_RFC_S1_SW_VAL BIT(29) +#define B_AX_GNT_WL_RFC_S1_SW_CTRL BIT(28) +#define B_AX_GNT_BT_BB_S1_SW_VAL BIT(27) +#define B_AX_GNT_BT_BB_S1_SW_CTRL BIT(26) +#define B_AX_GNT_WL_BB_S1_SW_VAL BIT(25) +#define B_AX_GNT_WL_BB_S1_SW_CTRL BIT(24) +#define B_AX_BT_SW_CTRL_WL_PRIORITY BIT(19) +#define B_AX_WL_SW_CTRL_WL_PRIORITY BIT(18) +#define B_AX_LTE_PATTERN_2_EN BIT(17) +#define B_AX_LTE_PATTERN_1_EN BIT(16) +#define B_AX_GNT_BT_RFC_S0_SW_VAL BIT(15) +#define B_AX_GNT_BT_RFC_S0_SW_CTRL BIT(14) +#define B_AX_GNT_WL_RFC_S0_SW_VAL BIT(13) +#define B_AX_GNT_WL_RFC_S0_SW_CTRL BIT(12) +#define B_AX_GNT_BT_BB_S0_SW_VAL BIT(11) +#define B_AX_GNT_BT_BB_S0_SW_CTRL BIT(10) +#define B_AX_GNT_WL_BB_S0_SW_VAL BIT(9) +#define B_AX_GNT_WL_BB_S0_SW_CTRL BIT(8) +#define B_AX_LTECOEX_FUN_EN BIT(7) +#define B_AX_LTECOEX_3WIRE_CTRL_MUX BIT(6) +#define B_AX_LTECOEX_OP_MODE_SEL_MASK GENMASK(5, 4) +#define B_AX_LTECOEX_UART_MUX BIT(3) +#define B_AX_LTECOEX_UART_MODE_SEL_MASK GENMASK(2, 0) + +#define R_AX_LTE_SW_CFG_2 0x003C +#define R_AX_LTE_SW_CFG_2_C1 0x203C +#define B_AX_WL_RX_CTRL BIT(8) +#define B_AX_GNT_WL_RX_SW_VAL BIT(7) +#define B_AX_GNT_WL_RX_SW_CTRL BIT(6) +#define B_AX_GNT_WL_TX_SW_VAL BIT(5) +#define B_AX_GNT_WL_TX_SW_CTRL BIT(4) +#define B_AX_GNT_BT_RX_SW_VAL BIT(3) +#define B_AX_GNT_BT_RX_SW_CTRL BIT(2) +#define B_AX_GNT_BT_TX_SW_VAL BIT(1) +#define B_AX_GNT_BT_TX_SW_CTRL BIT(0) + +#define RR_MOD 0x00 +#define RR_MOD_IQK GENMASK(19, 4) +#define RR_MOD_DPK GENMASK(19, 5) +#define RR_MOD_MASK GENMASK(19, 16) +#define RR_MOD_V_DOWN 0x0 +#define RR_MOD_V_STANDBY 0x1 +#define RR_MOD_V_TX 0x2 +#define RR_MOD_V_RX 0x3 +#define RR_MOD_V_TXIQK 0x4 +#define RR_MOD_V_DPK 0x5 +#define RR_MOD_V_RXK1 0x6 +#define RR_MOD_V_RXK2 0x7 +#define RR_MOD_M_RXG GENMASK(13, 4) +#define RR_MOD_M_RXBB GENMASK(9, 5) +#define RR_MODOPT 0x01 +#define RR_MODOPT_M_TXPWR GENMASK(5, 0) +#define RR_WLSEL 0x02 +#define RR_WLSEL_AG GENMASK(18, 16) +#define 
RR_RSV1 0x05 +#define RR_RSV1_RST BIT(0) +#define RR_DTXLOK 0x08 +#define RR_RSV2 0x09 +#define RR_CFGCH 0x18 +#define RR_BTC 0x1a +#define RR_BTC_TXBB GENMASK(14, 12) +#define RR_BTC_RXBB GENMASK(11, 10) +#define RR_RCKC 0x1b +#define RR_RCKC_CA GENMASK(14, 10) +#define RR_RCKS 0x1c +#define RR_RCKO 0x1d +#define RR_RCKO_OFF GENMASK(13, 9) +#define RR_RXKPLL 0x1e +#define RR_RXKPLL_OFF GENMASK(5, 0) +#define RR_RXKPLL_POW BIT(19) +#define RR_RSV4 0x1f +#define RR_RXK 0x20 +#define RR_RXK_PLLEN BIT(5) +#define RR_RXK_SEL5G BIT(7) +#define RR_RXK_SEL2G BIT(8) +#define RR_LUTWA 0x33 +#define RR_LUTWA_MASK GENMASK(9, 0) +#define RR_LUTWD1 0x3e +#define RR_LUTWD0 0x3f +#define RR_TM 0x42 +#define RR_TM_TRI BIT(19) +#define RR_TM_VAL GENMASK(6, 1) +#define RR_TM2 0x43 +#define RR_TM2_OFF GENMASK(19, 16) +#define RR_TXG1 0x51 +#define RR_TXG1_ATT2 BIT(19) +#define RR_TXG1_ATT1 BIT(11) +#define RR_TXG2 0x52 +#define RR_TXG2_ATT0 BIT(11) +#define RR_BSPAD 0x54 +#define RR_TXGA 0x55 +#define RR_TXGA_LOK_EN BIT(0) +#define RR_TXGA_TRK_EN BIT(7) +#define RR_GAINTX 0x56 +#define RR_GAINTX_ALL GENMASK(15, 0) +#define RR_GAINTX_PAD GENMASK(9, 5) +#define RR_GAINTX_BB GENMASK(4, 0) +#define RR_TXMO 0x58 +#define RR_TXMO_COI GENMASK(19, 15) +#define RR_TXMO_COQ GENMASK(14, 10) +#define RR_TXMO_FII GENMASK(9, 6) +#define RR_TXMO_FIQ GENMASK(5, 2) +#define RR_TXA 0x5d +#define RR_TXA_TRK GENMASK(19, 14) +#define RR_TXRSV 0x5c +#define RR_TXRSV_GAPK BIT(19) +#define RR_BIAS 0x5e +#define RR_BIAS_GAPK BIT(19) +#define RR_BIASA 0x60 +#define RR_BIASA_TXG GENMASK(15, 12) +#define RR_BIASA_TXA GENMASK(19, 16) +#define RR_BIASA_A GENMASK(2, 0) +#define RR_BIASA2 0x63 +#define RR_BIASA2_LB GENMASK(4, 2) +#define RR_TXATANK 0x64 +#define RR_TXATANK_LBSW GENMASK(16, 15) +#define RR_TRXIQ 0x66 +#define RR_RSV6 0x6d +#define RR_TXPOW 0x7f +#define RR_TXPOW_TXG BIT(1) +#define RR_TXPOW_TXA BIT(8) +#define RR_RXPOW 0x80 +#define RR_RXPOW_IQK GENMASK(17, 16) +#define RR_RXBB 0x83 +#define RR_RXBB_C2G GENMASK(16, 10) +#define RR_RXBB_C1G GENMASK(9, 8) +#define RR_RXBB_ATTR GENMASK(7, 4) +#define RR_RXBB_ATTC GENMASK(2, 0) +#define RR_XGLNA2 0x85 +#define RR_XGLNA2_SW GENMASK(1, 0) +#define RR_RXA 0x8a +#define RR_RXA_DPK GENMASK(9, 8) +#define RR_RXA2 0x8c +#define RR_RXA2_C2 GENMASK(9, 3) +#define RR_RXA2_C1 GENMASK(12, 10) +#define RR_RXIQGEN 0x8d +#define RR_RXIQGEN_ATTL GENMASK(12, 8) +#define RR_RXIQGEN_ATTH GENMASK(14, 13) +#define RR_RXBB2 0x8f +#define RR_EN_TIA_IDA GENMASK(11, 10) +#define RR_RXBB2_DAC_EN BIT(13) +#define RR_XALNA2 0x90 +#define RR_XALNA2_SW GENMASK(1, 0) +#define RR_DCK 0x92 +#define RR_DCK_FINE BIT(1) +#define RR_DCK_LV BIT(0) +#define RR_DCK1 0x93 +#define RR_DCK1_SEL BIT(3) +#define RR_DCK2 0x94 +#define RR_DCK2_CYCLE GENMASK(7, 2) +#define RR_MIXER 0x9f +#define RR_MIXER_GN GENMASK(4, 3) +#define RR_XTALX2 0xb8 +#define RR_MALSEL 0xbe +#define RR_RCKD 0xde +#define RR_RCKD_POW GENMASK(19, 13) +#define RR_RCKD_BW BIT(2) +#define RR_TXADBG 0xde +#define RR_LUTDBG 0xdf +#define RR_LUTDBG_LOK BIT(2) +#define RR_LUTWE2 0xee +#define RR_LUTWE 0xef +#define RR_LUTWE_LOK BIT(2) +#define RR_RFC 0xf0 +#define RR_RFC_CKEN BIT(1) + +#define R_UPD_P0 0x0000 +#define R_RSTB_WATCH_DOG 0x000C +#define B_P0_RSTB_WATCH_DOG BIT(0) +#define B_P1_RSTB_WATCH_DOG BIT(1) +#define B_UPD_P0_EN BIT(30) +#define R_ANAPAR_PW15 0x030C +#define B_ANAPAR_PW15 GENMASK(31, 24) +#define B_ANAPAR_PW15_H GENMASK(27, 24) +#define B_ANAPAR_PW15_H2 GENMASK(27, 26) +#define R_ANAPAR 0x032C +#define B_ANAPAR_15 GENMASK(31, 16) 
+#define B_ANAPAR_ADCCLK BIT(30) +#define B_ANAPAR_FLTRST BIT(22) +#define B_ANAPAR_CRXBB GENMASK(18, 16) +#define B_ANAPAR_14 GENMASK(15, 0) +#define R_UPD_CLK_ADC 0x0700 +#define B_UPD_CLK_ADC_ON BIT(24) +#define B_UPD_CLK_ADC_VAL GENMASK(26, 25) +#define R_RSTB_ASYNC 0x0704 +#define B_RSTB_ASYNC_ALL BIT(1) +#define R_PMAC_GNT 0x0980 +#define B_PMAC_GNT_TXEN BIT(0) +#define B_PMAC_GNT_RXEN BIT(16) +#define B_PMAC_GNT_P1 GENMASK(20, 17) +#define B_PMAC_GNT_P2 GENMASK(29, 26) +#define R_PMAC_RX_CFG1 0x0988 +#define B_PMAC_OPT1_MSK GENMASK(11, 0) +#define R_PMAC_RXMOD 0x0994 +#define B_PMAC_RXMOD_MSK GENMASK(7, 4) +#define R_MAC_SEL 0x09A4 +#define B_MAC_SEL_MOD GENMASK(4, 2) +#define B_MAC_SEL_DPD_EN BIT(10) +#define B_MAC_SEL_PWR_EN BIT(16) +#define R_PMAC_TX_CTRL 0x09C0 +#define B_PMAC_TXEN_DIS BIT(0) +#define R_PMAC_TX_PRD 0x09C4 +#define B_PMAC_TX_PRD_MSK GENMASK(31, 8) +#define B_PMAC_CTX_EN BIT(0) +#define B_PMAC_PTX_EN BIT(4) +#define R_PMAC_TX_CNT 0x09C8 +#define B_PMAC_TX_CNT_MSK GENMASK(31, 0) +#define R_CCX 0x0C00 +#define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4) +#define B_MEASUREMENT_TRIG_MSK BIT(2) +#define B_CCX_TRIG_OPT_MSK BIT(1) +#define B_CCX_EN_MSK BIT(0) +#define R_IFS_COUNTER 0x0C28 +#define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16) +#define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14) +#define B_IFS_COUNTER_CLR_MSK BIT(13) +#define B_IFS_COLLECT_EN BIT(12) +#define R_IFS_T1 0x0C2C +#define B_IFS_T1_TH_HIGH_MSK GENMASK(31, 16) +#define B_IFS_T1_EN_MSK BIT(15) +#define B_IFS_T1_TH_LOW_MSK GENMASK(14, 0) +#define R_IFS_T2 0x0C30 +#define B_IFS_T2_TH_HIGH_MSK GENMASK(31, 16) +#define B_IFS_T2_EN_MSK BIT(15) +#define B_IFS_T2_TH_LOW_MSK GENMASK(14, 0) +#define R_IFS_T3 0x0C34 +#define B_IFS_T3_TH_HIGH_MSK GENMASK(31, 16) +#define B_IFS_T3_EN_MSK BIT(15) +#define B_IFS_T3_TH_LOW_MSK GENMASK(14, 0) +#define R_IFS_T4 0x0C38 +#define B_IFS_T4_TH_HIGH_MSK GENMASK(31, 16) +#define B_IFS_T4_EN_MSK BIT(15) +#define B_IFS_T4_TH_LOW_MSK GENMASK(14, 0) +#define R_PD_CTRL 0x0C3C +#define B_PD_HIT_DIS BIT(9) +#define R_IOQ_IQK_DPK 0x0C60 +#define B_IOQ_IQK_DPK_EN BIT(1) +#define R_P0_EN_SOUND_WO_NDP 0x0D7C +#define B_P0_EN_SOUND_WO_NDP BIT(1) +#define R_SPOOF_ASYNC_RST 0x0D84 +#define B_SPOOF_ASYNC_RST BIT(15) +#define R_NDP_BRK0 0xDA0 +#define R_NDP_BRK1 0xDA4 +#define B_NDP_RU_BRK BIT(0) +#define R_BRK_ASYNC_RST_EN_1 0x0DC0 +#define R_BRK_ASYNC_RST_EN_2 0x0DC4 +#define R_BRK_ASYNC_RST_EN_3 0x0DC8 +#define R_P0_RXCK 0x12A0 +#define B_P0_RXCK_VAL GENMASK(18, 16) +#define B_P0_RXCK_ON BIT(19) +#define B_P0_RXCK_BW3 BIT(30) +#define R_P0_NRBW 0x12B8 +#define B_P0_NRBW_DBG BIT(30) +#define R_S0_RXDC 0x12D4 +#define B_S0_RXDC_I GENMASK(25, 16) +#define B_S0_RXDC_Q GENMASK(31, 26) +#define R_S0_RXDC2 0x12D8 +#define B_S0_RXDC2_SEL GENMASK(9, 8) +#define B_S0_RXDC2_AVG GENMASK(7, 6) +#define B_S0_RXDC2_MEN GENMASK(5, 4) +#define B_S0_RXDC2_Q2 GENMASK(3, 0) +#define R_CFO_COMP_SEG0_L 0x1384 +#define R_CFO_COMP_SEG0_H 0x1388 +#define R_CFO_COMP_SEG0_CTRL 0x138C +#define R_DBG32_D 0x1730 +#define R_TX_COUNTER 0x1A40 +#define R_IFS_CLM_TX_CNT 0x1ACC +#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16) +#define B_IFS_CLM_TX_CNT_MSK GENMASK(15, 0) +#define R_IFS_CLM_CCA 0x1AD0 +#define B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK GENMASK(31, 16) +#define B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK GENMASK(15, 0) +#define R_IFS_CLM_FA 0x1AD4 +#define B_IFS_CLM_OFDM_FA_MSK GENMASK(31, 16) +#define B_IFS_CLM_CCK_FA_MSK GENMASK(15, 0) +#define R_IFS_HIS 0x1AD8 +#define B_IFS_T4_HIS_MSK GENMASK(31, 24) +#define B_IFS_T3_HIS_MSK 
GENMASK(23, 16) +#define B_IFS_T2_HIS_MSK GENMASK(15, 8) +#define B_IFS_T1_HIS_MSK GENMASK(7, 0) +#define R_IFS_AVG_L 0x1ADC +#define B_IFS_T2_AVG_MSK GENMASK(31, 16) +#define B_IFS_T1_AVG_MSK GENMASK(15, 0) +#define R_IFS_AVG_H 0x1AE0 +#define B_IFS_T4_AVG_MSK GENMASK(31, 16) +#define B_IFS_T3_AVG_MSK GENMASK(15, 0) +#define R_IFS_CCA_L 0x1AE4 +#define B_IFS_T2_CCA_MSK GENMASK(31, 16) +#define B_IFS_T1_CCA_MSK GENMASK(15, 0) +#define R_IFS_CCA_H 0x1AE8 +#define B_IFS_T4_CCA_MSK GENMASK(31, 16) +#define B_IFS_T3_CCA_MSK GENMASK(15, 0) +#define R_IFSCNT 0x1AEC +#define B_IFSCNT_DONE_MSK BIT(16) +#define B_IFSCNT_TOTAL_CNT_MSK GENMASK(15, 0) +#define R_TXAGC_TP 0x1C04 +#define B_TXAGC_TP GENMASK(2, 0) +#define R_TSSI_THER 0x1C10 +#define B_TSSI_THER GENMASK(29, 24) +#define R_TXAGC_BB 0x1C60 +#define B_TXAGC_BB_OFT GENMASK(31, 16) +#define B_TXAGC_BB GENMASK(31, 24) +#define R_S0_ADDCK 0x1E00 +#define B_S0_ADDCK_I GENMASK(9, 0) +#define B_S0_ADDCK_Q GENMASK(19, 10) +#define R_ADC_FIFO 0x20fc +#define B_ADC_FIFO_RST GENMASK(31, 24) +#define R_TXFIR0 0x2300 +#define B_TXFIR_C01 GENMASK(23, 0) +#define R_TXFIR2 0x2304 +#define B_TXFIR_C23 GENMASK(23, 0) +#define R_TXFIR4 0x2308 +#define B_TXFIR_C45 GENMASK(23, 0) +#define R_TXFIR6 0x230c +#define B_TXFIR_C67 GENMASK(23, 0) +#define R_TXFIR8 0x2310 +#define B_TXFIR_C89 GENMASK(23, 0) +#define R_TXFIRA 0x2314 +#define B_TXFIR_CAB GENMASK(23, 0) +#define R_TXFIRC 0x2318 +#define B_TXFIR_CCD GENMASK(23, 0) +#define R_TXFIRE 0x231c +#define B_TXFIR_CEF GENMASK(23, 0) +#define R_RXCCA 0x2344 +#define B_RXCCA_DIS BIT(31) +#define R_RXSC 0x237C +#define B_RXSC_EN BIT(0) +#define R_RXSCOBC 0x23B0 +#define B_RXSCOBC_TH GENMASK(18, 0) +#define R_RXSCOCCK 0x23B4 +#define B_RXSCOCCK_TH GENMASK(18, 0) +#define R_P1_EN_SOUND_WO_NDP 0x2D7C +#define B_P1_EN_SOUND_WO_NDP BIT(1) +#define R_P1_DBGMOD 0x32B8 +#define B_P1_DBGMOD_ON BIT(30) +#define R_S1_RXDC 0x32D4 +#define B_S1_RXDC_I GENMASK(25, 16) +#define B_S1_RXDC_Q GENMASK(31, 26) +#define R_S1_RXDC2 0x32D8 +#define B_S1_RXDC2_EN GENMASK(5, 4) +#define B_S1_RXDC2_SEL GENMASK(9, 8) +#define B_S1_RXDC2_Q2 GENMASK(3, 0) +#define R_TXAGC_BB_S1 0x3C60 +#define B_TXAGC_BB_S1_OFT GENMASK(31, 16) +#define B_TXAGC_BB_S1 GENMASK(31, 24) +#define R_S1_ADDCK 0x3E00 +#define B_S1_ADDCK_I GENMASK(9, 0) +#define B_S1_ADDCK_Q GENMASK(19, 10) +#define R_DCFO 0x4264 +#define B_DCFO GENMASK(1, 0) +#define R_SEG0CSI 0x42AC +#define B_SEG0CSI_IDX GENMASK(10, 0) +#define R_SEG0CSI_EN 0x42C4 +#define B_SEG0CSI_EN BIT(23) +#define R_BSS_CLR_MAP 0x43ac +#define B_BSS_CLR_MAP_VLD0 BIT(28) +#define B_BSS_CLR_MAP_TGT GENMASK(27, 22) +#define B_BSS_CLR_MAP_STAID GENMASK(21, 11) +#define R_CFO_TRK0 0x4404 +#define R_CFO_TRK1 0x440C +#define B_CFO_TRK_MSK GENMASK(14, 10) +#define R_DCFO_COMP_S0 0x448C +#define B_DCFO_COMP_S0_MSK GENMASK(11, 0) +#define R_DCFO_WEIGHT 0x4490 +#define B_DCFO_WEIGHT_MSK GENMASK(27, 24) +#define R_DCFO_OPT 0x4494 +#define B_DCFO_OPT_EN BIT(29) +#define R_BANDEDGE 0x4498 +#define B_BANDEDGE_EN BIT(30) +#define R_TXPATH_SEL 0x458C +#define B_TXPATH_SEL_MSK GENMASK(31, 28) +#define R_TXPWR 0x4594 +#define B_TXPWR_MSK GENMASK(30, 22) +#define R_TXNSS_MAP 0x45B4 +#define B_TXNSS_MAP_MSK GENMASK(20, 17) +#define R_PATH0_IB_PKPW 0x4628 +#define B_PATH0_IB_PKPW_MSK GENMASK(11, 6) +#define R_PATH0_LNA_ERR1 0x462C +#define B_PATH0_LNA_ERR_G1_A_MSK GENMASK(29, 24) +#define B_PATH0_LNA_ERR_G0_G_MSK GENMASK(17, 12) +#define B_PATH0_LNA_ERR_G0_A_MSK GENMASK(11, 6) +#define R_PATH0_LNA_ERR2 0x4630 +#define 
B_PATH0_LNA_ERR_G2_G_MSK GENMASK(23, 18) +#define B_PATH0_LNA_ERR_G2_A_MSK GENMASK(17, 12) +#define B_PATH0_LNA_ERR_G1_G_MSK GENMASK(5, 0) +#define R_PATH0_LNA_ERR3 0x4634 +#define B_PATH0_LNA_ERR_G4_G_MSK GENMASK(29, 24) +#define B_PATH0_LNA_ERR_G4_A_MSK GENMASK(23, 18) +#define B_PATH0_LNA_ERR_G3_G_MSK GENMASK(11, 6) +#define B_PATH0_LNA_ERR_G3_A_MSK GENMASK(5, 0) +#define R_PATH0_LNA_ERR4 0x4638 +#define B_PATH0_LNA_ERR_G6_A_MSK GENMASK(29, 24) +#define B_PATH0_LNA_ERR_G5_G_MSK GENMASK(17, 12) +#define B_PATH0_LNA_ERR_G5_A_MSK GENMASK(11, 6) +#define R_PATH0_LNA_ERR5 0x463C +#define B_PATH0_LNA_ERR_G6_G_MSK GENMASK(5, 0) +#define R_PATH0_TIA_ERR_G0 0x4640 +#define B_PATH0_TIA_ERR_G0_G_MSK GENMASK(23, 18) +#define B_PATH0_TIA_ERR_G0_A_MSK GENMASK(17, 12) +#define R_PATH0_TIA_ERR_G1 0x4644 +#define B_PATH0_TIA_ERR_G1_SEL GENMASK(31, 30) +#define B_PATH0_TIA_ERR_G1_G_MSK GENMASK(11, 6) +#define B_PATH0_TIA_ERR_G1_A_MSK GENMASK(5, 0) +#define R_PATH0_IB_PBK 0x4650 +#define B_PATH0_IB_PBK_MSK GENMASK(14, 10) +#define R_PATH0_RXB_INIT 0x4658 +#define B_PATH0_RXB_INIT_IDX_MSK GENMASK(9, 5) +#define R_PATH0_LNA_INIT 0x4668 +#define B_PATH0_LNA_INIT_IDX_MSK GENMASK(26, 24) +#define R_PATH0_BTG 0x466C +#define B_PATH0_BTG_SHEN GENMASK(18, 17) +#define R_PATH0_TIA_INIT 0x4674 +#define B_PATH0_TIA_INIT_IDX_MSK BIT(17) +#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0 +#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4 +#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_P0_NBIIDX 0x469C +#define B_P0_NBIIDX_VAL GENMASK(11, 0) +#define B_P0_NBIIDX_NOTCH_EN BIT(12) +#define R_P1_MODE 0x4718 +#define B_P1_MODE_SEL GENMASK(31, 30) +#define R_PATH1_LNA_INIT 0x473C +#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24) +#define R_PATH1_TIA_INIT 0x4748 +#define B_PATH1_TIA_INIT_IDX_MSK BIT(17) +#define R_PATH1_BTG 0x4740 +#define B_PATH1_BTG_SHEN GENMASK(18, 17) +#define R_PATH1_RXB_INIT 0x472C +#define B_PATH1_RXB_INIT_IDX_MSK GENMASK(9, 5) +#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774 +#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778 +#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_P1_NBIIDX 0x4770 +#define B_P1_NBIIDX_VAL GENMASK(11, 0) +#define B_P1_NBIIDX_NOTCH_EN BIT(12) +#define R_SEG0R_PD 0x481C +#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29) +#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6) +#define R_2P4G_BAND 0x4970 +#define B_2P4G_BAND_SEL BIT(1) +#define R_FC0_BW 0x4974 +#define B_FC0_BW_INV GENMASK(6, 0) +#define B_FC0_BW_SET GENMASK(31, 30) +#define R_CHBW_MOD 0x4978 +#define B_CHBW_MOD_PRICH GENMASK(11, 8) +#define B_CHBW_MOD_SBW GENMASK(13, 12) +#define R_CFO_COMP_SEG1_L 0x5384 +#define R_CFO_COMP_SEG1_H 0x5388 +#define R_CFO_COMP_SEG1_CTRL 0x538C +#define B_CFO_COMP_VALID_BIT BIT(29) +#define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24) +#define B_CFO_COMP_VAL_MSK GENMASK(11, 0) +#define R_DPD_OFT_EN 0x5800 +#define B_DPD_OFT_EN BIT(28) +#define R_DPD_OFT_ADDR 0x5804 +#define B_DPD_OFT_ADDR GENMASK(31, 27) +#define R_P0_TMETER 0x5810 +#define B_P0_TMETER GENMASK(15, 10) +#define B_P0_TMETER_DIS BIT(16) +#define B_P0_TMETER_TRK BIT(24) +#define R_P0_TSSI_TRK 0x5818 +#define B_P0_TSSI_TRK_EN BIT(30) +#define B_P0_TSSI_OFT_EN BIT(28) +#define B_P0_TSSI_OFT GENMASK(7, 0) +#define R_P0_TSSI_AVG 0x5820 +#define B_P0_TSSI_AVG GENMASK(15, 12) +#define R_P0_RFCTM 0x5864 +#define B_P0_RFCTM_VAL GENMASK(25, 20) +#define R_P0_RFCTM_RDY BIT(26) +#define R_P0_TXDPD 0x58D4 +#define B_P0_TXDPD 
GENMASK(31, 28) +#define R_P0_TXPW_RSTB 0x58DC +#define B_P0_TXPW_RSTB_MANON BIT(30) +#define B_P0_TXPW_RSTB_TSSI BIT(31) +#define R_P0_TSSI_MV_AVG 0x58E4 +#define B_P0_TSSI_MV_AVG GENMASK(13, 11) +#define R_TXGAIN_SCALE 0x58F0 +#define B_TXGAIN_SCALE_EN BIT(19) +#define B_TXGAIN_SCALE_OFT GENMASK(31, 24) +#define R_P0_TSSI_BASE 0x5C00 +#define R_S0_DACKI 0x5E00 +#define B_S0_DACKI_AR GENMASK(31, 28) +#define B_S0_DACKI_EN BIT(3) +#define R_S0_DACKI2 0x5E30 +#define B_S0_DACKI2_K GENMASK(21, 12) +#define R_S0_DACKI7 0x5E44 +#define B_S0_DACKI7_K GENMASK(15, 8) +#define R_S0_DACKI8 0x5E48 +#define B_S0_DACKI8_K GENMASK(15, 8) +#define R_S0_DACKQ 0x5E50 +#define B_S0_DACKQ_AR GENMASK(31, 28) +#define B_S0_DACKQ_EN BIT(3) +#define R_S0_DACKQ2 0x5E80 +#define B_S0_DACKQ2_K GENMASK(21, 12) +#define R_S0_DACKQ7 0x5E94 +#define B_S0_DACKQ7_K GENMASK(15, 8) +#define R_S0_DACKQ8 0x5E98 +#define B_S0_DACKQ8_K GENMASK(15, 8) +#define R_P1_TMETER 0x7810 +#define B_P1_TMETER GENMASK(15, 10) +#define B_P1_TMETER_DIS BIT(16) +#define B_P1_TMETER_TRK BIT(24) +#define R_P1_TSSI_TRK 0x7818 +#define B_P1_TSSI_TRK_EN BIT(30) +#define B_P1_TSSI_OFT_EN BIT(28) +#define B_P1_TSSI_OFT GENMASK(7, 0) +#define R_P1_TSSI_AVG 0x7820 +#define B_P1_TSSI_AVG GENMASK(15, 12) +#define R_P1_RFCTM 0x7864 +#define R_P1_RFCTM_RDY BIT(26) +#define B_P1_RFCTM_VAL GENMASK(25, 20) +#define R_P1_TXPW_RSTB 0x78DC +#define B_P1_TXPW_RSTB_MANON BIT(30) +#define B_P1_TXPW_RSTB_TSSI BIT(31) +#define R_P1_TSSI_MV_AVG 0x78E4 +#define B_P1_TSSI_MV_AVG GENMASK(13, 11) +#define R_TSSI_THOF 0x7C00 +#define R_S1_DACKI 0x7E00 +#define B_S1_DACKI_AR GENMASK(31, 28) +#define B_S1_DACKI_EN BIT(3) +#define R_S1_DACKI2 0x7E30 +#define B_S1_DACKI2_K GENMASK(21, 12) +#define R_S1_DACKI7 0x7E44 +#define B_S1_DACKI_K GENMASK(15, 8) +#define R_S1_DACKI8 0x7E48 +#define B_S1_DACKI8_K GENMASK(15, 8) +#define R_S1_DACKQ 0x7E50 +#define B_S1_DACKQ_AR GENMASK(31, 28) +#define B_S1_DACKQ_EN BIT(3) +#define R_S1_DACKQ2 0x7E80 +#define B_S1_DACKQ2_K GENMASK(21, 12) +#define R_S1_DACKQ7 0x7E94 +#define B_S1_DACKQ7_K GENMASK(15, 8) +#define R_S1_DACKQ8 0x7E98 +#define B_S1_DACKQ8_K GENMASK(15, 8) +#define R_NCTL_CFG 0x8000 +#define B_NCTL_CFG_SPAGE GENMASK(2, 1) +#define R_NCTL_RPT 0x8008 +#define B_NCTL_RPT_FLG BIT(26) +#define R_NCTL_N1 0x8010 +#define B_NCTL_N1_CIP GENMASK(7, 0) +#define R_NCTL_N2 0x8014 +#define R_IQK_COM 0x8018 +#define R_IQK_DIF 0x801C +#define B_IQK_DIF_TRX GENMASK(1, 0) +#define R_IQK_DIF1 0x8020 +#define B_IQK_DIF1_TXPI GENMASK(19, 0) +#define R_IQK_DIF2 0x8024 +#define B_IQK_DIF2_RXPI GENMASK(19, 0) +#define R_IQK_DIF4 0x802C +#define B_IQK_DIF4_TXT GENMASK(11, 0) +#define B_IQK_DIF4_RXT GENMASK(27, 16) +#define R_IQK_CFG 0x8034 +#define B_IQK_CFG_SET GENMASK(5, 4) +#define R_TPG_MOD 0x806C +#define B_TPG_MOD_F GENMASK(2, 1) +#define R_MDPK_SYNC 0x8070 +#define B_MDPK_SYNC_SEL BIT(31) +#define B_MDPK_SYNC_MAN GENMASK(31, 28) +#define R_MDPK_RX_DCK 0x8074 +#define R_NCTL_RW 0x8080 +#define R_KIP_SYSCFG 0x8088 +#define R_KIP_CLK 0x808C +#define R_LDL_NORM 0x80A0 +#define B_LDL_NORM_PN GENMASK(12, 8) +#define B_LDL_NORM_OP GENMASK(1, 0) +#define R_DPK_CTL 0x80B0 +#define B_DPK_CTL_EN BIT(28) +#define R_DPK_CFG 0x80B8 +#define B_DPK_CFG_IDX GENMASK(14, 12) +#define R_DPK_CFG2 0x80BC +#define B_DPK_CFG2_ST BIT(14) +#define R_DPK_CFG3 0x80C0 +#define R_KPATH_CFG 0x80D0 +#define R_KIP_RPT1 0x80D4 +#define B_KIP_RPT1_SEL GENMASK(21, 16) +#define R_SRAM_IQRX 0x80D8 +#define R_GAPK 0x80E0 +#define B_GAPK_ADR BIT(0) +#define R_SRAM_IQRX2 0x80E8 
+#define R_DPK_TRK 0x80f0 +#define B_DPK_TRK_DIS BIT(31) +#define R_RPT_COM 0x80FC +#define B_PRT_COM_SYNERR BIT(30) +#define B_PRT_COM_DCI GENMASK(27, 16) +#define B_PRT_COM_CORV GENMASK(15, 8) +#define B_PRT_COM_DCQ GENMASK(11, 0) +#define B_PRT_COM_GL GENMASK(7, 4) +#define B_PRT_COM_CORI GENMASK(7, 0) +#define R_COEF_SEL 0x8104 +#define B_COEF_SEL_IQC BIT(0) +#define B_COEF_SEL_MDPD BIT(8) +#define R_CFIR_SYS 0x8120 +#define R_IQK_RES 0x8124 +#define B_IQK_RES_TXCFIR GENMASK(11, 8) +#define B_IQK_RES_RXCFIR GENMASK(3, 0) +#define R_TXIQC 0x8138 +#define R_RXIQC 0x813c +#define B_RXIQC_BYPASS BIT(0) +#define B_RXIQC_BYPASS2 BIT(2) +#define B_RXIQC_NEWP GENMASK(19, 8) +#define B_RXIQC_NEWX GENMASK(31, 20) +#define R_KIP 0x8140 +#define B_KIP_DBCC BIT(0) +#define B_KIP_RFGAIN BIT(8) +#define R_RFGAIN 0x8144 +#define B_RFGAIN_PAD GENMASK(4, 0) +#define B_RFGAIN_TXBB GENMASK(12, 8) +#define R_RFGAIN_BND 0x8148 +#define B_RFGAIN_BND GENMASK(4, 0) +#define R_CFIR_MAP 0x8150 +#define R_CFIR_LUT 0x8154 +#define B_CFIR_LUT_SEL BIT(8) +#define B_CFIR_LUT_G3 BIT(3) +#define B_CFIR_LUT_G2 BIT(2) +#define B_CFIR_LUT_GP GENMASK(1, 0) +#define R_DPD_V1 0x81a0 +#define R_DPD_CH0 0x81AC +#define R_DPD_BND 0x81B4 +#define R_DPD_CH0A 0x81BC +#define R_TXAGC_RFK 0x81C4 +#define B_TXAGC_RFK_CH0 GENMASK(5, 0) +#define R_DPD_COM 0x81C8 +#define R_KIP_IQP 0x81CC +#define B_KIP_IQP_IQSW GENMASK(5, 0) +#define R_KIP_RPT 0x81D4 +#define B_KIP_RPT_SEL GENMASK(21, 16) +#define R_W_COEF 0x81D8 +#define R_LOAD_COEF 0x81DC +#define B_LOAD_COEF_MDPD BIT(16) +#define B_LOAD_COEF_CFIR GENMASK(1, 0) +#define B_LOAD_COEF_AUTO BIT(0) +#define R_RPT_PER 0x81FC +#define R_RXCFIR_P0C0 0x8D40 +#define R_RXCFIR_P0C1 0x8D84 +#define R_RXCFIR_P0C2 0x8DC8 +#define R_RXCFIR_P0C3 0x8E0C +#define R_TXCFIR_P0C0 0x8F50 +#define R_TXCFIR_P0C1 0x8F84 +#define R_TXCFIR_P0C2 0x8FB8 +#define R_TXCFIR_P0C3 0x8FEC +#define R_RXCFIR_P1C0 0x9140 +#define R_RXCFIR_P1C1 0x9184 +#define R_RXCFIR_P1C2 0x91C8 +#define R_RXCFIR_P1C3 0x920C +#define R_TXCFIR_P1C0 0x9350 +#define R_TXCFIR_P1C1 0x9384 +#define R_TXCFIR_P1C2 0x93B8 +#define R_TXCFIR_P1C3 0x93EC +#define R_IQKINF 0x9FE0 +#define B_IQKINF_VER GENMASK(31, 24) +#define B_IQKINF_FAIL_RXGRP GENMASK(23, 16) +#define B_IQKINF_FAIL_TXGRP GENMASK(15, 8) +#define B_IQKINF_FAIL GENMASK(3, 0) +#define B_IQKINF_F_RX BIT(3) +#define B_IQKINF_FTX BIT(2) +#define B_IQKINF_FFIN BIT(1) +#define B_IQKINF_FCOR BIT(0) +#define R_IQKCH 0x9FE4 +#define B_IQKCH_CH GENMASK(15, 8) +#define B_IQKCH_BW GENMASK(7, 4) +#define B_IQKCH_BAND GENMASK(3, 0) +#define R_IQKINF2 0x9FE8 +#define B_IQKINF2_FCNT GENMASK(23, 16) +#define B_IQKINF2_KCNT GENMASK(15, 8) +#define B_IQKINF2_NCTLV GENMASK(7, 0) +#endif diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c new file mode 100644 index 00000000000000..f00b94ecfff4f8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "debug.h" +#include "ps.h" + +#define COUNTRY_REGD(_alpha2, _txpwr_regd_2g, _txpwr_regd_5g) \ + {.alpha2 = (_alpha2), \ + .txpwr_regd[RTW89_BAND_2G] = (_txpwr_regd_2g), \ + .txpwr_regd[RTW89_BAND_5G] = (_txpwr_regd_5g) \ + } + +static const struct rtw89_regulatory rtw89_ww_regd = + COUNTRY_REGD("00", RTW89_WW, RTW89_WW); + +static const struct rtw89_regulatory rtw89_regd_map[] = { + COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC), + 
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE), + COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO), + COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("UA", RTW89_WW, 
RTW89_ETSI), + COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC), + COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CA", RTW89_IC, RTW89_IC), + COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK), + COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ET", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI), + 
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC), + COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI), +}; + +static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { + if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2)) + return &rtw89_regd_map[i]; + } + + return &rtw89_ww_regd; +} + +static bool 
rtw89_regd_is_ww(const struct rtw89_regulatory *regd) +{ + return regd == &rtw89_ww_regd; +} + +int rtw89_regd_init(struct rtw89_dev *rtwdev, + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) +{ + const struct rtw89_regulatory *chip_regd; + struct wiphy *wiphy = rtwdev->hw->wiphy; + int ret; + + if (!wiphy) + return -EINVAL; + + chip_regd = rtw89_regd_find_reg_by_name(rtwdev->efuse.country_code); + if (!rtw89_regd_is_ww(chip_regd)) { + rtwdev->regd = chip_regd; + /* Ignore country ie if there is a country domain programmed in chip */ + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + wiphy->regulatory_flags |= REGULATORY_STRICT_REG; + + ret = regulatory_hint(rtwdev->hw->wiphy, rtwdev->regd->alpha2); + if (ret) + rtw89_warn(rtwdev, "failed to hint regulatory:%d\n", ret); + + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "efuse country code %c%c, mapping to 2g txregd %d, 5g txregd %d\n", + rtwdev->efuse.country_code[0], rtwdev->efuse.country_code[1], + rtwdev->regd->txpwr_regd[RTW89_BAND_2G], + rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); + + return 0; + } + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "worldwide roaming chip, follow the setting of stack(%c%c), mapping to 2g txregd %d, 5g txregd %d\n", + rtwdev->regd->alpha2[0], rtwdev->regd->alpha2[1], + rtwdev->regd->txpwr_regd[RTW89_BAND_2G], + rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); + + return 0; +} + +static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev, + struct wiphy *wiphy, + struct regulatory_request *request) +{ + rtwdev->regd = rtw89_regd_find_reg_by_name(request->alpha2); + /* This notification might be set from the system of distros, + * and it does not expect the regulatory will be modified by + * connecting to an AP (i.e. country ie). + */ + if (request->initiator == NL80211_REGDOM_SET_BY_USER && + !rtw89_regd_is_ww(rtwdev->regd)) + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + else + wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE; +} + +void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + + if (wiphy->regd) { + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "There is a country domain programmed in chip, ignore notifications\n"); + goto exit; + } + rtw89_regd_notifier_apply(rtwdev, wiphy, request); + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "get alpha2 %c%c from initiator %d, mapping to 2g txregd %d, 5g txregd %d\n", + request->alpha2[0], request->alpha2[1], request->initiator, + rtwdev->regd->txpwr_regd[RTW89_BAND_2G], + rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); + + rtw89_chip_set_txpwr(rtwdev); + +exit: + mutex_unlock(&rtwdev->mutex); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c new file mode 100644 index 00000000000000..5c6ffca3a324ef --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -0,0 +1,2036 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "coex.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852a.h" +#include "rtw8852a_rfk.h" +#include "rtw8852a_table.h" +#include "txrx.h" + +static const struct rtw89_hfc_ch_cfg rtw8852a_hfc_chcfg_pcie[] = { + {128, 1896, grp_0}, /* ACH 0 */ + {128, 1896, grp_0}, /* ACH 1 */ + {128, 1896, grp_0}, /* ACH 2 */ + {128, 1896, grp_0}, /* ACH 3 */ + {128, 1896, grp_1}, /* ACH 4 
*/ + {128, 1896, grp_1}, /* ACH 5 */ + {128, 1896, grp_1}, /* ACH 6 */ + {128, 1896, grp_1}, /* ACH 7 */ + {32, 1896, grp_0}, /* B0MGQ */ + {128, 1896, grp_0}, /* B0HIQ */ + {32, 1896, grp_1}, /* B1MGQ */ + {128, 1896, grp_1}, /* B1HIQ */ + {40, 0, 0} /* FWCMDQ */ +}; + +static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = { + 1896, /* Group 0 */ + 1896, /* Group 1 */ + 3792, /* Public Max */ + 0 /* WP threshold */ +}; + +static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = { + [RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie, + &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH}, + [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH}, + [RTW89_QTA_INVALID] = {NULL}, +}; + +static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = { + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0, + &wde_qt0, &ple_qt4, &ple_qt5}, + [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4, + &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13}, + [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL, + NULL}, +}; + +static const struct rtw89_reg2_def rtw8852a_pmac_ht20_mcs7_tbl[] = { + {0x44AC, 0x00000000}, + {0x44B0, 0x00000000}, + {0x44B4, 0x00000000}, + {0x44B8, 0x00000000}, + {0x44BC, 0x00000000}, + {0x44C0, 0x00000000}, + {0x44C4, 0x00000000}, + {0x44C8, 0x00000000}, + {0x44CC, 0x00000000}, + {0x44D0, 0x00000000}, + {0x44D4, 0x00000000}, + {0x44D8, 0x00000000}, + {0x44DC, 0x00000000}, + {0x44E0, 0x00000000}, + {0x44E4, 0x00000000}, + {0x44E8, 0x00000000}, + {0x44EC, 0x00000000}, + {0x44F0, 0x00000000}, + {0x44F4, 0x00000000}, + {0x44F8, 0x00000000}, + {0x44FC, 0x00000000}, + {0x4500, 0x00000000}, + {0x4504, 0x00000000}, + {0x4508, 0x00000000}, + {0x450C, 0x00000000}, + {0x4510, 0x00000000}, + {0x4514, 0x00000000}, + {0x4518, 0x00000000}, + {0x451C, 0x00000000}, + {0x4520, 0x00000000}, + {0x4524, 0x00000000}, + {0x4528, 0x00000000}, + {0x452C, 0x00000000}, + {0x4530, 0x4E1F3E81}, + {0x4534, 0x00000000}, + {0x4538, 0x0000005A}, + {0x453C, 0x00000000}, + {0x4540, 0x00000000}, + {0x4544, 0x00000000}, + {0x4548, 0x00000000}, + {0x454C, 0x00000000}, + {0x4550, 0x00000000}, + {0x4554, 0x00000000}, + {0x4558, 0x00000000}, + {0x455C, 0x00000000}, + {0x4560, 0x4060001A}, + {0x4564, 0x40000000}, + {0x4568, 0x00000000}, + {0x456C, 0x00000000}, + {0x4570, 0x04000007}, + {0x4574, 0x0000DC87}, + {0x4578, 0x00000BAB}, + {0x457C, 0x03E00000}, + {0x4580, 0x00000048}, + {0x4584, 0x00000000}, + {0x4588, 0x000003E8}, + {0x458C, 0x30000000}, + {0x4590, 0x00000000}, + {0x4594, 0x10000000}, + {0x4598, 0x00000001}, + {0x459C, 0x00030000}, + {0x45A0, 0x01000000}, + {0x45A4, 0x03000200}, + {0x45A8, 0xC00001C0}, + {0x45AC, 0x78018000}, + {0x45B0, 0x80000000}, + {0x45B4, 0x01C80600}, + {0x45B8, 0x00000002}, + {0x4594, 0x10000000} +}; + +static const struct rtw89_reg3_def rtw8852a_btc_preagc_en_defs[] = { + {0x4624, GENMASK(20, 14), 0x40}, + {0x46f8, GENMASK(20, 14), 0x40}, + {0x4674, GENMASK(20, 19), 0x2}, + {0x4748, GENMASK(20, 19), 0x2}, + {0x4650, GENMASK(14, 10), 0x18}, + {0x4724, GENMASK(14, 10), 0x18}, + {0x4688, GENMASK(1, 0), 0x3}, + {0x475c, GENMASK(1, 0), 0x3}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852a_btc_preagc_en_defs); + +static const struct rtw89_reg3_def rtw8852a_btc_preagc_dis_defs[] = { + {0x4624, GENMASK(20, 14), 0x1a}, + {0x46f8, GENMASK(20, 14), 0x1a}, + {0x4674, GENMASK(20, 19), 0x1}, + {0x4748, GENMASK(20, 19), 0x1}, + {0x4650, GENMASK(14, 10), 0x12}, + {0x4724, GENMASK(14, 10), 0x12}, + {0x4688, GENMASK(1, 0), 0x0}, 
+ {0x475c, GENMASK(1, 0), 0x0}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852a_btc_preagc_dis_defs); + +static const struct rtw89_pwr_cfg rtw8852a_pwron[] = { + {0x00C6, + PWR_CV_MSK_B, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x1086, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), 0}, + {0x1086, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_POLL, BIT(1), BIT(1)}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(7), 0}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(2), 0}, + {0x0006, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_POLL, BIT(1), BIT(1)}, + {0x0006, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_POLL, BIT(0), 0}, + {0x106D, + PWR_CV_MSK_B | PWR_CV_MSK_C, + PWR_INTF_MSK_USB, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(6), 0}, + {0x0088, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0088, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), 0}, + {0x0088, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0088, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), 0}, + {0x0088, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0083, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(6), 0}, + {0x0080, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0024, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0), 0}, + {0x02A0, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x02A2, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(7) | BIT(6) | BIT(5), 0}, + {0x0071, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4), 0}, + {0x0010, + PWR_CV_MSK_A, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(2), BIT(2)}, + {0x02A0, + PWR_CV_MSK_A, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(7) | BIT(6), 0}, + {0xFFFF, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + 0, + PWR_CMD_END, 0, 0}, +}; + +static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = { + {0x02F0, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, 0xFF, 0}, + {0x02F1, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, 0xFF, 0}, + {0x0006, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0002, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(1) | BIT(0), 0}, + {0x0082, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(1) | BIT(0), 0}, + {0x106D, + PWR_CV_MSK_B | PWR_CV_MSK_C, + PWR_INTF_MSK_USB, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + PWR_BASE_MAC, + PWR_CMD_POLL, BIT(1), 0}, + {0x0091, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + 
PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(2), BIT(2)}, + {0x0007, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_USB, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4), 0}, + {0x0007, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(6) | BIT(4), 0}, + {0x0005, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4) | BIT(3), BIT(3)}, + {0x0005, + PWR_CV_MSK_C | PWR_CV_MSK_D | PWR_CV_MSK_E | PWR_CV_MSK_F | + PWR_CV_MSK_G, + PWR_INTF_MSK_USB, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4) | BIT(3), BIT(3)}, + {0x1086, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x1086, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_SDIO, + PWR_BASE_MAC, + PWR_CMD_POLL, BIT(1), 0}, + {0xFFFF, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_ALL, + 0, + PWR_CMD_END, 0, 0}, +}; + +static const struct rtw89_pwr_cfg * const pwr_on_seq_8852a[] = { + rtw8852a_pwron, NULL +}; + +static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = { + rtw8852a_pwroff, NULL +}; + +static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse, + struct rtw8852a_efuse *map) +{ + ether_addr_copy(efuse->addr, map->e.mac_addr); + efuse->rfe_type = map->rfe_type; + efuse->xtal_cap = map->xtal_k; +} + +static void rtw8852a_efuse_parsing_tssi(struct rtw89_dev *rtwdev, + struct rtw8852a_efuse *map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + struct rtw8852a_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi}; + u8 i, j; + + tssi->thermal[RF_PATH_A] = map->path_a_therm; + tssi->thermal[RF_PATH_B] = map->path_b_therm; + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi, + sizeof(ofst[i]->cck_tssi)); + + for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n", + i, j, tssi->tssi_cck[i][j]); + + memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi, + sizeof(ofst[i]->bw40_tssi)); + memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM, + ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g)); + + for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n", + i, j, tssi->tssi_mcs[i][j]); + } +} + +static int rtw8852a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map) +{ + struct rtw89_efuse *efuse = &rtwdev->efuse; + struct rtw8852a_efuse *map; + + map = (struct rtw8852a_efuse *)log_map; + + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + rtw8852a_efuse_parsing_tssi(rtwdev, map); + + switch (rtwdev->hci.type) { + case RTW89_HCI_TYPE_PCIE: + rtw8852ae_efuse_parsing(efuse, map); + break; + default: + return -ENOTSUPP; + } + + rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type); + + return 0; +} + +static void rtw8852a_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + static const u32 tssi_trim_addr[RF_PATH_NUM_8852A] = {0x5D6, 0x5AB}; + u32 addr = rtwdev->chip->phycap_addr; + bool pg = false; + u32 ofst; + u8 i, j; + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) { + /* addrs are in decreasing order */ + ofst = tssi_trim_addr[i] - addr - j; + tssi->tssi_trim[i][j] = phycap_map[ofst]; + + if (phycap_map[ofst] != 0xff) + pg = true; + } + } + + if (!pg) { + memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim)); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM] no PG, set all trim info to 
0\n"); + } + + for (i = 0; i < RF_PATH_NUM_8852A; i++) + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n", + i, j, tssi->tssi_trim[i][j], + tssi_trim_addr[i] - j); +} + +static void rtw8852a_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 thm_trim_addr[RF_PATH_NUM_8852A] = {0x5DF, 0x5DC}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n", + i, info->thermal_trim[i]); + + if (info->thermal_trim[i] != 0xff) + info->pg_thermal_trim = true; + } +} + +static void rtw8852a_thermal_trim(struct rtw89_dev *rtwdev) +{ +#define __thm_setting(raw) \ +({ \ + u8 __v = (raw); \ + ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \ +}) + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 i, val; + + if (!info->pg_thermal_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] no PG, do nothing\n"); + + return; + } + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + val = __thm_setting(info->thermal_trim[i]); + rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n", + i, val); + } +#undef __thm_setting +} + +static void rtw8852a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 pabias_trim_addr[RF_PATH_NUM_8852A] = {0x5DE, 0x5DB}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n", + i, info->pa_bias_trim[i]); + + if (info->pa_bias_trim[i] != 0xff) + info->pg_pa_bias_trim = true; + } +} + +static void rtw8852a_pa_bias_trim(struct rtw89_dev *rtwdev) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 pabias_2g, pabias_5g; + u8 i; + + if (!info->pg_pa_bias_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] no PG, do nothing\n"); + + return; + } + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]); + pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n", + i, pabias_2g, pabias_5g); + + rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g); + rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g); + } +} + +static int rtw8852a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + rtw8852a_phycap_parsing_tssi(rtwdev, phycap_map); + rtw8852a_phycap_parsing_thermal_trim(rtwdev, phycap_map); + rtw8852a_phycap_parsing_pa_bias_trim(rtwdev, phycap_map); + + return 0; +} + +static void rtw8852a_power_trim(struct rtw89_dev *rtwdev) +{ + rtw8852a_thermal_trim(rtwdev); + rtw8852a_pa_bias_trim(rtwdev); +} + +static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + u8 mac_idx) +{ + u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, + mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u8 txsc20 = 0, txsc40 = 0; + + switch 
(param->bandwidth) { + case RTW89_CHANNEL_WIDTH_80: + txsc40 = rtw89_phy_get_txsc(rtwdev, param, + RTW89_CHANNEL_WIDTH_40); + fallthrough; + case RTW89_CHANNEL_WIDTH_40: + txsc20 = rtw89_phy_get_txsc(rtwdev, param, + RTW89_CHANNEL_WIDTH_20); + break; + default: + break; + } + + switch (param->bandwidth) { + case RTW89_CHANNEL_WIDTH_80: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1)); + rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4)); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0)); + rtw89_write32(rtwdev, sub_carr, txsc20); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK); + rtw89_write32(rtwdev, sub_carr, 0); + break; + default: + break; + } + + if (param->center_chan > 14) + rtw89_write8_set(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); + else + rtw89_write8_clr(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); +} + +static const u32 rtw8852a_sco_barker_threshold[14] = { + 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6, + 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4 +}; + +static const u32 rtw8852a_sco_cck_threshold[14] = { + 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724, + 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed +}; + +static int rtw8852a_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 central_ch, + u8 primary_ch, enum rtw89_bandwidth bw) +{ + u8 ch_element; + + if (bw == RTW89_CHANNEL_WIDTH_20) { + ch_element = central_ch - 1; + } else if (bw == RTW89_CHANNEL_WIDTH_40) { + if (primary_ch == 1) + ch_element = central_ch - 1 + 2; + else + ch_element = central_ch - 1 - 2; + } else { + rtw89_warn(rtwdev, "Invalid BW:%d for CCK\n", bw); + return -EINVAL; + } + rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH, + rtw8852a_sco_barker_threshold[ch_element]); + rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH, + rtw8852a_sco_cck_threshold[ch_element]); + + return 0; +} + +static void rtw8852a_ch_setting(struct rtw89_dev *rtwdev, u8 central_ch, + u8 path) +{ + u32 val; + + val = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + if (val == INV_RF_DATA) { + rtw89_warn(rtwdev, "Invalid RF_0x18 for Path-%d\n", path); + return; + } + val &= ~0x303ff; + val |= central_ch; + if (central_ch > 14) + val |= (BIT(16) | BIT(8)); + rtw89_write_rf(rtwdev, path, RR_CFGCH, RFREG_MASK, val); +} + +static u8 rtw8852a_sco_mapping(u8 central_ch) +{ + if (central_ch == 1) + return 109; + else if (central_ch >= 2 && central_ch <= 6) + return 108; + else if (central_ch >= 7 && central_ch <= 10) + return 107; + else if (central_ch >= 11 && central_ch <= 14) + return 106; + else if (central_ch == 36 || central_ch == 38) + return 51; + else if (central_ch >= 40 && central_ch <= 58) + return 50; + else if (central_ch >= 60 && central_ch <= 64) + return 49; + else if (central_ch == 100 || central_ch == 102) + return 48; + else if (central_ch >= 104 && central_ch <= 126) + return 47; + else if (central_ch >= 128 && central_ch <= 151) + return 46; + else if (central_ch >= 153 && central_ch <= 177) + return 45; + else + return 0; +} + +static void rtw8852a_ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch, + enum rtw89_phy_idx phy_idx) +{ + u8 sco_comp; + bool is_2g = central_ch <= 14; + + if (phy_idx == RTW89_PHY_0) { + /* Path A */ + rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_A); + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH0_TIA_ERR_G1, + B_PATH0_TIA_ERR_G1_SEL, 1, + 
phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH0_TIA_ERR_G1, + B_PATH0_TIA_ERR_G1_SEL, 0, + phy_idx); + + /* Path B */ + if (!rtwdev->dbcc_en) { + rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_B); + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_P1_MODE, + B_P1_MODE_SEL, + 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_P1_MODE, + B_P1_MODE_SEL, + 0, phy_idx); + } else { + if (is_2g) + rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, + B_2P4G_BAND_SEL); + else + rtw89_phy_write32_set(rtwdev, R_2P4G_BAND, + B_2P4G_BAND_SEL); + } + /* SCO compensate FC setting */ + sco_comp = rtw8852a_sco_mapping(central_ch); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, + sco_comp, phy_idx); + } else { + /* Path B */ + rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_B); + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_P1_MODE, + B_P1_MODE_SEL, + 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_P1_MODE, + B_P1_MODE_SEL, + 0, phy_idx); + /* SCO compensate FC setting */ + sco_comp = rtw8852a_sco_mapping(central_ch); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, + sco_comp, phy_idx); + } + + /* Band edge */ + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 1, + phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0, + phy_idx); + + /* CCK parameters */ + if (central_ch == 14) { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, + 0x3b13ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, + 0x1c42de); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, + 0xfdb0ad); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, + 0xf60f6e); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, + 0xfd8f92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, + 0xfff00a); + } else { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, + 0x3d23ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, + 0x29b354); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, + 0xfdb053); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, + 0xf86f9a); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, + 0xfaef92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, + 0xfe5fcc); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, + 0xffdff5); + } +} + +static void rtw8852a_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + u32 val = 0; + u32 adc_sel[2] = {0x12d0, 0x32d0}; + u32 wbadc_sel[2] = {0x12ec, 0x32ec}; + + val = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + if (val == INV_RF_DATA) { + rtw89_warn(rtwdev, "Invalid RF_0x18 for Path-%d\n", path); + return; + } + val &= ~(BIT(11) | BIT(10)); + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); + val |= (BIT(11) | BIT(10)); + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); + val |= (BIT(11) | BIT(10)); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + val |= (BIT(11) | BIT(10)); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, 
wbadc_sel[path], 0x30, 0x2); + val |= BIT(11); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + val |= BIT(10); + break; + default: + rtw89_warn(rtwdev, "Fail to set ADC\n"); + } + + rtw89_write_rf(rtwdev, path, RR_CFGCH, RFREG_MASK, val); +} + +static void +rtw8852a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, + enum rtw89_phy_idx phy_idx) +{ + /* Switch bandwidth */ + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x1, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + 0x0, phy_idx); + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x2, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + 0x0, phy_idx); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + 0x0, phy_idx); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x1, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + pri_ch, + phy_idx); + if (pri_ch == RTW89_SC_20_UPPER) + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1); + else + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x2, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + pri_ch, + phy_idx); + break; + default: + rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, + pri_ch); + } + + if (phy_idx == RTW89_PHY_0) { + rtw8852a_bw_setting(rtwdev, bw, RF_PATH_A); + if (!rtwdev->dbcc_en) + rtw8852a_bw_setting(rtwdev, bw, RF_PATH_B); + } else { + rtw8852a_bw_setting(rtwdev, bw, RF_PATH_B); + } +} + +static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch) +{ + if (central_ch == 153) { + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL, + 0x210); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL, + 0x210); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x7c0); + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, + B_P0_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, + B_P1_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, + 0x1); + } else if (central_ch == 151) { + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL, + 0x210); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL, + 0x210); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x40); + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, + B_P0_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, + B_P1_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, + 0x1); + } else if (central_ch == 155) { + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL, + 0x2d0); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL, + 0x2d0); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 
0x740); + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, + B_P0_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, + B_P1_NBIIDX_NOTCH_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, + 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, + B_P0_NBIIDX_NOTCH_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, + B_P1_NBIIDX_NOTCH_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, + 0x0); + } +} + +static void rtw8852a_bb_reset_all(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, + phy_idx); +} + +static void rtw8852a_bb_reset_en(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, bool en) +{ + if (en) + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, + 1, + phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, + 0, + phy_idx); +} + +static void rtw8852a_bb_reset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON); + rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); + rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); + rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); + rtw8852a_bb_reset_all(rtwdev, phy_idx); + rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON); + rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); + rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); + rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); +} + +static void rtw8852a_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u32 addr; + + for (addr = R_AX_PWR_MACID_LMT_TABLE0; + addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); +} + +static void rtw8852a_bb_sethw(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP); + rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP); + + if (rtwdev->hal.cv <= CHIP_CCV) { + rtw89_phy_write32_set(rtwdev, R_RSTB_WATCH_DOG, B_P0_RSTB_WATCH_DOG); + rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_1, 0x864FA000); + rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x3F); + rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_3, 0x7FFF); + rtw89_phy_write32_set(rtwdev, R_SPOOF_ASYNC_RST, B_SPOOF_ASYNC_RST); + rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON); + rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); + } + rtw89_phy_write32_mask(rtwdev, R_CFO_TRK0, B_CFO_TRK_MSK, 0x1f); + rtw89_phy_write32_mask(rtwdev, R_CFO_TRK1, B_CFO_TRK_MSK, 0x0c); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_1); + rtw89_phy_write32_clr(rtwdev, R_NDP_BRK0, B_NDP_RU_BRK); + rtw89_phy_write32_set(rtwdev, R_NDP_BRK1, B_NDP_RU_BRK); + + rtw8852a_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); +} + +static void rtw8852a_bbrst_for_rfk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); + rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); + rtw8852a_bb_reset_all(rtwdev, phy_idx); + rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, 
B_P0_TSSI_TRK_EN); + rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); + udelay(1); +} + +static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + bool cck_en = param->center_chan > 14 ? false : true; + u8 pri_ch_idx = param->pri_ch_idx; + + if (param->center_chan <= 14) + rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan, + param->primary_chan, param->bandwidth); + + rtw8852a_ctrl_ch(rtwdev, param->center_chan, phy_idx); + rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx); + if (cck_en) { + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1); + rtw8852a_bbrst_for_rfk(rtwdev, phy_idx); + } + rtw8852a_spur_elimination(rtwdev, param->center_chan); + rtw8852a_bb_reset_all(rtwdev, phy_idx); +} + +static void rtw8852a_set_channel(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *params) +{ + rtw8852a_set_channel_mac(rtwdev, params, RTW89_MAC_0); + rtw8852a_set_channel_bb(rtwdev, params, RTW89_PHY_0); +} + +static void rtw8852a_dfs_en(struct rtw89_dev *rtwdev, bool en) +{ + if (en) + rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 1); + else + rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 0); +} + +static void rtw8852a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, + enum rtw89_rf_path path) +{ + static const u32 tssi_trk[2] = {0x5818, 0x7818}; + static const u32 ctrl_bbrst[2] = {0x58dc, 0x78dc}; + + if (en) { + rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], BIT(30), 0x0); + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0); + } else { + rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], BIT(30), 0x1); + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1); + } +} + +static void rtw8852a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, + u8 phy_idx) +{ + if (!rtwdev->dbcc_en) { + rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_A); + rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_B); + } else { + if (phy_idx == RTW89_PHY_0) + rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_A); + else + rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_B); + } +} + +static void rtw8852a_adc_en(struct rtw89_dev *rtwdev, bool en) +{ + if (en) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, + 0x0); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, + 0xf); +} + +static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter, + struct rtw89_channel_help_params *p) +{ + u8 phy_idx = RTW89_PHY_0; + + if (enter) { + rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL); + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); + rtw8852a_dfs_en(rtwdev, false); + rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0); + rtw8852a_adc_en(rtwdev, false); + fsleep(40); + rtw8852a_bb_reset_en(rtwdev, phy_idx, false); + } else { + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); + rtw8852a_adc_en(rtwdev, true); + rtw8852a_dfs_en(rtwdev, true); + rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0); + rtw8852a_bb_reset_en(rtwdev, phy_idx, true); + rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en); + } +} + +static void rtw8852a_fem_setup(struct rtw89_dev *rtwdev) +{ + struct rtw89_efuse *efuse = &rtwdev->efuse; + + switch (efuse->rfe_type) { + case 11: + case 12: + case 17: + case 18: + case 51: + case 53: + rtwdev->fem.epa_2g = true; + rtwdev->fem.elna_2g = true; + fallthrough; + case 9: + case 10: + case 15: + case 16: + rtwdev->fem.epa_5g = true; + 
rtwdev->fem.elna_5g = true; + break; + default: + break; + } +} + +static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev) +{ + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = false; + + rtw8852a_rck(rtwdev); + rtw8852a_dack(rtwdev); + rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true); +} + +static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev) +{ + enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + + rtw8852a_rx_dck(rtwdev, phy_idx, true); + rtw8852a_iqk(rtwdev, phy_idx); + rtw8852a_tssi(rtwdev, phy_idx); + rtw8852a_dpk(rtwdev, phy_idx); +} + +static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev) +{ + rtw8852a_tssi_scan(rtwdev, RTW89_PHY_0); +} + +static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start) +{ + rtw8852a_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); +} + +static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev) +{ + rtw8852a_dpk_track(rtwdev); + rtw8852a_iqk_track(rtwdev); + rtw8852a_tssi_track(rtwdev); +} + +static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, s16 ref) +{ + s8 ofst_int = 0; + u8 base_cw_0db = 0x27; + u16 tssi_16dbm_cw = 0x12c; + s16 pwr_s10_3 = 0; + s16 rf_pwr_cw = 0; + u16 bb_pwr_cw = 0; + u32 pwr_cw = 0; + u32 tssi_ofst_cw = 0; + + pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); + bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3); + rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3); + rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); + pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; + + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", + tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); + + return (tssi_ofst_cw << 18) | (pwr_cw << 9) | (ref & GENMASK(8, 0)); +} + +static +void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + s16 pw_ofst, enum rtw89_mac_idx mac_idx) +{ + s32 val_1t = 0; + s32 val_2t = 0; + u32 reg; + + if (pw_ofst < -16 || pw_ofst > 15) { + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Err pwr_offset=%d\n", + pw_ofst); + return; + } + reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); + val_1t = (s32)pw_ofst; + reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t); + val_2t = max(val_1t - 3, -16); + reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, val_2t); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Set TB pwr_offset=(%d, %d)\n", + val_1t, val_2t); +} + +static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + static const u32 addr[RF_PATH_NUM_8852A] = {0x5800, 0x7800}; + const u32 mask = 0x7FFFFFF; + const u8 ofst_ofdm = 0x4; + const u8 ofst_cck = 0x8; + s16 ref_ofdm = 0; + s16 ref_cck = 0; + u32 val; + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, + GENMASK(27, 10), 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); + val = rtw8852a_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); + + for (i = 0; i < RF_PATH_NUM_8852A; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, + phy_idx); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); + val = rtw8852a_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); + + for (i = 0; i < RF_PATH_NUM_8852A; i++) + 
rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, + phy_idx); +} + +static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u8 ch = rtwdev->hal.current_channel; + static const u8 rs[] = { + RTW89_RS_CCK, + RTW89_RS_OFDM, + RTW89_RS_MCS, + RTW89_RS_HEDCM, + }; + s8 tmp; + u8 i, j; + u32 val, shf, addr = R_AX_PWR_BY_RATE; + struct rtw89_rate_desc cur; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr byrate with ch=%d\n", ch); + + for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) { + for (i = 0; i < ARRAY_SIZE(rs); i++) { + if (cur.nss >= rtw89_rs_nss_max[rs[i]]) + continue; + + val = 0; + cur.rs = rs[i]; + + for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) { + cur.idx = j; + shf = (j % 4) * 8; + tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur); + val |= (tmp << shf); + + if ((j + 1) % 4) + continue; + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + val = 0; + addr += 4; + } + } + } +} + +static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_rate_desc desc = { + .nss = RTW89_NSS_1, + .rs = RTW89_RS_OFFSET, + }; + u32 val = 0; + s8 v; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n"); + + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) { + v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc); + val |= ((v & 0xf) << (4 * desc.idx)); + } + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, + GENMASK(19, 0), val); +} + +static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ +#define __MAC_TXPWR_LMT_PAGE_SIZE 40 + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + struct rtw89_txpwr_limit lmt[NTX_NUM_8852A]; + u32 addr, val; + const s8 *ptr; + u8 i, j, k; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw); + + for (i = 0; i < NTX_NUM_8852A; i++) { + rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i); + + for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) { + addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i; + ptr = (s8 *)&lmt[i] + j; + val = 0; + + for (k = 0; k < 4; k++) + val |= (ptr[k] << (8 * k)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +#undef __MAC_TXPWR_LMT_PAGE_SIZE +} + +static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ +#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24 + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A]; + u32 addr, val; + const s8 *ptr; + u8 i, j, k; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); + + for (i = 0; i < NTX_NUM_8852A; i++) { + rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i); + + for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) { + addr = R_AX_PWR_RU_LMT + j + + __MAC_TXPWR_LMT_RU_PAGE_SIZE * i; + ptr = (s8 *)&lmt_ru[i] + j; + val = 0; + + for (k = 0; k < 4; k++) + val |= (ptr[k] << (8 * k)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } + +#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE +} + +static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev) +{ + rtw8852a_set_txpwr_byrate(rtwdev, RTW89_PHY_0); + rtw8852a_set_txpwr_limit(rtwdev, RTW89_PHY_0); + rtw8852a_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0); +} + +static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev) +{ + rtw8852a_set_txpwr_ref(rtwdev, RTW89_PHY_0); + 
rtw8852a_set_txpwr_offset(rtwdev, RTW89_PHY_0); +} + +static int +rtw8852a_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + int ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf004); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); + if (ret) + return ret; + + return 0; +} + +void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev) +{ + u8 i = 0; + u32 addr, val; + + for (i = 0; i < ARRAY_SIZE(rtw8852a_pmac_ht20_mcs7_tbl); i++) { + addr = rtw8852a_pmac_ht20_mcs7_tbl[i].addr; + val = rtw8852a_pmac_ht20_mcs7_tbl[i].data; + rtw89_phy_write32(rtwdev, addr, val); + } +} + +static void rtw8852a_stop_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852a_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx"); + if (tx_info->mode == CONT_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, + idx); + else if (tx_info->mode == PKTS_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, + idx); +} + +static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852a_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + enum rtw8852a_pmac_mode mode = tx_info->mode; + u32 pkt_cnt = tx_info->tx_cnt; + u16 period = tx_info->period; + + if (mode == CONT_TX && !tx_info->is_cck) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, + idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start"); + } else if (mode == PKTS_TX) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, + idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, + B_PMAC_TX_PRD_MSK, period, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK, + pkt_cnt, idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start"); + } + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx); +} + +void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852a_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + if (!tx_info->en_pmac_tx) { + rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); + if (rtwdev->hal.current_band_type == RTW89_BAND_2G) + rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS); + return; + } + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable"); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, + idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx); + rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx); + rtw8852a_start_pmac_tx(rtwdev, tx_info, idx); +} + +void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx) +{ + struct rtw8852a_bb_pmac_info tx_info = {0}; + + tx_info.en_pmac_tx = enable; + tx_info.is_cck = 0; + tx_info.mode = PKTS_TX; + tx_info.tx_cnt = tx_cnt; + tx_info.period = period; + tx_info.tx_time = tx_time; + rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx); +} + +void rtw8852a_bb_set_power(struct rtw89_dev 
*rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx); +} + +void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) +{ + u32 rst_mask0 = 0; + u32 rst_mask1 = 0; + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_1); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path); + if (!rtwdev->dbcc_en) { + if (tx_path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, + B_TXPATH_SEL_MSK, 1); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, + B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_B) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, + B_TXPATH_SEL_MSK, 2); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, + B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_AB) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, + B_TXPATH_SEL_MSK, 3); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, + B_TXNSS_MAP_MSK, 4); + } else { + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path"); + } + } else { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, + 1); + rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2, + RTW89_PHY_1); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, + 0); + rtw89_phy_write32_idx(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4, + RTW89_PHY_1); + } + rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; + rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; + if (tx_path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); + } +} + +void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode) +{ + if (mode != 0) + return; + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch"); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); +} + +static void rtw8852a_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +{ + rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? 
&rtw8852a_btc_preagc_en_defs_tbl : + &rtw8852a_btc_preagc_dis_defs_tbl); +} + +static u8 rtw8852a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + if (rtwdev->is_tssi_mode[rf_path]) { + u32 addr = 0x1c10 + (rf_path << 13); + + return (u8)rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000); + } + + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + + fsleep(200); + + return (u8)rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); +} + +static void rtw8852a_btc_set_rfe(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_module *module = &btc->mdinfo; + + module->rfe_type = rtwdev->efuse.rfe_type; + module->cv = rtwdev->hal.cv; + module->bt_solo = 0; + module->switch_type = BTC_SWITCH_INTERNAL; + + if (module->rfe_type > 0) + module->ant.num = (module->rfe_type % 2 ? 2 : 3); + else + module->ant.num = 2; + + module->ant.diversity = 0; + module->ant.isolation = 10; + + if (module->ant.num == 3) { + module->ant.type = BTC_ANT_DEDICATED; + module->bt_pos = BTC_BT_ALONE; + } else { + module->ant.type = BTC_ANT_SHARED; + module->bt_pos = BTC_BT_BTG; + } +} + +static +void rtw8852a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) +{ + rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x20000); + rtw89_write_rf(rtwdev, path, RR_LUTWA, 0xfffff, group); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, 0xfffff, val); + rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x0); +} + +static void rtw8852a_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +{ + if (btg) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P2, 0x4); + } +} + +static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_module *module = &btc->mdinfo; + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_mac_ax_coex coex_params = { + .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, + .direction = RTW89_MAC_AX_COEX_INNER, + }; + + /* PTA init */ + rtw89_mac_coex_init(rtwdev, &coex_params); + + /* set WL Tx response = Hi-Pri */ + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); + + /* set rf gnt debug off */ + rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, 0xfffff, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, 0xfffff, 0x0); + + /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ + if (module->ant.type == BTC_ANT_SHARED) { + rtw8852a_set_trx_mask(rtwdev, + RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); + rtw8852a_set_trx_mask(rtwdev, + RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); + } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ + rtw8852a_set_trx_mask(rtwdev, + RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); + rtw8852a_set_trx_mask(rtwdev, + RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); + } + + /* set PTA break table */ + rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM); + + /* enable BT counter 0xda40[16,2] = 2b'11 */ + rtw89_write32_set(rtwdev, + R_AX_CSR_MODE, 
B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN); + btc->cx.wl.status.map.init_ok = true; +} + +static +void rtw8852a_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) +{ + u32 bitmap = 0; + u32 reg = 0; + + switch (map) { + case BTC_PRI_MASK_TX_RESP: + reg = R_BTC_BT_COEX_MSK_TABLE; + bitmap = B_BTC_PRI_MASK_TX_RESP_V1; + break; + case BTC_PRI_MASK_BEACON: + reg = R_AX_WL_PRI_MSK; + bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ; + break; + default: + return; + } + + if (state) + rtw89_write32_set(rtwdev, reg, bitmap); + else + rtw89_write32_clr(rtwdev, reg, bitmap); +} + +static inline u32 __btc_ctrl_val_all_time(u32 ctrl) +{ + return FIELD_GET(GENMASK(15, 0), ctrl); +} + +static inline u32 __btc_ctrl_rst_all_time(u32 cur) +{ + return cur & ~B_AX_FORCE_PWR_BY_RATE_EN; +} + +static inline u32 __btc_ctrl_gen_all_time(u32 cur, u32 val) +{ + u32 hv = cur & ~B_AX_FORCE_PWR_BY_RATE_VALUE_MASK; + u32 lv = val & B_AX_FORCE_PWR_BY_RATE_VALUE_MASK; + + return hv | lv | B_AX_FORCE_PWR_BY_RATE_EN; +} + +static inline u32 __btc_ctrl_val_gnt_bt(u32 ctrl) +{ + return FIELD_GET(GENMASK(31, 16), ctrl); +} + +static inline u32 __btc_ctrl_rst_gnt_bt(u32 cur) +{ + return cur & ~B_AX_TXAGC_BT_EN; +} + +static inline u32 __btc_ctrl_gen_gnt_bt(u32 cur, u32 val) +{ + u32 ov = cur & ~B_AX_TXAGC_BT_MASK; + u32 iv = FIELD_PREP(B_AX_TXAGC_BT_MASK, val); + + return ov | iv | B_AX_TXAGC_BT_EN; +} + +static void +rtw8852a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) +{ + const u32 __btc_cr_all_time = R_AX_PWR_RATE_CTRL; + const u32 __btc_cr_gnt_bt = R_AX_PWR_COEXT_CTRL; + +#define __do_clr(_chk) ((_chk) == GENMASK(15, 0)) +#define __handle(_case) \ + do { \ + const u32 _reg = __btc_cr_ ## _case; \ + u32 _val = __btc_ctrl_val_ ## _case(txpwr_val); \ + u32 _cur, _wrt; \ + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \ + "btc ctrl %s: 0x%x\n", #_case, _val); \ + rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur);\ + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \ + "btc ctrl ori 0x%x: 0x%x\n", _reg, _cur); \ + _wrt = __do_clr(_val) ? 
\ + __btc_ctrl_rst_ ## _case(_cur) : \ + __btc_ctrl_gen_ ## _case(_cur, _val); \ + rtw89_mac_txpwr_write32(rtwdev, RTW89_PHY_0, _reg, _wrt);\ + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \ + "btc ctrl set 0x%x: 0x%x\n", _reg, _wrt); \ + } while (0) + + __handle(all_time); + __handle(gnt_bt); + +#undef __handle +#undef __do_clr +} + +static +s8 rtw8852a_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + return clamp_t(s8, val, -100, 0) + 100; +} + +static struct rtw89_btc_rf_trx_para rtw89_btc_8852a_rf_ul[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {6, 1, 0, 7}, + {13, 1, 0, 7}, + {13, 1, 0, 7} +}; + +static struct rtw89_btc_rf_trx_para rtw89_btc_8852a_rf_dl[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {255, 1, 0, 7}, + {255, 1, 0, 7}, + {255, 1, 0, 7} +}; + +static const +u8 rtw89_btc_8852a_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30}; +static const +u8 rtw89_btc_8852a_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {40, 36, 31, 28}; + +static struct rtw89_btc_fbtc_mreg rtw89_btc_8852a_mon_reg[] = { + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980), + RTW89_DEF_FBTC_MREG(REG_BT_MODEM, 4, 0x178), +}; + +static +void rtw8852a_btc_bt_aci_imp(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *b = &bt->link_info; + + /* fix LNA2 = level-5 for BT ACI issue at BTG */ + if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0) + dm->trx_para_level = 1; +} + +static +void rtw8852a_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + u32 val; + + val = rtw89_read32(rtwdev, R_AX_BT_STAST_HIGH); + cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val); + cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val); + + val = rtw89_read32(rtwdev, R_AX_BT_STAST_LOW); + cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val); + cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val); + + /* clock-gate off before reset counter*/ + rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G); + rtw89_write32_clr(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST); + rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST); + rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G); +} + +static +void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); + 
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x1); + + /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */ + if (state) + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, + RFREG_MASK, 0xa2d7c); + else + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, + RFREG_MASK, 0xa2020); + + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); +} + +static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u8 path; + s8 *rx_power = phy_ppdu->rssi; + + status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]); + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + status->chains |= BIT(path); + status->chain_signal[path] = rx_power[path]; + } +} + +static const struct rtw89_chip_ops rtw8852a_chip_ops = { + .bb_reset = rtw8852a_bb_reset, + .bb_sethw = rtw8852a_bb_sethw, + .read_rf = rtw89_phy_read_rf, + .write_rf = rtw89_phy_write_rf, + .set_channel = rtw8852a_set_channel, + .set_channel_help = rtw8852a_set_channel_help, + .read_efuse = rtw8852a_read_efuse, + .read_phycap = rtw8852a_read_phycap, + .fem_setup = rtw8852a_fem_setup, + .rfk_init = rtw8852a_rfk_init, + .rfk_channel = rtw8852a_rfk_channel, + .rfk_band_changed = rtw8852a_rfk_band_changed, + .rfk_scan = rtw8852a_rfk_scan, + .rfk_track = rtw8852a_rfk_track, + .power_trim = rtw8852a_power_trim, + .set_txpwr = rtw8852a_set_txpwr, + .set_txpwr_ctrl = rtw8852a_set_txpwr_ctrl, + .init_txpwr_unit = rtw8852a_init_txpwr_unit, + .get_thermal = rtw8852a_get_thermal, + .ctrl_btg = rtw8852a_ctrl_btg, + .query_ppdu = rtw8852a_query_ppdu, + .bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc, + .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, + + .btc_set_rfe = rtw8852a_btc_set_rfe, + .btc_init_cfg = rtw8852a_btc_init_cfg, + .btc_set_wl_pri = rtw8852a_btc_set_wl_pri, + .btc_set_wl_txpwr_ctrl = rtw8852a_btc_set_wl_txpwr_ctrl, + .btc_get_bt_rssi = rtw8852a_btc_get_bt_rssi, + .btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp, + .btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt, + .btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby, +}; + +const struct rtw89_chip_info rtw8852a_chip_info = { + .chip_id = RTL8852A, + .ops = &rtw8852a_chip_ops, + .fw_name = "rtw89/rtw8852a_fw.bin", + .fifo_size = 458752, + .max_amsdu_limit = 3500, + .dis_2g_40m_ul_ofdma = true, + .hfc_param_ini = rtw8852a_hfc_param_ini_pcie, + .dle_mem = rtw8852a_dle_mem_pcie, + .rf_base_addr = {0xc000, 0xd000}, + .pwr_on_seq = pwr_on_seq_8852a, + .pwr_off_seq = pwr_off_seq_8852a, + .bb_table = &rtw89_8852a_phy_bb_table, + .rf_table = {&rtw89_8852a_phy_radioa_table, + &rtw89_8852a_phy_radiob_table,}, + .nctl_table = &rtw89_8852a_phy_nctl_table, + .byr_table = &rtw89_8852a_byr_table, + .txpwr_lmt_2g = &rtw89_8852a_txpwr_lmt_2g, + .txpwr_lmt_5g = &rtw89_8852a_txpwr_lmt_5g, + .txpwr_lmt_ru_2g = &rtw89_8852a_txpwr_lmt_ru_2g, + .txpwr_lmt_ru_5g = &rtw89_8852a_txpwr_lmt_ru_5g, + .txpwr_factor_rf = 2, + .txpwr_factor_mac = 1, + .dig_table = &rtw89_8852a_phy_dig_table, + .rf_path_num = 2, + .tx_nss = 2, + .rx_nss = 2, + .acam_num = 128, + .bcam_num = 10, + .scam_num = 128, + .sec_ctrl_efuse_size = 4, + .physical_efuse_size = 1216, + .logical_efuse_size = 1536, + .limit_efuse_size = 1152, + .phycap_addr = 0x580, + .phycap_size = 128, + .para_ver = 0x05050764, + .wlcx_desired = 0x05050000, + .btcx_desired = 0x5, + .scbd = 0x1, + .mailbox = 0x1, + .afh_guard_ch = 6, + .wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres, + .bt_rssi_thres = 
rtw89_btc_8852a_bt_rssi_thres, + .rssi_tol = 2, + .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852a_mon_reg), + .mon_reg = rtw89_btc_8852a_mon_reg, + .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852a_rf_ul), + .rf_para_ulink = rtw89_btc_8852a_rf_ul, + .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852a_rf_dl), + .rf_para_dlink = rtw89_btc_8852a_rf_dl, + .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) | + BIT(RTW89_PS_MODE_CLK_GATED) | + BIT(RTW89_PS_MODE_PWR_GATED), +}; +EXPORT_SYMBOL(rtw8852a_chip_info); + +MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin"); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h new file mode 100644 index 00000000000000..633384374de05c --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_8852A_H__ +#define __RTW89_8852A_H__ + +#include "core.h" + +#define RF_PATH_NUM_8852A 2 +#define NTX_NUM_8852A 2 + +enum rtw8852a_pmac_mode { + NONE_TEST, + PKTS_TX, + PKTS_RX, + CONT_TX +}; + +struct rtw8852au_efuse { + u8 rsvd[0x38]; + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852ae_efuse { + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852a_tssi_offset { + u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM]; + u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM]; + u8 rsvd[7]; + u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM]; +} __packed; + +struct rtw8852a_efuse { + u8 rsvd[0x210]; + struct rtw8852a_tssi_offset path_a_tssi; + u8 rsvd1[10]; + struct rtw8852a_tssi_offset path_b_tssi; + u8 rsvd2[94]; + u8 channel_plan; + u8 xtal_k; + u8 rsvd3; + u8 iqk_lck; + u8 rsvd4[5]; + u8 reg_setting:2; + u8 tx_diversity:1; + u8 rx_diversity:2; + u8 ac_mode:1; + u8 module_type:2; + u8 rsvd5; + u8 shared_ant:1; + u8 coex_type:3; + u8 ant_iso:1; + u8 radio_on_off:1; + u8 rsvd6:2; + u8 eeprom_version; + u8 customer_id; + u8 tx_bb_swing_2g; + u8 tx_bb_swing_5g; + u8 tx_cali_pwr_trk_mode; + u8 trx_path_selection; + u8 rfe_type; + u8 country_code[2]; + u8 rsvd7[3]; + u8 path_a_therm; + u8 path_b_therm; + u8 rsvd8[46]; + u8 path_a_cck_pwr_idx[6]; + u8 path_a_bw40_1tx_pwr_idx[5]; + u8 path_a_ofdm_1tx_pwr_idx_diff:4; + u8 path_a_bw20_1tx_pwr_idx_diff:4; + u8 path_a_bw20_2tx_pwr_idx_diff:4; + u8 path_a_bw40_2tx_pwr_idx_diff:4; + u8 path_a_cck_2tx_pwr_idx_diff:4; + u8 path_a_ofdm_2tx_pwr_idx_diff:4; + u8 rsvd9[0xf2]; + union { + struct rtw8852au_efuse u; + struct rtw8852ae_efuse e; + }; +} __packed; + +struct rtw8852a_bb_pmac_info { + u8 en_pmac_tx:1; + u8 is_cck:1; + u8 mode:3; + u8 rsvd:3; + u16 tx_cnt; + u16 period; + u16 tx_time; + u8 duty_cycle; +}; + +void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev); +void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852a_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx); +void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx); +void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx); +void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path); +void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c new file mode 100644 index 00000000000000..c021e93eb07b01 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c @@ -0,0 +1,3911 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* 
Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852a.h" +#include "rtw8852a_rfk.h" +#include "rtw8852a_rfk_table.h" +#include "rtw8852a_table.h" + +static void +_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) +{ + rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data); +} + +static void +_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) +{ + rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); +} + +static void +_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) +{ + rtw89_phy_write32_set(rtwdev, def->addr, def->mask); +} + +static void +_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) +{ + rtw89_phy_write32_clr(rtwdev, def->addr, def->mask); +} + +static void +_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) +{ + udelay(def->data); +} + +static void +(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = { + [RTW89_RFK_F_WRF] = _rfk_write_rf, + [RTW89_RFK_F_WM] = _rfk_write32_mask, + [RTW89_RFK_F_WS] = _rfk_write32_set, + [RTW89_RFK_F_WC] = _rfk_write32_clr, + [RTW89_RFK_F_DELAY] = _rfk_delay, +}; + +static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM); + +static void +rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl) +{ + const struct rtw89_reg5_def *p = tbl->defs; + const struct rtw89_reg5_def *end = tbl->defs + tbl->size; + + for (; p < end; p++) + _rfk_handler[p->flag](rtwdev, p); +} + +#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f) \ + do { \ + typeof(rtwdev) _dev = (rtwdev); \ + if (cond) \ + rtw89_rfk_parser(_dev, (tbl_t)); \ + else \ + rtw89_rfk_parser(_dev, (tbl_f)); \ + } while (0) + +static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", + rtwdev->dbcc_en, phy_idx); + + if (!rtwdev->dbcc_en) + return RF_AB; + + if (phy_idx == RTW89_PHY_0) + return RF_A; + else + return RF_B; +} + +static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0}; +static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5}; +#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs) +#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs) + +static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + backup_bb_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i], + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup bb reg : %x, value =%x\n", + rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], + u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + backup_rf_reg_val[i] = + rtw89_read_rf(rtwdev, rf_path, + rtw8852a_backup_rf_regs[i], RFREG_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path, + rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, + u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i], + MASKDWORD, backup_bb_reg_val[i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore bb reg : %x, value =%x\n", + 
rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, + u32 backup_rf_reg_val[], u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i], + RFREG_MASK, backup_rf_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path, + rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) +{ + u8 path; + u32 rf_mode; + int ret; + + for (path = 0; path < RF_PATH_MAX; path++) { + if (!(kpath & BIT(path))) + continue; + + ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2, + 2, 5000, false, rtwdev, path, 0x00, + RR_MOD_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", + path, ret); + } +} + +static void _dack_dump(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + u8 t; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[0][0], dack->addck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[1][0], dack->addck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[0][0], dack->dadck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[1][0], dack->dadck_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[0][0], dack->biask_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[1][0], dack->biask_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } +} + +static void _afe_init(struct rtw89_dev *rtwdev) +{ + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl); +} + +static void _addck_backup(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL); + dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK, + B_S0_ADDCK_Q); + dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK, + B_S0_ADDCK_I); + + rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL); + dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK, + B_S1_ADDCK_Q); + dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK, + B_S1_ADDCK_I); +} + +static void _addck_reload(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]); + rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2, + (dack->addck_d[0][1] >> 6)); + 
rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q, + (dack->addck_d[0][1] & 0x3f)); + rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN); + rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]); + rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2, + (dack->addck_d[1][1] >> 6)); + rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q, + (dack->addck_d[1][1] & 0x3f)); + rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN); +} + +static void _dack_backup_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN); + rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN); + rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG); + + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i); + dack->msbk_d[0][0][i] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K); + rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i); + dack->msbk_d[0][1][i] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K); + } + dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2, + B_S0_DACKI2_K); + dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2, + B_S0_DACKQ2_K); + dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8, + B_S0_DACKI8_K) - 8; + dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8, + B_S0_DACKQ8_K) - 8; +} + +static void _dack_backup_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN); + rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN); + rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON); + + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i); + dack->msbk_d[1][0][i] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K); + rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i); + dack->msbk_d[1][1][i] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K); + } + dack->biask_d[1][0] = + (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K); + dack->biask_d[1][1] = + (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K); + dack->dadck_d[1][0] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8; + dack->dadck_d[1][1] = + (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8; +} + +static void _dack_reload_by_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 index) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 tmp = 0, tmp_offset, tmp_reg; + u8 i; + u32 idx_offset, path_offset; + + if (index == 0) + idx_offset = 0; + else + idx_offset = 0x50; + + if (path == RF_PATH_A) + path_offset = 0; + else + path_offset = 0x2000; + + tmp_offset = idx_offset + path_offset; + /* msbk_d: 15/14/13/12 */ + tmp = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); + tmp_reg = 0x5e14 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + /* msbk_d: 11/10/9/8 */ + tmp = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); + tmp_reg = 0x5e18 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", 
tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + /* msbk_d: 7/6/5/4 */ + tmp = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); + tmp_reg = 0x5e1c + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + /* msbk_d: 3/2/1/0 */ + tmp = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + tmp |= dack->msbk_d[path][index][i] << (i * 8); + tmp_reg = 0x5e20 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + /* dadak_d/biask_d */ + tmp = 0x0; + tmp = (dack->biask_d[path][index] << 22) | + (dack->dadck_d[path][index] << 14); + tmp_reg = 0x5e24 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); +} + +static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u8 i; + + for (i = 0; i < 2; i++) + _dack_reload_by_path(rtwdev, path, i); + + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_rfk_dack_reload_defs_a_tbl, + &rtw8852a_rfk_dack_reload_defs_b_tbl); +} + +#define ADDC_T_AVG 100 +static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + s32 dc_re = 0, dc_im = 0; + u32 tmp; + u32 i; + + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_rfk_check_addc_defs_a_tbl, + &rtw8852a_rfk_check_addc_defs_b_tbl); + + for (i = 0; i < ADDC_T_AVG; i++) { + tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD); + dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11); + dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11); + } + + dc_re /= ADDC_T_AVG; + dc_im /= ADDC_T_AVG; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im); +} + +static void _addck(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + /* S0 */ + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n"); + _check_addc(rtwdev, RF_PATH_A); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x1e00, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); + dack->addck_timeout[0] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n"); + _check_addc(rtwdev, RF_PATH_A); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl); + + /* S1 */ + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n"); + _check_addc(rtwdev, RF_PATH_B); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x3e00, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); + dack->addck_timeout[1] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n"); + _check_addc(rtwdev, RF_PATH_B); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl); +} + +static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, 
path == RF_PATH_A, + &rtw8852a_rfk_check_dadc_defs_f_a_tbl, + &rtw8852a_rfk_check_dadc_defs_f_b_tbl); + + _check_addc(rtwdev, path); + + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_rfk_check_dadc_defs_r_a_tbl, + &rtw8852a_rfk_check_dadc_defs_r_b_tbl); +} + +static void _dack_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x5e28, BIT(15)); + ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x5e78, BIT(15)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n"); + dack->msbk_timeout[0] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x5e48, BIT(17)); + ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x5e98, BIT(17)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n"); + dack->dadck_timeout[0] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); + _check_dadc(rtwdev, RF_PATH_A); + + _dack_backup_s0(rtwdev); + _dack_reload(rtwdev, RF_PATH_A); + + rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG); +} + +static void _dack_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x7e28, BIT(15)); + ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x7e78, BIT(15)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n"); + dack->msbk_timeout[1] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x7e48, BIT(17)); + ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000, + false, rtwdev, 0x7e98, BIT(17)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n"); + dack->dadck_timeout[1] = true; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n"); + _check_dadc(rtwdev, RF_PATH_B); + + _dack_backup_s1(rtwdev); + _dack_reload(rtwdev, RF_PATH_B); + + rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON); +} + +static void _dack(struct rtw89_dev *rtwdev) +{ + _dack_s0(rtwdev); + _dack_s1(rtwdev); +} + +static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 rf0_0, rf1_0; + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB); + + dack->dack_done = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n"); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); + rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); + rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK); 
+ _afe_init(rtwdev); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); + _addck(rtwdev); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); + _addck_backup(rtwdev); + _addck_reload(rtwdev); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); + _dack(rtwdev); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); + _dack_dump(rtwdev); + dack->dack_done = true; + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); + dack->dack_cnt++; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); +} + +#define RTW8852A_NCTL_VER 0xd +#define RTW8852A_IQK_VER 0x2a +#define RTW8852A_IQK_SS 2 +#define RTW8852A_IQK_THR_REK 8 +#define RTW8852A_IQK_CFIR_GROUP_NR 4 + +enum rtw8852a_iqk_type { + ID_TXAGC, + ID_FLOK_COARSE, + ID_FLOK_FINE, + ID_TXK, + ID_RXAGC, + ID_RXK, + ID_NBTXK, + ID_NBRXK, +}; + +static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path) +{ + u8 i = 0x0; + u32 fft[6] = {0x0}; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000); + fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000); + fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000); + fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000); + fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000); + fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000); + fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + for (i = 0; i < 6; i++) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n", + path, i, fft[i]); +} + +static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path) +{ + u8 i = 0x0; + u32 tmp = 0x0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1); + + for (i = 0x0; i < 0x18; i++) { + rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n", + path, BIT(path), tmp); + udelay(1); + } + rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX); + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000); + rtw89_phy_write32_mask(rtwdev, 
R_NCTL_N2, MASKDWORD, 0x80010100); + udelay(1); +} + +static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path, + u8 group) +{ + static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = { + {0x8f20, 0x8f54, 0x8f88, 0x8fbc}, + {0x9320, 0x9354, 0x9388, 0x93bc}, + }; + u8 idx = 0x0; + u32 tmp = 0x0; + u32 base_addr; + + if (path >= RTW8852A_IQK_SS) { + rtw89_warn(rtwdev, "cfir path %d out of range\n", path); + return; + } + if (group >= RTW8852A_IQK_CFIR_GROUP_NR) { + rtw89_warn(rtwdev, "cfir group %d out of range\n", group); + return; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001); + + base_addr = base_addrs[path][group]; + + for (idx = 0; idx < 0x0d; idx++) { + tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK] %x = %x\n", + base_addr + (idx << 2), tmp); + } + + if (path == 0x0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n"); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp); + } else { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n"); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp); + } + rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc); + udelay(1); + tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path, + BIT(path), tmp); +} + +static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path, + u8 group) +{ + static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = { + {0x8d00, 0x8d44, 0x8d88, 0x8dcc}, + {0x9100, 0x9144, 0x9188, 0x91cc}, + }; + u8 idx = 0x0; + u32 tmp = 0x0; + u32 base_addr; + + if (path >= RTW8852A_IQK_SS) { + rtw89_warn(rtwdev, "cfir path %d out of range\n", path); + return; + } + if (group >= RTW8852A_IQK_CFIR_GROUP_NR) { + rtw89_warn(rtwdev, "cfir group %d out of range\n", group); + return; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001); + + base_addr = base_addrs[path][group]; + for (idx = 0; idx < 0x10; idx++) { + tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]%x = %x\n", + base_addr + (idx << 2), tmp); + } + + if (path == 0x0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n"); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD); + 
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp); + } else { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n"); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp); + tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp); + } + rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd); + tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path, + BIT(path), tmp); +} + +static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path) +{ + u32 tmp = 0x0; + u32 i = 0x0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000); + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); + + for (i = 0; i <= 0x9f; i++) { + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i); + tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp); + } + + for (i = 0; i <= 0x9f; i++) { + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i); + tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp); + } + rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD); + rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD); +} + +static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 tmp = 0x0; + + rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000); + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2); + rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5); + 
rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1); + break; + default: + break; + } + tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); + fsleep(128); +} + +static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) +{ + u32 tmp; + u32 val; + int ret; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200, + false, rtwdev, 0xbff8, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n"); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret); + tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp); + + return false; +} + +static bool _iqk_one_shot(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path, u8 ktype) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail = false; + u32 iqk_cmd = 0x0; + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path); + u32 addr_rfc_ctl = 0x0; + + if (path == RF_PATH_A) + addr_rfc_ctl = 0x5864; + else + addr_rfc_ctl = 0x7864; + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + switch (ktype) { + case ID_TXAGC: + iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); + break; + case ID_FLOK_COARSE: + rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); + iqk_cmd = 0x108 | (1 << (4 + path)); + break; + case ID_FLOK_FINE: + rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009); + iqk_cmd = 0x208 | (1 << (4 + path)); + break; + case ID_TXK: + rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025); + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_RXAGC: + iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); + break; + case ID_RXK: + rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_NBTXK: + rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025); + iqk_cmd = 0x308 | (1 << (4 + path)); + break; + case ID_NBRXK: + rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + iqk_cmd = 0x608 | (1 << (4 + path)); + break; + default: + return false; + } + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); + rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN); + udelay(1); + fail = _iqk_check_cal(rtwdev, path, ktype); + if (iqk_info->iqk_xym_en) + _iqk_read_xym_dbcc0(rtwdev, path); + if (iqk_info->iqk_fft_en) + _iqk_read_fft_dbcc0(rtwdev, path); + if (iqk_info->iqk_sram_en) + _iqk_sram(rtwdev, path); + if (iqk_info->iqk_cfir_en) { + if (ktype == ID_TXK) { + _iqk_read_txcfir_dbcc0(rtwdev, path, 0x0); + _iqk_read_txcfir_dbcc0(rtwdev, path, 0x1); + _iqk_read_txcfir_dbcc0(rtwdev, path, 0x2); + _iqk_read_txcfir_dbcc0(rtwdev, 
path, 0x3); + } else { + _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0); + _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1); + _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2); + _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3); + } + } + + rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); + + return fail; +} + +static bool _rxk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0}; + static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30}; + static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1}; + static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0}; + static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a}; + static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0}; + u8 gp = 0x0; + bool fail = false; + u32 rf0 = 0x0; + + for (gp = 0; gp < 0x4; gp++) { + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]); + break; + default: + break; + } + rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET); + rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, + rf0 | iqk_info->syn1to2); + rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100); + rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL); + rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp); + rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail); + } + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0); + break; + default: + break; + } + iqk_info->nb_rxcfir[path] = 0x40000000; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_RXCFIR, 0x5); + iqk_info->is_wb_rxiqk[path] = true; + return false; +} + +static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 group = 0x0; + u32 rf0 = 0x0, tmp = 0x0; + u32 idxrxgain_a = 0x1a0; + u32 idxattc2_a = 0x00; + u32 idxattc1_a = 0x5; + u32 idxrxgain_g = 0x1E0; + u32 idxattc2_g = 0x15; + u32 idxattc1_g = 0x0; + bool fail = false; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g); 
+ break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a); + break; + default: + break; + } + rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET); + rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, + rf0 | iqk_info->syn1to2); + rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100); + rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL); + rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP, group); + rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0); + break; + default: + break; + } + if (!fail) { + tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); + iqk_info->nb_rxcfir[path] = tmp | 0x2; + } else { + iqk_info->nb_rxcfir[path] = 0x40000002; + } + return fail; +} + +static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), + MASKDWORD, 0x4d000a08); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), + B_P0_RXCK_VAL, 0x2); + rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON); + rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), + MASKDWORD, 0x44000a08); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), + B_P0_RXCK_VAL, 0x1); + rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON); + rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON); + rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL); + } +} + +static bool _txk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED}; + static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED}; + static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b}; + static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12}; + static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail = false; + u8 gp = 0x0; + u32 tmp = 0x0; + + for (gp = 0x0; gp < 0x4; gp++) { + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8), + B_RFGAIN_BND, 0x08); + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, + g_txgain[gp]); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, + g_attsmxr[gp]); + rtw89_write_rf(rtwdev, path, RR_TXG2, 
RR_TXG2_ATT0, + g_attsmxr[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, g_itqt[gp]); + break; + case RTW89_BAND_5G: + rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8), + B_RFGAIN_BND, 0x04); + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, + a_txgain[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, a_itqt[gp]); + break; + default: + break; + } + rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP, gp); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail); + } + + iqk_info->nb_txcfir[path] = 0x40000000; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_TXCFIR, 0x5); + iqk_info->is_wb_txiqk[path] = true; + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path, + BIT(path), tmp); + return false; +} + +static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 group = 0x2; + u32 a_mode_txgain = 0x64e2; + u32 g_mode_txgain = 0x61e8; + u32 attsmxr = 0x1; + u32 itqt = 0x12; + u32 tmp = 0x0; + bool fail = false; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8), + B_RFGAIN_BND, 0x08); + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr); + break; + case RTW89_BAND_5G: + rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8), + B_RFGAIN_BND, 0x04); + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain); + break; + default: + break; + } + rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL); + rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + if (!fail) { + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + iqk_info->nb_txcfir[path] = tmp | 0x2; + } else { + iqk_info->nb_txcfir[path] = 0x40000002; + } + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path, + BIT(path), tmp); + return fail; +} + +static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2); + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0); + else + rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias); + rtw89_write_rf(rtwdev, path, RR_LUTWE, 
RFREG_MASK, 0x0); +} + +static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) +{ + bool is_fail = false; + u32 tmp = 0x0; + u32 core_i = 0x0; + u32 core_q = 0x0; + + tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n", + path, tmp); + core_i = FIELD_GET(RR_TXMO_COI, tmp); + core_q = FIELD_GET(RR_TXMO_COQ, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q); + + if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d) + is_fail = true; + return is_fail; +} + +static bool _iqk_lok(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 rf0 = 0x0; + u8 itqt = 0x12; + bool fail = false; + bool tmp = false; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0); + itqt = 0x09; + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0); + itqt = 0x12; + break; + default: + break; + } + rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET); + rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI, + rf0 | iqk_info->syn1to2); + rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0); + rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE); + iqk_info->lok_cor_fail[0][path] = tmp; + fsleep(10); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE); + iqk_info->lok_fin_fail[0][path] = tmp; + fail = _lok_finetune_check(rtwdev, path); + return fail; +} + +static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000); + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00); + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000); + rtw89_write_rf(rtwdev, path, RR_RSV2, 
RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + udelay(1); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00); + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f); + rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100); + rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + udelay(1); + break; + default: + break; + } +} + +static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); +} + +static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 tmp = 0x0; + bool flag = 0x0; + + iqk_info->thermal[path] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + iqk_info->thermal_rek_en = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path, + iqk_info->thermal[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path, + iqk_info->lok_cor_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path, + iqk_info->lok_fin_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path, + iqk_info->iqk_tx_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path, + iqk_info->iqk_rx_fail[0][path]); + flag = iqk_info->lok_cor_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag); + flag = iqk_info->lok_fin_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag); + flag = iqk_info->iqk_tx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag); + flag = iqk_info->iqk_rx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD); + iqk_info->bp_iqkenable[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + iqk_info->bp_txkresult[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); + iqk_info->bp_rxkresult[path] = tmp; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, + (u8)iqk_info->iqk_times); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4)); + if (tmp != 0x0) + iqk_info->iqk_fail_cnt++; + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4), + iqk_info->iqk_fail_cnt); +} + +static +void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool lok_is_fail = false; + u8 ibias = 0x1; + u8 i = 0; + + _iqk_txclk_setting(rtwdev, path); + + for (i = 0; i < 3; i++) { + _lok_res_table(rtwdev, path, ibias++); + _iqk_txk_setting(rtwdev, path); + lok_is_fail = _iqk_lok(rtwdev, phy_idx, path); + if (!lok_is_fail) + break; + } + 
if (iqk_info->is_nbiqk) + iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); + + _iqk_rxclk_setting(rtwdev, path); + _iqk_rxk_setting(rtwdev, path); + if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G) + iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); + + _iqk_info_iqk(rtwdev, phy_idx, path); +} + +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + struct rtw89_hal *hal = &rtwdev->hal; + u32 reg_rf18 = 0x0, reg_35c = 0x0; + u8 idx = 0; + u8 get_empty_table = false; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + if (iqk_info->iqk_mcc_ch[idx][path] == 0) { + get_empty_table = true; + break; + } + } + if (!get_empty_table) { + idx = iqk_info->iqk_table_idx[path] + 1; + if (idx > RTW89_IQK_CHS_NR - 1) + idx = 0; + } + reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18); + reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00); + + iqk_info->iqk_band[path] = hal->current_band_type; + iqk_info->iqk_bw[path] = hal->current_band_width; + iqk_info->iqk_ch[path] = hal->current_channel; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path, + iqk_info->iqk_band[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n", + path, iqk_info->iqk_bw[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n", + path, iqk_info->iqk_ch[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy, + rtwdev->dbcc_en ? "on" : "off", + iqk_info->iqk_band[path] == 0 ? "2G" : + iqk_info->iqk_band[path] == 1 ? "5G" : "6G", + iqk_info->iqk_ch[path], + iqk_info->iqk_bw[path] == 0 ? "20M" : + iqk_info->iqk_bw[path] == 1 ? 
"40M" : "80M"); + if (reg_35c == 0x01) + iqk_info->syn1to2 = 0x1; + else + iqk_info->syn1to2 = 0x0; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16), + (u8)iqk_info->iqk_band[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16), + (u8)iqk_info->iqk_bw[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16), + (u8)iqk_info->iqk_ch[path]); + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER); +} + +static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + _iqk_by_path(rtwdev, phy_idx, path); +} + +static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, + iqk_info->nb_txcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, + iqk_info->nb_rxcfir[path]); + rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD); + rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD); + rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000); + rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN); + rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4); + rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL); + rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0); + rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); +} + +static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + const struct rtw89_rfk_tbl *tbl; + + switch (_kpath(rtwdev, phy_idx)) { + case RF_A: + tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl; + break; + case RF_B: + tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl; + break; + default: + tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl; + break; + } + + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 idx = iqk_info->iqk_table_idx[path]; + + if (rtwdev->dbcc_en) { + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_IQC, path & 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_G2, path & 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_IQC, idx); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_G2, idx); + } + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); + rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000); + rtw89_phy_write32_mask(rtwdev, 
R_MDPK_RX_DCK, MASKDWORD, 0x80000000); + rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD); +} + +static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + const struct rtw89_rfk_tbl *tbl; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__); + + switch (_kpath(rtwdev, phy_idx)) { + case RF_A: + tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl; + break; + case RF_B: + tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl; + break; + default: + tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl; + break; + } + + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 phy_idx = 0x0; + + iqk_info->iqk_times++; + + if (path == 0x0) + phy_idx = RTW89_PHY_0; + else + phy_idx = RTW89_PHY_1; + + _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); +} + +static void _iqk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_iqk_info *iqk = &rtwdev->iqk; + u8 path = 0x0; + u8 cur_ther; + + if (iqk->iqk_band[0] == RTW89_BAND_2G) + return; + if (iqk->iqk_bw[0] < RTW89_CHANNEL_WIDTH_80) + return; + + /* only check path 0 */ + for (path = 0; path < 1; path++) { + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + if (abs(cur_ther - iqk->thermal[path]) > RTW8852A_IQK_THR_REK) + iqk->thermal_rek_en = true; + else + iqk->thermal_rek_en = false; + } +} + +static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u32 rf_reg5, rck_val = 0; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); + + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); + + /* RCK trigger */ + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); + + ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20, + false, rtwdev, path, 0x1c, BIT(3)); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n"); + + rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); + + /* RCK_ADC_OFFSET */ + rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4); + + rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1); + rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK)); +} + +static void _iqk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 ch, path; + + rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD); + if (iqk_info->is_iqk_init) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + iqk_info->is_iqk_init = true; + iqk_info->is_nbiqk = false; + iqk_info->iqk_fft_en = false; + iqk_info->iqk_sram_en = false; + iqk_info->iqk_cfir_en = false; + iqk_info->iqk_xym_en = false; + iqk_info->thermal_rek_en = false; + 
iqk_info->iqk_times = 0x0; + + for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) { + iqk_info->iqk_channel[ch] = 0x0; + for (path = 0; path < RTW8852A_IQK_SS; path++) { + iqk_info->lok_cor_fail[ch][path] = false; + iqk_info->lok_fin_fail[ch][path] = false; + iqk_info->iqk_tx_fail[ch][path] = false; + iqk_info->iqk_rx_fail[ch][path] = false; + iqk_info->iqk_mcc_ch[ch][path] = 0x0; + iqk_info->iqk_table_idx[path] = 0x0; + } + } +} + +static void _doiqk(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR]; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]==========IQK start!!!!!==========\n"); + iqk_info->iqk_times++; + iqk_info->kcount = 0; + iqk_info->version = RTW8852A_IQK_VER; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); + _iqk_get_ch_info(rtwdev, phy_idx, path); + _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); + _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); + _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); + _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +{ + switch (_kpath(rtwdev, phy_idx)) { + case RF_A: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + break; + case RF_B: + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + case RF_AB: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + default: + break; + } +} + +#define RXDCK_VER_8852A 0xe + +static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, bool is_afe) +{ + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path); + u32 ori_val; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RX_DCK] ==== S%d RX DCK (by %s)====\n", + path, is_afe ?
"AFE" : "RFC"); + + ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD); + + if (is_afe) { + rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); + rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), + B_P0_RXCK_VAL, 0x3); + rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN); + rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13), + B_S0_RXDC2_AVG, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); + rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK); + rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST); + rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1); + } + + rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f); + rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START); + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + + fsleep(600); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP); + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + + if (is_afe) { + rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), + MASKDWORD, ori_val); + } +} + +static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool is_afe) +{ + u8 path, kpath, dck_tune; + u32 rf_reg5; + u32 addr; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", + RXDCK_VER_8852A, rtwdev->hal.cv); + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < 2; path++) { + if (!(kpath & BIT(path))) + continue; + + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE); + + if (rtwdev->is_tssi_mode[path]) { + addr = 0x5818 + (path << 13); + /* TSSI pause */ + rtw89_phy_write32_set(rtwdev, addr, BIT(30)); + } + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + _set_rx_dck(rtwdev, phy, path, is_afe); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + if (rtwdev->is_tssi_mode[path]) { + addr = 0x5818 + (path << 13); + /* TSSI resume */ + rtw89_phy_write32_clr(rtwdev, addr, BIT(30)); + } + } +} + +#define RTW8852A_RF_REL_VERSION 34 +#define RTW8852A_DPK_VER 0x10 +#define RTW8852A_DPK_TH_AVG_NUM 4 +#define RTW8852A_DPK_RF_PATH 2 +#define RTW8852A_DPK_KIP_REG_NUM 2 + +enum rtw8852a_dpk_id { + LBK_RXIQK = 0x06, + SYNC = 0x10, + MDPK_IDL = 0x11, + MDPK_MPA = 0x12, + GAIN_LOSS = 0x13, + GAIN_CAL = 0x14, +}; + +static void _rf_direct_cntrl(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off); + +static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg, + u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], + u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) { + reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev, + reg[i] + 
(path << 8), + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg, + u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) { + rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), + MASKDWORD, reg_bkup[path][i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, enum rtw8852a_dpk_id id) +{ + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path); + u16 dpk_cmd = 0x0; + u32 val; + int ret; + + dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4))); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); + rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0); + + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot for %s = 0x%x (ret=%d)\n", + id == 0x06 ? "LBK_RXIQK" : + id == 0x10 ? "SYNC" : + id == 0x11 ? "MDPK_IDL" : + id == 0x12 ? "MDPK_MPA" : + id == 0x13 ? "GAIN_LOSS" : "PWR_CAL", + dpk_cmd, ret); + + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot over 20ms!!!!\n"); + return 1; + } + + return 0; +} + +static void _dpk_rx_dck(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3); + _set_rx_dck(rtwdev, phy, path, false); +} + +static void _dpk_information(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + struct rtw89_hal *hal = &rtwdev->hal; + + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].band = hal->current_band_type; + dpk->bp[path][kidx].ch = hal->current_channel; + dpk->bp[path][kidx].bw = hal->current_band_width; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", + path, dpk->cur_idx[path], phy, + rtwdev->is_tssi_mode[path] ? "on" : "off", + rtwdev->dbcc_en ? "on" : "off", + dpk->bp[path][kidx].band == 0 ? "2G" : + dpk->bp[path][kidx].band == 1 ? "5G" : "6G", + dpk->bp[path][kidx].ch, + dpk->bp[path][kidx].bw == 0 ? "20M" : + dpk->bp[path][kidx].bw == 1 ? 
"40M" : "80M"); +} + +static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + switch (kpath) { + case RF_A: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl); + + if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0) + rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl); + break; + case RF_B: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl); + + if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1) + rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl); + break; + case RF_AB: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl); + break; + default: + break; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); +} + +static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + switch (kpath) { + case RF_A: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl); + break; + case RF_B: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl); + break; + case RF_AB: + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl); + break; + default: + break; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); +} + +static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_pause) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, is_pause); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, + is_pause ? "pause" : "resume"); +} + +static void _dpk_kip_setting(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 kidx) +{ + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/ + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2), + MASKDWORD, 0x003f2e2e); + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x005b5b5b); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n", + path, kidx); +} + +static void _dpk_kip_restore(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000); + rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD); + + if (rtwdev->hal.cv > CHIP_CBV) + rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); +} + +static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + u8 cur_rxbb; + + cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1); 
+ rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2); + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, + rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK)); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); + + fsleep(70); + + rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f); + + if (cur_rxbb <= 0xa) + rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3); + else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb) + rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); + + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, + rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD)); + + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/ + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK); + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl); +} + +static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + dpk->bp[path][kidx].ther_dpk = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n", + dpk->bp[path][kidx].ther_dpk); +} + +static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain, + enum rtw89_rf_path path) +{ + u8 txagc_ori = 0x38; + + rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori); + + return txagc_ori; +} + +static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4); + rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0); + } else { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e); + rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7); + rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3); + rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3); + } + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK)); +} + +static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_manual) +{ + u8 tmp_pad, tmp_txbb; + + if (is_manual) { + rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1); + tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD); + rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8), + B_RFGAIN_PAD, tmp_pad); + + tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB); + rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8), + B_RFGAIN_TXBB, tmp_txbb); + + 
rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), + B_LOAD_COEF_CFIR, 0x1); + rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), + B_LOAD_COEF_CFIR); + + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad, + tmp_txbb); + } else { + rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] disable manual switch TXCFIR\n"); + } +} + +static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bypass) +{ + if (is_bypass) { + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS, 0x1); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path, + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD)); + } else { + rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2); + rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path, + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD)); + } +} + +static +void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) + rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); + else + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? 
"40M" : "20M"); +} + +static void _dpk_table_select(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + u8 val; + + val = 0x80 + kidx * 0x20 + gain * 0x10; + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx, + gain, val); +} + +static bool _dpk_sync_check(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ +#define DPK_SYNC_TH_DC_I 200 +#define DPK_SYNC_TH_DC_Q 200 +#define DPK_SYNC_TH_CORR 170 + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u16 dc_i, dc_q; + u8 corr_val, corr_idx; + + rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); + + corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); + corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx, + corr_val); + + dpk->corr_idx[path] = corr_idx; + dpk->corr_val[path] = corr_val; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); + + dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); + + dc_i = abs(sign_extend32(dc_i, 11)); + dc_q = abs(sign_extend32(dc_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", + path, dc_i, dc_q); + + dpk->dc_i[path] = dc_i; + dpk->dc_q[path] = dc_q; + + if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || + corr_val < DPK_SYNC_TH_CORR) + return true; + else + return false; +} + +static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_tpg_sel(rtwdev, path, kidx); + _dpk_one_shot(rtwdev, phy, path, SYNC); + return _dpk_sync_check(rtwdev, path); /*1= fail*/ +} + +static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) +{ + u16 dgain = 0x0; + + rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); + + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); + + dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, + dgain); + + return dgain; +} + +static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain) +{ + s8 offset; + + if (dgain >= 0x783) + offset = 0x6; + else if (dgain <= 0x782 && dgain >= 0x551) + offset = 0x3; + else if (dgain <= 0x550 && dgain >= 0x3c4) + offset = 0x0; + else if (dgain <= 0x3c3 && dgain >= 0x2aa) + offset = -3; + else if (dgain <= 0x2a9 && dgain >= 0x1e3) + offset = -6; + else if (dgain <= 0x1e2 && dgain >= 0x156) + offset = -9; + else if (dgain <= 0x155) + offset = -12; + else + offset = 0x0; + + return offset; +} + +static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); + return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); +} + +static void _dpk_gainloss(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, enum rtw89_rf_path path, + u8 kidx) +{ + _dpk_table_select(rtwdev, path, kidx, 1); + _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS); +} + +#define DPK_TXAGC_LOWER 0x2e +#define DPK_TXAGC_UPPER 0x3f +#define DPK_TXAGC_INVAL 0xff + +static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, s8 gain_offset) +{ + u8 txagc; + + txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK); + + if (txagc - gain_offset < DPK_TXAGC_LOWER) + 
txagc = DPK_TXAGC_LOWER; + else if (txagc - gain_offset > DPK_TXAGC_UPPER) + txagc = DPK_TXAGC_UPPER; + else + txagc = txagc - gain_offset; + + rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n", + gain_offset, txagc); + return txagc; +} + +enum dpk_agc_step { + DPK_AGC_STEP_SYNC_DGAIN, + DPK_AGC_STEP_GAIN_ADJ, + DPK_AGC_STEP_GAIN_LOSS_IDX, + DPK_AGC_STEP_GL_GT_CRITERION, + DPK_AGC_STEP_GL_LT_CRITERION, + DPK_AGC_STEP_SET_TX_GAIN, +}; + +static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) +{ + u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; + u8 i; + + rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl); + + if (is_check) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); + val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val1_i = abs(sign_extend32(val1_i, 11)); + val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val1_q = abs(sign_extend32(val1_q, 11)); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); + val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val2_i = abs(sign_extend32(val2_i, 11)); + val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val2_q = abs(sign_extend32(val2_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", + (val1_i * val1_i + val1_q * val1_q) / + (val2_i * val2_i + val2_q * val2_q)); + + } else { + for (i = 0; i < 32; i++) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] PAS_Read[%02d]= 0x%08x\n", i, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + } + if ((val1_i * val1_i + val1_q * val1_q) >= + ((val2_i * val2_i + val2_q * val2_q) * 8 / 5)) + return 1; + else + return 0; +} + +static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 init_txagc, + bool loss_only) +{ +#define DPK_AGC_ADJ_LMT 6 +#define DPK_DGAIN_UPPER 1922 +#define DPK_DGAIN_LOWER 342 +#define DPK_RXBB_UPPER 0x1f +#define DPK_RXBB_LOWER 0 +#define DPK_GL_CRIT 7 + u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0; + u8 agc_cnt = 0; + bool limited_rxbb = false; + s8 offset = 0; + u16 dgain = 0; + u8 step = DPK_AGC_STEP_SYNC_DGAIN; + bool goout = false; + + tmp_txagc = init_txagc; + + do { + switch (step) { + case DPK_AGC_STEP_SYNC_DGAIN: + if (_dpk_sync(rtwdev, phy, path, kidx)) { + tmp_txagc = DPK_TXAGC_INVAL; + goout = true; + break; + } + + dgain = _dpk_dgain_read(rtwdev); + + if (loss_only || limited_rxbb) + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + else + step = DPK_AGC_STEP_GAIN_ADJ; + break; + + case DPK_AGC_STEP_GAIN_ADJ: + tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + offset = _dpk_dgain_mapping(rtwdev, dgain); + + if (tmp_rxbb + offset > DPK_RXBB_UPPER) { + tmp_rxbb = DPK_RXBB_UPPER; + limited_rxbb = true; + } else if (tmp_rxbb + offset < DPK_RXBB_LOWER) { + tmp_rxbb = DPK_RXBB_LOWER; + limited_rxbb = true; + } else { + tmp_rxbb = tmp_rxbb + offset; + } + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, + tmp_rxbb); + if (offset != 0 || agc_cnt == 0) { + if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80) + _dpk_bypass_rxcfir(rtwdev, path, true); + else + _dpk_lbk_rxiqk(rtwdev, phy, path); + } + if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER) + step = DPK_AGC_STEP_SYNC_DGAIN; + else + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + + agc_cnt++; + 
break; + + case DPK_AGC_STEP_GAIN_LOSS_IDX: + _dpk_gainloss(rtwdev, phy, path, kidx); + tmp_gl_idx = _dpk_gainloss_read(rtwdev); + + if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || + tmp_gl_idx > DPK_GL_CRIT) + step = DPK_AGC_STEP_GL_GT_CRITERION; + else if (tmp_gl_idx == 0) + step = DPK_AGC_STEP_GL_LT_CRITERION; + else + step = DPK_AGC_STEP_SET_TX_GAIN; + break; + + case DPK_AGC_STEP_GL_GT_CRITERION: + if (tmp_txagc == DPK_TXAGC_LOWER) { + goout = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@lower bound!!\n"); + } else { + tmp_txagc = _dpk_set_offset(rtwdev, path, 3); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_GL_LT_CRITERION: + if (tmp_txagc == DPK_TXAGC_UPPER) { + goout = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@upper bound!!\n"); + } else { + tmp_txagc = _dpk_set_offset(rtwdev, path, -2); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_SET_TX_GAIN: + tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx); + goout = true; + agc_cnt++; + break; + + default: + goout = true; + break; + } + } while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, + tmp_rxbb); + + return tmp_txagc; +} + +static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) +{ + switch (order) { + case 0: + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1); + break; + case 1: + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); + rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); + break; + case 2: + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); + rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Wrong MDPD order!!(0x%x)\n", order); + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Set MDPD order to 0x%x for IDL\n", order); +} + +static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + _dpk_set_mdpd_para(rtwdev, 0x0); + _dpk_table_select(rtwdev, path, kidx, 1); + _dpk_one_shot(rtwdev, phy, path, MDPK_IDL); +} + +static void _dpk_fill_result(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 kidx, u8 gain, + u8 txagc) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + u16 pwsf = 0x78; + u8 gs = 0x5b; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc, + pwsf, gs); + + dpk->bp[path][kidx].txagc_dpk = txagc; + rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8), + 0x3F << ((gain << 3) + (kidx << 4)), txagc); + + dpk->bp[path][kidx].pwsf = pwsf; + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x1FF << (gain << 4), pwsf); + + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); + rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD); + + dpk->bp[path][kidx].gs = gs; + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x065b5b5b); + + rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD); + + 
rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL); +} + +static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + bool is_reload = false; + u8 idx, cur_band, cur_ch; + + cur_band = rtwdev->hal.current_band_type; + cur_ch = rtwdev->hal.current_channel; + + for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { + if (cur_band != dpk->bp[path][idx].band || + cur_ch != dpk->bp[path][idx].ch) + continue; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_MDPD, idx); + dpk->cur_idx[path] = idx; + is_reload = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] reload S%d[%d] success\n", path, idx); + } + + return is_reload; +} + +static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 txagc = 0, kidx = dpk->cur_idx[path]; + bool is_fail = false; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Start =========\n", path, + kidx); + + _rf_direct_cntrl(rtwdev, path, false); + txagc = _dpk_set_tx_pwr(rtwdev, gain, path); + _dpk_rf_setting(rtwdev, gain, path, kidx); + _dpk_rx_dck(rtwdev, phy, path); + + _dpk_kip_setting(rtwdev, path, kidx); + _dpk_manual_txcfir(rtwdev, path, true); + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + if (txagc == DPK_TXAGC_INVAL) + is_fail = true; + _dpk_get_thermal(rtwdev, kidx, path); + + _dpk_idl_mpa(rtwdev, phy, path, kidx, gain); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + _dpk_fill_result(rtwdev, path, kidx, gain, txagc); + _dpk_manual_txcfir(rtwdev, path, false); + + if (!is_fail) + dpk->bp[path][kidx].path_ok = true; + else + dpk->bp[path][kidx].path_ok = false; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, + is_fail ? 
"Check" : "Success"); + + return is_fail; +} + +static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy, u8 kpath) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR]; + u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}}; + u32 kip_reg[] = {R_RXIQC, R_IQK_RES}; + u8 path; + bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false}; + + if (dpk->is_dpk_reload_en) { + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + if (!(kpath & BIT(path))) + continue; + + reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + if (!reloaded[path] && dpk->bp[path][0].ch != 0) + dpk->cur_idx[path] = !dpk->cur_idx[path]; + else + _dpk_onoff(rtwdev, path, false); + } + } else { + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) + dpk->cur_idx[path] = 0; + } + + if ((kpath == RF_A && reloaded[RF_PATH_A]) || + (kpath == RF_B && reloaded[RF_PATH_B]) || + (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B])) + return; + + _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); + + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + if (!(kpath & BIT(path)) || reloaded[path]) + continue; + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, true); + _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); + _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); + _dpk_information(rtwdev, phy, path); + } + + _dpk_bb_afe_setting(rtwdev, phy, path, kpath); + + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + if (!(kpath & BIT(path)) || reloaded[path]) + continue; + + is_fail = _dpk_main(rtwdev, phy, path, 1); + _dpk_onoff(rtwdev, path, is_fail); + } + + _dpk_bb_afe_restore(rtwdev, phy, path, kpath); + _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); + + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + if (!(kpath & BIT(path)) || reloaded[path]) + continue; + + _dpk_kip_restore(rtwdev, path); + _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path); + _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, false); + } +} + +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_fem_info *fem = &rtwdev->fem; + + if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); + return true; + } + + return false; +} + +static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, kpath; + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + if (kpath & BIT(path)) + _dpk_onoff(rtwdev, path, true); + } +} + +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", + RTW8852A_DPK_VER, rtwdev->hal.cv, + RTW8852A_RF_REL_VERSION); + + if (_dpk_bypass_check(rtwdev, phy)) + _dpk_force_bypass(rtwdev, phy); + else + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 val, kidx = dpk->cur_idx[path]; + + val = dpk->is_dpk_enable && 
!off && dpk->bp[path][kidx].path_ok; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKBYTE3, 0x6 | val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, + kidx, dpk->is_dpk_enable && !off ? "enable" : "disable"); +} + +static void _dpk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 path, kidx; + u8 trk_idx = 0, txagc_rf = 0; + s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0; + u16 pwsf[2]; + u8 cur_ther; + s8 delta_ther[2] = {0}; + + for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { + kidx = dpk->cur_idx[path]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", + path, kidx, dpk->bp[path][kidx].ch); + + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] thermal now = %d\n", cur_ther); + + if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0) + delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) + delta_ther[path] = delta_ther[path] * 3 / 2; + else + delta_ther[path] = delta_ther[path] * 5 / 2; + + txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + RR_MODOPT_M_TXPWR); + + if (rtwdev->is_tssi_mode[path]) { + trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n", + txagc_rf, trk_idx); + + txagc_bb = + (s8)rtw89_phy_read32_mask(rtwdev, + R_TXAGC_BB + (path << 13), + MASKBYTE2); + txagc_bb_tp = + (u8)rtw89_phy_read32_mask(rtwdev, + R_TXAGC_TP + (path << 13), + B_TXAGC_TP); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", + txagc_bb_tp, txagc_bb); + + txagc_ofst = + (s8)rtw89_phy_read32_mask(rtwdev, + R_TXAGC_BB + (path << 13), + MASKBYTE3); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n", + txagc_ofst, delta_ther[path]); + + if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8), + BIT(15)) == 0x1) + txagc_ofst = 0; + + if (txagc_rf != 0 && cur_ther != 0) + ini_diff = txagc_ofst + delta_ther[path]; + + if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13), + B_P0_TXDPD) == 0x0) { + pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp - + txagc_bb + ini_diff + + tssi_info->extra_ofst[path]; + pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp - + txagc_bb + ini_diff + + tssi_info->extra_ofst[path]; + } else { + pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff + + tssi_info->extra_ofst[path]; + pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff + + tssi_info->extra_ofst[path]; + } + + } else { + pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + } + + if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 && + txagc_rf != 0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n", + pwsf[0], pwsf[1]); + + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x000001FF, pwsf[0]); + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x01FF0000, pwsf[1]); + } + } +} + +static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + if (band == 
RTW89_BAND_2G) + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1); +} + +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852a_tssi_sys_defs_2g_tbl, + &rtw8852a_tssi_sys_defs_5g_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl, + &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl, + &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl, + &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl); +} + +static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_dck_defs_a_tbl, + &rtw8852a_tssi_dck_defs_b_tbl); +} + +static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define __get_val(ptr, idx) \ +({ \ + s8 *__ptr = (ptr); \ + u8 __idx = (idx), __i, __v; \ + u32 __val = 0; \ + for (__i = 0; __i < 4; __i++) { \ + __v = (__ptr[__idx + __i]); \ + __val |= (__v << (8 * __i)); \ + } \ + __val; \ +}) + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u8 subband = rtwdev->hal.current_subband; + const u8 *thm_up_a = NULL; + const u8 *thm_down_a = NULL; + const u8 *thm_up_b = NULL; + const u8 *thm_down_b = NULL; + u8 thermal = 0xff; + s8 thm_ofst[64] = {0}; + u32 tmp = 0; + u8 i, j; + + switch (subband) { + case RTW89_CH_2G: + thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p; + thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n; + thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p; + thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n; + break; + case RTW89_CH_5G_BAND_1: + thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0]; + thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0]; + thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0]; + thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0]; + break; + case RTW89_CH_5G_BAND_3: + thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1]; + thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1]; + thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1]; + thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1]; + break; + case RTW89_CH_5G_BAND_4: + thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2]; + thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2]; + thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2]; + thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2]; + break; + } + + if (path == RF_PATH_A) { + thermal = tssi_info->thermal[RF_PATH_A]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); + + if (thermal == 
0xff) { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_a[i++] : + -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + thm_up_a[i++] : + thm_up_a[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = __get_val(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); + + } else { + thermal = tssi_info->thermal[RF_PATH_B]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_b[i++] : + -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
+ thm_up_b[i++] : + thm_up_b[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = __get_val(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); + } +#undef __get_val +} + +static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl, + &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl); +} + +static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_slope_cal_org_defs_a_tbl, + &rtw8852a_tssi_slope_cal_org_defs_b_tbl); +} + +static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl, + &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl); +} + +static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_slope_defs_a_tbl, + &rtw8852a_tssi_slope_defs_b_tbl); +} + +static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_track_defs_a_tbl, + &rtw8852a_tssi_track_defs_b_tbl); +} + +static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl, + &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl); +} + +static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + u8 subband = rtwdev->hal.current_subband; + + switch (subband) { + case RTW89_CH_2G: + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_pak_defs_a_2g_tbl, + &rtw8852a_tssi_pak_defs_b_2g_tbl); + break; + case RTW89_CH_5G_BAND_1: + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_pak_defs_a_5g_1_tbl, + &rtw8852a_tssi_pak_defs_b_5g_1_tbl); + break; + case RTW89_CH_5G_BAND_3: + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_pak_defs_a_5g_3_tbl, + &rtw8852a_tssi_pak_defs_b_5g_3_tbl); + break; + case RTW89_CH_5G_BAND_4: + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852a_tssi_pak_defs_a_5g_4_tbl, + &rtw8852a_tssi_pak_defs_b_5g_4_tbl); + break; + } +} + +static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + _tssi_set_track(rtwdev, phy, i); + _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); + + rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A, + &rtw8852a_tssi_enable_defs_a_tbl, + &rtw8852a_tssi_enable_defs_b_tbl); + + tssi_info->base_thermal[i] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]); + rtwdev->is_tssi_mode[i] = true; + } +} + +static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl); + + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = 
false; +} + +static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 13: + return 4; + case 14: + return 5; + } + + return 0; +} + +#define TSSI_EXTRA_GROUP_BIT (BIT(31)) +#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) +#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) + +static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 14: + return 4; + case 36 ... 40: + return 5; + case 41 ... 43: + return TSSI_EXTRA_GROUP(5); + case 44 ... 48: + return 6; + case 49 ... 51: + return TSSI_EXTRA_GROUP(6); + case 52 ... 56: + return 7; + case 57 ... 59: + return TSSI_EXTRA_GROUP(7); + case 60 ... 64: + return 8; + case 100 ... 104: + return 9; + case 105 ... 107: + return TSSI_EXTRA_GROUP(9); + case 108 ... 112: + return 10; + case 113 ... 115: + return TSSI_EXTRA_GROUP(10); + case 116 ... 120: + return 11; + case 121 ... 123: + return TSSI_EXTRA_GROUP(11); + case 124 ... 128: + return 12; + case 129 ... 131: + return TSSI_EXTRA_GROUP(12); + case 132 ... 136: + return 13; + case 137 ... 139: + return TSSI_EXTRA_GROUP(13); + case 140 ... 144: + return 14; + case 149 ... 153: + return 15; + case 154 ... 156: + return TSSI_EXTRA_GROUP(15); + case 157 ... 161: + return 16; + case 162 ... 164: + return TSSI_EXTRA_GROUP(16); + case 165 ... 169: + return 17; + case 170 ... 172: + return TSSI_EXTRA_GROUP(17); + case 173 ... 177: + return 18; + } + + return 0; +} + +static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 8: + return 0; + case 9 ... 14: + return 1; + case 36 ... 48: + return 2; + case 52 ... 64: + return 3; + case 100 ... 112: + return 4; + case 116 ... 128: + return 5; + case 132 ... 144: + return 6; + case 149 ... 
177: + return 7; + } + + return 0; +} + +static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u32 gidx, gidx_1st, gidx_2nd; + s8 de_1st = 0; + s8 de_2nd = 0; + s8 val; + + gidx = _tssi_get_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", + path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + + return val; +} + +static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u32 tgidx, tgidx_1st, tgidx_2nd; + s8 tde_1st = 0; + s8 tde_2nd = 0; + s8 val; + + tgidx = _tssi_get_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + + return val; +} + +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy) +{ +#define __DE_MASK 0x003ff000 + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858}; + static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860}; + static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838}; + static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840}; + static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848}; + static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850}; + static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828}; + static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830}; + u8 ch = rtwdev->hal.current_channel; + u8 i, gidx; + s8 ofdm_de; + s8 trim_de; + s32 val; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", + phy, ch); + + for (i = 0; i < RF_PATH_NUM_8852A; i++) { + gidx = _tssi_get_cck_group(rtwdev, ch); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = tssi_info->tssi_cck[i][gidx] + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", + i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); + + rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", + r_cck_long[i], + rtw89_phy_read32_mask(rtwdev, r_cck_long[i], + 
__DE_MASK)); + + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = ofdm_de + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", + i, ofdm_de, trim_de); + + rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", + r_mcs_20m[i], + rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i], + __DE_MASK)); + } +#undef __DE_MASK +} + +static void _tssi_track(struct rtw89_dev *rtwdev) +{ + static const u32 tx_gain_scale_table[] = { + 0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c, + 0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1 + }; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 path; + u8 cur_ther; + s32 delta_ther = 0, gain_offset_int, gain_offset_float; + s8 gain_offset; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n", + __func__); + + if (!rtwdev->is_tssi_mode[RF_PATH_A]) + return; + if (!rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) { + if (!tssi_info->tssi_tracking_check[path]) { + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] return!!!\n"); + continue; + } + + cur_ther = (u8)rtw89_phy_read32_mask(rtwdev, + R_TSSI_THER + (path << 13), + B_TSSI_THER); + + if (cur_ther == 0 || tssi_info->base_thermal[path] == 0) + continue; + + delta_ther = cur_ther - tssi_info->base_thermal[path]; + + gain_offset = (s8)delta_ther * 15 / 10; + + tssi_info->extra_ofst[path] = gain_offset; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n", + tssi_info->base_thermal[path], gain_offset, path); + + gain_offset_int = gain_offset >> 3; + gain_offset_float = gain_offset & 7; + + if (gain_offset_int > 15) + gain_offset_int = 15; + else if (gain_offset_int < -16) + gain_offset_int = -16; + + rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13), + B_DPD_OFT_EN, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13), + B_TXGAIN_SCALE_EN, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13), + B_DPD_OFT_ADDR, gain_offset_int); + + rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13), + B_TXGAIN_SCALE_OFT, + tx_gain_scale_table[gain_offset_float]); + } +} + +static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel, ch_tmp; + u8 bw = rtwdev->hal.current_band_width; + u8 subband = rtwdev->hal.current_subband; + s8 power; + s32 xdbm; + + if (bw == RTW89_CHANNEL_WIDTH_40) + ch_tmp = ch - 2; + else if (bw == RTW89_CHANNEL_WIDTH_80) + ch_tmp = ch - 6; + else + ch_tmp = ch; + + power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX, + RTW89_RS_MCS, RTW89_NONBF, ch_tmp); + + xdbm = power * 100 / 4; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n", + __func__, phy, xdbm); + + if (xdbm > 1800 && subband == RTW89_CH_2G) { + tssi_info->tssi_tracking_check[RF_PATH_A] = true; + tssi_info->tssi_tracking_check[RF_PATH_B] = true; + } else { + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl); + 
tssi_info->extra_ofst[RF_PATH_A] = 0; + tssi_info->extra_ofst[RF_PATH_B] = 0; + tssi_info->tssi_tracking_check[RF_PATH_A] = false; + tssi_info->tssi_tracking_check[RF_PATH_B] = false; + } +} + +static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + u8 path, s16 pwr_dbm, u8 enable) +{ + rtw8852a_bb_set_plcp_tx(rtwdev); + rtw8852a_bb_cfg_tx_path(rtwdev, path); + rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy); + rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy); +} + +static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chip_info *mac_reg = rtwdev->chip; + u8 ch = rtwdev->hal.current_channel, ch_tmp; + u8 bw = rtwdev->hal.current_band_width; + u16 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0); + s8 power; + s16 xdbm; + u32 i, tx_counter = 0; + + if (bw == RTW89_CHANNEL_WIDTH_40) + ch_tmp = ch - 2; + else if (bw == RTW89_CHANNEL_WIDTH_80) + ch_tmp = ch - 6; + else + ch_tmp = ch; + + power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX, + RTW89_RS_OFDM, RTW89_NONBF, ch_tmp); + + xdbm = (power * 100) >> mac_reg->txpwr_factor_mac; + + if (xdbm > 1800) + xdbm = 68; + else + xdbm = power * 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n", + __func__, phy, power, xdbm); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); + rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy)); + tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + + _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true); + mdelay(15); + _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false); + + tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) - + tx_counter; + + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_A] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, + MASKBYTE3); + + if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0) + break; + } + } + + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_B] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, + MASKBYTE3); + + if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0) + break; + } + } + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] %s: tx counter=%d\n", + __func__, tx_counter); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n", + tssi_info->default_txagc_offset[RF_PATH_A], + tssi_info->default_txagc_offset[RF_PATH_B]); + + rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0); + + rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); +} + +void rtw8852a_rck(struct rtw89_dev *rtwdev) +{ + u8 path; + + for (path = 0; path < 2; path++) + _rck(rtwdev, path); +} + +void rtw8852a_dack(struct rtw89_dev *rtwdev) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); + _dac_cal(rtwdev, false); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); +} + +void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u16 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, 
phy_idx, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); + rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _iqk_init(rtwdev); + if (rtwdev->dbcc_en) + _iqk_dbcc(rtwdev, phy_idx); + else + _iqk(rtwdev, phy_idx, false); + + rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); +} + +void rtw8852a_iqk_track(struct rtw89_dev *rtwdev) +{ + _iqk_track(rtwdev); +} + +void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + bool is_afe) +{ + u16 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); + rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _rx_dck(rtwdev, phy_idx, is_afe); + + rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); +} + +void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u16 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); + rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + rtwdev->dpk.is_dpk_enable = true; + rtwdev->dpk.is_dpk_reload_en = false; + _dpk(rtwdev, phy_idx, false); + + rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); +} + +void rtw8852a_dpk_track(struct rtw89_dev *rtwdev) +{ + _dpk_track(rtwdev); +} + +void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", + __func__, phy); + + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_dac_gain_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_set_rf_gap_tbl(rtwdev, phy, i); + _tssi_set_slope(rtwdev, phy, i); + _tssi_pak(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_high_power(rtwdev, phy); + _tssi_pre_tx(rtwdev, phy); +} + +void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", + __func__, phy); + + if (!rtwdev->is_tssi_mode[RF_PATH_A]) + return; + if (!rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_pak(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +void rtw8852a_tssi_track(struct rtw89_dev *rtwdev) +{ + _tssi_track(rtwdev); +} + +static +void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + /* disable */ + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0); + rtw89_phy_write32_mask(rtwdev, 
R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0); + + /* enable */ + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl); +} + +static +void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + /* disable */ + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); + + /* enable */ + rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl); +} + +static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (enable) { + /* SCAN_START */ + _rtw8852a_tssi_avg_scan(rtwdev, phy); + } else { + /* SCAN_END */ + _rtw8852a_tssi_set_avg(rtwdev, phy); + } +} + +static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 i; + + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (enable) { + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_A] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, + B_TXAGC_BB); + if (tssi_info->default_txagc_offset[RF_PATH_A]) + break; + } + } + + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_B] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, + B_TXAGC_BB_S1); + if (tssi_info->default_txagc_offset[RF_PATH_B]) + break; + } + } + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_A]); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_B]); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); + } +} + +void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, + bool scan_start, enum rtw89_phy_idx phy_idx) +{ + if (scan_start) { + rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true); + rtw8852a_tssi_set_avg(rtwdev, phy_idx, true); + } else { + rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false); + rtw8852a_tssi_set_avg(rtwdev, phy_idx, false); + } +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h new file mode 100644 index 00000000000000..ea36553a76b7c8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_8852A_RFK_H__ +#define __RTW89_8852A_RFK_H__ 
+ +#include "core.h" + +void rtw8852a_rck(struct rtw89_dev *rtwdev); +void rtw8852a_dack(struct rtw89_dev *rtwdev); +void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852a_iqk_track(struct rtw89_dev *rtwdev); +void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + bool is_afe); +void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852a_dpk_track(struct rtw89_dev *rtwdev); +void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852a_tssi_track(struct rtw89_dev *rtwdev); +void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c new file mode 100644 index 00000000000000..51057009050242 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c @@ -0,0 +1,1607 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "rtw8852a_rfk_table.h" + +static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = { + DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001), + DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002), + DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001), + DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002), + DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005), + DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005), + DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005), + DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005), + DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033), + DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033), + DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005), + DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005), + DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005), + DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005), + DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033), + DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033), + DECL_RFK_WM(0x0300, 0xff000000, 0x00000019), + DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019), + DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d), + DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044), + DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042), + DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002), + DECL_RFK_WM(0x0020, 0x00006000, 0x00000003), + DECL_RFK_WM(0x0024, 0x00006000, 0x00000003), + DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e), + DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e), + DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004), + DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004), + DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000), + DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs); + +static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = { + DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033), + DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033), + DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033), + DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g); + +static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = { + DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044), + DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044), + DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044), + DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = { + DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f), + DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f), 
+ DECL_RFK_WM(0x5800, 0x10000000, 0x00000000), + DECL_RFK_WM(0x5800, 0x20000000, 0x00000000), + DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000), + DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040), + DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040), + DECL_RFK_WM(0x580c, 0x00008000, 0x00000000), + DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000), + DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x5810, 0x00000200, 0x00000000), + DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000), + DECL_RFK_WM(0x5810, 0x00010000, 0x00000001), + DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000), + DECL_RFK_WM(0x5810, 0x01000000, 0x00000001), + DECL_RFK_WM(0x5810, 0x06000000, 0x00000000), + DECL_RFK_WM(0x5810, 0x38000000, 0x00000003), + DECL_RFK_WM(0x5810, 0x40000000, 0x00000001), + DECL_RFK_WM(0x5810, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000), + DECL_RFK_WM(0x5814, 0x00001000, 0x00000001), + DECL_RFK_WM(0x5814, 0x00002000, 0x00000000), + DECL_RFK_WM(0x5814, 0x00004000, 0x00000001), + DECL_RFK_WM(0x5814, 0x00038000, 0x00000005), + DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000), + DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000), + DECL_RFK_WM(0x5814, 0x18000000, 0x00000000), + DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000), + DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000), + DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018), + DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016), + DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000), + DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280), + DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x581c, 0x00100000, 0x00000000), + DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008), + DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e), + DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008), + DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e), + DECL_RFK_WM(0x581c, 0x20000000, 0x00000000), + DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080), + DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f), + DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000), + DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000), + DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5840, 
0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x5860, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff), + DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000), + DECL_RFK_WM(0x5864, 0x04000000, 0x00000000), + DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd), + DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5), + DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd), + DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5), + DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000), + DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000), + DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000), + DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000), + DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002), + DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000), + DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a), + DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028), + DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076), + DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000), + DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000), + DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f), + DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003), + DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001), + DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002), + DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002), + DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007), + DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f), + DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000), + DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff), + DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000), + DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000), + DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101), + DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004), + DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100), + DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000), + DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000), + DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff), + DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100), + DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c), + DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c), + DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002), + DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800), + DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f), + DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000), + DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000), + DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001), + DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff), + DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + 
DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = { + DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f), + DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f), + DECL_RFK_WM(0x7800, 0x10000000, 0x00000000), + DECL_RFK_WM(0x7800, 0x20000000, 0x00000000), + DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000), + DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040), + DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040), + DECL_RFK_WM(0x780c, 0x00008000, 0x00000000), + DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000), + DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x7810, 0x00000200, 0x00000000), + DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000), + DECL_RFK_WM(0x7810, 0x00010000, 0x00000001), + DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000), + DECL_RFK_WM(0x7810, 0x01000000, 0x00000001), + DECL_RFK_WM(0x7810, 0x06000000, 0x00000000), + DECL_RFK_WM(0x7810, 0x38000000, 0x00000003), + DECL_RFK_WM(0x7810, 0x40000000, 0x00000001), + DECL_RFK_WM(0x7810, 0x80000000, 0x00000000), + DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000), + DECL_RFK_WM(0x7814, 0x00001000, 0x00000001), + DECL_RFK_WM(0x7814, 0x00002000, 0x00000000), + DECL_RFK_WM(0x7814, 0x00004000, 0x00000001), + DECL_RFK_WM(0x7814, 0x00038000, 0x00000005), + DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000), + DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000), + DECL_RFK_WM(0x7814, 0x18000000, 0x00000000), + DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000), + DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000), + DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018), + DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016), + DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000), + DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280), + DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x781c, 0x00100000, 0x00000000), + DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008), + DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e), + DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008), + DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e), + DECL_RFK_WM(0x781c, 0x20000000, 0x00000000), + DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080), + DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000), + DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000), + DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000), + DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7860, 0x00000fff, 
0x00000121), + DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000), + DECL_RFK_WM(0x7860, 0x80000000, 0x00000000), + DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff), + DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000), + DECL_RFK_WM(0x7864, 0x04000000, 0x00000000), + DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd), + DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5), + DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd), + DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5), + DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000), + DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000), + DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000), + DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000), + DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002), + DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000), + DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a), + DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028), + DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076), + DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000), + DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000), + DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f), + DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003), + DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001), + DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002), + DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002), + DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007), + DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f), + DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000), + DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff), + DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000), + DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000), + DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101), + DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004), + DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100), + DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000), + DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000), + DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff), + DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100), + DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c), + DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c), + DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002), + DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800), + DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f), + 
DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080), + DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000), + DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000), + DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001), + DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff), + DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = { + DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c), + DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = { + DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c), + DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = { + DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc), + DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = { + DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc), + DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = { + DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000), + DECL_RFK_WM(0x5814, 0x00001000, 0x00000001), + DECL_RFK_WM(0x5814, 0x00002000, 0x00000001), + DECL_RFK_WM(0x5814, 0x00004000, 0x00000001), + DECL_RFK_WM(0x5814, 0x00038000, 0x00000005), + DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003), + DECL_RFK_WM(0x5814, 0x18000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = { + DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000), + DECL_RFK_WM(0x7814, 0x00001000, 0x00000001), + DECL_RFK_WM(0x7814, 0x00002000, 0x00000001), + DECL_RFK_WM(0x7814, 0x00004000, 0x00000001), + DECL_RFK_WM(0x7814, 0x00038000, 0x00000005), + DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003), + DECL_RFK_WM(0x7814, 0x18000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = { + DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000), + DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001), + DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000), + 
DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = { + DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000), + DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001), + DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7aa8, 0xffffffff, 
0x00000000), + DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000), + DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = { + DECL_RFK_WM(0x581c, 0x00100000, 0x00000000), + DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001), + DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000), + DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001), + DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002), + DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003), + DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040), + DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040), + DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040), + DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x5898, 0xff000000, 0x00000040), + DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040), + DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040), + DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040), + DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x589c, 0xff000000, 0x00000040), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = { + DECL_RFK_WM(0x781c, 0x00100000, 0x00000000), + DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001), + DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000), + DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001), + DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002), + DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003), + DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040), + DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040), + DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040), + DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x7898, 0xff000000, 0x00000040), + DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040), + DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040), + DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040), + DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040), + DECL_RFK_WM(0x789c, 0xff000000, 0x00000040), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = { + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = { + DECL_RFK_WM(0x7814, 0x000003ff, 
0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = { + DECL_RFK_WM(0x5820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5818, 0x10000000, 0x00000000), + DECL_RFK_WM(0x5814, 0x00000800, 0x00000001), + DECL_RFK_WM(0x581c, 0x20000000, 0x00000001), + DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001), + DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280), + DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000), + DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000), + DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a), + DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028), + DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076), + DECL_RFK_WM(0x5810, 0x20000000, 0x00000000), + DECL_RFK_WM(0x5814, 0x20000000, 0x00000001), + DECL_RFK_WM(0x580c, 0x10000000, 0x00000001), + DECL_RFK_WM(0x580c, 0x40000000, 0x00000001), + DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = { + DECL_RFK_WM(0x7820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x7818, 0x10000000, 0x00000000), + DECL_RFK_WM(0x7814, 0x00000800, 0x00000001), + DECL_RFK_WM(0x781c, 0x20000000, 0x00000001), + DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001), + DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280), + DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000), + DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000), + DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a), 
+ DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028), + DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076), + DECL_RFK_WM(0x7810, 0x20000000, 0x00000000), + DECL_RFK_WM(0x7814, 0x20000000, 0x00000001), + DECL_RFK_WM(0x780c, 0x10000000, 0x00000001), + DECL_RFK_WM(0x780c, 0x40000000, 0x00000001), + DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2), + DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000), + DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121), + DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000), + DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = { + DECL_RFK_WM(0x5820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5818, 0x18000000, 0x00000000), + DECL_RFK_WM(0x5814, 0x00000800, 0x00000000), + DECL_RFK_WM(0x581c, 0x20000000, 0x00000001), + DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff), + DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080), + DECL_RFK_WM(0x5814, 0x01000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = { + DECL_RFK_WM(0x7820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x7818, 0x18000000, 0x00000000), + DECL_RFK_WM(0x7814, 0x00000800, 0x00000000), + DECL_RFK_WM(0x781c, 0x20000000, 0x00000001), + DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff), + DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200), + DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080), + DECL_RFK_WM(0x7814, 0x01000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = { + DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000), + DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001), + DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000), + DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000), + DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = { + DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000), + DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001), + DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000), + DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000), + DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000), +}; + 
+DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = { + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = { + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = { + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = { + DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5), + DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a), + DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011), + DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = { + DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2), + DECL_RFK_WM(0x78a8, 0x07fc0000, 
0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = { + DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc), + DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = { + DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002), + DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3); + +static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = { + DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000), + DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000), + DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0), + DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016), + DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000), + DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000), + DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f), + DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4); + +static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = { + DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001), + DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0), + DECL_RFK_WM(0x5818, 0x10000000, 0x00000000), + DECL_RFK_WM(0x5818, 0x10000000, 0x00000001), + DECL_RFK_WM(0x5820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5820, 0x80000000, 0x00000001), + DECL_RFK_WM(0x5818, 0x18000000, 0x00000003), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a); + +static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = { + DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001), + DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0), + DECL_RFK_WM(0x7818, 0x10000000, 0x00000000), + DECL_RFK_WM(0x7818, 0x10000000, 0x00000001), + DECL_RFK_WM(0x7820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x7820, 0x80000000, 0x00000001), + DECL_RFK_WM(0x7818, 0x18000000, 0x00000003), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b); + +static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = { + DECL_RFK_WM(0x5820, 0x80000000, 0x00000000), + DECL_RFK_WM(0x5818, 0x18000000, 0x00000001), + DECL_RFK_WM(0x7820, 0x80000000, 0x00000000), + 
DECL_RFK_WM(0x7818, 0x18000000, 0x00000001), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs); + +static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = { + DECL_RFK_WM(0x5820, 0x80000000, 0x0), + DECL_RFK_WM(0x5820, 0x80000000, 0x1), + DECL_RFK_WM(0x5818, 0x18000000, 0x3), + DECL_RFK_WM(0x7820, 0x80000000, 0x0), + DECL_RFK_WM(0x7820, 0x80000000, 0x1), + DECL_RFK_WM(0x7818, 0x18000000, 0x3), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab); + +static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = { + DECL_RFK_WM(0x5800, 0x10000000, 0x00000000), + DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000), + DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000), + DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400), + DECL_RFK_WM(0x7800, 0x10000000, 0x00000000), + DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000), + DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000), + DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400), +}; + +DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs); + +static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = { + DECL_RFK_WC(0x12ec, 0x00008000), + DECL_RFK_WS(0x12ec, 0x00008000), + DECL_RFK_WC(0x5e00, 0x00000001), + DECL_RFK_WS(0x5e00, 0x00000001), + DECL_RFK_WC(0x32ec, 0x00008000), + DECL_RFK_WS(0x32ec, 0x00008000), + DECL_RFK_WC(0x7e00, 0x00000001), + DECL_RFK_WS(0x7e00, 0x00000001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = { + DECL_RFK_WS(0x5e00, 0x00000008), + DECL_RFK_WS(0x5e50, 0x00000008), + DECL_RFK_WS(0x5e10, 0x80000000), + DECL_RFK_WS(0x5e60, 0x80000000), + DECL_RFK_WC(0x5e00, 0x00000008), + DECL_RFK_WC(0x5e50, 0x00000008), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = { + DECL_RFK_WS(0x7e00, 0x00000008), + DECL_RFK_WS(0x7e50, 0x00000008), + DECL_RFK_WS(0x7e10, 0x80000000), + DECL_RFK_WS(0x7e60, 0x80000000), + DECL_RFK_WC(0x7e00, 0x00000008), + DECL_RFK_WC(0x7e50, 0x00000008), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = { + DECL_RFK_WC(0x20f4, 0x01000000), + DECL_RFK_WS(0x20f8, 0x80000000), + DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001), + DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002), + DECL_RFK_WC(0x20f0, 0x0000000f), + DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = { + DECL_RFK_WC(0x20f4, 0x01000000), + DECL_RFK_WS(0x20f8, 0x80000000), + DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001), + DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002), + DECL_RFK_WC(0x20f0, 0x0000000f), + DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = { + DECL_RFK_WC(0x12d8, 0x00000030), + DECL_RFK_WC(0x32d8, 0x00000030), + DECL_RFK_WS(0x12b8, 0x40000000), + DECL_RFK_WC(0x032c, 0x40000000), + DECL_RFK_WC(0x032c, 0x00400000), + DECL_RFK_WS(0x032c, 0x00400000), + DECL_RFK_WS(0x030c, 0x0f000000), + DECL_RFK_WC(0x032c, 0x00010000), + DECL_RFK_WS(0x12dc, 0x00000002), + DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = { + DECL_RFK_WS(0x12d8, 0x000000c0), + DECL_RFK_WS(0x12d8, 0x00000800), + DECL_RFK_WC(0x12d8, 0x00000800), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001), 
+}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = { + DECL_RFK_WC(0x12dc, 0x00000002), + DECL_RFK_WS(0x032c, 0x00010000), + DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c), + DECL_RFK_WS(0x032c, 0x40000000), + DECL_RFK_WC(0x12b8, 0x40000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = { + DECL_RFK_WS(0x32b8, 0x40000000), + DECL_RFK_WC(0x032c, 0x40000000), + DECL_RFK_WC(0x032c, 0x00400000), + DECL_RFK_WS(0x032c, 0x00400000), + DECL_RFK_WS(0x030c, 0x0f000000), + DECL_RFK_WC(0x032c, 0x00010000), + DECL_RFK_WS(0x32dc, 0x00000002), + DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = { + DECL_RFK_WS(0x32d8, 0x000000c0), + DECL_RFK_WS(0x32d8, 0x00000800), + DECL_RFK_WC(0x32d8, 0x00000800), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = { + DECL_RFK_WC(0x32dc, 0x00000002), + DECL_RFK_WS(0x032c, 0x00010000), + DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c), + DECL_RFK_WS(0x032c, 0x40000000), + DECL_RFK_WC(0x32b8, 0x40000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = { + DECL_RFK_WC(0x032c, 0x40000000), + DECL_RFK_WS(0x030c, 0x0f000000), + DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003), + DECL_RFK_WC(0x032c, 0x00010000), + DECL_RFK_WS(0x12dc, 0x00000001), + DECL_RFK_WS(0x12e8, 0x00000004), + DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = { + DECL_RFK_WC(0x032c, 0x40000000), + DECL_RFK_WS(0x030c, 0x0f000000), + DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003), + DECL_RFK_WC(0x032c, 0x00010000), + DECL_RFK_WS(0x32dc, 0x00000001), + DECL_RFK_WS(0x32e8, 0x00000004), + DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = { + DECL_RFK_WC(0x12dc, 0x00000001), + DECL_RFK_WC(0x12e8, 0x00000004), + DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000), + DECL_RFK_WM(0x032c, 0x00010000, 0x00000001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = { + DECL_RFK_WC(0x32dc, 0x00000001), + DECL_RFK_WC(0x32e8, 0x00000004), + DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000), + DECL_RFK_WM(0x032c, 0x00010000, 0x00000001), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = { + DECL_RFK_WS(0x5e00, 0x00000008), + DECL_RFK_WC(0x5e10, 0x80000000), + DECL_RFK_WS(0x5e50, 0x00000008), + DECL_RFK_WC(0x5e60, 0x80000000), + DECL_RFK_WS(0x12a0, 0x00008000), + DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003), + DECL_RFK_WS(0x12b8, 0x40000000), + DECL_RFK_WS(0x030c, 0x10000000), + DECL_RFK_WC(0x032c, 0x80000000), + DECL_RFK_WS(0x12e0, 0x00010000), + DECL_RFK_WS(0x12e4, 0x0c000000), + DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030), + DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030), + DECL_RFK_WC(0x5e00, 0x0c000000), + DECL_RFK_WC(0x5e50, 0x0c000000), + DECL_RFK_WC(0x5e0c, 0x00000008), + DECL_RFK_WC(0x5e5c, 0x00000008), + 
DECL_RFK_WS(0x5e0c, 0x00000001), + DECL_RFK_WS(0x5e5c, 0x00000001), + DECL_RFK_DELAY(1), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = { + DECL_RFK_WC(0x12e4, 0x0c000000), + DECL_RFK_WS(0x5e0c, 0x00000008), + DECL_RFK_WS(0x5e5c, 0x00000008), + DECL_RFK_DELAY(1), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = { + DECL_RFK_WC(0x5e0c, 0x00000001), + DECL_RFK_WC(0x5e5c, 0x00000001), + DECL_RFK_WC(0x12e0, 0x00010000), + DECL_RFK_WC(0x12a0, 0x00008000), + DECL_RFK_WS(0x12a0, 0x00007000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = { + DECL_RFK_WS(0x7e00, 0x00000008), + DECL_RFK_WC(0x7e10, 0x80000000), + DECL_RFK_WS(0x7e50, 0x00000008), + DECL_RFK_WC(0x7e60, 0x80000000), + DECL_RFK_WS(0x32a0, 0x00008000), + DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003), + DECL_RFK_WS(0x32b8, 0x40000000), + DECL_RFK_WS(0x030c, 0x10000000), + DECL_RFK_WC(0x032c, 0x80000000), + DECL_RFK_WS(0x32e0, 0x00010000), + DECL_RFK_WS(0x32e4, 0x0c000000), + DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030), + DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030), + DECL_RFK_WC(0x7e00, 0x0c000000), + DECL_RFK_WC(0x7e50, 0x0c000000), + DECL_RFK_WC(0x7e0c, 0x00000008), + DECL_RFK_WC(0x7e5c, 0x00000008), + DECL_RFK_WS(0x7e0c, 0x00000001), + DECL_RFK_WS(0x7e5c, 0x00000001), + DECL_RFK_DELAY(1), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = { + DECL_RFK_WC(0x32e4, 0x0c000000), + DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001), + DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001), + DECL_RFK_DELAY(1), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = { + DECL_RFK_WC(0x7e0c, 0x00000001), + DECL_RFK_WC(0x7e5c, 0x00000001), + DECL_RFK_WC(0x32e0, 0x00010000), + DECL_RFK_WC(0x32a0, 0x00008000), + DECL_RFK_WS(0x32a0, 0x00007000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101), + DECL_RFK_WS(0x12b8, 0x40000000), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041), + DECL_RFK_WS(0x12b8, 0x10000000), + DECL_RFK_WS(0x58c8, 0x01000000), + DECL_RFK_WS(0x5864, 0xc0000000), + DECL_RFK_WS(0x2008, 0x01ffffff), + DECL_RFK_WS(0x0c1c, 0x00000004), + DECL_RFK_WS(0x0700, 0x08000000), + DECL_RFK_WS(0x0c70, 0x000003ff), + DECL_RFK_WS(0x0c60, 0x00000003), + DECL_RFK_WS(0x0c6c, 0x00000001), + DECL_RFK_WS(0x58ac, 0x08000000), + DECL_RFK_WS(0x0c3c, 0x00000200), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = { + DECL_RFK_WS(0x4490, 0x80000000), + DECL_RFK_WS(0x12a0, 0x00007000), + DECL_RFK_WS(0x12a0, 0x00008000), + DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003), + DECL_RFK_WS(0x12a0, 0x00080000), + DECL_RFK_WS(0x0700, 0x01000000), + DECL_RFK_WM(0x0700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111), + DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202), + DECL_RFK_WS(0x32b8, 0x40000000), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 
0x00000041), + DECL_RFK_WS(0x32b8, 0x10000000), + DECL_RFK_WS(0x78c8, 0x01000000), + DECL_RFK_WS(0x7864, 0xc0000000), + DECL_RFK_WS(0x2008, 0x01ffffff), + DECL_RFK_WS(0x2c1c, 0x00000004), + DECL_RFK_WS(0x2700, 0x08000000), + DECL_RFK_WS(0x0c70, 0x000003ff), + DECL_RFK_WS(0x0c60, 0x00000003), + DECL_RFK_WS(0x0c6c, 0x00000001), + DECL_RFK_WS(0x78ac, 0x08000000), + DECL_RFK_WS(0x2c3c, 0x00000200), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = { + DECL_RFK_WS(0x6490, 0x80000000), + DECL_RFK_WS(0x32a0, 0x00007000), + DECL_RFK_WS(0x32a0, 0x00008000), + DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003), + DECL_RFK_WS(0x32a0, 0x00080000), + DECL_RFK_WS(0x2700, 0x01000000), + DECL_RFK_WM(0x2700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222), + DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303), + DECL_RFK_WS(0x12b8, 0x40000000), + DECL_RFK_WS(0x32b8, 0x40000000), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041), + DECL_RFK_WS(0x12b8, 0x10000000), + DECL_RFK_WS(0x58c8, 0x01000000), + DECL_RFK_WS(0x78c8, 0x01000000), + DECL_RFK_WS(0x5864, 0xc0000000), + DECL_RFK_WS(0x7864, 0xc0000000), + DECL_RFK_WS(0x2008, 0x01ffffff), + DECL_RFK_WS(0x0c1c, 0x00000004), + DECL_RFK_WS(0x0700, 0x08000000), + DECL_RFK_WS(0x0c70, 0x000003ff), + DECL_RFK_WS(0x0c60, 0x00000003), + DECL_RFK_WS(0x0c6c, 0x00000001), + DECL_RFK_WS(0x58ac, 0x08000000), + DECL_RFK_WS(0x78ac, 0x08000000), + DECL_RFK_WS(0x0c3c, 0x00000200), + DECL_RFK_WS(0x2344, 0x80000000), + DECL_RFK_WS(0x4490, 0x80000000), + DECL_RFK_WS(0x12a0, 0x00007000), + DECL_RFK_WS(0x12a0, 0x00008000), + DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003), + DECL_RFK_WS(0x12a0, 0x00080000), + DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003), + DECL_RFK_WS(0x32a0, 0x00080000), + DECL_RFK_WS(0x0700, 0x01000000), + DECL_RFK_WM(0x0700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333), + DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000), + DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101), + DECL_RFK_WC(0x12b8, 0x40000000), + DECL_RFK_WC(0x5864, 0xc0000000), + DECL_RFK_WC(0x2008, 0x01ffffff), + DECL_RFK_WC(0x0c1c, 0x00000004), + DECL_RFK_WC(0x0700, 0x08000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WC(0x12a0, 0x000ff000), + DECL_RFK_WC(0x0700, 0x07000000), + DECL_RFK_WC(0x5864, 0x20000000), + DECL_RFK_WC(0x0c3c, 0x00000200), + DECL_RFK_WC(0x20fc, 0xffff0000), + DECL_RFK_WC(0x58c8, 0x01000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202), + DECL_RFK_WC(0x32b8, 0x40000000), + DECL_RFK_WC(0x7864, 0xc0000000), + DECL_RFK_WC(0x2008, 0x01ffffff), + DECL_RFK_WC(0x2c1c, 0x00000004), + DECL_RFK_WC(0x2700, 0x08000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WC(0x32a0, 0x000ff000), + DECL_RFK_WC(0x2700, 0x07000000), + DECL_RFK_WC(0x7864, 0x20000000), + DECL_RFK_WC(0x2c3c, 0x00000200), + DECL_RFK_WC(0x20fc, 0xffff0000), + 
DECL_RFK_WC(0x78c8, 0x01000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303), + DECL_RFK_WC(0x12b8, 0x40000000), + DECL_RFK_WC(0x32b8, 0x40000000), + DECL_RFK_WC(0x5864, 0xc0000000), + DECL_RFK_WC(0x7864, 0xc0000000), + DECL_RFK_WC(0x2008, 0x01ffffff), + DECL_RFK_WC(0x0c1c, 0x00000004), + DECL_RFK_WC(0x0700, 0x08000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WC(0x12a0, 0x000ff000), + DECL_RFK_WC(0x32a0, 0x000ff000), + DECL_RFK_WC(0x0700, 0x07000000), + DECL_RFK_WC(0x5864, 0x20000000), + DECL_RFK_WC(0x7864, 0x20000000), + DECL_RFK_WC(0x0c3c, 0x00000200), + DECL_RFK_WC(0x20fc, 0xffff0000), + DECL_RFK_WC(0x58c8, 0x01000000), + DECL_RFK_WC(0x78c8, 0x01000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = { + DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000003), + DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041), + DECL_RFK_WS(0x8074, 0x80000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = { + DECL_RFK_WC(0x8074, 0x80000000), + DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001), + DECL_RFK_DELAY(1), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r); + +static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = { + DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006), + DECL_RFK_WC(0x80bc, 0x00004000), + DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303), + DECL_RFK_WM(0x5864, 0x18000000, 0x00000003), + DECL_RFK_WM(0x7864, 0x18000000, 0x00000003), + DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001), + DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001), + DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001), + DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001), + DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001), + DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003), + DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff), + DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001), + DECL_RFK_WM(0x0700, 0x08000000, 0x00000001), + DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff), + DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003), + DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001), + DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001), + DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001), + DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001), + DECL_RFK_WM(0x2344, 0x80000000, 0x00000001), + DECL_RFK_WM(0x4490, 0x80000000, 0x00000001), + DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007), + DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001), + DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003), + DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001), + DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003), + DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001), + 
DECL_RFK_WM(0x0700, 0x01000000, 0x00000001), + DECL_RFK_WM(0x0700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333), + DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000), + DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101), + DECL_RFK_WM(0x5864, 0x18000000, 0x00000003), + DECL_RFK_WM(0x7864, 0x18000000, 0x00000003), + DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001), + DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001), + DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001), + DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff), + DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001), + DECL_RFK_WM(0x0700, 0x08000000, 0x00000001), + DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff), + DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003), + DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001), + DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001), + DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001), + DECL_RFK_WM(0x2320, 0x00000001, 0x00000001), + DECL_RFK_WM(0x4490, 0x80000000, 0x00000001), + DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007), + DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001), + DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003), + DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001), + DECL_RFK_WM(0x0700, 0x01000000, 0x00000001), + DECL_RFK_WM(0x0700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111), + DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202), + DECL_RFK_WM(0x7864, 0x18000000, 0x00000003), + DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001), + DECL_RFK_WM(0x030c, 0xff000000, 0x00000013), + DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001), + DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001), + DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001), + DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff), + DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001), + DECL_RFK_WM(0x2700, 0x08000000, 0x00000001), + DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff), + DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003), + DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001), + DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001), + DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001), + DECL_RFK_WM(0x6490, 0x80000000, 0x00000001), + DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007), + DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001), + DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003), + DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001), + DECL_RFK_WM(0x2700, 0x01000000, 0x00000001), + DECL_RFK_WM(0x2700, 0x06000000, 0x00000002), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222), + DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303), + DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000), + DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000), + DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000), + DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000), + DECL_RFK_WM(0x0700, 0x08000000, 0x00000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + 
DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000), + DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000), + DECL_RFK_WM(0x0700, 0x07000000, 0x00000000), + DECL_RFK_WM(0x5864, 0x20000000, 0x00000000), + DECL_RFK_WM(0x7864, 0x20000000, 0x00000000), + DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000), + DECL_RFK_WM(0x2320, 0x00000001, 0x00000000), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000), + DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000), + DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101), + DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000), + DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000), + DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000), + DECL_RFK_WM(0x0700, 0x08000000, 0x00000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000), + DECL_RFK_WM(0x0700, 0x07000000, 0x00000000), + DECL_RFK_WM(0x5864, 0x20000000, 0x00000000), + DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000), + DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0); + +static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = { + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202), + DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000), + DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000), + DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000), + DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000), + DECL_RFK_WM(0x2700, 0x08000000, 0x00000000), + DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003), + DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003), + DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000), + DECL_RFK_WM(0x2700, 0x07000000, 0x00000000), + DECL_RFK_WM(0x7864, 0x20000000, 0x00000000), + DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000), + DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000), + DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000), +}; + +DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h new file mode 100644 index 00000000000000..4a4a45d778ff6d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_8852A_RFK_TABLE_H__ +#define __RTW89_8852A_RFK_TABLE_H__ + +#include "core.h" + +enum rtw89_rfk_flag { + RTW89_RFK_F_WRF = 0, + RTW89_RFK_F_WM = 1, + RTW89_RFK_F_WS = 2, + RTW89_RFK_F_WC = 3, + RTW89_RFK_F_DELAY = 4, + RTW89_RFK_F_NUM, +}; + +struct rtw89_rfk_tbl { + const struct rtw89_reg5_def *defs; + u32 size; +}; + +#define DECLARE_RFK_TBL(_name) \ +const struct rtw89_rfk_tbl _name ## _tbl = { \ + .defs = _name, \ + .size = ARRAY_SIZE(_name), \ +} + +#define DECL_RFK_WRF(_path, _addr, _mask, _data) \ + {.flag = RTW89_RFK_F_WRF, \ + .path = _path, \ + .addr = _addr, \ + .mask = _mask, \ + .data = _data,} + +#define DECL_RFK_WM(_addr, _mask, _data) \ + {.flag = RTW89_RFK_F_WM, \ + .addr = _addr, \ + .mask = _mask, \ + .data = _data,} + +#define DECL_RFK_WS(_addr, _mask) \ + {.flag = RTW89_RFK_F_WS, \ + .addr = _addr, \ + .mask = _mask,} + +#define DECL_RFK_WC(_addr, _mask) \ + {.flag = RTW89_RFK_F_WC, \ + .addr = _addr, \ + .mask 
= _mask,} + +#define DECL_RFK_DELAY(_data) \ + {.flag = RTW89_RFK_F_DELAY, \ + .data = _data,} + +extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_1_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_3_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_4_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_1_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_3_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_4_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_ab_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_disable_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_tssi_tracking_defs_tbl; + +extern const struct rtw89_rfk_tbl rtw8852a_rfk_afe_init_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_f_a_tbl; +extern const struct rtw89_rfk_tbl 
rtw8852a_rfk_check_dadc_defs_f_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_pas_read_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl; +extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c new file mode 100644 index 00000000000000..3a4fe7207420d1 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -0,0 +1,48725 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "phy.h" +#include "reg.h" +#include "rtw8852a_table.h" + +static const struct rtw89_reg2_def rtw89_8852a_phy_bb_regs[] = { + {0xF0FF0001, 0x00000000}, + {0xF03300FF, 0x00000001}, + {0xF03500FF, 0x00000002}, + {0xF03200FF, 0x00000003}, + {0xF03400FF, 0x00000004}, + {0xF03600FF, 0x00000005}, + {0x704, 0x601E0100}, + {0x714, 0x00000000}, + {0x718, 0x13332333}, + {0x714, 0x00010000}, + {0x720, 0x20000000}, + {0x980, 0x10002250}, + {0x994, 0x00000010}, + {0x644, 0x2314283C}, + {0x644, 0x3426283C}, + {0x994, 0x00000010}, + {0xC3C, 0x2840E1BF}, + {0xC40, 0x00000000}, + {0xC44, 0x00000007}, + {0xC48, 0x410E4000}, + {0xC54, 0x1001436E}, + {0xC58, 0x41000000}, + {0x730, 0x00000002}, + {0xC60, 0x017FFFF2}, + {0xC64, 0x0010A130}, + {0xC64, 0x0010A130}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x10000068}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x90000068}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x90000068}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x10000068}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x90000068}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0xC68, 0x90000068}, + {0xA0000000, 0x00000000}, + {0xC68, 0x10000068}, + {0xB0000000, 
0x00000000}, + {0xC64, 0x0010A130}, + {0xC54, 0x1EE1436E}, + {0xC6C, 0x00000020}, + {0x708, 0x00000000}, + {0xC6C, 0x00000020}, + {0x804, 0x0043F01D}, + {0x12D0, 0x00000000}, + {0x12EC, 0x888CA72B}, + {0x32D0, 0x00000000}, + {0x32EC, 0x888CA72B}, + {0xD40, 0xF64FA0F7}, + {0xD44, 0x0400063F}, + {0xD48, 0x0003FF7F}, + {0xD4C, 0x00000000}, + {0xD50, 0xF64FA0F7}, + {0xD54, 0x04100437}, + {0xD58, 0x0000FF7F}, + {0xD5C, 0x00000000}, + {0xD60, 0x00000000}, + {0xD64, 0x00000000}, + {0xD70, 0x0000001D}, + {0xD90, 0x000003FF}, + {0xD94, 0x00000000}, + {0xD98, 0x0000003F}, + {0xD9C, 0x00000000}, + {0xDA0, 0x000003FF}, + {0xDA4, 0x00000000}, + {0xDA8, 0x0000003F}, + {0xDAC, 0x00000000}, + {0xD00, 0x77777777}, + {0xD04, 0xBBBBBBBB}, + {0xD08, 0xBBBBBBBB}, + {0xD0C, 0x00000070}, + {0xD10, 0x20110900}, + {0xD10, 0x20110FFF}, + {0xD7C, 0x001D050C}, + {0xD84, 0x00006207}, + {0xD18, 0x50209900}, + {0xD80, 0x00804100}, + {0x714, 0x00010000}, + {0x704, 0x601E00FD}, + {0x710, 0xF3810000}, + {0x000, 0x0580801F}, + {0x000, 0x8580801F}, + {0x334, 0xFFFFFFFF}, + {0x33C, 0x55000000}, + {0x340, 0x00005555}, + {0x724, 0x00111200}, + {0x5868, 0xA9550000}, + {0x5870, 0x33221100}, + {0x5874, 0x77665544}, + {0x5878, 0xBBAA9988}, + {0x587C, 0xFFEEDDCC}, + {0x5880, 0x76543210}, + {0x5884, 0xFEDCBA98}, + {0x5888, 0x00000000}, + {0x588C, 0x00000000}, + {0x5894, 0x00000008}, + {0x7868, 0xA9550000}, + {0x7870, 0x33221100}, + {0x7874, 0x77665544}, + {0x7878, 0xBBAA9988}, + {0x787C, 0xFFEEDDCC}, + {0x7880, 0x76543210}, + {0x7884, 0xFEDCBA98}, + {0x7888, 0x00000000}, + {0x788C, 0x00000000}, + {0x7894, 0x00000008}, + {0x240C, 0x00000000}, + {0xC70, 0x00000400}, + {0x700, 0x00000030}, + {0x704, 0x601E00FF}, + {0x704, 0x601E00FD}, + {0x704, 0x601E00FF}, + {0x586C, 0x000000F0}, + {0x586C, 0x000000E0}, + {0x586C, 0x000000D0}, + {0x586C, 0x000000C0}, + {0x586C, 0x000000B0}, + {0x586C, 0x000000A0}, + {0x586C, 0x00000090}, + {0x586C, 0x00000080}, + {0x586C, 0x00000070}, + {0x586C, 0x00000060}, + {0x586C, 0x00000050}, + {0x586C, 0x00000040}, + {0x586C, 0x00000030}, + {0x586C, 0x00000020}, + {0x586C, 0x00000010}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x00000000}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x03E00000}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x03E00000}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x00000000}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x03E00000}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x586C, 0x03E00000}, + {0xA0000000, 0x00000000}, + {0x586C, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x786C, 0x000000F0}, + {0x786C, 0x000000E0}, + {0x786C, 0x000000D0}, + {0x786C, 0x000000C0}, + {0x786C, 0x000000B0}, + {0x786C, 0x000000A0}, + {0x786C, 0x00000090}, + {0x786C, 0x00000080}, + {0x786C, 0x00000070}, + {0x786C, 0x00000060}, + {0x786C, 0x00000050}, + {0x786C, 0x00000040}, + {0x786C, 0x00000030}, + {0x786C, 0x00000020}, + {0x786C, 0x00000010}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x00000000}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x03E00000}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x03E00000}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x00000000}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x03E00000}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x786C, 0x03E00000}, + {0xA0000000, 0x00000000}, + {0x786C, 0x00000000}, + {0xB0000000, 
0x00000000}, + {0x5864, 0x080801FF}, + {0x7864, 0x080801FF}, + {0xC60, 0x017FFFF3}, + {0xC6C, 0x00000021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0xC60, 0x017FFFF3}, + {0xC60, 0x017FFFF3}, + {0x2C60, 0x013FFF0A}, + {0xC70, 0x00000600}, + {0xC70, 0x00000660}, + {0xC6C, 0x10001021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x5864, 0x100801FF}, + {0x7864, 0x100801FF}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0x704, 0x601C01FF}, + {0x58D4, 0x0401FE00}, + {0x78D4, 0x0401FE00}, + {0x58F0, 0x000401FF}, + {0x78F0, 0x000401FF}, + {0x58F0, 0x400401FF}, + {0x78F0, 0x400401FF}, + {0x12A8, 0x333378A5}, + {0x32A8, 0x333378A5}, + {0x2300, 0x02748790}, + {0x2304, 0x00558670}, + {0x2308, 0x002883F0}, + {0x230C, 0x00090120}, + {0x2310, 0x00000000}, + {0x2314, 0x06000000}, + {0x2318, 0x00000000}, + {0x231C, 0x00000000}, + {0x2320, 0x03020100}, + {0x2324, 0x07060504}, + {0x2328, 0x0B0A0908}, + {0x232C, 0x0F0E0D0C}, + {0x2330, 0x13121110}, + {0x2334, 0x17161514}, + {0x2338, 0x0C700022}, + {0x233C, 0x0A05298F}, + {0x2340, 0x0005298E}, + {0x2344, 0x0006318A}, + {0x2348, 0xB7E6318A}, + {0x234C, 0x80039CE7}, + {0x2350, 0x80039CE7}, + {0x2354, 0x0005298F}, + {0x2358, 0x0015296E}, + {0x235C, 0x0C07FC31}, + {0x2360, 0x0219A6AE}, + {0x2364, 0xE4F624C3}, + {0x2368, 0x53626F15}, + {0x236C, 0x48000000}, + {0x2370, 0x48000000}, + {0x2374, 0x074C0000}, + {0x2378, 0x202401B5}, + {0x237C, 0x00F7000E}, + {0x2380, 0x0F0A1111}, + {0x2384, 0x30D9000F}, + {0x2388, 0x0400EA02}, + {0x238C, 0x003CB061}, + {0x2390, 0x69C00000}, + {0x2394, 0x00000000}, + {0x2398, 0x000000F0}, + {0x239C, 0x0001FFFF}, + {0x23A0, 0x00C80064}, + {0x23A4, 0x0190012C}, + {0x23A8, 0x001917BE}, + {0x23AC, 0x0B308800}, + {0x23B0, 0x0001D5B0}, + {0x23B4, 0x000285D2}, + {0x23B8, 0x00000000}, + {0x23BC, 0x00000000}, + {0x23C0, 0x00000000}, + {0x23C4, 0x00000000}, + {0x23C8, 0x00000000}, + {0x23CC, 0x00000000}, + {0x23D0, 0x00000000}, + {0x23D4, 0x00000000}, + {0x23D8, 0x00000000}, + {0x23DC, 0x00000000}, + {0x23E0, 0x00000000}, + {0x23E4, 0x00000000}, + {0x23E8, 0x00000000}, + {0x23EC, 0x00000000}, + {0x23F0, 0x00000000}, + {0x23F4, 0x00000000}, + {0x23F8, 0x00000000}, + {0x23FC, 0x00000000}, + {0x804, 0x0043F01D}, + {0x300, 0xF30CE31C}, + {0x304, 0x13EF1F19}, + {0x308, 0x0C0CF3F3}, + {0x30C, 0x0C0C0C0C}, + {0x310, 0x80416000}, + {0x314, 0x0041E000}, + {0x318, 0x20022042}, + {0x31C, 0x20448001}, + {0x320, 0x00410040}, + {0x324, 0xE000E000}, + {0x328, 0xE000E000}, + {0x32C, 0xE000E000}, + {0x12BC, 0x10104041}, + {0x12C0, 0x14411111}, + {0x32BC, 0x10104041}, + {0x32C0, 0x14411111}, + {0x010, 0x0005FFFF}, + {0x604, 0x1E1E1E28}, + {0x650, 0x00200888}, + {0x620, 0x00141230}, + {0x35C, 0x000004C4}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x5804, 0x04237040}, + {0x7804, 0x04237040}, + {0x5808, 0x04237040}, + {0x7808, 0x04237040}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5804, 0x04231040}, + {0x7804, 0x04231040}, + {0x5808, 0x04231040}, + {0x7808, 0x04231040}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5804, 0x04231040}, + {0x7804, 0x04231040}, + {0x5808, 0x04231040}, + {0x7808, 0x04231040}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5804, 0x04237040}, + {0x7804, 0x04237040}, + {0x5808, 0x04237040}, + {0x7808, 0x04237040}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5804, 0x04231040}, + {0x7804, 0x04231040}, + {0x5808, 0x04231040}, + {0x7808, 0x04231040}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x5804, 0x04231040}, + {0x7804, 0x04231040}, + {0x5808, 0x04231040}, + {0x7808, 0x04231040}, + {0xA0000000, 0x00000000}, + {0x5804, 0x04237040}, + {0x7804, 0x04237040}, + {0x5808, 0x04237040}, + {0x7808, 0x04237040}, + {0xB0000000, 0x00000000}, + {0x12A0, 0x24903156}, + {0x32A0, 0x24903156}, + {0x640, 0x1414141E}, + {0x12B8, 0x30020000}, + {0x12AC, 0x02333121}, + {0x9A4, 0x0000001C}, + {0x624, 0x01010301}, + {0x628, 0x00010101}, + {0x5800, 0x03FF807F}, + {0x7800, 0x03FF807F}, + {0x4000, 0x00000000}, + {0x4004, 0xCA014000}, + {0x4008, 0xC751D4F0}, + {0x400C, 0x44511475}, + {0x4010, 0x00000000}, + {0x4014, 0x00000000}, + {0x4018, 0x4F4C084B}, + {0x401C, 0x084A4E52}, + {0x4020, 0x4D504E4B}, + {0x4024, 0x4F4C0849}, + {0x4028, 0x08484C50}, + {0x402C, 0x4C50504C}, + {0x4030, 0x5454084A}, + {0x4034, 0x084B5654}, + {0x4038, 0x6A6C605A}, + {0x403C, 0x4C4C084C}, + {0x4040, 0x084B4E4D}, + {0x4044, 0x4E4C4B4B}, + {0x4048, 0x4B4B084A}, + {0x404C, 0x084A4E4C}, + {0x4050, 0x514F4C4A}, + {0x4054, 0x524E084A}, + {0x4058, 0x084A5154}, + {0x405C, 0x53555554}, + {0x4060, 0x45450845}, + {0x4064, 0x08454144}, + {0x4068, 0x40434445}, + {0x406C, 0x44450845}, + {0x4070, 0x08444043}, + {0x4074, 0x42434444}, + {0x4078, 0x46450844}, + {0x407C, 0x08444843}, + {0x4080, 0x4B4E4A47}, + {0x4084, 0x4F4C084B}, + {0x4088, 0x084A4E52}, + {0x408C, 0x4D504E4B}, + {0x4090, 0x4F4C0849}, + {0x4094, 0x08484C50}, + {0x4098, 0x4C50504C}, + {0x409C, 0x5454084A}, + {0x40A0, 0x084B5654}, + {0x40A4, 0x6A6C605A}, + {0x40A8, 0x4C4C084C}, + {0x40AC, 0x084B4E4D}, + {0x40B0, 0x4E4C4B4B}, + {0x40B4, 0x4B4B084A}, + {0x40B8, 0x084A4E4C}, + {0x40BC, 0x514F4C4A}, + {0x40C0, 0x524E084A}, + {0x40C4, 0x084A5154}, + {0x40C8, 0x53555554}, + {0x40CC, 0x45450845}, + {0x40D0, 0x08454144}, + {0x40D4, 0x40434445}, + {0x40D8, 0x44450845}, + {0x40DC, 0x08444043}, + {0x40E0, 0x42434444}, + {0x40E4, 0x46450844}, + {0x40E8, 0x08444843}, + {0x40EC, 0x4B4E4A47}, + {0x40F0, 0x00000000}, + {0x40F4, 0x00000006}, + {0x40F8, 0x00000001}, + {0x40FC, 0x8C30C30C}, + {0x4100, 0x4C30C30C}, + {0x4104, 0x0C30C30C}, + {0x4108, 0x0C30C30C}, + {0x410C, 0x0C30C30C}, + {0x4110, 0x0C30C30C}, + {0x4114, 0x28A28A28}, + {0x4118, 0x28A28A28}, + {0x411C, 0x28A28A28}, + {0x4120, 0x28A28A28}, + {0x4124, 0x28A28A28}, + {0x4128, 0x28A28A28}, + {0x412C, 0x06666666}, + {0x4130, 0x33333333}, + {0x4134, 0x33333333}, + {0x4138, 0x33333333}, + {0x413C, 0x00000031}, + {0x4140, 0x5100600A}, + {0x4144, 0x18363113}, + {0x4148, 0x1D976DDC}, + {0x414C, 0x1C072DD7}, + {0x4150, 0x1127CDF4}, + {0x4154, 0x1E37BDF1}, + {0x4158, 0x1FB7F1D6}, + {0x415C, 0x1EA7DDF9}, + {0x4160, 0x1FE445DD}, + {0x4164, 0x1F97F1FE}, + {0x4168, 0x1FF781ED}, + {0x416C, 0x1FA7F5FE}, + {0x4170, 0x1E07B913}, + {0x4174, 0x1FD7FDFF}, + {0x4178, 0x1E17B9FA}, + {0x417C, 0x19A66914}, + {0x4180, 0x10F65598}, + {0x4184, 0x14A5A111}, + {0x4188, 0x1D3765DB}, + {0x418C, 0x17C685CA}, + {0x4190, 0x1107C5F3}, + {0x4194, 0x1B5785EB}, + {0x4198, 0x1F97ED8F}, + {0x419C, 0x1BC7A5F3}, + {0x41A0, 0x1FE43595}, + {0x41A4, 0x1EB7D9FC}, + {0x41A8, 0x1FE65DBE}, + {0x41AC, 0x1EC7D9FC}, + {0x41B0, 0x1976FCFF}, + {0x41B4, 0x1F77F5FF}, + {0x41B8, 0x1976FDEC}, + {0x41BC, 0x198664EF}, + {0x41C0, 0x11062D93}, + {0x41C4, 0x10C4E910}, + {0x41C8, 0x1CA759DB}, + {0x41CC, 0x1335A9B5}, + {0x41D0, 0x1097B9F3}, + {0x41D4, 0x17B72DE1}, + {0x41D8, 0x1F67ED42}, + {0x41DC, 0x18074DE9}, + {0x41E0, 0x1FD40547}, + {0x41E4, 0x1D57ADF9}, + {0x41E8, 0x1FE52182}, + {0x41EC, 0x1D67B1F9}, + {0x41F0, 0x14860CE1}, + {0x41F4, 0x1EC7E9FE}, + {0x41F8, 0x14860DD6}, + {0x41FC, 0x195664C7}, 
+ {0x4200, 0x0005E58A}, + {0x4204, 0x00000000}, + {0x4208, 0x00000000}, + {0x420C, 0x7A000000}, + {0x4210, 0x0F9F3D7A}, + {0x4214, 0x0040817C}, + {0x4218, 0x00E10204}, + {0x421C, 0x227D94CD}, + {0x4220, 0x080238E3}, + {0x4224, 0x00000210}, + {0x4228, 0x04688000}, + {0x422C, 0x0060B002}, + {0x4230, 0x9A8249A8}, + {0x4234, 0x26A1469E}, + {0x4238, 0x2099A824}, + {0x423C, 0x2359461C}, + {0x4240, 0x1631A675}, + {0x4244, 0x2C6B1D63}, + {0x4248, 0x0000000E}, + {0x424C, 0x00000001}, + {0x4250, 0x00000001}, + {0x4254, 0x00000000}, + {0x4258, 0x00000000}, + {0x425C, 0x00000000}, + {0x4260, 0x0020000C}, + {0x4264, 0x00000000}, + {0x4268, 0x00000000}, + {0x426C, 0x0418317C}, + {0x4270, 0x00D6135C}, + {0x4274, 0x00000000}, + {0x4278, 0x00000000}, + {0x427C, 0x00000000}, + {0x4280, 0x00000000}, + {0x4284, 0x00000000}, + {0x4288, 0x00000000}, + {0x428C, 0x00000000}, + {0x4290, 0x00000000}, + {0x4294, 0x00000000}, + {0x4298, 0x84026000}, + {0x429C, 0x0051AC20}, + {0x42A0, 0x02024008}, + {0x42A4, 0x00000000}, + {0x42A8, 0x00000000}, + {0x42AC, 0x22CE803C}, + {0x42B0, 0x80000000}, + {0x42B4, 0x00E7D03D}, + {0x42B8, 0x3D67D67D}, + {0x42BC, 0x7D67D65B}, + {0x42C0, 0x2802AF59}, + {0x42C4, 0x00280280}, + {0x42C8, 0x00000000}, + {0x42CC, 0x00000000}, + {0x42D0, 0x00000003}, + {0x42D4, 0x00000001}, + {0x42D8, 0x61861800}, + {0x42DC, 0x830C30C3}, + {0x42E0, 0xC30C30C3}, + {0x42E4, 0x830C30C3}, + {0x42E8, 0x451450C3}, + {0x42EC, 0x05145145}, + {0x42F0, 0x05145145}, + {0x42F4, 0x05145145}, + {0x42F8, 0x0F0C3145}, + {0x42FC, 0x030C30CF}, + {0x4300, 0x030C30C3}, + {0x4304, 0x030CF3C3}, + {0x4308, 0x030C30C3}, + {0x430C, 0x0F3CF3C3}, + {0x4310, 0x0F3CF3CF}, + {0x4314, 0x0F3CF3CF}, + {0x4318, 0x0F3CF3CF}, + {0x431C, 0x0F3CF3CF}, + {0x4320, 0x030C10C3}, + {0x4324, 0x051430C3}, + {0x4328, 0x051490CB}, + {0x432C, 0x030CD151}, + {0x4330, 0x050C50C7}, + {0x4334, 0x051492CB}, + {0x4338, 0x05145145}, + {0x433C, 0x05145145}, + {0x4340, 0x05145145}, + {0x4344, 0x05145145}, + {0x4348, 0x090CD3CF}, + {0x434C, 0x071491C5}, + {0x4350, 0x073CF143}, + {0x4354, 0x071431C3}, + {0x4358, 0x0F3CF1C5}, + {0x435C, 0x0F3CF3CF}, + {0x4360, 0x0F3CF3CF}, + {0x4364, 0x0F3CF3CF}, + {0x4368, 0x0F3CF3CF}, + {0x436C, 0x090C91CF}, + {0x4370, 0x11243143}, + {0x4374, 0x9777A777}, + {0x4378, 0xBB7BAC95}, + {0x437C, 0xB667B889}, + {0x4380, 0x7B9B8899}, + {0x4384, 0x7A5567C8}, + {0x4388, 0x2278CCCC}, + {0x438C, 0x7C222222}, + {0x4390, 0x0000069B}, + {0x4394, 0x001CCCCC}, + {0x4398, 0x00000000}, + {0x439C, 0x00000008}, + {0x49A4, 0x00000000}, + {0x43A0, 0x00000000}, + {0x43A4, 0x00000000}, + {0x43A8, 0x00000000}, + {0x43AC, 0x10000800}, + {0x43B0, 0x00401802}, + {0x43B4, 0x00061004}, + {0x43B8, 0x000024D8}, + {0x43BC, 0x00000000}, + {0x43C0, 0x10000020}, + {0x43C4, 0x20000200}, + {0x43C8, 0x00000000}, + {0x43CC, 0x04000000}, + {0x43D0, 0x44000100}, + {0x43D4, 0x60804060}, + {0x43D8, 0x44204210}, + {0x43DC, 0x82108082}, + {0x43E0, 0x82108402}, + {0x43E4, 0xC8082108}, + {0x43E8, 0xC8202084}, + {0x43EC, 0x44208208}, + {0x43F0, 0x84108204}, + {0x43F4, 0xD0108104}, + {0x43F8, 0xF8210108}, + {0x43FC, 0x6431E930}, + {0x4400, 0x02109468}, + {0x4404, 0x10C61C22}, + {0x4408, 0x02109469}, + {0x440C, 0x10C61C22}, + {0x4410, 0x00041049}, + {0x4414, 0x00000000}, + {0x4418, 0x00000000}, + {0x441C, 0x6C000000}, + {0x4420, 0xB0200020}, + {0x4424, 0x00001FF0}, + {0x4428, 0x00000000}, + {0x442C, 0x00000000}, + {0x4430, 0x00000000}, + {0x4434, 0x00000000}, + {0x4438, 0x65F962F8}, + {0x443C, 0x280668A0}, + {0x4440, 0x64100820}, + {0x4444, 0x4A146304}, + {0x4448, 0x0C59008F}, + 
{0x444C, 0x6E30498A}, + {0x4450, 0x656E371B}, + {0x4454, 0x00000F52}, + {0x4458, 0x00000000}, + {0x445C, 0x4801442E}, + {0x4460, 0x0041A0B8}, + {0x4464, 0x00000000}, + {0x4468, 0x00000000}, + {0x446C, 0x00000000}, + {0x4470, 0x00000000}, + {0x4474, 0x00000000}, + {0x4478, 0x00000000}, + {0x447C, 0x00000000}, + {0x4480, 0x2A0A6040}, + {0x4484, 0x0A0A6829}, + {0x4488, 0x00000004}, + {0x448C, 0x00000000}, + {0x4490, 0x80000000}, + {0x4494, 0x10000000}, + {0x4498, 0xA0000000}, + {0x449C, 0x0000001E}, + {0x44A0, 0x02B29397}, + {0x44A4, 0x00000400}, + {0x44A8, 0x00000001}, + {0x44AC, 0x00000000}, + {0x44B0, 0x00000000}, + {0x44B4, 0x00000000}, + {0x44B8, 0x00000000}, + {0x44BC, 0x00000000}, + {0x44C0, 0x00000000}, + {0x44C4, 0x00000000}, + {0x44C8, 0x00000000}, + {0x44CC, 0x00000000}, + {0x44D0, 0x00000000}, + {0x44D4, 0x00000000}, + {0x44D8, 0x00000000}, + {0x44DC, 0x00000000}, + {0x44E0, 0x00000000}, + {0x44E4, 0x00000000}, + {0x44E8, 0x00000000}, + {0x44EC, 0x00000000}, + {0x44F0, 0x00000000}, + {0x44F4, 0x00000000}, + {0x44F8, 0x00000000}, + {0x44FC, 0x00000000}, + {0x4500, 0x00000000}, + {0x4504, 0x00000000}, + {0x4508, 0x00000000}, + {0x450C, 0x00000000}, + {0x4510, 0x00000000}, + {0x4514, 0x00000000}, + {0x4518, 0x00000000}, + {0x451C, 0x00000000}, + {0x4520, 0x00000000}, + {0x4524, 0x00000000}, + {0x4528, 0x00000000}, + {0x452C, 0x00000000}, + {0x4530, 0x4EA20631}, + {0x4534, 0x000005C8}, + {0x4538, 0x000000FF}, + {0x453C, 0x00000000}, + {0x4540, 0x00000000}, + {0x4544, 0x00000000}, + {0x4548, 0x00000000}, + {0x454C, 0x00000000}, + {0x4550, 0x00000000}, + {0x4554, 0x00000000}, + {0x4558, 0x00000000}, + {0x455C, 0x00000000}, + {0x4560, 0x4060001A}, + {0x4564, 0x40000000}, + {0x4568, 0x00000000}, + {0x456C, 0x20000000}, + {0x4570, 0x04800406}, + {0x4574, 0x00022270}, + {0x4578, 0x0002024B}, + {0x457C, 0x00200000}, + {0x4580, 0x00009B40}, + {0x4584, 0x00000000}, + {0x4588, 0x00000063}, + {0x458C, 0x30000000}, + {0x4590, 0x00000000}, + {0x4594, 0x05000000}, + {0x4598, 0x00000001}, + {0x459C, 0x0003FE00}, + {0x45A0, 0x00000000}, + {0x45A4, 0x00000000}, + {0x45A8, 0xC00001C0}, + {0x45AC, 0x78028000}, + {0x45B0, 0x80000048}, + {0x45B4, 0x01C90800}, + {0x45B8, 0x00000002}, + {0x45BC, 0x06748790}, + {0x45C0, 0x80000000}, + {0x45C4, 0x00000000}, + {0x45C8, 0x00000000}, + {0x45CC, 0x00558670}, + {0x45D0, 0x002883F0}, + {0x45D4, 0x00090120}, + {0x45D8, 0x00000000}, + {0x45DC, 0xA3A6D3C4}, + {0x49A8, 0xAB27B126}, + {0x49AC, 0x00006778}, + {0x49FC, 0x000001B5}, + {0x49B0, 0x11110F0A}, + {0x49B4, 0x00000007}, + {0x49B8, 0x0000000A}, + {0x49BC, 0x0058BC3F}, + {0x49C0, 0x00000003}, + {0x49C4, 0x000003D9}, + {0x49C8, 0x002B1CB0}, + {0x4A00, 0x00000000}, + {0x49CC, 0x00000001}, + {0x49D0, 0x00000010}, + {0x49D4, 0x00000001}, + {0x49D8, 0x85298FBF}, + {0x49DC, 0x18A5296E}, + {0x49E0, 0x18C6298C}, + {0x49E4, 0x0A739CA7}, + {0x49E8, 0x001A50E7}, + {0x49EC, 0x00000001}, + {0x49F0, 0x00005924}, + {0x49F4, 0x0003AAA6}, + {0x49F8, 0x0000C4C3}, + {0x45E0, 0x00000000}, + {0x45E4, 0x00000000}, + {0x45E8, 0x00E2E100}, + {0x45EC, 0xCB00B6B6}, + {0x45F0, 0x59100FCA}, + {0x45F4, 0x08882550}, + {0x45F8, 0x08CC2660}, + {0x45FC, 0x09102660}, + {0x4600, 0x00000154}, + {0x4604, 0x00000800}, + {0x4608, 0x31BF0400}, + {0x460C, 0x00E0C0A0}, + {0x4610, 0x30604020}, + {0x4614, 0x2F346D50}, + {0x4618, 0x2E36B6E2}, + {0x461C, 0x3E7EF86B}, + {0x4620, 0x001FC004}, + {0x4624, 0xA8068010}, + {0x4628, 0x4602CA80}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76067E8C}, + {0x4630, 0x8EA350E8}, + {0x4634, 
0xB3B8D8F5}, + {0x4638, 0x6FFF0C06}, + {0x463C, 0xB8FA4435}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76078E8C}, + {0x4630, 0x8EDB50F6}, + {0x4634, 0xB5B8DD75}, + {0x4638, 0x6FFF4C06}, + {0x463C, 0xB8FA4434}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76078E8C}, + {0x4630, 0x8EDB50F6}, + {0x4634, 0xB5B8DD75}, + {0x4638, 0x6FFF4C06}, + {0x463C, 0xB8FA4434}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76067E8C}, + {0x4630, 0x8EA350E8}, + {0x4634, 0xB3B8D8F5}, + {0x4638, 0x6FFF0C06}, + {0x463C, 0xB8FA4435}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76078E8C}, + {0x4630, 0x8EDB50F6}, + {0x4634, 0xB5B8DD75}, + {0x4638, 0x6FFF4C06}, + {0x463C, 0xB8FA4434}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x462C, 0x76078E8C}, + {0x4630, 0x8EDB50F6}, + {0x4634, 0xB5B8DD75}, + {0x4638, 0x6FFF4C06}, + {0x463C, 0xB8FA4434}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0xA0000000, 0x00000000}, + {0x462C, 0x76067E8C}, + {0x4630, 0x8EA350E8}, + {0x4634, 0xB3B8D8F5}, + {0x4638, 0x6FFF0C06}, + {0x463C, 0xB8FA4435}, + {0x4640, 0xB7C4FEF8}, + {0x4644, 0x2A72AD07}, + {0xB0000000, 0x00000000}, + {0x4648, 0x64204FB2}, + {0x464C, 0x4C823404}, + {0x4650, 0x9084C800}, + {0x4654, 0x9889314F}, + {0x4658, 0x5ECC3FF4}, + {0x465C, 0xFEECAECE}, + {0x4660, 0x47806638}, + {0x4664, 0x0F5AF843}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x56452994}, + {0x466C, 0x54D89ADB}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x55452994}, + {0x466C, 0x56D89ADB}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x55452994}, + {0x466C, 0x56D89ADB}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x56452994}, + {0x466C, 0x54D89ADB}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x55452994}, + {0x466C, 0x56D89ADB}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4668, 0x55452994}, + {0x466C, 0x56D89ADB}, + {0xA0000000, 0x00000000}, + {0x4668, 0x56452994}, + {0x466C, 0x54D89ADB}, + {0xB0000000, 0x00000000}, + {0x4670, 0xE8DF38D8}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x002ACC30}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x0028CC30}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x0028CC30}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x002ACC30}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x0028CC30}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4674, 0x0028CC30}, + {0xA0000000, 0x00000000}, + {0x4674, 0x002ACC30}, + {0xB0000000, 0x00000000}, + {0x4678, 0x00000000}, + {0x467C, 0x00000000}, + {0x4680, 0x00000219}, + {0x4684, 0x00000000}, + {0x4688, 0x00000000}, + {0x468C, 0x00000001}, + {0x4690, 0x00000001}, + {0x4694, 0x00000000}, + {0x4698, 0x00000000}, + {0x469C, 0x00000151}, + {0x46A0, 0x00000498}, + {0x46A4, 0x00000498}, + {0x46A8, 0x00000000}, + {0x46AC, 0x00000000}, + {0x46B0, 0x00001146}, + {0x46B4, 0x00000000}, + {0x46B8, 0x00000000}, + {0x46BC, 0x00E2E100}, + {0x46C0, 0xCB00B6B6}, + {0x46C4, 0x59100FCA}, + {0x46C8, 0x08882550}, + {0x46CC, 0x08CC2660}, + {0x46D0, 0x09102660}, + {0x46D4, 0x00000154}, + {0x46D8, 0x00000800}, + {0x46DC, 0x31BF0400}, + 
{0x46E0, 0x00E0C0A0}, + {0x46E4, 0x30604020}, + {0x46E8, 0x4F346D50}, + {0x46EC, 0x2E36B6E2}, + {0x46F0, 0x3E7EF86B}, + {0x46F4, 0x001FC004}, + {0x46F8, 0xA8068010}, + {0x46FC, 0x4602CA80}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x7806FECC}, + {0x4704, 0x8EC360F1}, + {0x4708, 0xB4C4DA7A}, + {0x470C, 0x72FF2CC6}, + {0x4710, 0xB8FA4439}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x78078ECC}, + {0x4704, 0x8EDB60F6}, + {0x4708, 0xB5C4DD7A}, + {0x470C, 0x72FF4CC6}, + {0x4710, 0xB8FA4434}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x78078ECC}, + {0x4704, 0x8EDB60F6}, + {0x4708, 0xB5C4DD7A}, + {0x470C, 0x72FF4CC6}, + {0x4710, 0xB8FA4434}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x7806FECC}, + {0x4704, 0x8EC360F1}, + {0x4708, 0xB4C4DA7A}, + {0x470C, 0x72FF2CC6}, + {0x4710, 0xB8FA4439}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x78078ECC}, + {0x4704, 0x8EDB60F6}, + {0x4708, 0xB5C4DD7A}, + {0x470C, 0x72FF4CC6}, + {0x4710, 0xB8FA4434}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4700, 0x78078ECC}, + {0x4704, 0x8EDB60F6}, + {0x4708, 0xB5C4DD7A}, + {0x470C, 0x72FF4CC6}, + {0x4710, 0xB8FA4434}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0xA0000000, 0x00000000}, + {0x4700, 0x7806FECC}, + {0x4704, 0x8EC360F1}, + {0x4708, 0xB4C4DA7A}, + {0x470C, 0x72FF2CC6}, + {0x4710, 0xB8FA4439}, + {0x4714, 0xB7C4FEF8}, + {0x4718, 0x2A72AD09}, + {0xB0000000, 0x00000000}, + {0x471C, 0x64204FB2}, + {0x4720, 0x4C823404}, + {0x4724, 0x9084C800}, + {0x4728, 0x9889314F}, + {0x472C, 0x5ECC3FF4}, + {0x4730, 0xFEECAECE}, + {0x4734, 0x47806638}, + {0x4738, 0x0F4A7843}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x56452994}, + {0x4740, 0x54D89ADB}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x55452994}, + {0x4740, 0x56D89ADB}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x55452994}, + {0x4740, 0x56D89ADB}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x56452994}, + {0x4740, 0x54D89ADB}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x55452994}, + {0x4740, 0x56D89ADB}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x473C, 0x55452994}, + {0x4740, 0x56D89ADB}, + {0xA0000000, 0x00000000}, + {0x473C, 0x56452994}, + {0x4740, 0x54D89ADB}, + {0xB0000000, 0x00000000}, + {0x4744, 0xE8DF38D8}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x002ACC30}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x0028CC30}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x0028CC30}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x002ACC30}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x0028CC30}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4748, 0x0028CC30}, + {0xA0000000, 0x00000000}, + {0x4748, 0x002ACC30}, + {0xB0000000, 0x00000000}, + {0x474C, 0x00000000}, + {0x4750, 0x00000000}, + {0x4754, 0x00000219}, + {0x4758, 0x00000000}, + {0x475C, 0x00000000}, + {0x4760, 0x00000001}, + {0x4764, 0x00000001}, + {0x4768, 0x00000000}, + {0x476C, 0x00000000}, + {0x4770, 0x00000151}, + {0x4774, 0x00000498}, + {0x4778, 0x00000498}, + {0x477C, 0x00000000}, + {0x4780, 
0x00000000}, + {0x4784, 0x00001147}, + {0x4788, 0x00000000}, + {0x478C, 0xA32103FE}, + {0x4790, 0x320A7B28}, + {0x4794, 0xC6A7B14F}, + {0x4798, 0x000006D7}, + {0x479C, 0x009B902A}, + {0x47A0, 0x009B902A}, + {0x47A4, 0x98682C18}, + {0x47A8, 0x6308C4C1}, + {0x47AC, 0x6248C631}, + {0x47B0, 0x922A8253}, + {0x47B4, 0x00000005}, + {0x47B8, 0x00001759}, + {0x47BC, 0x4B802000}, + {0x47C0, 0x831408BE}, + {0x47C4, 0x9ABBCACB}, + {0x47C8, 0x56767578}, + {0x47CC, 0xBBCCBBB3}, + {0x47D0, 0x57889989}, + {0x47D4, 0x00000F45}, + {0x47D8, 0x27039CE9}, + {0x47DC, 0x31413432}, + {0x47E0, 0x26058342}, + {0x47E4, 0x00000006}, + {0x47E8, 0x00000005}, + {0x47EC, 0x00000005}, + {0x47F0, 0xC7013016}, + {0x47F4, 0x84413016}, + {0x47F8, 0x84413016}, + {0x47FC, 0x8C413016}, + {0x4800, 0x8C40B028}, + {0x4804, 0x3140B028}, + {0x4808, 0x2940B028}, + {0x480C, 0x8440B028}, + {0x4810, 0x2318C610}, + {0x4814, 0x45334753}, + {0x4818, 0x236A6A88}, + {0x481C, 0x576DF814}, + {0x4820, 0xA08877AC}, + {0x4824, 0x0000087A}, + {0x4828, 0xBCEB4A14}, + {0x482C, 0x000A3A4A}, + {0x4830, 0xBCEB4A14}, + {0x4834, 0x000A3A4A}, + {0x4838, 0xBCBDBD85}, + {0x483C, 0x0CABB99A}, + {0x4840, 0x38384242}, + {0x4844, 0x8086102E}, + {0x4848, 0xCA24C82A}, + {0x484C, 0x00008A62}, + {0x4850, 0x00000008}, + {0x4854, 0x009B902A}, + {0x4858, 0x009B902A}, + {0x485C, 0x98682C18}, + {0x4860, 0x6308C4C1}, + {0x4864, 0x6248C631}, + {0x4868, 0x922A8253}, + {0x486C, 0x00000005}, + {0x4870, 0x00001759}, + {0x4874, 0x4B802000}, + {0x4878, 0x831408BE}, + {0x487C, 0x9898A8BB}, + {0x4880, 0x54535368}, + {0x4884, 0x999999B3}, + {0x4888, 0x35555589}, + {0x488C, 0x00000745}, + {0x4890, 0x27039CE9}, + {0x4894, 0x31413432}, + {0x4898, 0x26058342}, + {0x489C, 0x00000006}, + {0x48A0, 0x00000005}, + {0x48A4, 0x00000005}, + {0x48A8, 0xC7013016}, + {0x48AC, 0x84413016}, + {0x48B0, 0x84413016}, + {0x48B4, 0x8C413016}, + {0x48B8, 0x8C40B028}, + {0x48BC, 0x3140B028}, + {0x48C0, 0x2940B028}, + {0x48C4, 0x8440B028}, + {0x48C8, 0x2318C610}, + {0x48CC, 0x45334753}, + {0x48D0, 0x236A6A88}, + {0x48D4, 0x576DF814}, + {0x48D8, 0xA08877AC}, + {0x48DC, 0x0000007A}, + {0x48E0, 0xBCEB4A14}, + {0x48E4, 0x000A3A4A}, + {0x48E8, 0xBCEB4A14}, + {0x48EC, 0x000A3A4A}, + {0x48F0, 0x9A8A8A85}, + {0x48F4, 0x0CA3B99A}, + {0x48F8, 0x38384242}, + {0x48FC, 0x8086102E}, + {0x4900, 0xCA24C82A}, + {0x4904, 0x00008A62}, + {0x4908, 0x00000008}, + {0x490C, 0x80040000}, + {0x4910, 0x80040000}, + {0x4914, 0xFE800000}, + {0x4918, 0x834C0000}, + {0x491C, 0x00000000}, + {0x4920, 0x00000000}, + {0x4924, 0x00000000}, + {0x4928, 0x00000000}, + {0x492C, 0x00000000}, + {0x4930, 0x00000000}, + {0x4934, 0x40000000}, + {0x4938, 0x00000000}, + {0x493C, 0x00000000}, + {0x4940, 0x00000000}, + {0x4944, 0x00000000}, + {0x4948, 0x04065800}, + {0x494C, 0x32004080}, + {0x4950, 0x0E1E3E05}, + {0x4954, 0x0A163068}, + {0x4958, 0x00206040}, + {0x495C, 0x02020202}, + {0x4960, 0x00A16020}, + {0x4964, 0x031F4284}, + {0x4968, 0x00A10285}, + {0x496C, 0x00000005}, + {0x4970, 0x00000000}, + {0x4974, 0x800CD62D}, + {0x4978, 0x00000103}, + {0x497C, 0x00000000}, + {0x4980, 0x00000000}, + {0x4984, 0x00000000}, + {0x4988, 0x00000000}, + {0x498C, 0x00000000}, + {0x4990, 0x00000000}, + {0x4994, 0x00000000}, + {0x4998, 0x00000000}, + {0x499C, 0x00000000}, + {0x49A0, 0x00000000}, + {0x2404, 0x00000001}, + {0xC7C, 0x0000BFE0}, + {0x020, 0x0000F381}, + {0x024, 0x0000F381}, + {0x028, 0x0000F381}, + {0x02C, 0x0000F381}, + {0xD78, 0x00000005}, + {0x12CC, 0x00000CC1}, + {0x12D0, 0x00000000}, + {0x12D4, 0x00000000}, + {0x12D8, 0x00000040}, + {0x12DC, 
0x4486888C}, + {0x12E0, 0xC43A10E1}, + {0x12E4, 0x30D52C68}, + {0x12E8, 0x02024128}, + {0x12EC, 0x888C272B}, + {0x12EC, 0x888CA72B}, + {0x32CC, 0x00000CC1}, + {0x32D0, 0x00000000}, + {0x32D4, 0x00000000}, + {0x32D8, 0x00000040}, + {0x32DC, 0x4486888C}, + {0x32E0, 0xC43A10E1}, + {0x32E4, 0x30D52C68}, + {0x32E8, 0x02024128}, + {0x32EC, 0x888C272B}, + {0x32EC, 0x888CA72B}, + {0x12AC, 0x12333121}, + {0x32AC, 0x12333121}, + {0x738, 0x004100CC}, + {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0x80001080}, + {0x7820, 0x80001080}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0xC0001080}, + {0x7820, 0xC0001080}, + {0x903500ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0xC0001080}, + {0x7820, 0xC0001080}, + {0x903200ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0x80001080}, + {0x7820, 0x80001080}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0x80001080}, + {0x7820, 0x80001080}, + {0x903600ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x5820, 0x80001080}, + {0x7820, 0x80001080}, + {0xA0000000, 0x00000000}, + {0x5820, 0x80001080}, + {0x7820, 0x80001080}, + {0xB0000000, 0x00000000}, + {0x2000, 0x18BBBF84}, + {0x0F0, 0x00000002}, + {0x0F4, 0x00000016}, + {0x0F8, 0x20201013}, +}; + +static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0010001, 0x00000001}, + {0xF0020001, 0x00000002}, + {0xF0030001, 0x00000003}, + {0xF0250001, 0x00000004}, + {0xF0260001, 0x00000005}, + {0xF0320001, 0x00000006}, + {0xF0330001, 0x00000007}, + {0xF0340001, 0x00000008}, + {0xF0350001, 0x00000009}, + {0xF0360001, 0x0000000A}, + {0xF0010002, 0x0000000B}, + {0xF0020002, 0x0000000C}, + {0xF0030002, 0x0000000D}, + {0xF0250002, 0x0000000E}, + {0xF0260002, 0x0000000F}, + {0xF0320002, 0x00000010}, + {0xF0330002, 0x00000011}, + {0xF0340002, 0x00000012}, + {0xF0350002, 0x00000013}, + {0xF0360002, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0xA0000000, 
0x00000000}, + {0x005, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x000, 0x00030000}, + {0x018, 0x00011124}, + {0x000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x055, 0x00080000}, + {0x056, 0x0008FFF0}, + {0x057, 0x0000C485}, + {0x058, 0x000A4164}, + {0x059, 0x00010000}, + {0x05A, 0x00060000}, + {0x05B, 0x0000A000}, + {0x05C, 0x00000000}, + {0x05D, 0x0001C013}, + {0x05E, 0x00000000}, + {0x05F, 0x00001FF0}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0xA0000000, 0x00000000}, + {0x060, 0x00011000}, + {0xB0000000, 0x00000000}, + {0x061, 0x0009F338}, + {0x062, 0x0009233A}, + {0x063, 0x000D6002}, + {0x064, 0x000A0CB0}, + {0x065, 0x00030EFE}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0xA0000000, 0x00000000}, + {0x066, 0x00020000}, + {0xB0000000, 0x00000000}, + {0x068, 0x00000000}, + {0x069, 0x00030F0A}, + {0x06A, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000AD6A4}, + {0x052, 0x00091345}, + {0x053, 0x00080081}, + {0x054, 0x0009BC24}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0xA0000000, 0x00000000}, + 
{0x051, 0x000AD6A4}, + {0x052, 0x00091345}, + {0x053, 0x00080081}, + {0x054, 0x0009BC24}, + {0xB0000000, 0x00000000}, + {0x0D3, 0x00000143}, + {0x043, 0x00005000}, + {0x0DD, 0x000003A0}, + {0x0B0, 0x000E6700}, + {0x0AF, 0x0001F82E}, + {0x0B2, 0x000210A7}, + {0x0B1, 0x00065FFF}, + {0x0BB, 0x000F7A00}, + {0x0B3, 0x00013F7A}, + {0x0D4, 0x0000000E}, + {0x0B7, 0x00001E0C}, + {0x0A0, 0x0000004F}, + {0x0B4, 0x0007C03E}, + {0x0B5, 0x0007E301}, + {0x0B6, 0x00080800}, + {0x0CA, 0x00002000}, + {0x0DD, 0x000003A0}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x00080000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0xA0000000, 0x00000000}, + {0x0CC, 0x00080000}, + {0xB0000000, 0x00000000}, + {0x0A1, 0x0006F300}, + {0x0A2, 0x00080500}, + {0x0A3, 0x0008050B}, + {0x0A4, 0x0006DB12}, + {0x0A5, 0x00000000}, + {0x0A6, 0x00000000}, + {0x0A7, 0x00000000}, + {0x0A8, 0x00000000}, + {0x0A9, 0x00000000}, + {0x0AA, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x000B0000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x0A5, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0A5, 0x000B0000}, + {0xB0000000, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00008000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0ED, 0x00008000}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000000}, + {0x03E, 0x00008000}, + {0x03F, 0x000E1333}, + {0x033, 0x00000001}, + {0x03E, 0x00008000}, + {0x03F, 0x000E7333}, + {0x033, 0x00000002}, + {0x03E, 0x00008000}, + {0x03F, 0x000FA000}, + {0x033, 0x00000003}, + {0x03E, 0x00004000}, + {0x03F, 0x000FA400}, + {0x033, 0x00000004}, + {0x03E, 0x00004000}, + {0x03F, 0x000F5000}, + {0x033, 0x00000005}, + {0x03E, 0x00004001}, + {0x03F, 0x00029400}, + {0x033, 0x00000006}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x00041999}, + {0x033, 0x00000007}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x00034444}, + {0x033, 0x00000008}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x0004D555}, + {0x033, 0x00000009}, + {0x03E, 0x00005551}, + {0x03F, 0x00046AAA}, + {0x033, 0x0000000A}, + {0x03E, 0x00005551}, + {0x03F, 0x00046AAA}, + {0x033, 0x0000000B}, + {0x03E, 0x00005551}, + {0x03F, 0x0008C555}, + {0x033, 0x0000000C}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00081EB8}, + {0x033, 0x0000000D}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00071EB8}, + {0x033, 0x0000000E}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00090000}, + {0x033, 0x0000000F}, + {0x03E, 0x00006661}, + {0x03F, 0x00088000}, + {0x033, 0x00000010}, + {0x03E, 0x00006661}, + {0x03F, 0x00088000}, + {0x033, 0x00000011}, + {0x03E, 0x00006661}, + {0x03F, 0x000DB999}, + {0x0ED, 0x00000000}, + {0x0ED, 
0x00002000}, + {0x033, 0x00000002}, + {0x03D, 0x0004A883}, + {0x03E, 0x00000000}, + {0x03F, 0x00000001}, + {0x033, 0x00000006}, + {0x03D, 0x0004A883}, + {0x03E, 0x00000000}, + {0x03F, 0x00000001}, + {0x0ED, 0x00000000}, + {0x018, 0x00001001}, + {0x002, 0x0000000D}, + {0x0EE, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0xA0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00000000}, + {0x08F, 0x000D0F7A}, + {0x08C, 0x00084584}, + {0x0EF, 0x00004000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000700}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00004700}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000700}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000500}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000500}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90010002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000400}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00008B00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00008B00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000B00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90250002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00002900}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000000}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000001}, + {0x03F, 0x000BECFC}, + {0x033, 0x00000002}, + {0x03F, 0x0003E4FC}, + {0x033, 0x00000003}, + {0x03F, 0x0001D0FC}, + {0x033, 0x00000004}, + {0x03F, 0x0001C3FC}, + {0x033, 0x00000005}, + {0x03F, 0x000103FC}, + {0x033, 0x00000006}, + {0x03F, 0x0000007C}, + {0x033, 0x00000007}, + {0x03F, 0x0000007C}, + {0x033, 0x00000008}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000009}, + {0x03F, 0x000BECFC}, + {0x033, 0x0000000A}, + {0x03F, 0x0003E4FC}, + {0x033, 0x0000000B}, + {0x03F, 0x0001D0FC}, + {0x033, 0x0000000C}, + {0x03F, 0x0001C3FC}, + {0x033, 0x0000000D}, + {0x03F, 0x000103FC}, + {0x033, 0x0000000E}, + {0x03F, 0x0000007C}, + {0x033, 0x0000000F}, + {0x03F, 0x0000007C}, + {0x033, 0x00000010}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000011}, + {0x03F, 0x000BECFC}, + {0x033, 0x00000012}, + {0x03F, 0x0003E4FC}, + {0x033, 0x00000013}, + {0x03F, 0x0001D0FC}, + {0x033, 0x00000014}, + {0x03F, 0x0001C3FC}, + {0x033, 0x00000015}, + {0x03F, 0x000103FC}, + {0x033, 0x00000016}, + {0x03F, 0x0000007C}, + {0x033, 0x00000017}, + {0x03F, 0x0000007C}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000000}, + {0x03F, 0x00003317}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + 
{0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 
0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x03F, 0x00003317}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + 
{0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x03F, 0x00003317}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x03F, 0x00003338}, + {0x033, 0x0000001A}, + {0x03F, 0x00003338}, + {0x033, 0x0000001B}, + {0x03F, 0x00003338}, + {0x033, 0x0000001C}, + {0x03F, 0x00003338}, + {0x033, 0x0000001D}, + {0x03F, 0x00003338}, + {0x033, 0x0000001E}, + {0x03F, 0x00003338}, + {0x033, 0x0000001F}, + {0x03F, 0x00003338}, + {0x033, 0x00000020}, + {0x03F, 0x00003338}, + {0x033, 0x00000021}, + {0x03F, 0x00003338}, + {0x033, 0x00000022}, + {0x03F, 0x00003338}, + {0x033, 0x00000023}, + {0x03F, 0x00003338}, + {0x033, 0x00000024}, + {0x03F, 0x00003338}, + {0x033, 0x00000025}, + {0x03F, 0x00003338}, + {0x033, 0x00000026}, + {0x03F, 0x00003338}, + {0x033, 0x00000027}, + {0x03F, 0x00003338}, + {0x033, 0x00000028}, + {0x03F, 0x00003338}, + {0x033, 0x00000029}, + {0x03F, 0x00003338}, + {0x033, 0x0000002A}, + {0x03F, 0x00003338}, + {0x033, 0x0000002B}, + {0x03F, 0x00003338}, + {0x033, 0x0000002C}, + {0x03F, 0x00003338}, + {0x033, 0x0000002D}, + {0x03F, 0x00003338}, + {0x033, 0x0000002E}, + {0x03F, 0x00003338}, + {0x033, 0x0000002F}, + {0x03F, 0x00003338}, + {0x033, 0x00000030}, + {0x03F, 0x00003338}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000001}, + {0x03F, 0x000004BA}, + {0x033, 0x00000002}, + {0x03F, 0x000004BA}, + {0x033, 0x00000003}, + {0x03F, 0x000004BA}, + {0x033, 0x00000004}, + {0x03F, 0x000004BA}, + {0x033, 0x00000005}, + {0x03F, 0x000004BA}, + {0x033, 0x00000006}, + {0x03F, 0x000004BA}, + {0x033, 0x00000007}, + {0x03F, 0x000004BA}, + {0x033, 0x00000008}, + {0x03F, 0x000004BA}, + {0x033, 0x00000009}, + {0x03F, 0x000004BA}, + {0x033, 0x0000000A}, + {0x03F, 0x000004BA}, + {0x033, 0x0000000B}, + {0x03F, 0x000004BA}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000010}, + {0x033, 0x00000001}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000002}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000003}, + {0x03F, 0x00000870}, + {0x033, 0x00000004}, + {0x03F, 0x00000870}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000430}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000008}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000009}, + {0x03F, 0x00000870}, + {0x033, 0x0000000A}, + {0x03F, 0x00000870}, + {0x033, 0x0000000B}, + {0x03F, 0x00000430}, + {0x033, 0x0000000C}, + {0x03F, 0x00000430}, + {0x033, 0x0000000D}, + {0x03F, 0x00000000}, + {0x033, 0x0000000E}, + {0x03F, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000000}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F358}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F358}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + 
{0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + 
{0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 
0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00023458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00023858}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 
0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 
0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x03E, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00023758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0002E358}, + 
{0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00025758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + 
{0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002E758}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + 
{0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + 
{0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + 
{0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0x90360002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002E658}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + 
{0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + 
{0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + 
{0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00027758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C758}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000E}, + {0x03F, 0x0002F658}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00002000}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000067}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000166}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000068}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000068}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 
0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000E2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 
0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000040}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000041}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000042}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000043}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000044}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000045}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000046}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000047}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000048}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000049}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000050}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000051}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000052}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 
0x00000053}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000054}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000055}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000056}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000057}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000058}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000059}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000060}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000061}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000062}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000064}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000065}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000066}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000068}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000069}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000070}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000071}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 
0x00000072}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000074}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000075}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000076}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000078}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000079}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000080}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000081}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000082}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000083}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000084}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000085}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000086}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000087}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00000000}, + {0x0EE, 0x00004000}, + {0x033, 0x00000000}, + {0x03F, 0x00003BEF}, + {0x033, 0x00000001}, + {0x03F, 0x00003BE9}, + {0x033, 0x00000002}, + {0x03F, 0x00003BE3}, + {0x033, 0x00000003}, + {0x03F, 0x00003BDD}, + {0x033, 0x00000004}, + {0x03F, 0x00003BD7}, + {0x033, 0x00000005}, + {0x03F, 0x00003BD1}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + 
{0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001BD9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90030002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00001BCD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000BD9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + 
{0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000009CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000859}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + 
{0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000859}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000819}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90350002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000817}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000819}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x03F, 0x000039EE}, + {0x033, 0x00000013}, + {0x03F, 0x000039E8}, + {0x033, 0x00000014}, + {0x03F, 0x000039E2}, + {0x033, 0x00000015}, + {0x03F, 0x000039DC}, + {0x033, 0x00000016}, + {0x03F, 0x000039D6}, + {0x033, 0x00000017}, + {0x03F, 0x000039D0}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x03F, 0x000019D2}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x03F, 0x000009D2}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x03F, 0x000008D3}, + {0x033, 0x0000001E}, + {0x03F, 0x000008CD}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x03F, 0x0000084D}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x03F, 0x0000080D}, + {0x033, 0x00000023}, + {0x03F, 0x00000807}, + {0x033, 0x00000024}, + {0x03F, 0x000039EE}, + {0x033, 0x00000025}, + {0x03F, 0x000039E8}, + {0x033, 0x00000026}, + {0x03F, 0x000039E2}, + {0x033, 0x00000027}, + {0x03F, 0x000039DC}, + {0x033, 0x00000028}, + {0x03F, 0x000039D6}, + {0x033, 0x00000029}, + {0x03F, 0x000039D0}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x03F, 0x000019D2}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x03F, 0x000009D2}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x03F, 0x000008D3}, + {0x033, 0x00000030}, + {0x03F, 0x000008CD}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x03F, 0x0000084D}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x03F, 0x0000080D}, + {0x033, 0x00000035}, + {0x03F, 0x00000807}, + {0x033, 0x00000036}, + {0x03F, 0x000039EE}, + {0x033, 0x00000037}, + {0x03F, 0x000039E8}, + {0x033, 0x00000038}, + {0x03F, 0x000039E2}, + {0x033, 0x00000039}, + {0x03F, 0x000039DC}, + {0x033, 0x0000003A}, + {0x03F, 0x000039D6}, + {0x033, 0x0000003B}, + {0x03F, 0x000039D0}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x03F, 0x000019D2}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x03F, 0x000009D2}, + {0x033, 0x00000040}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000041}, + {0x03F, 0x000008D3}, + {0x033, 0x00000042}, + {0x03F, 0x000008CD}, + {0x033, 0x00000043}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000044}, + {0x03F, 0x0000084D}, + {0x033, 0x00000045}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000046}, + {0x03F, 0x0000080D}, + {0x033, 0x00000047}, + {0x03F, 0x00000807}, + {0x033, 0x00000048}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000049}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + 
{0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000050}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000051}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + 
{0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000052}, + {0x03F, 0x000009CD}, + {0x033, 0x00000053}, + {0x03F, 0x000008D3}, + {0x033, 0x00000054}, + {0x03F, 0x000008CD}, + {0x033, 0x00000055}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000056}, + {0x03F, 0x0000084D}, + {0x033, 0x00000057}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + 
{0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000058}, + {0x03F, 0x0000080D}, + {0x033, 0x00000059}, + {0x03F, 0x00000807}, + {0x033, 0x0000005A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000060}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000061}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000062}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000064}, + {0x03F, 0x000009CD}, + {0x033, 0x00000065}, + {0x03F, 0x000008D3}, + {0x033, 0x00000066}, + {0x03F, 0x000008CD}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000068}, + {0x03F, 0x0000084D}, + {0x033, 0x00000069}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006A}, + {0x03F, 0x0000080D}, + {0x033, 0x0000006B}, + {0x03F, 0x00000807}, + {0x033, 0x0000006C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000070}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000071}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000072}, + 
{0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000074}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000075}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000076}, + {0x03F, 0x000009CD}, + {0x033, 0x00000077}, + {0x03F, 0x000008D3}, + {0x033, 0x00000078}, + {0x03F, 0x000008CD}, + {0x033, 0x00000079}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007A}, + {0x03F, 0x0000084D}, + {0x033, 0x0000007B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007C}, + {0x03F, 0x0000080D}, + {0x033, 0x0000007D}, + {0x03F, 0x00000807}, + {0x033, 0x0000007E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000080}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000081}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000082}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000083}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000084}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000085}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000086}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000087}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000088}, + {0x03F, 0x000009CD}, + {0x033, 0x00000089}, + {0x03F, 0x000008D3}, + {0x033, 0x0000008A}, + {0x03F, 0x000008CD}, + {0x033, 0x0000008B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000008C}, + {0x03F, 0x0000084D}, + {0x033, 0x0000008D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000008E}, + {0x03F, 0x0000080D}, + {0x033, 0x0000008F}, + {0x03F, 0x00000807}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00080000}, + {0x033, 0x00000007}, + {0x03E, 0x00000001}, + {0x03F, 0x00020F3C}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00080000}, + {0x033, 0x0000000C}, + {0x03E, 0x00000001}, + {0x03F, 0x000305BC}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + 
{0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0EC, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x03C, 0x00000020}, + {0x03D, 0x00000078}, + {0x03E, 0x00080000}, + {0x03F, 0x00001999}, + {0x0EC, 0x00000000}, + {0x02F, 0x0002260D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0DE, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000002}, + {0x033, 0x00000000}, + {0x03F, 0x00000002}, + {0x033, 0x00000001}, + {0x03F, 0x00000002}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000400}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 
0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + 
{0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 
0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000200}, + {0x033, 0x00000000}, + {0x03F, 0x000001FF}, + {0x033, 0x00000001}, + {0x03F, 0x000001FF}, + {0x033, 0x00000002}, + {0x03F, 
0x000001FF}, + {0x033, 0x00000003}, + {0x03F, 0x000001FF}, + {0x033, 0x00000004}, + {0x03F, 0x000001FF}, + {0x033, 0x00000005}, + {0x03F, 0x000001FF}, + {0x033, 0x00000006}, + {0x03F, 0x000001FF}, + {0x033, 0x00000007}, + {0x03F, 0x000001FF}, + {0x033, 0x00000008}, + {0x03F, 0x000001FF}, + {0x033, 0x00000009}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000A}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000B}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000C}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000D}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000E}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000F}, + {0x03F, 0x000001FF}, + {0x033, 0x00000010}, + {0x03F, 0x000001FF}, + {0x033, 0x00000011}, + {0x03F, 0x000001FF}, + {0x033, 0x00000012}, + {0x03F, 0x000001FF}, + {0x033, 0x00000013}, + {0x03F, 0x000001FF}, + {0x033, 0x00000014}, + {0x03F, 0x000001FF}, + {0x033, 0x00000015}, + {0x03F, 0x000001FF}, + {0x033, 0x00000016}, + {0x03F, 0x000001FF}, + {0x033, 0x00000017}, + {0x03F, 0x000001FF}, + {0x033, 0x00000018}, + {0x03F, 0x000001FF}, + {0x033, 0x00000019}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001A}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001B}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001C}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001D}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001E}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001F}, + {0x03F, 0x000001FF}, + {0x033, 0x00000020}, + {0x03F, 0x000001FF}, + {0x033, 0x00000021}, + {0x03F, 0x000001FF}, + {0x033, 0x00000022}, + {0x03F, 0x000001FF}, + {0x033, 0x00000023}, + {0x03F, 0x000001FF}, + {0x033, 0x00000024}, + {0x03F, 0x000001FF}, + {0x033, 0x00000025}, + {0x03F, 0x000001FF}, + {0x033, 0x00000026}, + {0x03F, 0x000001FF}, + {0x033, 0x00000027}, + {0x03F, 0x000001FF}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0xA0000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0xB0000000, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90350002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0xA0000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0xB0000000, 0x00000000}, + {0x06D, 0x00000C31}, + {0x0EF, 0x00020000}, + {0x033, 0x00000000}, + {0x03F, 0x000005FF}, + {0x0EF, 0x00000000}, + {0x005, 0x00000001}, + {0x0EF, 0x00080000}, + {0x033, 0x00000001}, + {0x03E, 0x00000001}, + {0x03F, 0x00022020}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0xA0000000, 0x00000000}, + {0x087, 0x00000427}, + {0xB0000000, 0x00000000}, + {0x002, 0x00000000}, + {0x067, 0x00000052}, + +}; + +static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0010001, 0x00000001}, + {0xF0020001, 0x00000002}, + {0xF0030001, 0x00000003}, + {0xF0250001, 0x00000004}, + {0xF0260001, 0x00000005}, + {0xF0320001, 0x00000006}, + {0xF0330001, 0x00000007}, + {0xF0340001, 0x00000008}, + {0xF0350001, 0x00000009}, + {0xF0360001, 0x0000000A}, + {0xF0010002, 0x0000000B}, + {0xF0020002, 0x0000000C}, + {0xF0030002, 0x0000000D}, + {0xF0250002, 0x0000000E}, + {0xF0260002, 0x0000000F}, + {0xF0320002, 0x00000010}, + {0xF0330002, 0x00000011}, + {0xF0340002, 0x00000012}, + {0xF0350002, 0x00000013}, + {0xF0360002, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 
0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x005, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x005, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x000, 0x00030000}, + {0x018, 0x00011124}, + {0x000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x055, 0x00080000}, + {0x056, 0x0008FFF0}, + {0x057, 0x0000C485}, + {0x058, 0x000A4164}, + {0x059, 0x00010000}, + {0x05A, 0x00060000}, + {0x05B, 0x0000A000}, + {0x05C, 0x00000000}, + {0x05D, 0x0001C013}, + {0x05E, 0x00000000}, + {0x05F, 0x00001FF0}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x060, 0x00011008}, + {0xA0000000, 0x00000000}, + {0x060, 0x00011000}, + {0xB0000000, 0x00000000}, + {0x061, 0x0009F338}, + {0x062, 0x0009233A}, + {0x063, 0x000D6002}, + {0x064, 0x000A0CB0}, + {0x065, 0x00030EFE}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90250001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x066, 0x00020000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00010000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x066, 0x00020000}, + {0xA0000000, 0x00000000}, + {0x066, 0x00010000}, + {0xB0000000, 0x00000000}, + {0x068, 0x00000000}, + {0x069, 0x00030F0A}, + {0x06A, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000AD6A4}, + {0x052, 0x00091345}, + {0x053, 0x00080081}, + {0x054, 0x0007BC24}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 
0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x051, 0x000BD267}, + {0x052, 0x00091345}, + {0x053, 0x000B0081}, + {0x054, 0x0007BCA4}, + {0xA0000000, 0x00000000}, + {0x051, 0x000AD6A4}, + {0x052, 0x00091345}, + {0x053, 0x00080081}, + {0x054, 0x0007BC24}, + {0xB0000000, 0x00000000}, + {0x0D3, 0x00000143}, + {0x043, 0x00005000}, + {0x0DD, 0x000003A0}, + {0x0B0, 0x000E6700}, + {0x0AF, 0x0001F82E}, + {0x0B2, 0x000210A7}, + {0x0B1, 0x00065FFF}, + {0x0BB, 0x000F7A00}, + {0x0B3, 0x00013F7A}, + {0x0D4, 0x0000000E}, + {0x0B7, 0x00001E0C}, + {0x0A0, 0x0000004F}, + {0x0B4, 0x0007C03E}, + {0x0B5, 0x0007E301}, + {0x0B6, 0x00080800}, + {0x0CA, 0x00002000}, + {0x0DD, 0x000003A0}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x00080000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0CC, 0x000E0000}, + {0xA0000000, 0x00000000}, + {0x0CC, 0x00080000}, + {0xB0000000, 0x00000000}, + {0x0A1, 0x0006F300}, + {0x0A2, 0x00080500}, + {0x0A3, 0x0008050B}, + {0x0A4, 0x0006DB12}, + {0x0A5, 0x00000000}, + {0x0A6, 0x00000000}, + {0x0A7, 0x00000000}, + {0x0A8, 0x00000000}, + {0x0A9, 0x00000000}, + {0x0AA, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x000B0000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, 
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0A5, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0A5, 0x000B0000}, + {0xB0000000, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00008000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0ED, 0x00008000}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000000}, + {0x03E, 0x00008000}, + {0x03F, 0x000E1333}, + {0x033, 0x00000001}, + {0x03E, 0x00008000}, + {0x03F, 0x000E7333}, + {0x033, 0x00000002}, + {0x03E, 0x00008000}, + {0x03F, 0x000FA000}, + {0x033, 0x00000003}, + {0x03E, 0x00004000}, + {0x03F, 0x000FA400}, + {0x033, 0x00000004}, + {0x03E, 
0x00004000}, + {0x03F, 0x000F5000}, + {0x033, 0x00000005}, + {0x03E, 0x00004001}, + {0x03F, 0x00029400}, + {0x033, 0x00000006}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x00041999}, + {0x033, 0x00000007}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x00034444}, + {0x033, 0x00000008}, + {0x03E, 0x0000AAA1}, + {0x03F, 0x0004D555}, + {0x033, 0x00000009}, + {0x03E, 0x00005551}, + {0x03F, 0x00046AAA}, + {0x033, 0x0000000A}, + {0x03E, 0x00005551}, + {0x03F, 0x00046AAA}, + {0x033, 0x0000000B}, + {0x03E, 0x00005551}, + {0x03F, 0x0008C555}, + {0x033, 0x0000000C}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00081EB8}, + {0x033, 0x0000000D}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00071EB8}, + {0x033, 0x0000000E}, + {0x03E, 0x0000CCC1}, + {0x03F, 0x00090000}, + {0x033, 0x0000000F}, + {0x03E, 0x00006661}, + {0x03F, 0x00088000}, + {0x033, 0x00000010}, + {0x03E, 0x00006661}, + {0x03F, 0x00088000}, + {0x033, 0x00000011}, + {0x03E, 0x00006661}, + {0x03F, 0x000DB999}, + {0x0ED, 0x00000000}, + {0x0ED, 0x00002000}, + {0x033, 0x00000002}, + {0x03D, 0x0004A883}, + {0x03E, 0x00000000}, + {0x03F, 0x00000001}, + {0x033, 0x00000006}, + {0x03D, 0x0004A883}, + {0x03E, 0x00000000}, + {0x03F, 0x00000001}, + {0x0ED, 0x00000000}, + {0x018, 0x00001001}, + {0x002, 0x0000000D}, + {0x0EE, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 
0x00000019}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000A}, + {0x033, 0x0000000C}, + {0x03F, 0x00000011}, + {0x033, 0x0000000D}, + {0x03F, 0x00000018}, + {0xA0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00000000}, + {0x08F, 0x000D0F7A}, + {0x08C, 0x00084584}, + {0x0EF, 0x00004000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00004700}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000700}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000700}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004700}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000700}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000500}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000B0600}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00094600}, + 
{0xA0000000, 0x00000000}, + {0x03F, 0x00000500}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000400}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4500}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000400}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00008B00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00038B00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000D4400}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00008B00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00014B00}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000B00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004A00}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001A00}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00002900}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00002900}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00004900}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00002900}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x033, 0x00000002}, + {0x03F, 0x00000015}, + {0x033, 0x00000003}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000000}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000001}, + {0x03F, 0x000BECFC}, + {0x033, 0x00000002}, + {0x03F, 0x0003E4FC}, + {0x033, 0x00000003}, + {0x03F, 0x0001D0FC}, + {0x033, 0x00000004}, + {0x03F, 0x0001C3FC}, + {0x033, 0x00000005}, + {0x03F, 0x000103FC}, + {0x033, 0x00000006}, + {0x03F, 0x0000007C}, + {0x033, 0x00000007}, + {0x03F, 0x0000007C}, + {0x033, 0x00000008}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000009}, + {0x03F, 0x000BECFC}, + {0x033, 0x0000000A}, + {0x03F, 0x0003E4FC}, + {0x033, 0x0000000B}, + {0x03F, 0x0001D0FC}, + {0x033, 0x0000000C}, + {0x03F, 0x0001C3FC}, + {0x033, 0x0000000D}, + {0x03F, 0x000103FC}, + {0x033, 0x0000000E}, + {0x03F, 0x0000007C}, + {0x033, 0x0000000F}, + {0x03F, 0x0000007C}, + {0x033, 0x00000010}, + {0x03F, 0x000FECFC}, + {0x033, 0x00000011}, + {0x03F, 0x000BECFC}, + {0x033, 0x00000012}, + {0x03F, 0x0003E4FC}, + {0x033, 0x00000013}, + {0x03F, 0x0001D0FC}, + {0x033, 0x00000014}, + {0x03F, 0x0001C3FC}, + {0x033, 0x00000015}, + {0x03F, 0x000103FC}, + {0x033, 0x00000016}, + {0x03F, 0x0000007C}, + {0x033, 0x00000017}, + {0x03F, 0x0000007C}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000000}, + {0x03F, 0x00003317}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x03F, 0x00003317}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x03F, 0x00003317}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + 
{0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + 
{0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003336}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003338}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + 
{0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003337}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00003356}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x03F, 0x00003338}, + {0x033, 0x0000001A}, + {0x03F, 0x00003338}, + {0x033, 0x0000001B}, + {0x03F, 0x00003338}, + {0x033, 0x0000001C}, + {0x03F, 0x00003338}, + {0x033, 0x0000001D}, + {0x03F, 0x00003338}, + {0x033, 0x0000001E}, + {0x03F, 0x00003338}, + {0x033, 0x0000001F}, + {0x03F, 0x00003338}, + {0x033, 0x00000020}, + {0x03F, 0x00003338}, + {0x033, 0x00000021}, + {0x03F, 0x00003338}, + {0x033, 0x00000022}, + {0x03F, 0x00003338}, + {0x033, 0x00000023}, + {0x03F, 0x00003338}, + {0x033, 0x00000024}, + {0x03F, 0x00003338}, + {0x033, 0x00000025}, + {0x03F, 0x00003338}, + {0x033, 0x00000026}, + {0x03F, 0x00003338}, + {0x033, 0x00000027}, + {0x03F, 0x00003338}, + {0x033, 0x00000028}, + {0x03F, 0x00003338}, + {0x033, 0x00000029}, + {0x03F, 0x00003338}, + {0x033, 0x0000002A}, + {0x03F, 0x00003338}, + {0x033, 0x0000002B}, + {0x03F, 0x00003338}, + {0x033, 0x0000002C}, + {0x03F, 0x00003338}, + {0x033, 0x0000002D}, + {0x03F, 0x00003338}, + {0x033, 0x0000002E}, + {0x03F, 0x00003338}, + {0x033, 0x0000002F}, + {0x03F, 0x00003338}, + {0x033, 0x00000030}, + {0x03F, 0x00003338}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000001}, + {0x03F, 0x000004BA}, + {0x033, 0x00000002}, + {0x03F, 0x000004BA}, + {0x033, 0x00000003}, + {0x03F, 0x000004BA}, + {0x033, 0x00000004}, + {0x03F, 0x000004BA}, + {0x033, 0x00000005}, + {0x03F, 0x000004BA}, + {0x033, 0x00000006}, + {0x03F, 0x000004BA}, + {0x033, 0x00000007}, + {0x03F, 0x000004BA}, + {0x033, 0x00000008}, + {0x03F, 0x000004BA}, + {0x033, 0x00000009}, + {0x03F, 0x000004BA}, + {0x033, 0x0000000A}, + {0x03F, 0x000004BA}, + {0x033, 0x0000000B}, + {0x03F, 0x000004BA}, + {0x0EF, 0x00000000}, + 
{0x0EF, 0x00000010}, + {0x033, 0x00000001}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000002}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000003}, + {0x03F, 0x00000870}, + {0x033, 0x00000004}, + {0x03F, 0x00000870}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000730}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000430}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000430}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000430}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000008}, + {0x03F, 0x00000CB0}, + {0x033, 0x00000009}, + {0x03F, 0x00000870}, + {0x033, 0x0000000A}, + {0x03F, 0x00000870}, + {0x033, 0x0000000B}, + {0x03F, 0x00000430}, + {0x033, 0x0000000C}, + {0x03F, 0x00000430}, + {0x033, 0x0000000D}, + {0x03F, 0x00000000}, + {0x033, 0x0000000E}, + {0x03F, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, 
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F258}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F258}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + 
{0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00036458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0003C458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x00028558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + 
{0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + 
{0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026658}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00026458}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00028558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C358}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + 
{0x03F, 0x0002C558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + 
{0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0002C558}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + 
{0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F358}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002F358}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F258}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + 
{0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + 
{0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023758}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00023558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00025558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + 
{0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x00024558}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000C}, + {0x03F, 0x0002C558}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xA0000000, 0x00000000}, + {0x03E, 0x0000000B}, + {0x03F, 0x0006F458}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00002000}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000001A3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000166}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000163}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E4}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000068}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000068}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 
0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F5}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000001EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A5}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000053}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 
0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000051}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000070}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000067}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000067}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000064}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000061}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000058}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E5}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000055}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000E2}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000DF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000065}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000065}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000062}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000062}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000040}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000049}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000041}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000046}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000042}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000059}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000057}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000043}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000059}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000043}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000056}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000040}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000056}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000044}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000045}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000046}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000047}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000048}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000049}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 
0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004F}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000050}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000051}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000052}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000053}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000054}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000055}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000056}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000057}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000058}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000059}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000060}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000061}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000062}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000064}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000065}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000066}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000068}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000069}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 
0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006E}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000070}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000071}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000072}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000074}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000075}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000076}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000078}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FC}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000078}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000075}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000079}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000072}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001EA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001F0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005A}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E4}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001ED}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AC}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001AA}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000E0}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000080}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001A4}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000081}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000006C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000082}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000069}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000069}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000083}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000066}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000066}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000084}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000063}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000005A}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000063}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000085}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000060}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004E}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000057}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000060}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000086}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000004B}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000054}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005D}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000087}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000048}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000051}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00000000}, + {0x0EE, 0x00004000}, + {0x033, 0x00000000}, + {0x03F, 0x00003BEF}, + {0x033, 0x00000001}, + {0x03F, 0x00003BE9}, + {0x033, 0x00000002}, + {0x03F, 0x00003BE3}, + {0x033, 0x00000003}, + {0x03F, 0x00003BDD}, + {0x033, 0x00000004}, + {0x03F, 0x00003BD7}, + {0x033, 0x00000005}, + {0x03F, 0x00003BD1}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90260002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00003BCB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00003BCB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001BD9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BD1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00001BD3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00001BCD}, + 
{0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000BD9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BD1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000BD3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000BCD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90340002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000009D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + 
{0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000859}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000857}, + {0xA0000000, 0x00000000}, + {0x03F, 
0x00000859}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000851}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000819}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000084D}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000817}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000819}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000811}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x03F, 0x000039EE}, + {0x033, 0x00000013}, + {0x03F, 0x000039E8}, + {0x033, 0x00000014}, + {0x03F, 0x000039E2}, + {0x033, 0x00000015}, + {0x03F, 0x000039DC}, + {0x033, 0x00000016}, + {0x03F, 0x000039D6}, + {0x033, 0x00000017}, + {0x03F, 0x000039D0}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x03F, 0x000019D2}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x03F, 0x000009D2}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + 
{0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x03F, 0x000008D3}, + {0x033, 0x0000001E}, + {0x03F, 0x000008CD}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x03F, 0x0000084D}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x03F, 0x0000080D}, + {0x033, 0x00000023}, + {0x03F, 0x00000807}, + {0x033, 0x00000024}, + {0x03F, 0x000039EE}, + {0x033, 0x00000025}, + {0x03F, 0x000039E8}, + {0x033, 0x00000026}, + {0x03F, 0x000039E2}, + {0x033, 0x00000027}, + {0x03F, 0x000039DC}, + {0x033, 0x00000028}, + {0x03F, 0x000039D6}, + {0x033, 0x00000029}, + {0x03F, 0x000039D0}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x03F, 0x000019D2}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x03F, 0x000009D2}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x03F, 0x000008D3}, + {0x033, 0x00000030}, + {0x03F, 0x000008CD}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x03F, 0x0000084D}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x03F, 0x0000080D}, + {0x033, 0x00000035}, + {0x03F, 0x00000807}, + {0x033, 0x00000036}, + {0x03F, 0x000039EE}, + {0x033, 0x00000037}, + {0x03F, 0x000039E8}, + {0x033, 0x00000038}, + {0x03F, 0x000039E2}, + {0x033, 0x00000039}, + {0x03F, 0x000039DC}, + {0x033, 0x0000003A}, + {0x03F, 0x000039D6}, + {0x033, 0x0000003B}, + {0x03F, 0x000039D0}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90020002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000039CA}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x03F, 0x000019D2}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x03F, 0x000009D2}, + {0x033, 0x00000040}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000009CC}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CC}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000008D9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000041}, + {0x03F, 0x000008D3}, + {0x033, 0x00000042}, + {0x03F, 0x000008CD}, + {0x033, 0x00000043}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000044}, + {0x03F, 0x0000084D}, + {0x033, 0x00000045}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90020002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000847}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000046}, + {0x03F, 0x0000080D}, + {0x033, 0x00000047}, + {0x03F, 0x00000807}, + {0x033, 0x00000048}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000049}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000004F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000050}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000051}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000052}, + {0x03F, 0x000009CD}, + {0x033, 0x00000053}, + {0x03F, 0x000008D3}, + {0x033, 0x00000054}, + {0x03F, 0x000008CD}, + {0x033, 0x00000055}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000056}, + {0x03F, 0x0000084D}, + {0x033, 0x00000057}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000058}, + {0x03F, 0x0000080D}, + {0x033, 0x00000059}, + {0x03F, 0x00000807}, + {0x033, 0x0000005A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + 
{0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000005F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + 
{0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000060}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000061}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000062}, + {0x80010000, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000064}, + {0x03F, 0x000009CD}, + {0x033, 0x00000065}, + {0x03F, 0x000008D3}, + {0x033, 0x00000066}, + {0x03F, 0x000008CD}, + {0x033, 
0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000068}, + {0x03F, 0x0000084D}, + {0x033, 0x00000069}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006A}, + {0x03F, 0x0000080D}, + {0x033, 0x0000006B}, + {0x03F, 
0x00000807}, + {0x033, 0x0000006C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + 
{0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000070}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000071}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000072}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + 
{0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000074}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000075}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000076}, + {0x03F, 0x000009CD}, + {0x033, 0x00000077}, + {0x03F, 0x000008D3}, + {0x033, 0x00000078}, + {0x03F, 0x000008CD}, + {0x033, 0x00000079}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007A}, + {0x03F, 0x0000084D}, + {0x033, 0x0000007B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007C}, + {0x03F, 0x0000080D}, + {0x033, 0x0000007D}, + {0x03F, 0x00000807}, + {0x033, 0x0000007E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000039EE}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EE}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039EF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E8}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E9}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000080}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039E3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000081}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039DD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000082}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000039D7}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D6}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D7}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000083}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D0}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039D1}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000084}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CA}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000039CB}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000085}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000086}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000019CD}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CC}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000019CD}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000087}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D2}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000009D3}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000088}, + {0x03F, 0x000009CD}, + {0x033, 0x00000089}, + {0x03F, 0x000008D3}, + {0x033, 0x0000008A}, + {0x03F, 0x000008CD}, + {0x033, 0x0000008B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000008C7}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000853}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000008C}, + {0x03F, 0x0000084D}, + {0x033, 0x0000008D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000847}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000813}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000008E}, + {0x03F, 0x0000080D}, + {0x033, 0x0000008F}, + {0x03F, 0x00000807}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00080000}, + {0x033, 0x00000007}, + {0x03E, 0x00000001}, + {0x03F, 0x00020F3C}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00080000}, + {0x033, 0x0000000C}, + {0x03E, 0x00000001}, + {0x03F, 0x000305BC}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + 
{0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0EC, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0EC, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x03C, 0x00000020}, + {0x03D, 0x00000078}, + {0x03E, 0x00080000}, + {0x03F, 0x00001999}, + {0x0EC, 0x00000000}, + {0x02F, 0x0002260D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000001}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x0DE, 0x00000000}, + {0xA0000000, 0x00000000}, + {0x0DE, 0x00000001}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000002}, + {0x033, 0x00000000}, + {0x03F, 0x00000002}, + {0x033, 0x00000001}, + {0x03F, 0x00000002}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000400}, + {0x033, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000001}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000002}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000003}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000004}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000005}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + 
{0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + 
{0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000020}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000021}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000022}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000023}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000024}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000025}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000026}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000027}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000017F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90260002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000013F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 
0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FB}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FA}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000200}, + {0x033, 0x00000000}, + {0x03F, 0x000001FF}, + {0x033, 0x00000001}, + {0x03F, 0x000001FF}, + {0x033, 0x00000002}, + {0x03F, 0x000001FF}, + {0x033, 0x00000003}, + {0x03F, 0x000001FF}, + {0x033, 0x00000004}, + {0x03F, 0x000001FF}, + {0x033, 0x00000005}, + {0x03F, 0x000001FF}, + {0x033, 0x00000006}, + {0x03F, 0x000001FF}, + {0x033, 0x00000007}, + {0x03F, 0x000001FF}, + {0x033, 0x00000008}, + {0x03F, 0x000001FF}, + {0x033, 0x00000009}, + {0x03F, 
0x000001FF}, + {0x033, 0x0000000A}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000B}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000C}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000D}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000E}, + {0x03F, 0x000001FF}, + {0x033, 0x0000000F}, + {0x03F, 0x000001FF}, + {0x033, 0x00000010}, + {0x03F, 0x000001FF}, + {0x033, 0x00000011}, + {0x03F, 0x000001FF}, + {0x033, 0x00000012}, + {0x03F, 0x000001FF}, + {0x033, 0x00000013}, + {0x03F, 0x000001FF}, + {0x033, 0x00000014}, + {0x03F, 0x000001FF}, + {0x033, 0x00000015}, + {0x03F, 0x000001FF}, + {0x033, 0x00000016}, + {0x03F, 0x000001FF}, + {0x033, 0x00000017}, + {0x03F, 0x000001FF}, + {0x033, 0x00000018}, + {0x03F, 0x000001FF}, + {0x033, 0x00000019}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001A}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001B}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001C}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001D}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001E}, + {0x03F, 0x000001FF}, + {0x033, 0x0000001F}, + {0x03F, 0x000001FF}, + {0x033, 0x00000020}, + {0x03F, 0x000001FF}, + {0x033, 0x00000021}, + {0x03F, 0x000001FF}, + {0x033, 0x00000022}, + {0x03F, 0x000001FF}, + {0x033, 0x00000023}, + {0x03F, 0x000001FF}, + {0x033, 0x00000024}, + {0x03F, 0x000001FF}, + {0x033, 0x00000025}, + {0x03F, 0x000001FF}, + {0x033, 0x00000026}, + {0x03F, 0x000001FF}, + {0x033, 0x00000027}, + {0x03F, 0x000001FF}, + {0x033, 0x00000028}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000029}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001FF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000002F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000030}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000031}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000032}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000033}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000034}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000035}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000036}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000037}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000038}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000039}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, 
+ {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000003F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003B}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003F}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000003F}, + 
{0xB0000000, 0x00000000}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06E, 0x00067A7C}, + {0xA0000000, 0x00000000}, + {0x06E, 0x00077A7C}, + {0xB0000000, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x06F, 0x00067A7C}, + {0xA0000000, 0x00000000}, + {0x06F, 0x00077A7C}, + {0xB0000000, 0x00000000}, + {0x06D, 0x00000C31}, + {0x0EF, 0x00020000}, + {0x033, 0x00000000}, + {0x03F, 0x000005FF}, + {0x0EF, 
0x00000000}, + {0x0A0, 0x00000043}, + {0x005, 0x00000001}, + {0x0EF, 0x00080000}, + {0x033, 0x00000001}, + {0x03E, 0x00000001}, + {0x03F, 0x00022020}, + {0x0EF, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90030001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90250001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90260001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90010002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90020002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90030002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90250002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90260002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x00000427}, + {0x90320002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90330002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90340002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90350002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0x90360002, 0x00000000}, {0x40000000, 0x00000000}, + {0x087, 0x0000042F}, + {0xA0000000, 0x00000000}, + {0x087, 0x00000427}, + {0xB0000000, 0x00000000}, + {0x002, 0x00000000}, + {0x067, 0x00000052}, + +}; + +static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = { + {0x8000, 0x00000008}, + {0x8008, 0x00000000}, + {0x8004, 0xf0862966}, + {0x800c, 0x78000000}, + {0x8010, 0x88015000}, + {0x8014, 0x80010100}, + {0x8018, 0x10010100}, + {0x801c, 0xa210bc00}, + {0x8020, 0x000403e0}, + {0x8024, 0x00072160}, + {0x8028, 0x00180e00}, + {0x8030, 0x400000c0}, + {0x8034, 0x56000800}, + {0x8038, 0x00000009}, + {0x803c, 0x00000008}, + {0x8040, 0x00000046}, + {0x8044, 0x0010001f}, + {0x8048, 0xf0000003}, + {0x804c, 0x62ac6162}, + {0x8050, 0xf2acf162}, + {0x8054, 0x62ac6162}, + {0x8058, 0xf2acf162}, + {0x805c, 0x150c0b02}, + {0x8060, 0x150c0b02}, + {0x8064, 0x2aa00047}, + {0x8074, 0x80000000}, + {0x807c, 0x000000ee}, + {0x8088, 0x80000000}, + {0x8098, 0x0000ff00}, + {0x809c, 0x0000001f}, + {0x80a0, 0x00010300}, + {0x80b0, 0x00000000}, + {0x80d0, 0x00000000}, + {0x8114, 0x00000000}, + {0x8120, 0x10010000}, + {0x8124, 0x00000000}, + {0x812c, 0x0000c000}, + {0x8138, 0x40000002}, + {0x813c, 0x40000002}, + {0x8140, 0x00000000}, + {0x8144, 0x0b040b03}, + {0x8148, 0x0b040b04}, + {0x814c, 0x0b040b03}, + {0x8150, 0x00000000}, + {0x8158, 0xffffffff}, + {0x815c, 0xffffffff}, + {0x8160, 0xffffffff}, + {0x8164, 0xffffffff}, + {0x8168, 0xffffffff}, + {0x816c, 0x1fffffff}, + {0x81ac, 0x003f1a00}, + {0x81b0, 0x003f1a00}, + {0x81bc, 0x005b5b5b}, + {0x81c0, 0x005b5b5b}, + {0x81b4, 0x00600060}, + {0x81b8, 0x00600060}, + {0x81cc, 0x00000000}, + {0x81dc, 0x00000002}, + {0x8214, 0x00000000}, + {0x8220, 0x10010000}, + {0x8224, 0x00000000}, + {0x822c, 0x0000d000}, + {0x8238, 0x40000002}, + {0x823c, 0x40000002}, + {0x8240, 0x00000000}, + 
{0x8244, 0x0b040b03}, + {0x8248, 0x0b040b03}, + {0x824c, 0x0b030b03}, + {0x8250, 0x00000000}, + {0x8258, 0xffffffff}, + {0x825c, 0xffffffff}, + {0x8260, 0xffffffff}, + {0x8264, 0xffffffff}, + {0x8268, 0xffffffff}, + {0x826c, 0x1fffffff}, + {0x82ac, 0x003f1a00}, + {0x82b0, 0x003f1a00}, + {0x82bc, 0x005b5b5b}, + {0x82c0, 0x005b5b5b}, + {0x82b4, 0x00600060}, + {0x82b8, 0x00600060}, + {0x82cc, 0x00000000}, + {0x82dc, 0x00000002}, + {0x81d8, 0x00000001}, + {0x82d8, 0x00000001}, + {0x8d00, 0x00000000}, + {0x8d04, 0x00000000}, + {0x8d08, 0x00000000}, + {0x8d0c, 0x00000000}, + {0x8d10, 0x00000000}, + {0x8d14, 0x00000000}, + {0x8d18, 0x00000000}, + {0x8d1c, 0x00000000}, + {0x8d20, 0x00000000}, + {0x8d24, 0x00000000}, + {0x8d28, 0x00000000}, + {0x8d2c, 0x00000000}, + {0x8d30, 0x00000000}, + {0x8d34, 0x00000000}, + {0x8d38, 0x00000000}, + {0x8d3c, 0x00000000}, + {0x8d40, 0x00000000}, + {0x8d44, 0x00000000}, + {0x8d48, 0x00000000}, + {0x8d4c, 0x00000000}, + {0x8d50, 0x00000000}, + {0x8d54, 0x00000000}, + {0x8d58, 0x00000000}, + {0x8d5c, 0x00000000}, + {0x8d60, 0x00000000}, + {0x8d64, 0x00000000}, + {0x8d68, 0x00000000}, + {0x8d6c, 0x00000000}, + {0x8d70, 0x00000000}, + {0x8d74, 0x00000000}, + {0x8d78, 0x00000000}, + {0x8d7c, 0x00000000}, + {0x8d80, 0x00000000}, + {0x8d84, 0x00000000}, + {0x8d88, 0x00000000}, + {0x8d8c, 0x00000000}, + {0x8d90, 0x00000000}, + {0x8d94, 0x00000000}, + {0x8d98, 0x00000000}, + {0x8d9c, 0x00000000}, + {0x8da0, 0x00000000}, + {0x8da4, 0x00000000}, + {0x8da8, 0x00000000}, + {0x8dac, 0x00000000}, + {0x8db0, 0x00000000}, + {0x8db4, 0x00000000}, + {0x8db8, 0x00000000}, + {0x8dbc, 0x00000000}, + {0x8dc0, 0x00000000}, + {0x8dc4, 0x00000000}, + {0x8dc8, 0x00000000}, + {0x8dcc, 0x00000000}, + {0x8dd0, 0x00000000}, + {0x8dd4, 0x00000000}, + {0x8dd8, 0x00000000}, + {0x8ddc, 0x00000000}, + {0x8de0, 0x00000000}, + {0x8de4, 0x00000000}, + {0x8de8, 0x00000000}, + {0x8dec, 0x00000000}, + {0x8df0, 0x00000000}, + {0x8df4, 0x00000000}, + {0x8df8, 0x00000000}, + {0x8dfc, 0x00000000}, + {0x8e00, 0x00000000}, + {0x8e04, 0x00000000}, + {0x8e08, 0x00000000}, + {0x8e0c, 0x00000000}, + {0x8e10, 0x00000000}, + {0x8e14, 0x00000000}, + {0x8e18, 0x00000000}, + {0x8e1c, 0x00000000}, + {0x8e20, 0x00000000}, + {0x8e24, 0x00000000}, + {0x8e28, 0x00000000}, + {0x8e2c, 0x00000000}, + {0x8e30, 0x00000000}, + {0x8e34, 0x00000000}, + {0x8e38, 0x00000000}, + {0x8e3c, 0x00000000}, + {0x8e40, 0x00000000}, + {0x8e44, 0x00000000}, + {0x8e48, 0x00000000}, + {0x8e4c, 0x00000000}, + {0x8e50, 0x00000000}, + {0x8e54, 0x00000000}, + {0x8e58, 0x00000000}, + {0x8e5c, 0x00000000}, + {0x8e60, 0x00000000}, + {0x8e64, 0x00000000}, + {0x8e68, 0x00000000}, + {0x8e6c, 0x00000000}, + {0x8e70, 0x00000000}, + {0x8e74, 0x00000000}, + {0x8e78, 0x00000000}, + {0x8e7c, 0x00000000}, + {0x8e80, 0x00000000}, + {0x8e84, 0x00000000}, + {0x8e88, 0x00000000}, + {0x8e8c, 0x00000000}, + {0x8e90, 0x00000000}, + {0x8e94, 0x00000000}, + {0x8e98, 0x00000000}, + {0x8e9c, 0x00000000}, + {0x8ea0, 0x00000000}, + {0x8ea4, 0x00000000}, + {0x8ea8, 0x00000000}, + {0x8eac, 0x00000000}, + {0x8eb0, 0x00000000}, + {0x8eb4, 0x00000000}, + {0x8eb8, 0x00000000}, + {0x8ebc, 0x00000000}, + {0x8ec0, 0x00000000}, + {0x8ec4, 0x00000000}, + {0x8ec8, 0x00000000}, + {0x8ecc, 0x00000000}, + {0x8ed0, 0x00000000}, + {0x8ed4, 0x00000000}, + {0x8ed8, 0x00000000}, + {0x8edc, 0x00000000}, + {0x8ee0, 0x00000000}, + {0x8ee4, 0x00000000}, + {0x8ee8, 0x00000000}, + {0x8eec, 0x00000000}, + {0x8ef0, 0x00000000}, + {0x8ef4, 0x00000000}, + {0x8ef8, 0x00000000}, + {0x8efc, 0x00000000}, + 
{0x8f00, 0x00000000}, + {0x8f04, 0x00000000}, + {0x8f08, 0x00000000}, + {0x8f0c, 0x00000000}, + {0x8f10, 0x00000000}, + {0x8f14, 0x00000000}, + {0x8f18, 0x00000000}, + {0x8f1c, 0x00000000}, + {0x8f20, 0x00000000}, + {0x8f24, 0x00000000}, + {0x8f28, 0x00000000}, + {0x8f2c, 0x00000000}, + {0x8f30, 0x00000000}, + {0x8f34, 0x00000000}, + {0x8f38, 0x00000000}, + {0x8f3c, 0x00000000}, + {0x8f40, 0x00000000}, + {0x8f44, 0x00000000}, + {0x8f48, 0x00000000}, + {0x8f4c, 0x00000000}, + {0x8f50, 0x00000000}, + {0x8f54, 0x00000000}, + {0x8f58, 0x00000000}, + {0x8f5c, 0x00000000}, + {0x8f60, 0x00000000}, + {0x8f64, 0x00000000}, + {0x8f68, 0x00000000}, + {0x8f6c, 0x00000000}, + {0x8f70, 0x00000000}, + {0x8f74, 0x00000000}, + {0x8f78, 0x00000000}, + {0x8f7c, 0x00000000}, + {0x8f80, 0x00000000}, + {0x8f84, 0x00000000}, + {0x8f88, 0x00000000}, + {0x8f8c, 0x00000000}, + {0x8f90, 0x00000000}, + {0x8f94, 0x00000000}, + {0x8f98, 0x00000000}, + {0x8f9c, 0x00000000}, + {0x8fa0, 0x00000000}, + {0x8fa4, 0x00000000}, + {0x8fa8, 0x00000000}, + {0x8fac, 0x00000000}, + {0x8fb0, 0x00000000}, + {0x8fb4, 0x00000000}, + {0x8fb8, 0x00000000}, + {0x8fbc, 0x00000000}, + {0x8fc0, 0x00000000}, + {0x8fc4, 0x00000000}, + {0x8fc8, 0x00000000}, + {0x8fcc, 0x00000000}, + {0x8fd0, 0x00000000}, + {0x8fd4, 0x00000000}, + {0x8fd8, 0x00000000}, + {0x8fdc, 0x00000000}, + {0x8fe0, 0x00000000}, + {0x8fe4, 0x00000000}, + {0x8fe8, 0x00000000}, + {0x8fec, 0x00000000}, + {0x8ff0, 0x00000000}, + {0x8ff4, 0x00000000}, + {0x8ff8, 0x00000000}, + {0x8ffc, 0x00000000}, + {0x9000, 0x00000000}, + {0x9004, 0x00000000}, + {0x9008, 0x00000000}, + {0x900c, 0x00000000}, + {0x9010, 0x00000000}, + {0x9014, 0x00000000}, + {0x9018, 0x00000000}, + {0x901c, 0x00000000}, + {0x9020, 0x00000000}, + {0x9024, 0x00000000}, + {0x9028, 0x00000000}, + {0x902c, 0x00000000}, + {0x9030, 0x00000000}, + {0x9034, 0x00000000}, + {0x9038, 0x00000000}, + {0x903c, 0x00000000}, + {0x9040, 0x00000000}, + {0x9044, 0x00000000}, + {0x9048, 0x00000000}, + {0x904c, 0x00000000}, + {0x9050, 0x00000000}, + {0x9054, 0x00000000}, + {0x9058, 0x00000000}, + {0x905c, 0x00000000}, + {0x9060, 0x00000000}, + {0x9064, 0x00000000}, + {0x9068, 0x00000000}, + {0x906c, 0x00000000}, + {0x9070, 0x00000000}, + {0x9074, 0x00000000}, + {0x9078, 0x00000000}, + {0x907c, 0x00000000}, + {0x9080, 0x00000000}, + {0x9084, 0x00000000}, + {0x9088, 0x00000000}, + {0x908c, 0x00000000}, + {0x9090, 0x00000000}, + {0x9094, 0x00000000}, + {0x9098, 0x00000000}, + {0x909c, 0x00000000}, + {0x90a0, 0x00000000}, + {0x90a4, 0x00000000}, + {0x90a8, 0x00000000}, + {0x90ac, 0x00000000}, + {0x90b0, 0x00000000}, + {0x90b4, 0x00000000}, + {0x90b8, 0x00000000}, + {0x90bc, 0x00000000}, + {0x9100, 0x00000000}, + {0x9104, 0x00000000}, + {0x9108, 0x00000000}, + {0x910c, 0x00000000}, + {0x9110, 0x00000000}, + {0x9114, 0x00000000}, + {0x9118, 0x00000000}, + {0x911c, 0x00000000}, + {0x9120, 0x00000000}, + {0x9124, 0x00000000}, + {0x9128, 0x00000000}, + {0x912c, 0x00000000}, + {0x9130, 0x00000000}, + {0x9134, 0x00000000}, + {0x9138, 0x00000000}, + {0x913c, 0x00000000}, + {0x9140, 0x00000000}, + {0x9144, 0x00000000}, + {0x9148, 0x00000000}, + {0x914c, 0x00000000}, + {0x9150, 0x00000000}, + {0x9154, 0x00000000}, + {0x9158, 0x00000000}, + {0x915c, 0x00000000}, + {0x9160, 0x00000000}, + {0x9164, 0x00000000}, + {0x9168, 0x00000000}, + {0x916c, 0x00000000}, + {0x9170, 0x00000000}, + {0x9174, 0x00000000}, + {0x9178, 0x00000000}, + {0x917c, 0x00000000}, + {0x9180, 0x00000000}, + {0x9184, 0x00000000}, + {0x9188, 0x00000000}, + {0x918c, 0x00000000}, + 
{0x9190, 0x00000000}, + {0x9194, 0x00000000}, + {0x9198, 0x00000000}, + {0x919c, 0x00000000}, + {0x91a0, 0x00000000}, + {0x91a4, 0x00000000}, + {0x91a8, 0x00000000}, + {0x91ac, 0x00000000}, + {0x91b0, 0x00000000}, + {0x91b4, 0x00000000}, + {0x91b8, 0x00000000}, + {0x91bc, 0x00000000}, + {0x91c0, 0x00000000}, + {0x91c4, 0x00000000}, + {0x91c8, 0x00000000}, + {0x91cc, 0x00000000}, + {0x91d0, 0x00000000}, + {0x91d4, 0x00000000}, + {0x91d8, 0x00000000}, + {0x91dc, 0x00000000}, + {0x91e0, 0x00000000}, + {0x91e4, 0x00000000}, + {0x91e8, 0x00000000}, + {0x91ec, 0x00000000}, + {0x91f0, 0x00000000}, + {0x91f4, 0x00000000}, + {0x91f8, 0x00000000}, + {0x91fc, 0x00000000}, + {0x9200, 0x00000000}, + {0x9204, 0x00000000}, + {0x9208, 0x00000000}, + {0x920c, 0x00000000}, + {0x9210, 0x00000000}, + {0x9214, 0x00000000}, + {0x9218, 0x00000000}, + {0x921c, 0x00000000}, + {0x9220, 0x00000000}, + {0x9224, 0x00000000}, + {0x9228, 0x00000000}, + {0x922c, 0x00000000}, + {0x9230, 0x00000000}, + {0x9234, 0x00000000}, + {0x9238, 0x00000000}, + {0x923c, 0x00000000}, + {0x9240, 0x00000000}, + {0x9244, 0x00000000}, + {0x9248, 0x00000000}, + {0x924c, 0x00000000}, + {0x9250, 0x00000000}, + {0x9254, 0x00000000}, + {0x9258, 0x00000000}, + {0x925c, 0x00000000}, + {0x9260, 0x00000000}, + {0x9264, 0x00000000}, + {0x9268, 0x00000000}, + {0x926c, 0x00000000}, + {0x9270, 0x00000000}, + {0x9274, 0x00000000}, + {0x9278, 0x00000000}, + {0x927c, 0x00000000}, + {0x9280, 0x00000000}, + {0x9284, 0x00000000}, + {0x9288, 0x00000000}, + {0x928c, 0x00000000}, + {0x9290, 0x00000000}, + {0x9294, 0x00000000}, + {0x9298, 0x00000000}, + {0x929c, 0x00000000}, + {0x92a0, 0x00000000}, + {0x92a4, 0x00000000}, + {0x92a8, 0x00000000}, + {0x92ac, 0x00000000}, + {0x92b0, 0x00000000}, + {0x92b4, 0x00000000}, + {0x92b8, 0x00000000}, + {0x92bc, 0x00000000}, + {0x92c0, 0x00000000}, + {0x92c4, 0x00000000}, + {0x92c8, 0x00000000}, + {0x92cc, 0x00000000}, + {0x92d0, 0x00000000}, + {0x92d4, 0x00000000}, + {0x92d8, 0x00000000}, + {0x92dc, 0x00000000}, + {0x92e0, 0x00000000}, + {0x92e4, 0x00000000}, + {0x92e8, 0x00000000}, + {0x92ec, 0x00000000}, + {0x92f0, 0x00000000}, + {0x92f4, 0x00000000}, + {0x92f8, 0x00000000}, + {0x92fc, 0x00000000}, + {0x9300, 0x00000000}, + {0x9304, 0x00000000}, + {0x9308, 0x00000000}, + {0x930c, 0x00000000}, + {0x9310, 0x00000000}, + {0x9314, 0x00000000}, + {0x9318, 0x00000000}, + {0x931c, 0x00000000}, + {0x9320, 0x00000000}, + {0x9324, 0x00000000}, + {0x9328, 0x00000000}, + {0x932c, 0x00000000}, + {0x9330, 0x00000000}, + {0x9334, 0x00000000}, + {0x9338, 0x00000000}, + {0x933c, 0x00000000}, + {0x9340, 0x00000000}, + {0x9344, 0x00000000}, + {0x9348, 0x00000000}, + {0x934c, 0x00000000}, + {0x9350, 0x00000000}, + {0x9354, 0x00000000}, + {0x9358, 0x00000000}, + {0x935c, 0x00000000}, + {0x9360, 0x00000000}, + {0x9364, 0x00000000}, + {0x9368, 0x00000000}, + {0x936c, 0x00000000}, + {0x9370, 0x00000000}, + {0x9374, 0x00000000}, + {0x9378, 0x00000000}, + {0x937c, 0x00000000}, + {0x9380, 0x00000000}, + {0x9384, 0x00000000}, + {0x9388, 0x00000000}, + {0x938c, 0x00000000}, + {0x9390, 0x00000000}, + {0x9394, 0x00000000}, + {0x9398, 0x00000000}, + {0x939c, 0x00000000}, + {0x93a0, 0x00000000}, + {0x93a4, 0x00000000}, + {0x93a8, 0x00000000}, + {0x93ac, 0x00000000}, + {0x93b0, 0x00000000}, + {0x93b4, 0x00000000}, + {0x93b8, 0x00000000}, + {0x93bc, 0x00000000}, + {0x93c0, 0x00000000}, + {0x93c4, 0x00000000}, + {0x93c8, 0x00000000}, + {0x93cc, 0x00000000}, + {0x93d0, 0x00000000}, + {0x93d4, 0x00000000}, + {0x93d8, 0x00000000}, + {0x93dc, 0x00000000}, + 
{0x93e0, 0x00000000}, + {0x93e4, 0x00000000}, + {0x93e8, 0x00000000}, + {0x93ec, 0x00000000}, + {0x93f0, 0x00000000}, + {0x93f4, 0x00000000}, + {0x93f8, 0x00000000}, + {0x93fc, 0x00000000}, + {0x9400, 0x00000000}, + {0x9404, 0x00000000}, + {0x9408, 0x00000000}, + {0x940c, 0x00000000}, + {0x9410, 0x00000000}, + {0x9414, 0x00000000}, + {0x9418, 0x00000000}, + {0x941c, 0x00000000}, + {0x9420, 0x00000000}, + {0x9424, 0x00000000}, + {0x9428, 0x00000000}, + {0x942c, 0x00000000}, + {0x9430, 0x00000000}, + {0x9434, 0x00000000}, + {0x9438, 0x00000000}, + {0x943c, 0x00000000}, + {0x9440, 0x00000000}, + {0x9444, 0x00000000}, + {0x9448, 0x00000000}, + {0x944c, 0x00000000}, + {0x9450, 0x00000000}, + {0x9454, 0x00000000}, + {0x9458, 0x00000000}, + {0x945c, 0x00000000}, + {0x9460, 0x00000000}, + {0x9464, 0x00000000}, + {0x9468, 0x00000000}, + {0x946c, 0x00000000}, + {0x9470, 0x00000000}, + {0x9474, 0x00000000}, + {0x9478, 0x00000000}, + {0x947c, 0x00000000}, + {0x9480, 0x00000000}, + {0x9484, 0x00000000}, + {0x9488, 0x00000000}, + {0x948c, 0x00000000}, + {0x9490, 0x00000000}, + {0x9494, 0x00000000}, + {0x9498, 0x00000000}, + {0x949c, 0x00000000}, + {0x94a0, 0x00000000}, + {0x94a4, 0x00000000}, + {0x94a8, 0x00000000}, + {0x94ac, 0x00000000}, + {0x94b0, 0x00000000}, + {0x94b4, 0x00000000}, + {0x94b8, 0x00000000}, + {0x94bc, 0x00000000}, + {0x81d8, 0x00000000}, + {0x82d8, 0x00000000}, + {0x9f04, 0x2b251f19}, + {0x9f08, 0x433d3731}, + {0x9f0c, 0x5b554f49}, + {0x9f10, 0x736d6761}, + {0x9f14, 0x7f7f7f79}, + {0x9f18, 0x120f7f7f}, + {0x9f1c, 0x1e1b1815}, + {0x9f20, 0x2a272421}, + {0x9f24, 0x3633302d}, + {0x9f28, 0x3f3f3c39}, + {0x9f2c, 0x3f3f3f3f}, + {0x8088, 0x00000110}, + {0x8000, 0x00000008}, + {0x8080, 0x00000005}, + {0x8500, 0x00060009}, + {0x8504, 0x000418b0}, + {0x8508, 0x00089c00}, + {0x850c, 0x43000004}, + {0x8510, 0x4b044a00}, + {0x8514, 0x40098603}, + {0x8518, 0x4b05e01f}, + {0x851c, 0x400b8703}, + {0x8520, 0x4b00e01f}, + {0x8524, 0x43800004}, + {0x8528, 0x4c000007}, + {0x852c, 0x43000004}, + {0x8530, 0x57007430}, + {0x8534, 0x73000006}, + {0x8538, 0x50550004}, + {0x853c, 0xb4163000}, + {0x8540, 0xe37ea510}, + {0x8544, 0xf117f017}, + {0x8548, 0xf317f217}, + {0x854c, 0xf517f417}, + {0x8550, 0xf717f617}, + {0x8554, 0xf917f817}, + {0x8558, 0xfb17fa17}, + {0x855c, 0xfd17fc17}, + {0x8560, 0xf117f017}, + {0x8564, 0xf317f217}, + {0x8568, 0xa503f417}, + {0x856c, 0xf116f016}, + {0x8570, 0x304e0001}, + {0x8574, 0x30873053}, + {0x8578, 0x30ab30a8}, + {0x857c, 0x30b330ae}, + {0x8580, 0x30ba30b6}, + {0x8584, 0x30d430c7}, + {0x8588, 0x310d3100}, + {0x858c, 0x31ed3112}, + {0x8590, 0x320a31f1}, + {0x8594, 0x3243320b}, + {0x8598, 0x31e631b1}, + {0x859c, 0x5b00e283}, + {0x85a0, 0xe2d15500}, + {0x85a4, 0xe2830001}, + {0x85a8, 0x5b10e2e3}, + {0x85ac, 0x20987410}, + {0x85b0, 0xe3750200}, + {0x85b4, 0x00002080}, + {0x85b8, 0x23f0e375}, + {0x85bc, 0xe3750001}, + {0x85c0, 0x000023f0}, + {0x85c4, 0x5507e375}, + {0x85c8, 0xe2d5e2d5}, + {0x85cc, 0x20887410}, + {0x85d0, 0xe3750200}, + {0x85d4, 0x000123f0}, + {0x85d8, 0x23f0e375}, + {0x85dc, 0xe3750000}, + {0x85e0, 0xe2d55517}, + {0x85e4, 0x4e004f02}, + {0x85e8, 0x52015302}, + {0x85ec, 0x7508e2d9}, + {0x85f0, 0x74207900}, + {0x85f4, 0x57005710}, + {0x85f8, 0x75fbe375}, + {0x85fc, 0x23f07410}, + {0x8600, 0xe3750001}, + {0x8604, 0x000023f0}, + {0x8608, 0x7430e375}, + {0x860c, 0x5b100001}, + {0x8610, 0x20907410}, + {0x8614, 0xe3750000}, + {0x8618, 0x000123f0}, + {0x861c, 0x23f0e375}, + {0x8620, 0xe3750000}, + {0x8624, 0xe2d55507}, + {0x8628, 0x7410e2d5}, + {0x862c, 0x02002098}, + 
{0x8630, 0x23f0e375}, + {0x8634, 0xe3750001}, + {0x8638, 0x000023f0}, + {0x863c, 0x5517e375}, + {0x8640, 0x4f02e2d5}, + {0x8644, 0x53024e00}, + {0x8648, 0xe2d95201}, + {0x864c, 0x30787509}, + {0x8650, 0xe2e3e283}, + {0x8654, 0xe27b0001}, + {0x8658, 0x0001e2e3}, + {0x865c, 0x5b30e28f}, + {0x8660, 0xe2d15500}, + {0x8664, 0xe28f0001}, + {0x8668, 0x0001e312}, + {0x866c, 0x4380e287}, + {0x8670, 0x0001e312}, + {0x8674, 0x30e2e283}, + {0x8678, 0xe3600023}, + {0x867c, 0x54ed0002}, + {0x8680, 0x00230baa}, + {0x8684, 0x0002e360}, + {0x8688, 0xe27be330}, + {0x868c, 0xe2830001}, + {0x8690, 0x002230dd}, + {0x8694, 0x0002e360}, + {0x8698, 0x0baa54ec}, + {0x869c, 0xe3600022}, + {0x86a0, 0xe3300002}, + {0x86a4, 0x0001e27b}, + {0x86a8, 0x0baae283}, + {0x86ac, 0x6d0f6c67}, + {0x86b0, 0xe360e2e3}, + {0x86b4, 0xe2e36c8b}, + {0x86b8, 0x0bace360}, + {0x86bc, 0x6d0f6cb3}, + {0x86c0, 0xe360e2e3}, + {0x86c4, 0x6cdb0bad}, + {0x86c8, 0xe2e36d0f}, + {0x86cc, 0x6cf7e360}, + {0x86d0, 0xe2e36d0f}, + {0x86d4, 0x6c09e360}, + {0x86d8, 0xe2e36d00}, + {0x86dc, 0x6c25e360}, + {0x86e0, 0xe360e2e3}, + {0x86e4, 0x6c4df8ca}, + {0x86e8, 0xe360e2e3}, + {0x86ec, 0x6c75f9d3}, + {0x86f0, 0xe360e2e3}, + {0x86f4, 0xe2e36c99}, + {0x86f8, 0xe330e360}, + {0x86fc, 0x0001e27b}, + {0x8700, 0x314de28f}, + {0x8704, 0xe3650022}, + {0x8708, 0x54ec0002}, + {0x870c, 0x00220baa}, + {0x8710, 0x0002e365}, + {0x8714, 0xe287e330}, + {0x8718, 0xe28f0001}, + {0x871c, 0xe3303139}, + {0x8720, 0x0001e287}, + {0x8724, 0x0ba6e28f}, + {0x8728, 0x21e07410}, + {0x872c, 0x21e80009}, + {0x8730, 0x6e670009}, + {0x8734, 0xe32b6f0f}, + {0x8738, 0xe365e312}, + {0x873c, 0x21e07410}, + {0x8740, 0x21e8000a}, + {0x8744, 0x6e77000a}, + {0x8748, 0xe312e32b}, + {0x874c, 0x7410e365}, + {0x8750, 0x000b21e0}, + {0x8754, 0x000b21e8}, + {0x8758, 0xe32b6e8b}, + {0x875c, 0xe365e312}, + {0x8760, 0x21e07410}, + {0x8764, 0x21e8000c}, + {0x8768, 0x6e9f000c}, + {0x876c, 0xe312e32b}, + {0x8770, 0x0baae365}, + {0x8774, 0x21e07410}, + {0x8778, 0x21e8000d}, + {0x877c, 0x6eb3000d}, + {0x8780, 0xe32b6f0f}, + {0x8784, 0xe365e312}, + {0x8788, 0x21e07410}, + {0x878c, 0x21e8000e}, + {0x8790, 0x6ec7000e}, + {0x8794, 0xe312e32b}, + {0x8798, 0x0bace365}, + {0x879c, 0x21e07410}, + {0x87a0, 0x21e8000f}, + {0x87a4, 0x6edb000f}, + {0x87a8, 0xe32b6f0f}, + {0x87ac, 0xe365e312}, + {0x87b0, 0x21e07410}, + {0x87b4, 0x21e80010}, + {0x87b8, 0x6eef0010}, + {0x87bc, 0xe312e32b}, + {0x87c0, 0xe365e365}, + {0x87c4, 0x21e07410}, + {0x87c8, 0x21e80013}, + {0x87cc, 0x6e110013}, + {0x87d0, 0xe32b6f00}, + {0x87d4, 0xe365e312}, + {0x87d8, 0x7410e365}, + {0x87dc, 0x001421e0}, + {0x87e0, 0x001421e8}, + {0x87e4, 0xe32b6e25}, + {0x87e8, 0xe365e312}, + {0x87ec, 0x7410fb8c}, + {0x87f0, 0x001521e0}, + {0x87f4, 0x001521e8}, + {0x87f8, 0xe32b6e39}, + {0x87fc, 0xe365e312}, + {0x8800, 0x21e07410}, + {0x8804, 0x21e80016}, + {0x8808, 0x6e4d0016}, + {0x880c, 0xe312e32b}, + {0x8810, 0xfc86e365}, + {0x8814, 0x21e07410}, + {0x8818, 0x21e80017}, + {0x881c, 0x6e610017}, + {0x8820, 0xe312e32b}, + {0x8824, 0x7410e365}, + {0x8828, 0x001821e0}, + {0x882c, 0x001821e8}, + {0x8830, 0xe32b6e75}, + {0x8834, 0xe365e312}, + {0x8838, 0x21e07410}, + {0x883c, 0x21e80019}, + {0x8840, 0x6e890019}, + {0x8844, 0xe312e32b}, + {0x8848, 0x7410e365}, + {0x884c, 0x001a21e0}, + {0x8850, 0x001a21e8}, + {0x8854, 0xe32b6e99}, + {0x8858, 0xe365e312}, + {0x885c, 0xe287e330}, + {0x8860, 0x00040001}, + {0x8864, 0x0007775c}, + {0x8868, 0x62006220}, + {0x886c, 0x55010004}, + {0x8870, 0xe2d15b00}, + {0x8874, 0x66055b40}, + {0x8878, 0x62000007}, + {0x887c, 0xe3506300}, + 
{0x8880, 0xe2d10004}, + {0x8884, 0x0a010900}, + {0x8888, 0x0d000b40}, + {0x888c, 0x00320e01}, + {0x8890, 0x95060004}, + {0x8894, 0x00074380}, + {0x8898, 0x00044d01}, + {0x889c, 0x00074300}, + {0x88a0, 0x05a30562}, + {0x88a4, 0xe3509617}, + {0x88a8, 0xe2d10004}, + {0x88ac, 0x06a20007}, + {0x88b0, 0xe35007a3}, + {0x88b4, 0xe2d10004}, + {0x88b8, 0x0002e340}, + {0x88bc, 0x4380e348}, + {0x88c0, 0x4d000007}, + {0x88c4, 0x43000004}, + {0x88c8, 0x00017900}, + {0x88cc, 0x775e0004}, + {0x88d0, 0x000731b3}, + {0x88d4, 0x07a306a2}, + {0x88d8, 0xe29331dd}, + {0x88dc, 0x73000005}, + {0x88e0, 0xe2930001}, + {0x88e4, 0x5d000006}, + {0x88e8, 0x42f70004}, + {0x88ec, 0x6c000005}, + {0x88f0, 0x42000004}, + {0x88f4, 0x0004e2ab}, + {0x88f8, 0x00074380}, + {0x88fc, 0x4a004e00}, + {0x8900, 0x00064c00}, + {0x8904, 0x60007f00}, + {0x8908, 0x00046f00}, + {0x890c, 0x00054300}, + {0x8910, 0x00017300}, + {0x8914, 0xe2930001}, + {0x8918, 0x5d010006}, + {0x891c, 0x61006002}, + {0x8920, 0x00055601}, + {0x8924, 0xe2ab7710}, + {0x8928, 0x73000005}, + {0x892c, 0x43800004}, + {0x8930, 0x5e010007}, + {0x8934, 0x4d205e00}, + {0x8938, 0x4a084e20}, + {0x893c, 0x4c3f4960}, + {0x8940, 0x00064301}, + {0x8944, 0x63807f01}, + {0x8948, 0x00046010}, + {0x894c, 0x00064300}, + {0x8950, 0x00077402}, + {0x8954, 0x40004001}, + {0x8958, 0x0006ab00}, + {0x895c, 0x00077404}, + {0x8960, 0x40004001}, + {0x8964, 0x0004ab00}, + {0x8968, 0x00074380}, + {0x896c, 0x4e004d00}, + {0x8970, 0x4c004a00}, + {0x8974, 0x00064300}, + {0x8978, 0x63007f00}, + {0x897c, 0x6f006000}, + {0x8980, 0x43000004}, + {0x8984, 0x00040001}, + {0x8988, 0x42bf4380}, + {0x898c, 0x48400007}, + {0x8990, 0x42ef0004}, + {0x8994, 0x4d100007}, + {0x8998, 0x42000004}, + {0x899c, 0x5f800006}, + {0x89a0, 0x5a010007}, + {0x89a4, 0x00044a08}, + {0x89a8, 0x00054300}, + {0x89ac, 0x73807381}, + {0x89b0, 0x003f9300}, + {0x89b4, 0x00000000}, + {0x89b8, 0x00000000}, + {0x89bc, 0x00020000}, + {0x89c0, 0x5f800006}, + {0x89c4, 0x99005f00}, + {0x89c8, 0x43800004}, + {0x89cc, 0x00074280}, + {0x89d0, 0x00044800}, + {0x89d4, 0x000742ef}, + {0x89d8, 0x00044d00}, + {0x89dc, 0x00064200}, + {0x89e0, 0x60005f00}, + {0x89e4, 0x5a000007}, + {0x89e8, 0x48004a00}, + {0x89ec, 0x43000004}, + {0x89f0, 0x73000005}, + {0x89f4, 0x43800001}, + {0x89f8, 0x78006505}, + {0x89fc, 0x7a007900}, + {0x8a00, 0x43007b00}, + {0x8a04, 0x43800001}, + {0x8a08, 0x43006500}, + {0x8a0c, 0x43800001}, + {0x8a10, 0x7c006405}, + {0x8a14, 0x7e007d00}, + {0x8a18, 0x43007f00}, + {0x8a1c, 0x43800001}, + {0x8a20, 0x43006400}, + {0x8a24, 0x00060001}, + {0x8a28, 0x55025601}, + {0x8a2c, 0x00055400}, + {0x8a30, 0x7e127f00}, + {0x8a34, 0x76007710}, + {0x8a38, 0x74007500}, + {0x8a3c, 0x42700004}, + {0x8a40, 0x73810005}, + {0x8a44, 0x00047380}, + {0x8a48, 0x93004200}, + {0x8a4c, 0x77000005}, + {0x8a50, 0x56000006}, + {0x8a54, 0x00060001}, + {0x8a58, 0x5f005f80}, + {0x8a5c, 0x00059900}, + {0x8a60, 0x00067300}, + {0x8a64, 0x63006380}, + {0x8a68, 0x00019800}, + {0x8a6c, 0x7b484380}, + {0x8a70, 0x79007a90}, + {0x8a74, 0x43007802}, + {0x8a78, 0x32cd5503}, + {0x8a7c, 0x7b384380}, + {0x8a80, 0x79007a80}, + {0x8a84, 0x43007802}, + {0x8a88, 0x32cd5513}, + {0x8a8c, 0x7b404380}, + {0x8a90, 0x79007a00}, + {0x8a94, 0x43007802}, + {0x8a98, 0x74315523}, + {0x8a9c, 0x8e007430}, + {0x8aa0, 0x74010001}, + {0x8aa4, 0x8e007400}, + {0x8aa8, 0x74310001}, + {0x8aac, 0x8e007430}, + {0x8ab0, 0x57020001}, + {0x8ab4, 0x97005700}, + {0x8ab8, 0x42ef0001}, + {0x8abc, 0x56005610}, + {0x8ac0, 0x8c004200}, + {0x8ac4, 0x4f780001}, + {0x8ac8, 0x53884e00}, + {0x8acc, 0x5b205201}, + 
{0x8ad0, 0x5480e2f2}, + {0x8ad4, 0x54815400}, + {0x8ad8, 0x54825400}, + {0x8adc, 0xe2fd5400}, + {0x8ae0, 0x3012bf1d}, + {0x8ae4, 0xe2bee2b6}, + {0x8ae8, 0xe2d9e2c6}, + {0x8aec, 0x5523e359}, + {0x8af0, 0x5525e2cd}, + {0x8af4, 0xe359e2d9}, + {0x8af8, 0x54bf0001}, + {0x8afc, 0x54a354c0}, + {0x8b00, 0x54a454c1}, + {0x8b04, 0xbf074c18}, + {0x8b08, 0x54a454c2}, + {0x8b0c, 0x54c1bf04}, + {0x8b10, 0xbf0154a3}, + {0x8b14, 0x54dfe36a}, + {0x8b18, 0x54bf0001}, + {0x8b1c, 0x050a54e5}, + {0x8b20, 0x000154df}, + {0x8b24, 0x43807b80}, + {0x8b28, 0x7e007f40}, + {0x8b2c, 0x7c027d00}, + {0x8b30, 0x5b404300}, + {0x8b34, 0x5c015501}, + {0x8b38, 0x5480e2dd}, + {0x8b3c, 0x54815400}, + {0x8b40, 0x54825400}, + {0x8b44, 0x7b005400}, + {0x8b48, 0xbfe8e2fd}, + {0x8b4c, 0x56103012}, + {0x8b50, 0x8c005600}, + {0x8b54, 0xe36d0001}, + {0x8b58, 0xe36de36d}, + {0x8b5c, 0x0001e36d}, + {0x8b60, 0x57005704}, + {0x8b64, 0x57089700}, + {0x8b68, 0x97005700}, + {0x8b6c, 0x57805781}, + {0x8b70, 0x43809700}, + {0x8b74, 0x5c010007}, + {0x8b78, 0x00045c00}, + {0x8b7c, 0x00014300}, + {0x8b80, 0x0007427f}, + {0x8b84, 0x62006280}, + {0x8b88, 0x00049200}, + {0x8b8c, 0x00014200}, + {0x8b90, 0x0007427f}, + {0x8b94, 0x63146394}, + {0x8b98, 0x00049100}, + {0x8b9c, 0x00014200}, + {0x8ba0, 0x79010004}, + {0x8ba4, 0xe3757420}, + {0x8ba8, 0x57005710}, + {0x8bac, 0xe375e375}, + {0x8bb0, 0x549f0001}, + {0x8bb4, 0x5c015400}, + {0x8bb8, 0x540054df}, + {0x8bbc, 0x00015c02}, + {0x8bc0, 0x07145c01}, + {0x8bc4, 0x5c025400}, + {0x8bc8, 0x5c020001}, + {0x8bcc, 0x54000714}, + {0x8bd0, 0x00015c01}, + {0x8bd4, 0x4c184c98}, + {0x8bd8, 0x003f0001}, + {0x8bdc, 0x00000000}, + {0x8be0, 0x00000000}, + {0x8be4, 0x00020000}, + {0x8be8, 0x00000001}, + {0x8bec, 0x00000000}, + {0x8bf0, 0x00000000}, + {0x8bf4, 0x00000000}, + {0x8bf8, 0x00010000}, + {0x8bfc, 0x5c020004}, + {0x8c00, 0x66076204}, + {0x8c04, 0x743070c0}, + {0x8c08, 0x0c010901}, + {0x8c0c, 0x00010ba6}, + {0x8080, 0x00000004}, + {0x8080, 0x00000000}, + {0x8088, 0x00000000}, +}; + +static const struct rtw89_txpwr_byrate_cfg rtw89_8852a_txpwr_byrate[] = { + { 0, 0, 0, 0, 4, 0x50505050, }, + { 0, 0, 1, 0, 4, 0x50505050, }, + { 0, 0, 1, 4, 4, 0x484c5050, }, + { 0, 0, 2, 0, 4, 0x50505050, }, + { 0, 0, 2, 4, 4, 0x44484c50, }, + { 0, 0, 2, 8, 4, 0x34383c40, }, + { 0, 0, 3, 0, 4, 0x50505050, }, + { 0, 1, 2, 0, 4, 0x50505050, }, + { 0, 1, 2, 4, 4, 0x44484c50, }, + { 0, 1, 2, 8, 4, 0x34383c40, }, + { 0, 1, 3, 0, 4, 0x50505050, }, + { 0, 0, 4, 1, 4, 0x00000000, }, + { 0, 0, 4, 0, 1, 0x00000000, }, + { 1, 0, 1, 0, 4, 0x50505050, }, + { 1, 0, 1, 4, 4, 0x484c5050, }, + { 1, 0, 2, 0, 4, 0x50505050, }, + { 1, 0, 2, 4, 4, 0x44484c50, }, + { 1, 0, 2, 8, 4, 0x34383c40, }, + { 1, 0, 3, 0, 4, 0x50505050, }, + { 1, 1, 2, 0, 4, 0x50505050, }, + { 1, 1, 2, 4, 4, 0x44484c50, }, + { 1, 1, 2, 8, 4, 0x34383c40, }, + { 1, 1, 3, 0, 4, 0x50505050, }, + { 1, 0, 4, 0, 4, 0x00000000, }, +}; + +static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11}, + {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, +}; + +static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11}, + {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8}, + {0, 1, 
1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, +}; + +static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11}, + {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, +}; + +static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11}, + {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, +}; + +static const u8 _txpwr_track_delta_swingidx_2gb_n[] = { + 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, + 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7}; + +static const u8 _txpwr_track_delta_swingidx_2gb_p[] = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; + +static const u8 _txpwr_track_delta_swingidx_2ga_n[] = { + 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5}; + +static const u8 _txpwr_track_delta_swingidx_2ga_p[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}; + +static const u8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = { + 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, + 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7}; + +static const u8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; + +static const u8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = { + 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5}; + +static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}; + +const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { + [0][0][0][0][0][0] = 56, + [0][0][0][0][0][1] = 56, + [0][0][0][0][0][2] = 56, + [0][0][0][0][0][3] = 56, + [0][0][0][0][0][4] = 56, + [0][0][0][0][0][5] = 56, + [0][0][0][0][0][6] = 56, + [0][0][0][0][0][7] = 56, + [0][0][0][0][0][8] = 56, + [0][0][0][0][0][9] = 56, + [0][0][0][0][0][10] = 56, + [0][0][0][0][0][11] = 56, + [0][0][0][0][0][12] = 48, + [0][0][0][0][0][13] = 76, + [0][1][0][0][0][0] = 44, + [0][1][0][0][0][1] = 44, + [0][1][0][0][0][2] = 44, + [0][1][0][0][0][3] = 44, + [0][1][0][0][0][4] = 44, + [0][1][0][0][0][5] = 44, + [0][1][0][0][0][6] = 44, + [0][1][0][0][0][7] = 44, + [0][1][0][0][0][8] = 44, + [0][1][0][0][0][9] = 44, + [0][1][0][0][0][10] = 44, + [0][1][0][0][0][11] = 44, + [0][1][0][0][0][12] = 38, + [0][1][0][0][0][13] = 64, + [1][0][0][0][0][0] = 0, + [1][0][0][0][0][1] = 0, + [1][0][0][0][0][2] = 58, + [1][0][0][0][0][3] = 58, + [1][0][0][0][0][4] = 58, + [1][0][0][0][0][5] = 58, + [1][0][0][0][0][6] = 46, + [1][0][0][0][0][7] = 46, + [1][0][0][0][0][8] = 46, + [1][0][0][0][0][9] = 32, + [1][0][0][0][0][10] = 32, + [1][0][0][0][0][11] = 0, + [1][0][0][0][0][12] = 0, + [1][0][0][0][0][13] = 0, + [1][1][0][0][0][0] = 0, + [1][1][0][0][0][1] = 0, + [1][1][0][0][0][2] = 46, + [1][1][0][0][0][3] = 46, + [1][1][0][0][0][4] = 46, + [1][1][0][0][0][5] = 46, + 
[1][1][0][0][0][6] = 46, + [1][1][0][0][0][7] = 46, + [1][1][0][0][0][8] = 46, + [1][1][0][0][0][9] = 24, + [1][1][0][0][0][10] = 24, + [1][1][0][0][0][11] = 0, + [1][1][0][0][0][12] = 0, + [1][1][0][0][0][13] = 0, + [0][0][1][0][0][0] = 58, + [0][0][1][0][0][1] = 58, + [0][0][1][0][0][2] = 58, + [0][0][1][0][0][3] = 58, + [0][0][1][0][0][4] = 58, + [0][0][1][0][0][5] = 58, + [0][0][1][0][0][6] = 58, + [0][0][1][0][0][7] = 58, + [0][0][1][0][0][8] = 58, + [0][0][1][0][0][9] = 58, + [0][0][1][0][0][10] = 58, + [0][0][1][0][0][11] = 56, + [0][0][1][0][0][12] = 52, + [0][0][1][0][0][13] = 0, + [0][1][1][0][0][0] = 46, + [0][1][1][0][0][1] = 46, + [0][1][1][0][0][2] = 46, + [0][1][1][0][0][3] = 46, + [0][1][1][0][0][4] = 46, + [0][1][1][0][0][5] = 46, + [0][1][1][0][0][6] = 46, + [0][1][1][0][0][7] = 46, + [0][1][1][0][0][8] = 46, + [0][1][1][0][0][9] = 46, + [0][1][1][0][0][10] = 46, + [0][1][1][0][0][11] = 42, + [0][1][1][0][0][12] = 40, + [0][1][1][0][0][13] = 0, + [0][0][2][0][0][0] = 58, + [0][0][2][0][0][1] = 58, + [0][0][2][0][0][2] = 58, + [0][0][2][0][0][3] = 58, + [0][0][2][0][0][4] = 58, + [0][0][2][0][0][5] = 58, + [0][0][2][0][0][6] = 58, + [0][0][2][0][0][7] = 58, + [0][0][2][0][0][8] = 58, + [0][0][2][0][0][9] = 58, + [0][0][2][0][0][10] = 58, + [0][0][2][0][0][11] = 54, + [0][0][2][0][0][12] = 50, + [0][0][2][0][0][13] = 0, + [0][1][2][0][0][0] = 46, + [0][1][2][0][0][1] = 46, + [0][1][2][0][0][2] = 46, + [0][1][2][0][0][3] = 46, + [0][1][2][0][0][4] = 46, + [0][1][2][0][0][5] = 46, + [0][1][2][0][0][6] = 46, + [0][1][2][0][0][7] = 46, + [0][1][2][0][0][8] = 46, + [0][1][2][0][0][9] = 46, + [0][1][2][0][0][10] = 46, + [0][1][2][0][0][11] = 42, + [0][1][2][0][0][12] = 40, + [0][1][2][0][0][13] = 0, + [0][1][2][1][0][0] = 34, + [0][1][2][1][0][1] = 34, + [0][1][2][1][0][2] = 34, + [0][1][2][1][0][3] = 34, + [0][1][2][1][0][4] = 34, + [0][1][2][1][0][5] = 34, + [0][1][2][1][0][6] = 34, + [0][1][2][1][0][7] = 34, + [0][1][2][1][0][8] = 34, + [0][1][2][1][0][9] = 34, + [0][1][2][1][0][10] = 34, + [0][1][2][1][0][11] = 34, + [0][1][2][1][0][12] = 34, + [0][1][2][1][0][13] = 0, + [1][0][2][0][0][0] = 0, + [1][0][2][0][0][1] = 0, + [1][0][2][0][0][2] = 56, + [1][0][2][0][0][3] = 56, + [1][0][2][0][0][4] = 58, + [1][0][2][0][0][5] = 58, + [1][0][2][0][0][6] = 54, + [1][0][2][0][0][7] = 50, + [1][0][2][0][0][8] = 50, + [1][0][2][0][0][9] = 42, + [1][0][2][0][0][10] = 40, + [1][0][2][0][0][11] = 0, + [1][0][2][0][0][12] = 0, + [1][0][2][0][0][13] = 0, + [1][1][2][0][0][0] = 0, + [1][1][2][0][0][1] = 0, + [1][1][2][0][0][2] = 46, + [1][1][2][0][0][3] = 46, + [1][1][2][0][0][4] = 46, + [1][1][2][0][0][5] = 46, + [1][1][2][0][0][6] = 46, + [1][1][2][0][0][7] = 46, + [1][1][2][0][0][8] = 46, + [1][1][2][0][0][9] = 38, + [1][1][2][0][0][10] = 36, + [1][1][2][0][0][11] = 0, + [1][1][2][0][0][12] = 0, + [1][1][2][0][0][13] = 0, + [1][1][2][1][0][0] = 0, + [1][1][2][1][0][1] = 0, + [1][1][2][1][0][2] = 34, + [1][1][2][1][0][3] = 34, + [1][1][2][1][0][4] = 34, + [1][1][2][1][0][5] = 34, + [1][1][2][1][0][6] = 34, + [1][1][2][1][0][7] = 34, + [1][1][2][1][0][8] = 34, + [1][1][2][1][0][9] = 34, + [1][1][2][1][0][10] = 34, + [1][1][2][1][0][11] = 0, + [1][1][2][1][0][12] = 0, + [1][1][2][1][0][13] = 0, + [0][0][0][0][2][0] = 76, + [0][0][0][0][1][0] = 56, + [0][0][0][0][3][0] = 68, + [0][0][0][0][5][0] = 76, + [0][0][0][0][6][0] = 56, + [0][0][0][0][9][0] = 56, + [0][0][0][0][8][0] = 60, + [0][0][0][0][11][0] = 56, + [0][0][0][0][2][1] = 76, + [0][0][0][0][1][1] = 56, + [0][0][0][0][3][1] = 68, + 
[0][0][0][0][5][1] = 76, + [0][0][0][0][6][1] = 56, + [0][0][0][0][9][1] = 56, + [0][0][0][0][8][1] = 60, + [0][0][0][0][11][1] = 56, + [0][0][0][0][2][2] = 76, + [0][0][0][0][1][2] = 56, + [0][0][0][0][3][2] = 68, + [0][0][0][0][5][2] = 76, + [0][0][0][0][6][2] = 56, + [0][0][0][0][9][2] = 56, + [0][0][0][0][8][2] = 60, + [0][0][0][0][11][2] = 56, + [0][0][0][0][2][3] = 76, + [0][0][0][0][1][3] = 56, + [0][0][0][0][3][3] = 68, + [0][0][0][0][5][3] = 76, + [0][0][0][0][6][3] = 56, + [0][0][0][0][9][3] = 56, + [0][0][0][0][8][3] = 60, + [0][0][0][0][11][3] = 56, + [0][0][0][0][2][4] = 76, + [0][0][0][0][1][4] = 56, + [0][0][0][0][3][4] = 68, + [0][0][0][0][5][4] = 76, + [0][0][0][0][6][4] = 56, + [0][0][0][0][9][4] = 56, + [0][0][0][0][8][4] = 60, + [0][0][0][0][11][4] = 56, + [0][0][0][0][2][5] = 76, + [0][0][0][0][1][5] = 56, + [0][0][0][0][3][5] = 68, + [0][0][0][0][5][5] = 76, + [0][0][0][0][6][5] = 56, + [0][0][0][0][9][5] = 56, + [0][0][0][0][8][5] = 60, + [0][0][0][0][11][5] = 56, + [0][0][0][0][2][6] = 76, + [0][0][0][0][1][6] = 56, + [0][0][0][0][3][6] = 68, + [0][0][0][0][5][6] = 76, + [0][0][0][0][6][6] = 56, + [0][0][0][0][9][6] = 56, + [0][0][0][0][8][6] = 60, + [0][0][0][0][11][6] = 56, + [0][0][0][0][2][7] = 76, + [0][0][0][0][1][7] = 56, + [0][0][0][0][3][7] = 68, + [0][0][0][0][5][7] = 76, + [0][0][0][0][6][7] = 56, + [0][0][0][0][9][7] = 56, + [0][0][0][0][8][7] = 60, + [0][0][0][0][11][7] = 56, + [0][0][0][0][2][8] = 76, + [0][0][0][0][1][8] = 56, + [0][0][0][0][3][8] = 68, + [0][0][0][0][5][8] = 76, + [0][0][0][0][6][8] = 56, + [0][0][0][0][9][8] = 56, + [0][0][0][0][8][8] = 60, + [0][0][0][0][11][8] = 56, + [0][0][0][0][2][9] = 76, + [0][0][0][0][1][9] = 56, + [0][0][0][0][3][9] = 68, + [0][0][0][0][5][9] = 76, + [0][0][0][0][6][9] = 56, + [0][0][0][0][9][9] = 56, + [0][0][0][0][8][9] = 60, + [0][0][0][0][11][9] = 56, + [0][0][0][0][2][10] = 76, + [0][0][0][0][1][10] = 56, + [0][0][0][0][3][10] = 68, + [0][0][0][0][5][10] = 76, + [0][0][0][0][6][10] = 56, + [0][0][0][0][9][10] = 56, + [0][0][0][0][8][10] = 60, + [0][0][0][0][11][10] = 56, + [0][0][0][0][2][11] = 68, + [0][0][0][0][1][11] = 56, + [0][0][0][0][3][11] = 68, + [0][0][0][0][5][11] = 68, + [0][0][0][0][6][11] = 56, + [0][0][0][0][9][11] = 56, + [0][0][0][0][8][11] = 60, + [0][0][0][0][11][11] = 56, + [0][0][0][0][2][12] = 48, + [0][0][0][0][1][12] = 56, + [0][0][0][0][3][12] = 68, + [0][0][0][0][5][12] = 48, + [0][0][0][0][6][12] = 56, + [0][0][0][0][9][12] = 56, + [0][0][0][0][8][12] = 60, + [0][0][0][0][11][12] = 56, + [0][0][0][0][2][13] = 127, + [0][0][0][0][1][13] = 127, + [0][0][0][0][3][13] = 76, + [0][0][0][0][5][13] = 127, + [0][0][0][0][6][13] = 127, + [0][0][0][0][9][13] = 127, + [0][0][0][0][8][13] = 127, + [0][0][0][0][11][13] = 127, + [0][1][0][0][2][0] = 74, + [0][1][0][0][1][0] = 44, + [0][1][0][0][3][0] = 56, + [0][1][0][0][5][0] = 74, + [0][1][0][0][6][0] = 44, + [0][1][0][0][9][0] = 44, + [0][1][0][0][8][0] = 48, + [0][1][0][0][11][0] = 44, + [0][1][0][0][2][1] = 76, + [0][1][0][0][1][1] = 44, + [0][1][0][0][3][1] = 56, + [0][1][0][0][5][1] = 76, + [0][1][0][0][6][1] = 44, + [0][1][0][0][9][1] = 44, + [0][1][0][0][8][1] = 48, + [0][1][0][0][11][1] = 44, + [0][1][0][0][2][2] = 76, + [0][1][0][0][1][2] = 44, + [0][1][0][0][3][2] = 56, + [0][1][0][0][5][2] = 76, + [0][1][0][0][6][2] = 44, + [0][1][0][0][9][2] = 44, + [0][1][0][0][8][2] = 48, + [0][1][0][0][11][2] = 44, + [0][1][0][0][2][3] = 76, + [0][1][0][0][1][3] = 44, + [0][1][0][0][3][3] = 56, + [0][1][0][0][5][3] = 76, + 
[0][1][0][0][6][3] = 44, + [0][1][0][0][9][3] = 44, + [0][1][0][0][8][3] = 48, + [0][1][0][0][11][3] = 44, + [0][1][0][0][2][4] = 76, + [0][1][0][0][1][4] = 44, + [0][1][0][0][3][4] = 56, + [0][1][0][0][5][4] = 76, + [0][1][0][0][6][4] = 44, + [0][1][0][0][9][4] = 44, + [0][1][0][0][8][4] = 48, + [0][1][0][0][11][4] = 44, + [0][1][0][0][2][5] = 76, + [0][1][0][0][1][5] = 44, + [0][1][0][0][3][5] = 56, + [0][1][0][0][5][5] = 76, + [0][1][0][0][6][5] = 44, + [0][1][0][0][9][5] = 44, + [0][1][0][0][8][5] = 48, + [0][1][0][0][11][5] = 44, + [0][1][0][0][2][6] = 76, + [0][1][0][0][1][6] = 44, + [0][1][0][0][3][6] = 56, + [0][1][0][0][5][6] = 76, + [0][1][0][0][6][6] = 44, + [0][1][0][0][9][6] = 44, + [0][1][0][0][8][6] = 48, + [0][1][0][0][11][6] = 44, + [0][1][0][0][2][7] = 76, + [0][1][0][0][1][7] = 44, + [0][1][0][0][3][7] = 56, + [0][1][0][0][5][7] = 76, + [0][1][0][0][6][7] = 44, + [0][1][0][0][9][7] = 44, + [0][1][0][0][8][7] = 48, + [0][1][0][0][11][7] = 44, + [0][1][0][0][2][8] = 76, + [0][1][0][0][1][8] = 44, + [0][1][0][0][3][8] = 56, + [0][1][0][0][5][8] = 76, + [0][1][0][0][6][8] = 44, + [0][1][0][0][9][8] = 44, + [0][1][0][0][8][8] = 48, + [0][1][0][0][11][8] = 44, + [0][1][0][0][2][9] = 76, + [0][1][0][0][1][9] = 44, + [0][1][0][0][3][9] = 56, + [0][1][0][0][5][9] = 76, + [0][1][0][0][6][9] = 44, + [0][1][0][0][9][9] = 44, + [0][1][0][0][8][9] = 48, + [0][1][0][0][11][9] = 44, + [0][1][0][0][2][10] = 62, + [0][1][0][0][1][10] = 44, + [0][1][0][0][3][10] = 56, + [0][1][0][0][5][10] = 62, + [0][1][0][0][6][10] = 44, + [0][1][0][0][9][10] = 44, + [0][1][0][0][8][10] = 48, + [0][1][0][0][11][10] = 44, + [0][1][0][0][2][11] = 52, + [0][1][0][0][1][11] = 44, + [0][1][0][0][3][11] = 56, + [0][1][0][0][5][11] = 52, + [0][1][0][0][6][11] = 44, + [0][1][0][0][9][11] = 44, + [0][1][0][0][8][11] = 48, + [0][1][0][0][11][11] = 44, + [0][1][0][0][2][12] = 38, + [0][1][0][0][1][12] = 44, + [0][1][0][0][3][12] = 56, + [0][1][0][0][5][12] = 38, + [0][1][0][0][6][12] = 44, + [0][1][0][0][9][12] = 44, + [0][1][0][0][8][12] = 48, + [0][1][0][0][11][12] = 44, + [0][1][0][0][2][13] = 127, + [0][1][0][0][1][13] = 127, + [0][1][0][0][3][13] = 64, + [0][1][0][0][5][13] = 127, + [0][1][0][0][6][13] = 127, + [0][1][0][0][9][13] = 127, + [0][1][0][0][8][13] = 127, + [0][1][0][0][11][13] = 127, + [1][0][0][0][2][0] = 127, + [1][0][0][0][1][0] = 127, + [1][0][0][0][3][0] = 127, + [1][0][0][0][5][0] = 127, + [1][0][0][0][6][0] = 127, + [1][0][0][0][9][0] = 127, + [1][0][0][0][8][0] = 127, + [1][0][0][0][11][0] = 127, + [1][0][0][0][2][1] = 127, + [1][0][0][0][1][1] = 127, + [1][0][0][0][3][1] = 127, + [1][0][0][0][5][1] = 127, + [1][0][0][0][6][1] = 127, + [1][0][0][0][9][1] = 127, + [1][0][0][0][8][1] = 127, + [1][0][0][0][11][1] = 127, + [1][0][0][0][2][2] = 60, + [1][0][0][0][1][2] = 58, + [1][0][0][0][3][2] = 68, + [1][0][0][0][5][2] = 60, + [1][0][0][0][6][2] = 58, + [1][0][0][0][9][2] = 58, + [1][0][0][0][8][2] = 60, + [1][0][0][0][11][2] = 58, + [1][0][0][0][2][3] = 60, + [1][0][0][0][1][3] = 58, + [1][0][0][0][3][3] = 68, + [1][0][0][0][5][3] = 60, + [1][0][0][0][6][3] = 58, + [1][0][0][0][9][3] = 58, + [1][0][0][0][8][3] = 60, + [1][0][0][0][11][3] = 58, + [1][0][0][0][2][4] = 60, + [1][0][0][0][1][4] = 58, + [1][0][0][0][3][4] = 68, + [1][0][0][0][5][4] = 60, + [1][0][0][0][6][4] = 58, + [1][0][0][0][9][4] = 58, + [1][0][0][0][8][4] = 60, + [1][0][0][0][11][4] = 58, + [1][0][0][0][2][5] = 60, + [1][0][0][0][1][5] = 58, + [1][0][0][0][3][5] = 68, + [1][0][0][0][5][5] = 60, + [1][0][0][0][6][5] = 58, + 
[1][0][0][0][9][5] = 58, + [1][0][0][0][8][5] = 60, + [1][0][0][0][11][5] = 58, + [1][0][0][0][2][6] = 46, + [1][0][0][0][1][6] = 58, + [1][0][0][0][3][6] = 68, + [1][0][0][0][5][6] = 46, + [1][0][0][0][6][6] = 58, + [1][0][0][0][9][6] = 58, + [1][0][0][0][8][6] = 60, + [1][0][0][0][11][6] = 58, + [1][0][0][0][2][7] = 46, + [1][0][0][0][1][7] = 58, + [1][0][0][0][3][7] = 68, + [1][0][0][0][5][7] = 46, + [1][0][0][0][6][7] = 58, + [1][0][0][0][9][7] = 58, + [1][0][0][0][8][7] = 60, + [1][0][0][0][11][7] = 58, + [1][0][0][0][2][8] = 46, + [1][0][0][0][1][8] = 58, + [1][0][0][0][3][8] = 68, + [1][0][0][0][5][8] = 46, + [1][0][0][0][6][8] = 58, + [1][0][0][0][9][8] = 58, + [1][0][0][0][8][8] = 60, + [1][0][0][0][11][8] = 58, + [1][0][0][0][2][9] = 32, + [1][0][0][0][1][9] = 58, + [1][0][0][0][3][9] = 68, + [1][0][0][0][5][9] = 32, + [1][0][0][0][6][9] = 58, + [1][0][0][0][9][9] = 58, + [1][0][0][0][8][9] = 60, + [1][0][0][0][11][9] = 58, + [1][0][0][0][2][10] = 32, + [1][0][0][0][1][10] = 58, + [1][0][0][0][3][10] = 68, + [1][0][0][0][5][10] = 32, + [1][0][0][0][6][10] = 58, + [1][0][0][0][9][10] = 58, + [1][0][0][0][8][10] = 60, + [1][0][0][0][11][10] = 58, + [1][0][0][0][2][11] = 127, + [1][0][0][0][1][11] = 127, + [1][0][0][0][3][11] = 127, + [1][0][0][0][5][11] = 127, + [1][0][0][0][6][11] = 127, + [1][0][0][0][9][11] = 127, + [1][0][0][0][8][11] = 127, + [1][0][0][0][11][11] = 127, + [1][0][0][0][2][12] = 127, + [1][0][0][0][1][12] = 127, + [1][0][0][0][3][12] = 127, + [1][0][0][0][5][12] = 127, + [1][0][0][0][6][12] = 127, + [1][0][0][0][9][12] = 127, + [1][0][0][0][8][12] = 127, + [1][0][0][0][11][12] = 127, + [1][0][0][0][2][13] = 127, + [1][0][0][0][1][13] = 127, + [1][0][0][0][3][13] = 127, + [1][0][0][0][5][13] = 127, + [1][0][0][0][6][13] = 127, + [1][0][0][0][9][13] = 127, + [1][0][0][0][8][13] = 127, + [1][0][0][0][11][13] = 127, + [1][1][0][0][2][0] = 127, + [1][1][0][0][1][0] = 127, + [1][1][0][0][3][0] = 127, + [1][1][0][0][5][0] = 127, + [1][1][0][0][6][0] = 127, + [1][1][0][0][9][0] = 127, + [1][1][0][0][8][0] = 127, + [1][1][0][0][11][0] = 127, + [1][1][0][0][2][1] = 127, + [1][1][0][0][1][1] = 127, + [1][1][0][0][3][1] = 127, + [1][1][0][0][5][1] = 127, + [1][1][0][0][6][1] = 127, + [1][1][0][0][9][1] = 127, + [1][1][0][0][8][1] = 127, + [1][1][0][0][11][1] = 127, + [1][1][0][0][2][2] = 48, + [1][1][0][0][1][2] = 46, + [1][1][0][0][3][2] = 56, + [1][1][0][0][5][2] = 48, + [1][1][0][0][6][2] = 46, + [1][1][0][0][9][2] = 46, + [1][1][0][0][8][2] = 48, + [1][1][0][0][11][2] = 46, + [1][1][0][0][2][3] = 48, + [1][1][0][0][1][3] = 46, + [1][1][0][0][3][3] = 56, + [1][1][0][0][5][3] = 48, + [1][1][0][0][6][3] = 46, + [1][1][0][0][9][3] = 46, + [1][1][0][0][8][3] = 48, + [1][1][0][0][11][3] = 46, + [1][1][0][0][2][4] = 48, + [1][1][0][0][1][4] = 46, + [1][1][0][0][3][4] = 56, + [1][1][0][0][5][4] = 48, + [1][1][0][0][6][4] = 46, + [1][1][0][0][9][4] = 46, + [1][1][0][0][8][4] = 48, + [1][1][0][0][11][4] = 46, + [1][1][0][0][2][5] = 58, + [1][1][0][0][1][5] = 46, + [1][1][0][0][3][5] = 56, + [1][1][0][0][5][5] = 58, + [1][1][0][0][6][5] = 46, + [1][1][0][0][9][5] = 46, + [1][1][0][0][8][5] = 48, + [1][1][0][0][11][5] = 46, + [1][1][0][0][2][6] = 46, + [1][1][0][0][1][6] = 46, + [1][1][0][0][3][6] = 56, + [1][1][0][0][5][6] = 46, + [1][1][0][0][6][6] = 46, + [1][1][0][0][9][6] = 46, + [1][1][0][0][8][6] = 48, + [1][1][0][0][11][6] = 46, + [1][1][0][0][2][7] = 46, + [1][1][0][0][1][7] = 46, + [1][1][0][0][3][7] = 56, + [1][1][0][0][5][7] = 46, + [1][1][0][0][6][7] = 46, + 
[1][1][0][0][9][7] = 46, + [1][1][0][0][8][7] = 48, + [1][1][0][0][11][7] = 46, + [1][1][0][0][2][8] = 46, + [1][1][0][0][1][8] = 46, + [1][1][0][0][3][8] = 56, + [1][1][0][0][5][8] = 46, + [1][1][0][0][6][8] = 46, + [1][1][0][0][9][8] = 46, + [1][1][0][0][8][8] = 48, + [1][1][0][0][11][8] = 46, + [1][1][0][0][2][9] = 24, + [1][1][0][0][1][9] = 46, + [1][1][0][0][3][9] = 56, + [1][1][0][0][5][9] = 24, + [1][1][0][0][6][9] = 46, + [1][1][0][0][9][9] = 46, + [1][1][0][0][8][9] = 48, + [1][1][0][0][11][9] = 46, + [1][1][0][0][2][10] = 24, + [1][1][0][0][1][10] = 46, + [1][1][0][0][3][10] = 56, + [1][1][0][0][5][10] = 24, + [1][1][0][0][6][10] = 46, + [1][1][0][0][9][10] = 46, + [1][1][0][0][8][10] = 48, + [1][1][0][0][11][10] = 46, + [1][1][0][0][2][11] = 127, + [1][1][0][0][1][11] = 127, + [1][1][0][0][3][11] = 127, + [1][1][0][0][5][11] = 127, + [1][1][0][0][6][11] = 127, + [1][1][0][0][9][11] = 127, + [1][1][0][0][8][11] = 127, + [1][1][0][0][11][11] = 127, + [1][1][0][0][2][12] = 127, + [1][1][0][0][1][12] = 127, + [1][1][0][0][3][12] = 127, + [1][1][0][0][5][12] = 127, + [1][1][0][0][6][12] = 127, + [1][1][0][0][9][12] = 127, + [1][1][0][0][8][12] = 127, + [1][1][0][0][11][12] = 127, + [1][1][0][0][2][13] = 127, + [1][1][0][0][1][13] = 127, + [1][1][0][0][3][13] = 127, + [1][1][0][0][5][13] = 127, + [1][1][0][0][6][13] = 127, + [1][1][0][0][9][13] = 127, + [1][1][0][0][8][13] = 127, + [1][1][0][0][11][13] = 127, + [0][0][1][0][2][0] = 66, + [0][0][1][0][1][0] = 58, + [0][0][1][0][3][0] = 76, + [0][0][1][0][5][0] = 66, + [0][0][1][0][6][0] = 58, + [0][0][1][0][9][0] = 58, + [0][0][1][0][8][0] = 60, + [0][0][1][0][11][0] = 58, + [0][0][1][0][2][1] = 66, + [0][0][1][0][1][1] = 58, + [0][0][1][0][3][1] = 76, + [0][0][1][0][5][1] = 66, + [0][0][1][0][6][1] = 58, + [0][0][1][0][9][1] = 58, + [0][0][1][0][8][1] = 60, + [0][0][1][0][11][1] = 58, + [0][0][1][0][2][2] = 70, + [0][0][1][0][1][2] = 58, + [0][0][1][0][3][2] = 76, + [0][0][1][0][5][2] = 70, + [0][0][1][0][6][2] = 58, + [0][0][1][0][9][2] = 58, + [0][0][1][0][8][2] = 60, + [0][0][1][0][11][2] = 58, + [0][0][1][0][2][3] = 74, + [0][0][1][0][1][3] = 58, + [0][0][1][0][3][3] = 76, + [0][0][1][0][5][3] = 74, + [0][0][1][0][6][3] = 58, + [0][0][1][0][9][3] = 58, + [0][0][1][0][8][3] = 60, + [0][0][1][0][11][3] = 58, + [0][0][1][0][2][4] = 78, + [0][0][1][0][1][4] = 58, + [0][0][1][0][3][4] = 76, + [0][0][1][0][5][4] = 78, + [0][0][1][0][6][4] = 58, + [0][0][1][0][9][4] = 58, + [0][0][1][0][8][4] = 60, + [0][0][1][0][11][4] = 58, + [0][0][1][0][2][5] = 78, + [0][0][1][0][1][5] = 58, + [0][0][1][0][3][5] = 76, + [0][0][1][0][5][5] = 78, + [0][0][1][0][6][5] = 58, + [0][0][1][0][9][5] = 58, + [0][0][1][0][8][5] = 60, + [0][0][1][0][11][5] = 58, + [0][0][1][0][2][6] = 78, + [0][0][1][0][1][6] = 58, + [0][0][1][0][3][6] = 76, + [0][0][1][0][5][6] = 78, + [0][0][1][0][6][6] = 58, + [0][0][1][0][9][6] = 58, + [0][0][1][0][8][6] = 60, + [0][0][1][0][11][6] = 58, + [0][0][1][0][2][7] = 74, + [0][0][1][0][1][7] = 58, + [0][0][1][0][3][7] = 76, + [0][0][1][0][5][7] = 74, + [0][0][1][0][6][7] = 58, + [0][0][1][0][9][7] = 58, + [0][0][1][0][8][7] = 60, + [0][0][1][0][11][7] = 58, + [0][0][1][0][2][8] = 70, + [0][0][1][0][1][8] = 58, + [0][0][1][0][3][8] = 76, + [0][0][1][0][5][8] = 70, + [0][0][1][0][6][8] = 58, + [0][0][1][0][9][8] = 58, + [0][0][1][0][8][8] = 60, + [0][0][1][0][11][8] = 58, + [0][0][1][0][2][9] = 66, + [0][0][1][0][1][9] = 58, + [0][0][1][0][3][9] = 76, + [0][0][1][0][5][9] = 66, + [0][0][1][0][6][9] = 58, + [0][0][1][0][9][9] = 58, + 
[0][0][1][0][8][9] = 60, + [0][0][1][0][11][9] = 58, + [0][0][1][0][2][10] = 66, + [0][0][1][0][1][10] = 58, + [0][0][1][0][3][10] = 76, + [0][0][1][0][5][10] = 66, + [0][0][1][0][6][10] = 58, + [0][0][1][0][9][10] = 58, + [0][0][1][0][8][10] = 60, + [0][0][1][0][11][10] = 58, + [0][0][1][0][2][11] = 56, + [0][0][1][0][1][11] = 58, + [0][0][1][0][3][11] = 76, + [0][0][1][0][5][11] = 56, + [0][0][1][0][6][11] = 58, + [0][0][1][0][9][11] = 58, + [0][0][1][0][8][11] = 60, + [0][0][1][0][11][11] = 58, + [0][0][1][0][2][12] = 52, + [0][0][1][0][1][12] = 58, + [0][0][1][0][3][12] = 76, + [0][0][1][0][5][12] = 52, + [0][0][1][0][6][12] = 58, + [0][0][1][0][9][12] = 58, + [0][0][1][0][8][12] = 60, + [0][0][1][0][11][12] = 58, + [0][0][1][0][2][13] = 127, + [0][0][1][0][1][13] = 127, + [0][0][1][0][3][13] = 127, + [0][0][1][0][5][13] = 127, + [0][0][1][0][6][13] = 127, + [0][0][1][0][9][13] = 127, + [0][0][1][0][8][13] = 127, + [0][0][1][0][11][13] = 127, + [0][1][1][0][2][0] = 62, + [0][1][1][0][1][0] = 46, + [0][1][1][0][3][0] = 64, + [0][1][1][0][5][0] = 62, + [0][1][1][0][6][0] = 46, + [0][1][1][0][9][0] = 46, + [0][1][1][0][8][0] = 48, + [0][1][1][0][11][0] = 46, + [0][1][1][0][2][1] = 62, + [0][1][1][0][1][1] = 46, + [0][1][1][0][3][1] = 64, + [0][1][1][0][5][1] = 62, + [0][1][1][0][6][1] = 46, + [0][1][1][0][9][1] = 46, + [0][1][1][0][8][1] = 48, + [0][1][1][0][11][1] = 46, + [0][1][1][0][2][2] = 66, + [0][1][1][0][1][2] = 46, + [0][1][1][0][3][2] = 64, + [0][1][1][0][5][2] = 66, + [0][1][1][0][6][2] = 46, + [0][1][1][0][9][2] = 46, + [0][1][1][0][8][2] = 48, + [0][1][1][0][11][2] = 46, + [0][1][1][0][2][3] = 70, + [0][1][1][0][1][3] = 46, + [0][1][1][0][3][3] = 64, + [0][1][1][0][5][3] = 70, + [0][1][1][0][6][3] = 46, + [0][1][1][0][9][3] = 46, + [0][1][1][0][8][3] = 48, + [0][1][1][0][11][3] = 46, + [0][1][1][0][2][4] = 78, + [0][1][1][0][1][4] = 46, + [0][1][1][0][3][4] = 64, + [0][1][1][0][5][4] = 78, + [0][1][1][0][6][4] = 46, + [0][1][1][0][9][4] = 46, + [0][1][1][0][8][4] = 48, + [0][1][1][0][11][4] = 46, + [0][1][1][0][2][5] = 78, + [0][1][1][0][1][5] = 46, + [0][1][1][0][3][5] = 64, + [0][1][1][0][5][5] = 78, + [0][1][1][0][6][5] = 46, + [0][1][1][0][9][5] = 46, + [0][1][1][0][8][5] = 48, + [0][1][1][0][11][5] = 46, + [0][1][1][0][2][6] = 78, + [0][1][1][0][1][6] = 46, + [0][1][1][0][3][6] = 64, + [0][1][1][0][5][6] = 78, + [0][1][1][0][6][6] = 46, + [0][1][1][0][9][6] = 46, + [0][1][1][0][8][6] = 48, + [0][1][1][0][11][6] = 46, + [0][1][1][0][2][7] = 70, + [0][1][1][0][1][7] = 46, + [0][1][1][0][3][7] = 64, + [0][1][1][0][5][7] = 70, + [0][1][1][0][6][7] = 46, + [0][1][1][0][9][7] = 46, + [0][1][1][0][8][7] = 48, + [0][1][1][0][11][7] = 46, + [0][1][1][0][2][8] = 66, + [0][1][1][0][1][8] = 46, + [0][1][1][0][3][8] = 64, + [0][1][1][0][5][8] = 66, + [0][1][1][0][6][8] = 46, + [0][1][1][0][9][8] = 46, + [0][1][1][0][8][8] = 48, + [0][1][1][0][11][8] = 46, + [0][1][1][0][2][9] = 62, + [0][1][1][0][1][9] = 46, + [0][1][1][0][3][9] = 64, + [0][1][1][0][5][9] = 62, + [0][1][1][0][6][9] = 46, + [0][1][1][0][9][9] = 46, + [0][1][1][0][8][9] = 48, + [0][1][1][0][11][9] = 46, + [0][1][1][0][2][10] = 62, + [0][1][1][0][1][10] = 46, + [0][1][1][0][3][10] = 64, + [0][1][1][0][5][10] = 62, + [0][1][1][0][6][10] = 46, + [0][1][1][0][9][10] = 46, + [0][1][1][0][8][10] = 48, + [0][1][1][0][11][10] = 46, + [0][1][1][0][2][11] = 42, + [0][1][1][0][1][11] = 46, + [0][1][1][0][3][11] = 64, + [0][1][1][0][5][11] = 42, + [0][1][1][0][6][11] = 46, + [0][1][1][0][9][11] = 46, + [0][1][1][0][8][11] = 48, + 
[0][1][1][0][11][11] = 46, + [0][1][1][0][2][12] = 40, + [0][1][1][0][1][12] = 46, + [0][1][1][0][3][12] = 64, + [0][1][1][0][5][12] = 40, + [0][1][1][0][6][12] = 46, + [0][1][1][0][9][12] = 46, + [0][1][1][0][8][12] = 48, + [0][1][1][0][11][12] = 46, + [0][1][1][0][2][13] = 127, + [0][1][1][0][1][13] = 127, + [0][1][1][0][3][13] = 127, + [0][1][1][0][5][13] = 127, + [0][1][1][0][6][13] = 127, + [0][1][1][0][9][13] = 127, + [0][1][1][0][8][13] = 127, + [0][1][1][0][11][13] = 127, + [0][0][2][0][2][0] = 66, + [0][0][2][0][1][0] = 58, + [0][0][2][0][3][0] = 76, + [0][0][2][0][5][0] = 66, + [0][0][2][0][6][0] = 58, + [0][0][2][0][9][0] = 58, + [0][0][2][0][8][0] = 60, + [0][0][2][0][11][0] = 58, + [0][0][2][0][2][1] = 66, + [0][0][2][0][1][1] = 58, + [0][0][2][0][3][1] = 76, + [0][0][2][0][5][1] = 66, + [0][0][2][0][6][1] = 58, + [0][0][2][0][9][1] = 58, + [0][0][2][0][8][1] = 60, + [0][0][2][0][11][1] = 58, + [0][0][2][0][2][2] = 70, + [0][0][2][0][1][2] = 58, + [0][0][2][0][3][2] = 76, + [0][0][2][0][5][2] = 70, + [0][0][2][0][6][2] = 58, + [0][0][2][0][9][2] = 58, + [0][0][2][0][8][2] = 60, + [0][0][2][0][11][2] = 58, + [0][0][2][0][2][3] = 74, + [0][0][2][0][1][3] = 58, + [0][0][2][0][3][3] = 76, + [0][0][2][0][5][3] = 74, + [0][0][2][0][6][3] = 58, + [0][0][2][0][9][3] = 58, + [0][0][2][0][8][3] = 60, + [0][0][2][0][11][3] = 58, + [0][0][2][0][2][4] = 76, + [0][0][2][0][1][4] = 58, + [0][0][2][0][3][4] = 76, + [0][0][2][0][5][4] = 76, + [0][0][2][0][6][4] = 58, + [0][0][2][0][9][4] = 58, + [0][0][2][0][8][4] = 60, + [0][0][2][0][11][4] = 58, + [0][0][2][0][2][5] = 76, + [0][0][2][0][1][5] = 58, + [0][0][2][0][3][5] = 76, + [0][0][2][0][5][5] = 76, + [0][0][2][0][6][5] = 58, + [0][0][2][0][9][5] = 58, + [0][0][2][0][8][5] = 60, + [0][0][2][0][11][5] = 58, + [0][0][2][0][2][6] = 76, + [0][0][2][0][1][6] = 58, + [0][0][2][0][3][6] = 76, + [0][0][2][0][5][6] = 76, + [0][0][2][0][6][6] = 58, + [0][0][2][0][9][6] = 58, + [0][0][2][0][8][6] = 60, + [0][0][2][0][11][6] = 58, + [0][0][2][0][2][7] = 74, + [0][0][2][0][1][7] = 58, + [0][0][2][0][3][7] = 76, + [0][0][2][0][5][7] = 74, + [0][0][2][0][6][7] = 58, + [0][0][2][0][9][7] = 58, + [0][0][2][0][8][7] = 60, + [0][0][2][0][11][7] = 58, + [0][0][2][0][2][8] = 70, + [0][0][2][0][1][8] = 58, + [0][0][2][0][3][8] = 76, + [0][0][2][0][5][8] = 70, + [0][0][2][0][6][8] = 58, + [0][0][2][0][9][8] = 58, + [0][0][2][0][8][8] = 60, + [0][0][2][0][11][8] = 58, + [0][0][2][0][2][9] = 66, + [0][0][2][0][1][9] = 58, + [0][0][2][0][3][9] = 76, + [0][0][2][0][5][9] = 66, + [0][0][2][0][6][9] = 58, + [0][0][2][0][9][9] = 58, + [0][0][2][0][8][9] = 60, + [0][0][2][0][11][9] = 58, + [0][0][2][0][2][10] = 66, + [0][0][2][0][1][10] = 58, + [0][0][2][0][3][10] = 76, + [0][0][2][0][5][10] = 66, + [0][0][2][0][6][10] = 58, + [0][0][2][0][9][10] = 58, + [0][0][2][0][8][10] = 60, + [0][0][2][0][11][10] = 58, + [0][0][2][0][2][11] = 54, + [0][0][2][0][1][11] = 58, + [0][0][2][0][3][11] = 76, + [0][0][2][0][5][11] = 54, + [0][0][2][0][6][11] = 58, + [0][0][2][0][9][11] = 58, + [0][0][2][0][8][11] = 60, + [0][0][2][0][11][11] = 58, + [0][0][2][0][2][12] = 50, + [0][0][2][0][1][12] = 58, + [0][0][2][0][3][12] = 76, + [0][0][2][0][5][12] = 50, + [0][0][2][0][6][12] = 58, + [0][0][2][0][9][12] = 58, + [0][0][2][0][8][12] = 60, + [0][0][2][0][11][12] = 58, + [0][0][2][0][2][13] = 127, + [0][0][2][0][1][13] = 127, + [0][0][2][0][3][13] = 127, + [0][0][2][0][5][13] = 127, + [0][0][2][0][6][13] = 127, + [0][0][2][0][9][13] = 127, + [0][0][2][0][8][13] = 127, + 
[0][0][2][0][11][13] = 127, + [0][1][2][0][2][0] = 62, + [0][1][2][0][1][0] = 46, + [0][1][2][0][3][0] = 64, + [0][1][2][0][5][0] = 62, + [0][1][2][0][6][0] = 46, + [0][1][2][0][9][0] = 46, + [0][1][2][0][8][0] = 48, + [0][1][2][0][11][0] = 46, + [0][1][2][0][2][1] = 62, + [0][1][2][0][1][1] = 46, + [0][1][2][0][3][1] = 64, + [0][1][2][0][5][1] = 62, + [0][1][2][0][6][1] = 46, + [0][1][2][0][9][1] = 46, + [0][1][2][0][8][1] = 48, + [0][1][2][0][11][1] = 46, + [0][1][2][0][2][2] = 66, + [0][1][2][0][1][2] = 46, + [0][1][2][0][3][2] = 64, + [0][1][2][0][5][2] = 66, + [0][1][2][0][6][2] = 46, + [0][1][2][0][9][2] = 46, + [0][1][2][0][8][2] = 48, + [0][1][2][0][11][2] = 46, + [0][1][2][0][2][3] = 70, + [0][1][2][0][1][3] = 46, + [0][1][2][0][3][3] = 64, + [0][1][2][0][5][3] = 70, + [0][1][2][0][6][3] = 46, + [0][1][2][0][9][3] = 46, + [0][1][2][0][8][3] = 48, + [0][1][2][0][11][3] = 46, + [0][1][2][0][2][4] = 76, + [0][1][2][0][1][4] = 46, + [0][1][2][0][3][4] = 64, + [0][1][2][0][5][4] = 76, + [0][1][2][0][6][4] = 46, + [0][1][2][0][9][4] = 46, + [0][1][2][0][8][4] = 48, + [0][1][2][0][11][4] = 46, + [0][1][2][0][2][5] = 76, + [0][1][2][0][1][5] = 46, + [0][1][2][0][3][5] = 64, + [0][1][2][0][5][5] = 76, + [0][1][2][0][6][5] = 46, + [0][1][2][0][9][5] = 46, + [0][1][2][0][8][5] = 48, + [0][1][2][0][11][5] = 46, + [0][1][2][0][2][6] = 76, + [0][1][2][0][1][6] = 46, + [0][1][2][0][3][6] = 64, + [0][1][2][0][5][6] = 76, + [0][1][2][0][6][6] = 46, + [0][1][2][0][9][6] = 46, + [0][1][2][0][8][6] = 48, + [0][1][2][0][11][6] = 46, + [0][1][2][0][2][7] = 68, + [0][1][2][0][1][7] = 46, + [0][1][2][0][3][7] = 64, + [0][1][2][0][5][7] = 68, + [0][1][2][0][6][7] = 46, + [0][1][2][0][9][7] = 46, + [0][1][2][0][8][7] = 48, + [0][1][2][0][11][7] = 46, + [0][1][2][0][2][8] = 64, + [0][1][2][0][1][8] = 46, + [0][1][2][0][3][8] = 64, + [0][1][2][0][5][8] = 64, + [0][1][2][0][6][8] = 46, + [0][1][2][0][9][8] = 46, + [0][1][2][0][8][8] = 48, + [0][1][2][0][11][8] = 46, + [0][1][2][0][2][9] = 60, + [0][1][2][0][1][9] = 46, + [0][1][2][0][3][9] = 64, + [0][1][2][0][5][9] = 60, + [0][1][2][0][6][9] = 46, + [0][1][2][0][9][9] = 46, + [0][1][2][0][8][9] = 48, + [0][1][2][0][11][9] = 46, + [0][1][2][0][2][10] = 60, + [0][1][2][0][1][10] = 46, + [0][1][2][0][3][10] = 64, + [0][1][2][0][5][10] = 60, + [0][1][2][0][6][10] = 46, + [0][1][2][0][9][10] = 46, + [0][1][2][0][8][10] = 48, + [0][1][2][0][11][10] = 46, + [0][1][2][0][2][11] = 42, + [0][1][2][0][1][11] = 46, + [0][1][2][0][3][11] = 64, + [0][1][2][0][5][11] = 42, + [0][1][2][0][6][11] = 46, + [0][1][2][0][9][11] = 46, + [0][1][2][0][8][11] = 48, + [0][1][2][0][11][11] = 46, + [0][1][2][0][2][12] = 40, + [0][1][2][0][1][12] = 46, + [0][1][2][0][3][12] = 64, + [0][1][2][0][5][12] = 40, + [0][1][2][0][6][12] = 46, + [0][1][2][0][9][12] = 46, + [0][1][2][0][8][12] = 48, + [0][1][2][0][11][12] = 46, + [0][1][2][0][2][13] = 127, + [0][1][2][0][1][13] = 127, + [0][1][2][0][3][13] = 127, + [0][1][2][0][5][13] = 127, + [0][1][2][0][6][13] = 127, + [0][1][2][0][9][13] = 127, + [0][1][2][0][8][13] = 127, + [0][1][2][0][11][13] = 127, + [0][1][2][1][2][0] = 62, + [0][1][2][1][1][0] = 34, + [0][1][2][1][3][0] = 64, + [0][1][2][1][5][0] = 62, + [0][1][2][1][6][0] = 34, + [0][1][2][1][9][0] = 34, + [0][1][2][1][8][0] = 36, + [0][1][2][1][11][0] = 34, + [0][1][2][1][2][1] = 62, + [0][1][2][1][1][1] = 34, + [0][1][2][1][3][1] = 64, + [0][1][2][1][5][1] = 62, + [0][1][2][1][6][1] = 34, + [0][1][2][1][9][1] = 34, + [0][1][2][1][8][1] = 36, + [0][1][2][1][11][1] = 34, + 
[0][1][2][1][2][2] = 66, + [0][1][2][1][1][2] = 34, + [0][1][2][1][3][2] = 64, + [0][1][2][1][5][2] = 66, + [0][1][2][1][6][2] = 34, + [0][1][2][1][9][2] = 34, + [0][1][2][1][8][2] = 36, + [0][1][2][1][11][2] = 34, + [0][1][2][1][2][3] = 70, + [0][1][2][1][1][3] = 34, + [0][1][2][1][3][3] = 64, + [0][1][2][1][5][3] = 70, + [0][1][2][1][6][3] = 34, + [0][1][2][1][9][3] = 34, + [0][1][2][1][8][3] = 36, + [0][1][2][1][11][3] = 34, + [0][1][2][1][2][4] = 76, + [0][1][2][1][1][4] = 34, + [0][1][2][1][3][4] = 64, + [0][1][2][1][5][4] = 76, + [0][1][2][1][6][4] = 34, + [0][1][2][1][9][4] = 34, + [0][1][2][1][8][4] = 36, + [0][1][2][1][11][4] = 34, + [0][1][2][1][2][5] = 76, + [0][1][2][1][1][5] = 34, + [0][1][2][1][3][5] = 64, + [0][1][2][1][5][5] = 76, + [0][1][2][1][6][5] = 34, + [0][1][2][1][9][5] = 34, + [0][1][2][1][8][5] = 36, + [0][1][2][1][11][5] = 34, + [0][1][2][1][2][6] = 76, + [0][1][2][1][1][6] = 34, + [0][1][2][1][3][6] = 64, + [0][1][2][1][5][6] = 76, + [0][1][2][1][6][6] = 34, + [0][1][2][1][9][6] = 34, + [0][1][2][1][8][6] = 36, + [0][1][2][1][11][6] = 34, + [0][1][2][1][2][7] = 68, + [0][1][2][1][1][7] = 34, + [0][1][2][1][3][7] = 64, + [0][1][2][1][5][7] = 68, + [0][1][2][1][6][7] = 34, + [0][1][2][1][9][7] = 34, + [0][1][2][1][8][7] = 36, + [0][1][2][1][11][7] = 34, + [0][1][2][1][2][8] = 64, + [0][1][2][1][1][8] = 34, + [0][1][2][1][3][8] = 64, + [0][1][2][1][5][8] = 64, + [0][1][2][1][6][8] = 34, + [0][1][2][1][9][8] = 34, + [0][1][2][1][8][8] = 36, + [0][1][2][1][11][8] = 34, + [0][1][2][1][2][9] = 60, + [0][1][2][1][1][9] = 34, + [0][1][2][1][3][9] = 64, + [0][1][2][1][5][9] = 60, + [0][1][2][1][6][9] = 34, + [0][1][2][1][9][9] = 34, + [0][1][2][1][8][9] = 36, + [0][1][2][1][11][9] = 34, + [0][1][2][1][2][10] = 60, + [0][1][2][1][1][10] = 34, + [0][1][2][1][3][10] = 64, + [0][1][2][1][5][10] = 60, + [0][1][2][1][6][10] = 34, + [0][1][2][1][9][10] = 34, + [0][1][2][1][8][10] = 36, + [0][1][2][1][11][10] = 34, + [0][1][2][1][2][11] = 42, + [0][1][2][1][1][11] = 34, + [0][1][2][1][3][11] = 64, + [0][1][2][1][5][11] = 42, + [0][1][2][1][6][11] = 34, + [0][1][2][1][9][11] = 34, + [0][1][2][1][8][11] = 36, + [0][1][2][1][11][11] = 34, + [0][1][2][1][2][12] = 40, + [0][1][2][1][1][12] = 34, + [0][1][2][1][3][12] = 64, + [0][1][2][1][5][12] = 40, + [0][1][2][1][6][12] = 34, + [0][1][2][1][9][12] = 34, + [0][1][2][1][8][12] = 36, + [0][1][2][1][11][12] = 34, + [0][1][2][1][2][13] = 127, + [0][1][2][1][1][13] = 127, + [0][1][2][1][3][13] = 127, + [0][1][2][1][5][13] = 127, + [0][1][2][1][6][13] = 127, + [0][1][2][1][9][13] = 127, + [0][1][2][1][8][13] = 127, + [0][1][2][1][11][13] = 127, + [1][0][2][0][2][0] = 127, + [1][0][2][0][1][0] = 127, + [1][0][2][0][3][0] = 127, + [1][0][2][0][5][0] = 127, + [1][0][2][0][6][0] = 127, + [1][0][2][0][9][0] = 127, + [1][0][2][0][8][0] = 127, + [1][0][2][0][11][0] = 127, + [1][0][2][0][2][1] = 127, + [1][0][2][0][1][1] = 127, + [1][0][2][0][3][1] = 127, + [1][0][2][0][5][1] = 127, + [1][0][2][0][6][1] = 127, + [1][0][2][0][9][1] = 127, + [1][0][2][0][8][1] = 127, + [1][0][2][0][11][1] = 127, + [1][0][2][0][2][2] = 56, + [1][0][2][0][1][2] = 58, + [1][0][2][0][3][2] = 76, + [1][0][2][0][5][2] = 56, + [1][0][2][0][6][2] = 58, + [1][0][2][0][9][2] = 58, + [1][0][2][0][8][2] = 60, + [1][0][2][0][11][2] = 58, + [1][0][2][0][2][3] = 56, + [1][0][2][0][1][3] = 58, + [1][0][2][0][3][3] = 76, + [1][0][2][0][5][3] = 56, + [1][0][2][0][6][3] = 58, + [1][0][2][0][9][3] = 58, + [1][0][2][0][8][3] = 60, + [1][0][2][0][11][3] = 58, + [1][0][2][0][2][4] = 60, + 
[1][0][2][0][1][4] = 58, + [1][0][2][0][3][4] = 76, + [1][0][2][0][5][4] = 60, + [1][0][2][0][6][4] = 58, + [1][0][2][0][9][4] = 58, + [1][0][2][0][8][4] = 60, + [1][0][2][0][11][4] = 58, + [1][0][2][0][2][5] = 64, + [1][0][2][0][1][5] = 58, + [1][0][2][0][3][5] = 76, + [1][0][2][0][5][5] = 64, + [1][0][2][0][6][5] = 58, + [1][0][2][0][9][5] = 58, + [1][0][2][0][8][5] = 60, + [1][0][2][0][11][5] = 58, + [1][0][2][0][2][6] = 54, + [1][0][2][0][1][6] = 58, + [1][0][2][0][3][6] = 76, + [1][0][2][0][5][6] = 54, + [1][0][2][0][6][6] = 58, + [1][0][2][0][9][6] = 58, + [1][0][2][0][8][6] = 60, + [1][0][2][0][11][6] = 58, + [1][0][2][0][2][7] = 50, + [1][0][2][0][1][7] = 58, + [1][0][2][0][3][7] = 76, + [1][0][2][0][5][7] = 50, + [1][0][2][0][6][7] = 58, + [1][0][2][0][9][7] = 58, + [1][0][2][0][8][7] = 60, + [1][0][2][0][11][7] = 58, + [1][0][2][0][2][8] = 50, + [1][0][2][0][1][8] = 58, + [1][0][2][0][3][8] = 76, + [1][0][2][0][5][8] = 50, + [1][0][2][0][6][8] = 58, + [1][0][2][0][9][8] = 58, + [1][0][2][0][8][8] = 60, + [1][0][2][0][11][8] = 58, + [1][0][2][0][2][9] = 42, + [1][0][2][0][1][9] = 58, + [1][0][2][0][3][9] = 76, + [1][0][2][0][5][9] = 42, + [1][0][2][0][6][9] = 58, + [1][0][2][0][9][9] = 58, + [1][0][2][0][8][9] = 60, + [1][0][2][0][11][9] = 58, + [1][0][2][0][2][10] = 40, + [1][0][2][0][1][10] = 58, + [1][0][2][0][3][10] = 76, + [1][0][2][0][5][10] = 40, + [1][0][2][0][6][10] = 58, + [1][0][2][0][9][10] = 58, + [1][0][2][0][8][10] = 60, + [1][0][2][0][11][10] = 58, + [1][0][2][0][2][11] = 127, + [1][0][2][0][1][11] = 127, + [1][0][2][0][3][11] = 127, + [1][0][2][0][5][11] = 127, + [1][0][2][0][6][11] = 127, + [1][0][2][0][9][11] = 127, + [1][0][2][0][8][11] = 127, + [1][0][2][0][11][11] = 127, + [1][0][2][0][2][12] = 127, + [1][0][2][0][1][12] = 127, + [1][0][2][0][3][12] = 127, + [1][0][2][0][5][12] = 127, + [1][0][2][0][6][12] = 127, + [1][0][2][0][9][12] = 127, + [1][0][2][0][8][12] = 127, + [1][0][2][0][11][12] = 127, + [1][0][2][0][2][13] = 127, + [1][0][2][0][1][13] = 127, + [1][0][2][0][3][13] = 127, + [1][0][2][0][5][13] = 127, + [1][0][2][0][6][13] = 127, + [1][0][2][0][9][13] = 127, + [1][0][2][0][8][13] = 127, + [1][0][2][0][11][13] = 127, + [1][1][2][0][2][0] = 127, + [1][1][2][0][1][0] = 127, + [1][1][2][0][3][0] = 127, + [1][1][2][0][5][0] = 127, + [1][1][2][0][6][0] = 127, + [1][1][2][0][9][0] = 127, + [1][1][2][0][8][0] = 127, + [1][1][2][0][11][0] = 127, + [1][1][2][0][2][1] = 127, + [1][1][2][0][1][1] = 127, + [1][1][2][0][3][1] = 127, + [1][1][2][0][5][1] = 127, + [1][1][2][0][6][1] = 127, + [1][1][2][0][9][1] = 127, + [1][1][2][0][8][1] = 127, + [1][1][2][0][11][1] = 127, + [1][1][2][0][2][2] = 52, + [1][1][2][0][1][2] = 46, + [1][1][2][0][3][2] = 64, + [1][1][2][0][5][2] = 52, + [1][1][2][0][6][2] = 46, + [1][1][2][0][9][2] = 46, + [1][1][2][0][8][2] = 48, + [1][1][2][0][11][2] = 46, + [1][1][2][0][2][3] = 52, + [1][1][2][0][1][3] = 46, + [1][1][2][0][3][3] = 64, + [1][1][2][0][5][3] = 52, + [1][1][2][0][6][3] = 46, + [1][1][2][0][9][3] = 46, + [1][1][2][0][8][3] = 48, + [1][1][2][0][11][3] = 46, + [1][1][2][0][2][4] = 56, + [1][1][2][0][1][4] = 46, + [1][1][2][0][3][4] = 64, + [1][1][2][0][5][4] = 56, + [1][1][2][0][6][4] = 46, + [1][1][2][0][9][4] = 46, + [1][1][2][0][8][4] = 48, + [1][1][2][0][11][4] = 46, + [1][1][2][0][2][5] = 60, + [1][1][2][0][1][5] = 46, + [1][1][2][0][3][5] = 64, + [1][1][2][0][5][5] = 60, + [1][1][2][0][6][5] = 46, + [1][1][2][0][9][5] = 46, + [1][1][2][0][8][5] = 48, + [1][1][2][0][11][5] = 46, + [1][1][2][0][2][6] = 54, + 
[1][1][2][0][1][6] = 46, + [1][1][2][0][3][6] = 64, + [1][1][2][0][5][6] = 52, + [1][1][2][0][6][6] = 46, + [1][1][2][0][9][6] = 46, + [1][1][2][0][8][6] = 48, + [1][1][2][0][11][6] = 46, + [1][1][2][0][2][7] = 50, + [1][1][2][0][1][7] = 46, + [1][1][2][0][3][7] = 64, + [1][1][2][0][5][7] = 48, + [1][1][2][0][6][7] = 46, + [1][1][2][0][9][7] = 46, + [1][1][2][0][8][7] = 48, + [1][1][2][0][11][7] = 46, + [1][1][2][0][2][8] = 50, + [1][1][2][0][1][8] = 46, + [1][1][2][0][3][8] = 64, + [1][1][2][0][5][8] = 48, + [1][1][2][0][6][8] = 46, + [1][1][2][0][9][8] = 46, + [1][1][2][0][8][8] = 48, + [1][1][2][0][11][8] = 46, + [1][1][2][0][2][9] = 38, + [1][1][2][0][1][9] = 46, + [1][1][2][0][3][9] = 64, + [1][1][2][0][5][9] = 38, + [1][1][2][0][6][9] = 46, + [1][1][2][0][9][9] = 46, + [1][1][2][0][8][9] = 48, + [1][1][2][0][11][9] = 46, + [1][1][2][0][2][10] = 36, + [1][1][2][0][1][10] = 46, + [1][1][2][0][3][10] = 64, + [1][1][2][0][5][10] = 36, + [1][1][2][0][6][10] = 46, + [1][1][2][0][9][10] = 46, + [1][1][2][0][8][10] = 48, + [1][1][2][0][11][10] = 46, + [1][1][2][0][2][11] = 127, + [1][1][2][0][1][11] = 127, + [1][1][2][0][3][11] = 127, + [1][1][2][0][5][11] = 127, + [1][1][2][0][6][11] = 127, + [1][1][2][0][9][11] = 127, + [1][1][2][0][8][11] = 127, + [1][1][2][0][11][11] = 127, + [1][1][2][0][2][12] = 127, + [1][1][2][0][1][12] = 127, + [1][1][2][0][3][12] = 127, + [1][1][2][0][5][12] = 127, + [1][1][2][0][6][12] = 127, + [1][1][2][0][9][12] = 127, + [1][1][2][0][8][12] = 127, + [1][1][2][0][11][12] = 127, + [1][1][2][0][2][13] = 127, + [1][1][2][0][1][13] = 127, + [1][1][2][0][3][13] = 127, + [1][1][2][0][5][13] = 127, + [1][1][2][0][6][13] = 127, + [1][1][2][0][9][13] = 127, + [1][1][2][0][8][13] = 127, + [1][1][2][0][11][13] = 127, + [1][1][2][1][2][0] = 127, + [1][1][2][1][1][0] = 127, + [1][1][2][1][3][0] = 127, + [1][1][2][1][5][0] = 127, + [1][1][2][1][6][0] = 127, + [1][1][2][1][9][0] = 127, + [1][1][2][1][8][0] = 127, + [1][1][2][1][11][0] = 127, + [1][1][2][1][2][1] = 127, + [1][1][2][1][1][1] = 127, + [1][1][2][1][3][1] = 127, + [1][1][2][1][5][1] = 127, + [1][1][2][1][6][1] = 127, + [1][1][2][1][9][1] = 127, + [1][1][2][1][8][1] = 127, + [1][1][2][1][11][1] = 127, + [1][1][2][1][2][2] = 52, + [1][1][2][1][1][2] = 34, + [1][1][2][1][3][2] = 64, + [1][1][2][1][5][2] = 52, + [1][1][2][1][6][2] = 34, + [1][1][2][1][9][2] = 34, + [1][1][2][1][8][2] = 36, + [1][1][2][1][11][2] = 34, + [1][1][2][1][2][3] = 52, + [1][1][2][1][1][3] = 34, + [1][1][2][1][3][3] = 64, + [1][1][2][1][5][3] = 52, + [1][1][2][1][6][3] = 34, + [1][1][2][1][9][3] = 34, + [1][1][2][1][8][3] = 36, + [1][1][2][1][11][3] = 34, + [1][1][2][1][2][4] = 56, + [1][1][2][1][1][4] = 34, + [1][1][2][1][3][4] = 64, + [1][1][2][1][5][4] = 56, + [1][1][2][1][6][4] = 34, + [1][1][2][1][9][4] = 34, + [1][1][2][1][8][4] = 36, + [1][1][2][1][11][4] = 34, + [1][1][2][1][2][5] = 60, + [1][1][2][1][1][5] = 34, + [1][1][2][1][3][5] = 64, + [1][1][2][1][5][5] = 60, + [1][1][2][1][6][5] = 34, + [1][1][2][1][9][5] = 34, + [1][1][2][1][8][5] = 36, + [1][1][2][1][11][5] = 34, + [1][1][2][1][2][6] = 54, + [1][1][2][1][1][6] = 34, + [1][1][2][1][3][6] = 64, + [1][1][2][1][5][6] = 52, + [1][1][2][1][6][6] = 34, + [1][1][2][1][9][6] = 34, + [1][1][2][1][8][6] = 36, + [1][1][2][1][11][6] = 34, + [1][1][2][1][2][7] = 50, + [1][1][2][1][1][7] = 34, + [1][1][2][1][3][7] = 64, + [1][1][2][1][5][7] = 48, + [1][1][2][1][6][7] = 34, + [1][1][2][1][9][7] = 34, + [1][1][2][1][8][7] = 36, + [1][1][2][1][11][7] = 34, + [1][1][2][1][2][8] = 50, + 
[1][1][2][1][1][8] = 34, + [1][1][2][1][3][8] = 64, + [1][1][2][1][5][8] = 48, + [1][1][2][1][6][8] = 34, + [1][1][2][1][9][8] = 34, + [1][1][2][1][8][8] = 36, + [1][1][2][1][11][8] = 34, + [1][1][2][1][2][9] = 38, + [1][1][2][1][1][9] = 34, + [1][1][2][1][3][9] = 64, + [1][1][2][1][5][9] = 38, + [1][1][2][1][6][9] = 34, + [1][1][2][1][9][9] = 34, + [1][1][2][1][8][9] = 36, + [1][1][2][1][11][9] = 34, + [1][1][2][1][2][10] = 36, + [1][1][2][1][1][10] = 34, + [1][1][2][1][3][10] = 64, + [1][1][2][1][5][10] = 36, + [1][1][2][1][6][10] = 34, + [1][1][2][1][9][10] = 34, + [1][1][2][1][8][10] = 36, + [1][1][2][1][11][10] = 34, + [1][1][2][1][2][11] = 127, + [1][1][2][1][1][11] = 127, + [1][1][2][1][3][11] = 127, + [1][1][2][1][5][11] = 127, + [1][1][2][1][6][11] = 127, + [1][1][2][1][9][11] = 127, + [1][1][2][1][8][11] = 127, + [1][1][2][1][11][11] = 127, + [1][1][2][1][2][12] = 127, + [1][1][2][1][1][12] = 127, + [1][1][2][1][3][12] = 127, + [1][1][2][1][5][12] = 127, + [1][1][2][1][6][12] = 127, + [1][1][2][1][9][12] = 127, + [1][1][2][1][8][12] = 127, + [1][1][2][1][11][12] = 127, + [1][1][2][1][2][13] = 127, + [1][1][2][1][1][13] = 127, + [1][1][2][1][3][13] = 127, + [1][1][2][1][5][13] = 127, + [1][1][2][1][6][13] = 127, + [1][1][2][1][9][13] = 127, + [1][1][2][1][8][13] = 127, + [1][1][2][1][11][13] = 127, +}; + +const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { + [0][0][1][0][0][0] = 30, + [0][0][1][0][0][2] = 30, + [0][0][1][0][0][4] = 30, + [0][0][1][0][0][6] = 30, + [0][0][1][0][0][8] = 52, + [0][0][1][0][0][10] = 52, + [0][0][1][0][0][12] = 52, + [0][0][1][0][0][14] = 52, + [0][0][1][0][0][15] = 52, + [0][0][1][0][0][17] = 52, + [0][0][1][0][0][19] = 52, + [0][0][1][0][0][21] = 52, + [0][0][1][0][0][23] = 52, + [0][0][1][0][0][25] = 52, + [0][0][1][0][0][27] = 52, + [0][0][1][0][0][29] = 52, + [0][0][1][0][0][31] = 52, + [0][0][1][0][0][33] = 52, + [0][0][1][0][0][35] = 52, + [0][0][1][0][0][37] = 54, + [0][0][1][0][0][38] = 28, + [0][0][1][0][0][40] = 28, + [0][0][1][0][0][42] = 28, + [0][0][1][0][0][44] = 28, + [0][0][1][0][0][46] = 28, + [0][1][1][0][0][0] = 18, + [0][1][1][0][0][2] = 18, + [0][1][1][0][0][4] = 18, + [0][1][1][0][0][6] = 18, + [0][1][1][0][0][8] = 40, + [0][1][1][0][0][10] = 40, + [0][1][1][0][0][12] = 40, + [0][1][1][0][0][14] = 40, + [0][1][1][0][0][15] = 40, + [0][1][1][0][0][17] = 40, + [0][1][1][0][0][19] = 40, + [0][1][1][0][0][21] = 40, + [0][1][1][0][0][23] = 40, + [0][1][1][0][0][25] = 40, + [0][1][1][0][0][27] = 40, + [0][1][1][0][0][29] = 40, + [0][1][1][0][0][31] = 40, + [0][1][1][0][0][33] = 40, + [0][1][1][0][0][35] = 40, + [0][1][1][0][0][37] = 42, + [0][1][1][0][0][38] = 16, + [0][1][1][0][0][40] = 16, + [0][1][1][0][0][42] = 16, + [0][1][1][0][0][44] = 16, + [0][1][1][0][0][46] = 16, + [0][0][2][0][0][0] = 30, + [0][0][2][0][0][2] = 30, + [0][0][2][0][0][4] = 30, + [0][0][2][0][0][6] = 30, + [0][0][2][0][0][8] = 52, + [0][0][2][0][0][10] = 52, + [0][0][2][0][0][12] = 52, + [0][0][2][0][0][14] = 52, + [0][0][2][0][0][15] = 52, + [0][0][2][0][0][17] = 52, + [0][0][2][0][0][19] = 52, + [0][0][2][0][0][21] = 52, + [0][0][2][0][0][23] = 52, + [0][0][2][0][0][25] = 52, + [0][0][2][0][0][27] = 52, + [0][0][2][0][0][29] = 52, + [0][0][2][0][0][31] = 52, + [0][0][2][0][0][33] = 52, + [0][0][2][0][0][35] = 52, + [0][0][2][0][0][37] = 54, + [0][0][2][0][0][38] = 28, + [0][0][2][0][0][40] = 28, + [0][0][2][0][0][42] = 28, + [0][0][2][0][0][44] = 28, + 
[0][0][2][0][0][46] = 28, + [0][1][2][0][0][0] = 18, + [0][1][2][0][0][2] = 18, + [0][1][2][0][0][4] = 18, + [0][1][2][0][0][6] = 18, + [0][1][2][0][0][8] = 40, + [0][1][2][0][0][10] = 40, + [0][1][2][0][0][12] = 40, + [0][1][2][0][0][14] = 40, + [0][1][2][0][0][15] = 40, + [0][1][2][0][0][17] = 40, + [0][1][2][0][0][19] = 40, + [0][1][2][0][0][21] = 40, + [0][1][2][0][0][23] = 40, + [0][1][2][0][0][25] = 40, + [0][1][2][0][0][27] = 40, + [0][1][2][0][0][29] = 40, + [0][1][2][0][0][31] = 40, + [0][1][2][0][0][33] = 40, + [0][1][2][0][0][35] = 40, + [0][1][2][0][0][37] = 42, + [0][1][2][0][0][38] = 16, + [0][1][2][0][0][40] = 16, + [0][1][2][0][0][42] = 16, + [0][1][2][0][0][44] = 16, + [0][1][2][0][0][46] = 16, + [0][1][2][1][0][0] = 6, + [0][1][2][1][0][2] = 6, + [0][1][2][1][0][4] = 6, + [0][1][2][1][0][6] = 6, + [0][1][2][1][0][8] = 28, + [0][1][2][1][0][10] = 28, + [0][1][2][1][0][12] = 28, + [0][1][2][1][0][14] = 28, + [0][1][2][1][0][15] = 28, + [0][1][2][1][0][17] = 28, + [0][1][2][1][0][19] = 28, + [0][1][2][1][0][21] = 28, + [0][1][2][1][0][23] = 28, + [0][1][2][1][0][25] = 28, + [0][1][2][1][0][27] = 28, + [0][1][2][1][0][29] = 28, + [0][1][2][1][0][31] = 28, + [0][1][2][1][0][33] = 28, + [0][1][2][1][0][35] = 28, + [0][1][2][1][0][37] = 30, + [0][1][2][1][0][38] = 4, + [0][1][2][1][0][40] = 4, + [0][1][2][1][0][42] = 4, + [0][1][2][1][0][44] = 4, + [0][1][2][1][0][46] = 4, + [1][0][2][0][0][1] = 30, + [1][0][2][0][0][5] = 30, + [1][0][2][0][0][9] = 52, + [1][0][2][0][0][13] = 52, + [1][0][2][0][0][16] = 52, + [1][0][2][0][0][20] = 52, + [1][0][2][0][0][24] = 52, + [1][0][2][0][0][28] = 52, + [1][0][2][0][0][32] = 52, + [1][0][2][0][0][36] = 54, + [1][0][2][0][0][39] = 28, + [1][0][2][0][0][43] = 28, + [1][1][2][0][0][1] = 18, + [1][1][2][0][0][5] = 18, + [1][1][2][0][0][9] = 40, + [1][1][2][0][0][13] = 40, + [1][1][2][0][0][16] = 40, + [1][1][2][0][0][20] = 40, + [1][1][2][0][0][24] = 40, + [1][1][2][0][0][28] = 40, + [1][1][2][0][0][32] = 40, + [1][1][2][0][0][36] = 42, + [1][1][2][0][0][39] = 16, + [1][1][2][0][0][43] = 16, + [1][1][2][1][0][1] = 6, + [1][1][2][1][0][5] = 6, + [1][1][2][1][0][9] = 28, + [1][1][2][1][0][13] = 28, + [1][1][2][1][0][16] = 28, + [1][1][2][1][0][20] = 28, + [1][1][2][1][0][24] = 28, + [1][1][2][1][0][28] = 28, + [1][1][2][1][0][32] = 28, + [1][1][2][1][0][36] = 30, + [1][1][2][1][0][39] = 4, + [1][1][2][1][0][43] = 4, + [2][0][2][0][0][3] = 30, + [2][0][2][0][0][11] = 52, + [2][0][2][0][0][18] = 52, + [2][0][2][0][0][26] = 52, + [2][0][2][0][0][34] = 54, + [2][0][2][0][0][41] = 28, + [2][1][2][0][0][3] = 18, + [2][1][2][0][0][11] = 40, + [2][1][2][0][0][18] = 40, + [2][1][2][0][0][26] = 40, + [2][1][2][0][0][34] = 42, + [2][1][2][0][0][41] = 16, + [2][1][2][1][0][3] = 6, + [2][1][2][1][0][11] = 28, + [2][1][2][1][0][18] = 28, + [2][1][2][1][0][26] = 28, + [2][1][2][1][0][34] = 30, + [2][1][2][1][0][41] = 4, + [0][0][1][0][2][0] = 76, + [0][0][1][0][1][0] = 58, + [0][0][1][0][3][0] = 62, + [0][0][1][0][5][0] = 62, + [0][0][1][0][6][0] = 58, + [0][0][1][0][9][0] = 58, + [0][0][1][0][8][0] = 30, + [0][0][1][0][11][0] = 52, + [0][0][1][0][2][2] = 76, + [0][0][1][0][1][2] = 58, + [0][0][1][0][3][2] = 62, + [0][0][1][0][5][2] = 62, + [0][0][1][0][6][2] = 58, + [0][0][1][0][9][2] = 58, + [0][0][1][0][8][2] = 30, + [0][0][1][0][11][2] = 52, + [0][0][1][0][2][4] = 76, + [0][0][1][0][1][4] = 58, + [0][0][1][0][3][4] = 62, + [0][0][1][0][5][4] = 62, + [0][0][1][0][6][4] = 58, + [0][0][1][0][9][4] = 58, + [0][0][1][0][8][4] = 30, + [0][0][1][0][11][4] = 52, + 
[0][0][1][0][2][6] = 76, + [0][0][1][0][1][6] = 58, + [0][0][1][0][3][6] = 62, + [0][0][1][0][5][6] = 62, + [0][0][1][0][6][6] = 54, + [0][0][1][0][9][6] = 58, + [0][0][1][0][8][6] = 30, + [0][0][1][0][11][6] = 52, + [0][0][1][0][2][8] = 76, + [0][0][1][0][1][8] = 58, + [0][0][1][0][3][8] = 62, + [0][0][1][0][5][8] = 64, + [0][0][1][0][6][8] = 58, + [0][0][1][0][9][8] = 58, + [0][0][1][0][8][8] = 54, + [0][0][1][0][11][8] = 52, + [0][0][1][0][2][10] = 76, + [0][0][1][0][1][10] = 58, + [0][0][1][0][3][10] = 62, + [0][0][1][0][5][10] = 64, + [0][0][1][0][6][10] = 58, + [0][0][1][0][9][10] = 58, + [0][0][1][0][8][10] = 54, + [0][0][1][0][11][10] = 52, + [0][0][1][0][2][12] = 76, + [0][0][1][0][1][12] = 58, + [0][0][1][0][3][12] = 62, + [0][0][1][0][5][12] = 64, + [0][0][1][0][6][12] = 58, + [0][0][1][0][9][12] = 58, + [0][0][1][0][8][12] = 54, + [0][0][1][0][11][12] = 52, + [0][0][1][0][2][14] = 76, + [0][0][1][0][1][14] = 58, + [0][0][1][0][3][14] = 62, + [0][0][1][0][5][14] = 64, + [0][0][1][0][6][14] = 58, + [0][0][1][0][9][14] = 58, + [0][0][1][0][8][14] = 54, + [0][0][1][0][11][14] = 52, + [0][0][1][0][2][15] = 76, + [0][0][1][0][1][15] = 58, + [0][0][1][0][3][15] = 76, + [0][0][1][0][5][15] = 76, + [0][0][1][0][6][15] = 58, + [0][0][1][0][9][15] = 58, + [0][0][1][0][8][15] = 54, + [0][0][1][0][11][15] = 52, + [0][0][1][0][2][17] = 76, + [0][0][1][0][1][17] = 58, + [0][0][1][0][3][17] = 76, + [0][0][1][0][5][17] = 76, + [0][0][1][0][6][17] = 58, + [0][0][1][0][9][17] = 58, + [0][0][1][0][8][17] = 54, + [0][0][1][0][11][17] = 52, + [0][0][1][0][2][19] = 76, + [0][0][1][0][1][19] = 58, + [0][0][1][0][3][19] = 76, + [0][0][1][0][5][19] = 76, + [0][0][1][0][6][19] = 58, + [0][0][1][0][9][19] = 58, + [0][0][1][0][8][19] = 54, + [0][0][1][0][11][19] = 52, + [0][0][1][0][2][21] = 76, + [0][0][1][0][1][21] = 58, + [0][0][1][0][3][21] = 76, + [0][0][1][0][5][21] = 76, + [0][0][1][0][6][21] = 58, + [0][0][1][0][9][21] = 58, + [0][0][1][0][8][21] = 54, + [0][0][1][0][11][21] = 52, + [0][0][1][0][2][23] = 76, + [0][0][1][0][1][23] = 58, + [0][0][1][0][3][23] = 76, + [0][0][1][0][5][23] = 76, + [0][0][1][0][6][23] = 58, + [0][0][1][0][9][23] = 58, + [0][0][1][0][8][23] = 54, + [0][0][1][0][11][23] = 52, + [0][0][1][0][2][25] = 76, + [0][0][1][0][1][25] = 58, + [0][0][1][0][3][25] = 76, + [0][0][1][0][5][25] = 127, + [0][0][1][0][6][25] = 58, + [0][0][1][0][9][25] = 127, + [0][0][1][0][8][25] = 54, + [0][0][1][0][11][25] = 52, + [0][0][1][0][2][27] = 76, + [0][0][1][0][1][27] = 58, + [0][0][1][0][3][27] = 76, + [0][0][1][0][5][27] = 127, + [0][0][1][0][6][27] = 58, + [0][0][1][0][9][27] = 127, + [0][0][1][0][8][27] = 54, + [0][0][1][0][11][27] = 52, + [0][0][1][0][2][29] = 76, + [0][0][1][0][1][29] = 58, + [0][0][1][0][3][29] = 76, + [0][0][1][0][5][29] = 127, + [0][0][1][0][6][29] = 58, + [0][0][1][0][9][29] = 127, + [0][0][1][0][8][29] = 54, + [0][0][1][0][11][29] = 52, + [0][0][1][0][2][31] = 76, + [0][0][1][0][1][31] = 58, + [0][0][1][0][3][31] = 76, + [0][0][1][0][5][31] = 76, + [0][0][1][0][6][31] = 58, + [0][0][1][0][9][31] = 58, + [0][0][1][0][8][31] = 54, + [0][0][1][0][11][31] = 52, + [0][0][1][0][2][33] = 76, + [0][0][1][0][1][33] = 58, + [0][0][1][0][3][33] = 76, + [0][0][1][0][5][33] = 76, + [0][0][1][0][6][33] = 58, + [0][0][1][0][9][33] = 58, + [0][0][1][0][8][33] = 54, + [0][0][1][0][11][33] = 52, + [0][0][1][0][2][35] = 74, + [0][0][1][0][1][35] = 58, + [0][0][1][0][3][35] = 76, + [0][0][1][0][5][35] = 74, + [0][0][1][0][6][35] = 58, + [0][0][1][0][9][35] = 58, + [0][0][1][0][8][35] = 
54, + [0][0][1][0][11][35] = 52, + [0][0][1][0][2][37] = 76, + [0][0][1][0][1][37] = 127, + [0][0][1][0][3][37] = 76, + [0][0][1][0][5][37] = 76, + [0][0][1][0][6][37] = 58, + [0][0][1][0][9][37] = 76, + [0][0][1][0][8][37] = 54, + [0][0][1][0][11][37] = 127, + [0][0][1][0][2][38] = 76, + [0][0][1][0][1][38] = 28, + [0][0][1][0][3][38] = 127, + [0][0][1][0][5][38] = 76, + [0][0][1][0][6][38] = 28, + [0][0][1][0][9][38] = 76, + [0][0][1][0][8][38] = 54, + [0][0][1][0][11][38] = 52, + [0][0][1][0][2][40] = 76, + [0][0][1][0][1][40] = 28, + [0][0][1][0][3][40] = 127, + [0][0][1][0][5][40] = 76, + [0][0][1][0][6][40] = 28, + [0][0][1][0][9][40] = 76, + [0][0][1][0][8][40] = 54, + [0][0][1][0][11][40] = 52, + [0][0][1][0][2][42] = 76, + [0][0][1][0][1][42] = 28, + [0][0][1][0][3][42] = 127, + [0][0][1][0][5][42] = 76, + [0][0][1][0][6][42] = 28, + [0][0][1][0][9][42] = 76, + [0][0][1][0][8][42] = 54, + [0][0][1][0][11][42] = 52, + [0][0][1][0][2][44] = 76, + [0][0][1][0][1][44] = 28, + [0][0][1][0][3][44] = 127, + [0][0][1][0][5][44] = 76, + [0][0][1][0][6][44] = 28, + [0][0][1][0][9][44] = 76, + [0][0][1][0][8][44] = 54, + [0][0][1][0][11][44] = 52, + [0][0][1][0][2][46] = 76, + [0][0][1][0][1][46] = 28, + [0][0][1][0][3][46] = 127, + [0][0][1][0][5][46] = 76, + [0][0][1][0][6][46] = 28, + [0][0][1][0][9][46] = 76, + [0][0][1][0][8][46] = 54, + [0][0][1][0][11][46] = 52, + [0][1][1][0][2][0] = 68, + [0][1][1][0][1][0] = 46, + [0][1][1][0][3][0] = 50, + [0][1][1][0][5][0] = 40, + [0][1][1][0][6][0] = 46, + [0][1][1][0][9][0] = 46, + [0][1][1][0][8][0] = 18, + [0][1][1][0][11][0] = 40, + [0][1][1][0][2][2] = 68, + [0][1][1][0][1][2] = 46, + [0][1][1][0][3][2] = 50, + [0][1][1][0][5][2] = 40, + [0][1][1][0][6][2] = 46, + [0][1][1][0][9][2] = 46, + [0][1][1][0][8][2] = 18, + [0][1][1][0][11][2] = 40, + [0][1][1][0][2][4] = 68, + [0][1][1][0][1][4] = 46, + [0][1][1][0][3][4] = 50, + [0][1][1][0][5][4] = 40, + [0][1][1][0][6][4] = 46, + [0][1][1][0][9][4] = 46, + [0][1][1][0][8][4] = 18, + [0][1][1][0][11][4] = 40, + [0][1][1][0][2][6] = 68, + [0][1][1][0][1][6] = 46, + [0][1][1][0][3][6] = 50, + [0][1][1][0][5][6] = 40, + [0][1][1][0][6][6] = 36, + [0][1][1][0][9][6] = 46, + [0][1][1][0][8][6] = 18, + [0][1][1][0][11][6] = 40, + [0][1][1][0][2][8] = 68, + [0][1][1][0][1][8] = 46, + [0][1][1][0][3][8] = 50, + [0][1][1][0][5][8] = 52, + [0][1][1][0][6][8] = 46, + [0][1][1][0][9][8] = 46, + [0][1][1][0][8][8] = 42, + [0][1][1][0][11][8] = 40, + [0][1][1][0][2][10] = 68, + [0][1][1][0][1][10] = 46, + [0][1][1][0][3][10] = 50, + [0][1][1][0][5][10] = 52, + [0][1][1][0][6][10] = 46, + [0][1][1][0][9][10] = 46, + [0][1][1][0][8][10] = 42, + [0][1][1][0][11][10] = 40, + [0][1][1][0][2][12] = 68, + [0][1][1][0][1][12] = 46, + [0][1][1][0][3][12] = 50, + [0][1][1][0][5][12] = 52, + [0][1][1][0][6][12] = 46, + [0][1][1][0][9][12] = 46, + [0][1][1][0][8][12] = 42, + [0][1][1][0][11][12] = 40, + [0][1][1][0][2][14] = 68, + [0][1][1][0][1][14] = 46, + [0][1][1][0][3][14] = 50, + [0][1][1][0][5][14] = 52, + [0][1][1][0][6][14] = 46, + [0][1][1][0][9][14] = 46, + [0][1][1][0][8][14] = 42, + [0][1][1][0][11][14] = 40, + [0][1][1][0][2][15] = 68, + [0][1][1][0][1][15] = 46, + [0][1][1][0][3][15] = 70, + [0][1][1][0][5][15] = 68, + [0][1][1][0][6][15] = 46, + [0][1][1][0][9][15] = 46, + [0][1][1][0][8][15] = 42, + [0][1][1][0][11][15] = 40, + [0][1][1][0][2][17] = 68, + [0][1][1][0][1][17] = 46, + [0][1][1][0][3][17] = 70, + [0][1][1][0][5][17] = 68, + [0][1][1][0][6][17] = 46, + [0][1][1][0][9][17] = 46, + 
[0][1][1][0][8][17] = 42, + [0][1][1][0][11][17] = 40, + [0][1][1][0][2][19] = 68, + [0][1][1][0][1][19] = 46, + [0][1][1][0][3][19] = 70, + [0][1][1][0][5][19] = 68, + [0][1][1][0][6][19] = 46, + [0][1][1][0][9][19] = 46, + [0][1][1][0][8][19] = 42, + [0][1][1][0][11][19] = 40, + [0][1][1][0][2][21] = 68, + [0][1][1][0][1][21] = 46, + [0][1][1][0][3][21] = 70, + [0][1][1][0][5][21] = 68, + [0][1][1][0][6][21] = 46, + [0][1][1][0][9][21] = 46, + [0][1][1][0][8][21] = 42, + [0][1][1][0][11][21] = 40, + [0][1][1][0][2][23] = 68, + [0][1][1][0][1][23] = 46, + [0][1][1][0][3][23] = 70, + [0][1][1][0][5][23] = 68, + [0][1][1][0][6][23] = 46, + [0][1][1][0][9][23] = 46, + [0][1][1][0][8][23] = 42, + [0][1][1][0][11][23] = 40, + [0][1][1][0][2][25] = 68, + [0][1][1][0][1][25] = 46, + [0][1][1][0][3][25] = 70, + [0][1][1][0][5][25] = 127, + [0][1][1][0][6][25] = 46, + [0][1][1][0][9][25] = 127, + [0][1][1][0][8][25] = 42, + [0][1][1][0][11][25] = 40, + [0][1][1][0][2][27] = 68, + [0][1][1][0][1][27] = 46, + [0][1][1][0][3][27] = 70, + [0][1][1][0][5][27] = 127, + [0][1][1][0][6][27] = 46, + [0][1][1][0][9][27] = 127, + [0][1][1][0][8][27] = 42, + [0][1][1][0][11][27] = 40, + [0][1][1][0][2][29] = 68, + [0][1][1][0][1][29] = 46, + [0][1][1][0][3][29] = 70, + [0][1][1][0][5][29] = 127, + [0][1][1][0][6][29] = 46, + [0][1][1][0][9][29] = 127, + [0][1][1][0][8][29] = 42, + [0][1][1][0][11][29] = 40, + [0][1][1][0][2][31] = 68, + [0][1][1][0][1][31] = 46, + [0][1][1][0][3][31] = 70, + [0][1][1][0][5][31] = 68, + [0][1][1][0][6][31] = 46, + [0][1][1][0][9][31] = 46, + [0][1][1][0][8][31] = 42, + [0][1][1][0][11][31] = 40, + [0][1][1][0][2][33] = 68, + [0][1][1][0][1][33] = 46, + [0][1][1][0][3][33] = 70, + [0][1][1][0][5][33] = 68, + [0][1][1][0][6][33] = 46, + [0][1][1][0][9][33] = 46, + [0][1][1][0][8][33] = 42, + [0][1][1][0][11][33] = 40, + [0][1][1][0][2][35] = 66, + [0][1][1][0][1][35] = 46, + [0][1][1][0][3][35] = 70, + [0][1][1][0][5][35] = 66, + [0][1][1][0][6][35] = 46, + [0][1][1][0][9][35] = 46, + [0][1][1][0][8][35] = 42, + [0][1][1][0][11][35] = 40, + [0][1][1][0][2][37] = 68, + [0][1][1][0][1][37] = 127, + [0][1][1][0][3][37] = 70, + [0][1][1][0][5][37] = 68, + [0][1][1][0][6][37] = 46, + [0][1][1][0][9][37] = 68, + [0][1][1][0][8][37] = 42, + [0][1][1][0][11][37] = 127, + [0][1][1][0][2][38] = 76, + [0][1][1][0][1][38] = 16, + [0][1][1][0][3][38] = 127, + [0][1][1][0][5][38] = 76, + [0][1][1][0][6][38] = 16, + [0][1][1][0][9][38] = 76, + [0][1][1][0][8][38] = 42, + [0][1][1][0][11][38] = 40, + [0][1][1][0][2][40] = 76, + [0][1][1][0][1][40] = 16, + [0][1][1][0][3][40] = 127, + [0][1][1][0][5][40] = 76, + [0][1][1][0][6][40] = 16, + [0][1][1][0][9][40] = 76, + [0][1][1][0][8][40] = 42, + [0][1][1][0][11][40] = 40, + [0][1][1][0][2][42] = 76, + [0][1][1][0][1][42] = 16, + [0][1][1][0][3][42] = 127, + [0][1][1][0][5][42] = 76, + [0][1][1][0][6][42] = 16, + [0][1][1][0][9][42] = 76, + [0][1][1][0][8][42] = 42, + [0][1][1][0][11][42] = 40, + [0][1][1][0][2][44] = 76, + [0][1][1][0][1][44] = 16, + [0][1][1][0][3][44] = 127, + [0][1][1][0][5][44] = 76, + [0][1][1][0][6][44] = 16, + [0][1][1][0][9][44] = 76, + [0][1][1][0][8][44] = 42, + [0][1][1][0][11][44] = 40, + [0][1][1][0][2][46] = 76, + [0][1][1][0][1][46] = 16, + [0][1][1][0][3][46] = 127, + [0][1][1][0][5][46] = 76, + [0][1][1][0][6][46] = 16, + [0][1][1][0][9][46] = 76, + [0][1][1][0][8][46] = 42, + [0][1][1][0][11][46] = 40, + [0][0][2][0][2][0] = 76, + [0][0][2][0][1][0] = 58, + [0][0][2][0][3][0] = 62, + [0][0][2][0][5][0] = 62, + 
[0][0][2][0][6][0] = 58, + [0][0][2][0][9][0] = 58, + [0][0][2][0][8][0] = 30, + [0][0][2][0][11][0] = 52, + [0][0][2][0][2][2] = 76, + [0][0][2][0][1][2] = 58, + [0][0][2][0][3][2] = 62, + [0][0][2][0][5][2] = 62, + [0][0][2][0][6][2] = 58, + [0][0][2][0][9][2] = 58, + [0][0][2][0][8][2] = 30, + [0][0][2][0][11][2] = 52, + [0][0][2][0][2][4] = 76, + [0][0][2][0][1][4] = 58, + [0][0][2][0][3][4] = 62, + [0][0][2][0][5][4] = 62, + [0][0][2][0][6][4] = 58, + [0][0][2][0][9][4] = 58, + [0][0][2][0][8][4] = 30, + [0][0][2][0][11][4] = 52, + [0][0][2][0][2][6] = 76, + [0][0][2][0][1][6] = 58, + [0][0][2][0][3][6] = 62, + [0][0][2][0][5][6] = 62, + [0][0][2][0][6][6] = 54, + [0][0][2][0][9][6] = 58, + [0][0][2][0][8][6] = 30, + [0][0][2][0][11][6] = 52, + [0][0][2][0][2][8] = 76, + [0][0][2][0][1][8] = 58, + [0][0][2][0][3][8] = 62, + [0][0][2][0][5][8] = 64, + [0][0][2][0][6][8] = 58, + [0][0][2][0][9][8] = 58, + [0][0][2][0][8][8] = 54, + [0][0][2][0][11][8] = 52, + [0][0][2][0][2][10] = 76, + [0][0][2][0][1][10] = 58, + [0][0][2][0][3][10] = 62, + [0][0][2][0][5][10] = 64, + [0][0][2][0][6][10] = 58, + [0][0][2][0][9][10] = 58, + [0][0][2][0][8][10] = 54, + [0][0][2][0][11][10] = 52, + [0][0][2][0][2][12] = 76, + [0][0][2][0][1][12] = 58, + [0][0][2][0][3][12] = 62, + [0][0][2][0][5][12] = 64, + [0][0][2][0][6][12] = 58, + [0][0][2][0][9][12] = 58, + [0][0][2][0][8][12] = 54, + [0][0][2][0][11][12] = 52, + [0][0][2][0][2][14] = 76, + [0][0][2][0][1][14] = 58, + [0][0][2][0][3][14] = 62, + [0][0][2][0][5][14] = 64, + [0][0][2][0][6][14] = 58, + [0][0][2][0][9][14] = 58, + [0][0][2][0][8][14] = 54, + [0][0][2][0][11][14] = 52, + [0][0][2][0][2][15] = 74, + [0][0][2][0][1][15] = 58, + [0][0][2][0][3][15] = 76, + [0][0][2][0][5][15] = 74, + [0][0][2][0][6][15] = 58, + [0][0][2][0][9][15] = 58, + [0][0][2][0][8][15] = 54, + [0][0][2][0][11][15] = 52, + [0][0][2][0][2][17] = 76, + [0][0][2][0][1][17] = 58, + [0][0][2][0][3][17] = 76, + [0][0][2][0][5][17] = 76, + [0][0][2][0][6][17] = 58, + [0][0][2][0][9][17] = 58, + [0][0][2][0][8][17] = 54, + [0][0][2][0][11][17] = 52, + [0][0][2][0][2][19] = 76, + [0][0][2][0][1][19] = 58, + [0][0][2][0][3][19] = 76, + [0][0][2][0][5][19] = 76, + [0][0][2][0][6][19] = 58, + [0][0][2][0][9][19] = 58, + [0][0][2][0][8][19] = 54, + [0][0][2][0][11][19] = 52, + [0][0][2][0][2][21] = 76, + [0][0][2][0][1][21] = 58, + [0][0][2][0][3][21] = 76, + [0][0][2][0][5][21] = 76, + [0][0][2][0][6][21] = 58, + [0][0][2][0][9][21] = 58, + [0][0][2][0][8][21] = 54, + [0][0][2][0][11][21] = 52, + [0][0][2][0][2][23] = 76, + [0][0][2][0][1][23] = 58, + [0][0][2][0][3][23] = 76, + [0][0][2][0][5][23] = 76, + [0][0][2][0][6][23] = 58, + [0][0][2][0][9][23] = 58, + [0][0][2][0][8][23] = 54, + [0][0][2][0][11][23] = 52, + [0][0][2][0][2][25] = 76, + [0][0][2][0][1][25] = 58, + [0][0][2][0][3][25] = 76, + [0][0][2][0][5][25] = 127, + [0][0][2][0][6][25] = 58, + [0][0][2][0][9][25] = 127, + [0][0][2][0][8][25] = 54, + [0][0][2][0][11][25] = 52, + [0][0][2][0][2][27] = 76, + [0][0][2][0][1][27] = 58, + [0][0][2][0][3][27] = 76, + [0][0][2][0][5][27] = 127, + [0][0][2][0][6][27] = 58, + [0][0][2][0][9][27] = 127, + [0][0][2][0][8][27] = 54, + [0][0][2][0][11][27] = 52, + [0][0][2][0][2][29] = 76, + [0][0][2][0][1][29] = 58, + [0][0][2][0][3][29] = 76, + [0][0][2][0][5][29] = 127, + [0][0][2][0][6][29] = 58, + [0][0][2][0][9][29] = 127, + [0][0][2][0][8][29] = 54, + [0][0][2][0][11][29] = 52, + [0][0][2][0][2][31] = 76, + [0][0][2][0][1][31] = 58, + [0][0][2][0][3][31] = 76, + 
[0][0][2][0][5][31] = 76, + [0][0][2][0][6][31] = 58, + [0][0][2][0][9][31] = 58, + [0][0][2][0][8][31] = 54, + [0][0][2][0][11][31] = 52, + [0][0][2][0][2][33] = 76, + [0][0][2][0][1][33] = 58, + [0][0][2][0][3][33] = 76, + [0][0][2][0][5][33] = 76, + [0][0][2][0][6][33] = 58, + [0][0][2][0][9][33] = 58, + [0][0][2][0][8][33] = 54, + [0][0][2][0][11][33] = 52, + [0][0][2][0][2][35] = 70, + [0][0][2][0][1][35] = 58, + [0][0][2][0][3][35] = 76, + [0][0][2][0][5][35] = 70, + [0][0][2][0][6][35] = 58, + [0][0][2][0][9][35] = 58, + [0][0][2][0][8][35] = 54, + [0][0][2][0][11][35] = 52, + [0][0][2][0][2][37] = 76, + [0][0][2][0][1][37] = 127, + [0][0][2][0][3][37] = 76, + [0][0][2][0][5][37] = 76, + [0][0][2][0][6][37] = 58, + [0][0][2][0][9][37] = 76, + [0][0][2][0][8][37] = 54, + [0][0][2][0][11][37] = 127, + [0][0][2][0][2][38] = 76, + [0][0][2][0][1][38] = 28, + [0][0][2][0][3][38] = 127, + [0][0][2][0][5][38] = 76, + [0][0][2][0][6][38] = 28, + [0][0][2][0][9][38] = 76, + [0][0][2][0][8][38] = 54, + [0][0][2][0][11][38] = 52, + [0][0][2][0][2][40] = 76, + [0][0][2][0][1][40] = 28, + [0][0][2][0][3][40] = 127, + [0][0][2][0][5][40] = 76, + [0][0][2][0][6][40] = 28, + [0][0][2][0][9][40] = 76, + [0][0][2][0][8][40] = 54, + [0][0][2][0][11][40] = 52, + [0][0][2][0][2][42] = 76, + [0][0][2][0][1][42] = 28, + [0][0][2][0][3][42] = 127, + [0][0][2][0][5][42] = 76, + [0][0][2][0][6][42] = 28, + [0][0][2][0][9][42] = 76, + [0][0][2][0][8][42] = 54, + [0][0][2][0][11][42] = 52, + [0][0][2][0][2][44] = 76, + [0][0][2][0][1][44] = 28, + [0][0][2][0][3][44] = 127, + [0][0][2][0][5][44] = 76, + [0][0][2][0][6][44] = 28, + [0][0][2][0][9][44] = 76, + [0][0][2][0][8][44] = 54, + [0][0][2][0][11][44] = 52, + [0][0][2][0][2][46] = 76, + [0][0][2][0][1][46] = 28, + [0][0][2][0][3][46] = 127, + [0][0][2][0][5][46] = 76, + [0][0][2][0][6][46] = 28, + [0][0][2][0][9][46] = 76, + [0][0][2][0][8][46] = 54, + [0][0][2][0][11][46] = 52, + [0][1][2][0][2][0] = 68, + [0][1][2][0][1][0] = 46, + [0][1][2][0][3][0] = 50, + [0][1][2][0][5][0] = 40, + [0][1][2][0][6][0] = 46, + [0][1][2][0][9][0] = 46, + [0][1][2][0][8][0] = 18, + [0][1][2][0][11][0] = 40, + [0][1][2][0][2][2] = 68, + [0][1][2][0][1][2] = 46, + [0][1][2][0][3][2] = 50, + [0][1][2][0][5][2] = 40, + [0][1][2][0][6][2] = 46, + [0][1][2][0][9][2] = 46, + [0][1][2][0][8][2] = 18, + [0][1][2][0][11][2] = 40, + [0][1][2][0][2][4] = 68, + [0][1][2][0][1][4] = 46, + [0][1][2][0][3][4] = 50, + [0][1][2][0][5][4] = 40, + [0][1][2][0][6][4] = 46, + [0][1][2][0][9][4] = 46, + [0][1][2][0][8][4] = 18, + [0][1][2][0][11][4] = 40, + [0][1][2][0][2][6] = 68, + [0][1][2][0][1][6] = 46, + [0][1][2][0][3][6] = 50, + [0][1][2][0][5][6] = 40, + [0][1][2][0][6][6] = 36, + [0][1][2][0][9][6] = 46, + [0][1][2][0][8][6] = 18, + [0][1][2][0][11][6] = 40, + [0][1][2][0][2][8] = 68, + [0][1][2][0][1][8] = 46, + [0][1][2][0][3][8] = 50, + [0][1][2][0][5][8] = 52, + [0][1][2][0][6][8] = 46, + [0][1][2][0][9][8] = 46, + [0][1][2][0][8][8] = 42, + [0][1][2][0][11][8] = 40, + [0][1][2][0][2][10] = 68, + [0][1][2][0][1][10] = 46, + [0][1][2][0][3][10] = 50, + [0][1][2][0][5][10] = 52, + [0][1][2][0][6][10] = 46, + [0][1][2][0][9][10] = 46, + [0][1][2][0][8][10] = 42, + [0][1][2][0][11][10] = 40, + [0][1][2][0][2][12] = 68, + [0][1][2][0][1][12] = 46, + [0][1][2][0][3][12] = 50, + [0][1][2][0][5][12] = 52, + [0][1][2][0][6][12] = 46, + [0][1][2][0][9][12] = 46, + [0][1][2][0][8][12] = 42, + [0][1][2][0][11][12] = 40, + [0][1][2][0][2][14] = 68, + [0][1][2][0][1][14] = 46, + 
[0][1][2][0][3][14] = 50, + [0][1][2][0][5][14] = 52, + [0][1][2][0][6][14] = 46, + [0][1][2][0][9][14] = 46, + [0][1][2][0][8][14] = 42, + [0][1][2][0][11][14] = 40, + [0][1][2][0][2][15] = 68, + [0][1][2][0][1][15] = 46, + [0][1][2][0][3][15] = 70, + [0][1][2][0][5][15] = 68, + [0][1][2][0][6][15] = 46, + [0][1][2][0][9][15] = 46, + [0][1][2][0][8][15] = 42, + [0][1][2][0][11][15] = 40, + [0][1][2][0][2][17] = 68, + [0][1][2][0][1][17] = 46, + [0][1][2][0][3][17] = 70, + [0][1][2][0][5][17] = 68, + [0][1][2][0][6][17] = 46, + [0][1][2][0][9][17] = 46, + [0][1][2][0][8][17] = 42, + [0][1][2][0][11][17] = 40, + [0][1][2][0][2][19] = 68, + [0][1][2][0][1][19] = 46, + [0][1][2][0][3][19] = 70, + [0][1][2][0][5][19] = 68, + [0][1][2][0][6][19] = 46, + [0][1][2][0][9][19] = 46, + [0][1][2][0][8][19] = 42, + [0][1][2][0][11][19] = 40, + [0][1][2][0][2][21] = 68, + [0][1][2][0][1][21] = 46, + [0][1][2][0][3][21] = 70, + [0][1][2][0][5][21] = 68, + [0][1][2][0][6][21] = 46, + [0][1][2][0][9][21] = 46, + [0][1][2][0][8][21] = 42, + [0][1][2][0][11][21] = 40, + [0][1][2][0][2][23] = 68, + [0][1][2][0][1][23] = 46, + [0][1][2][0][3][23] = 70, + [0][1][2][0][5][23] = 68, + [0][1][2][0][6][23] = 46, + [0][1][2][0][9][23] = 46, + [0][1][2][0][8][23] = 42, + [0][1][2][0][11][23] = 40, + [0][1][2][0][2][25] = 68, + [0][1][2][0][1][25] = 46, + [0][1][2][0][3][25] = 70, + [0][1][2][0][5][25] = 127, + [0][1][2][0][6][25] = 46, + [0][1][2][0][9][25] = 127, + [0][1][2][0][8][25] = 42, + [0][1][2][0][11][25] = 40, + [0][1][2][0][2][27] = 68, + [0][1][2][0][1][27] = 46, + [0][1][2][0][3][27] = 70, + [0][1][2][0][5][27] = 127, + [0][1][2][0][6][27] = 46, + [0][1][2][0][9][27] = 127, + [0][1][2][0][8][27] = 42, + [0][1][2][0][11][27] = 40, + [0][1][2][0][2][29] = 68, + [0][1][2][0][1][29] = 46, + [0][1][2][0][3][29] = 70, + [0][1][2][0][5][29] = 127, + [0][1][2][0][6][29] = 46, + [0][1][2][0][9][29] = 127, + [0][1][2][0][8][29] = 42, + [0][1][2][0][11][29] = 40, + [0][1][2][0][2][31] = 68, + [0][1][2][0][1][31] = 46, + [0][1][2][0][3][31] = 70, + [0][1][2][0][5][31] = 68, + [0][1][2][0][6][31] = 46, + [0][1][2][0][9][31] = 46, + [0][1][2][0][8][31] = 42, + [0][1][2][0][11][31] = 40, + [0][1][2][0][2][33] = 68, + [0][1][2][0][1][33] = 46, + [0][1][2][0][3][33] = 70, + [0][1][2][0][5][33] = 68, + [0][1][2][0][6][33] = 46, + [0][1][2][0][9][33] = 46, + [0][1][2][0][8][33] = 42, + [0][1][2][0][11][33] = 40, + [0][1][2][0][2][35] = 64, + [0][1][2][0][1][35] = 46, + [0][1][2][0][3][35] = 70, + [0][1][2][0][5][35] = 64, + [0][1][2][0][6][35] = 46, + [0][1][2][0][9][35] = 46, + [0][1][2][0][8][35] = 42, + [0][1][2][0][11][35] = 40, + [0][1][2][0][2][37] = 68, + [0][1][2][0][1][37] = 127, + [0][1][2][0][3][37] = 70, + [0][1][2][0][5][37] = 68, + [0][1][2][0][6][37] = 46, + [0][1][2][0][9][37] = 68, + [0][1][2][0][8][37] = 42, + [0][1][2][0][11][37] = 127, + [0][1][2][0][2][38] = 76, + [0][1][2][0][1][38] = 16, + [0][1][2][0][3][38] = 127, + [0][1][2][0][5][38] = 76, + [0][1][2][0][6][38] = 16, + [0][1][2][0][9][38] = 76, + [0][1][2][0][8][38] = 42, + [0][1][2][0][11][38] = 40, + [0][1][2][0][2][40] = 76, + [0][1][2][0][1][40] = 16, + [0][1][2][0][3][40] = 127, + [0][1][2][0][5][40] = 76, + [0][1][2][0][6][40] = 16, + [0][1][2][0][9][40] = 76, + [0][1][2][0][8][40] = 42, + [0][1][2][0][11][40] = 40, + [0][1][2][0][2][42] = 76, + [0][1][2][0][1][42] = 16, + [0][1][2][0][3][42] = 127, + [0][1][2][0][5][42] = 76, + [0][1][2][0][6][42] = 16, + [0][1][2][0][9][42] = 76, + [0][1][2][0][8][42] = 42, + [0][1][2][0][11][42] = 40, + 
[0][1][2][0][2][44] = 76, + [0][1][2][0][1][44] = 16, + [0][1][2][0][3][44] = 127, + [0][1][2][0][5][44] = 76, + [0][1][2][0][6][44] = 16, + [0][1][2][0][9][44] = 76, + [0][1][2][0][8][44] = 42, + [0][1][2][0][11][44] = 40, + [0][1][2][0][2][46] = 76, + [0][1][2][0][1][46] = 16, + [0][1][2][0][3][46] = 127, + [0][1][2][0][5][46] = 76, + [0][1][2][0][6][46] = 16, + [0][1][2][0][9][46] = 76, + [0][1][2][0][8][46] = 42, + [0][1][2][0][11][46] = 40, + [0][1][2][1][2][0] = 68, + [0][1][2][1][1][0] = 34, + [0][1][2][1][3][0] = 50, + [0][1][2][1][5][0] = 38, + [0][1][2][1][6][0] = 34, + [0][1][2][1][9][0] = 34, + [0][1][2][1][8][0] = 6, + [0][1][2][1][11][0] = 28, + [0][1][2][1][2][2] = 68, + [0][1][2][1][1][2] = 34, + [0][1][2][1][3][2] = 50, + [0][1][2][1][5][2] = 38, + [0][1][2][1][6][2] = 34, + [0][1][2][1][9][2] = 34, + [0][1][2][1][8][2] = 6, + [0][1][2][1][11][2] = 28, + [0][1][2][1][2][4] = 68, + [0][1][2][1][1][4] = 34, + [0][1][2][1][3][4] = 50, + [0][1][2][1][5][4] = 38, + [0][1][2][1][6][4] = 34, + [0][1][2][1][9][4] = 34, + [0][1][2][1][8][4] = 6, + [0][1][2][1][11][4] = 28, + [0][1][2][1][2][6] = 68, + [0][1][2][1][1][6] = 34, + [0][1][2][1][3][6] = 50, + [0][1][2][1][5][6] = 38, + [0][1][2][1][6][6] = 34, + [0][1][2][1][9][6] = 34, + [0][1][2][1][8][6] = 6, + [0][1][2][1][11][6] = 28, + [0][1][2][1][2][8] = 68, + [0][1][2][1][1][8] = 34, + [0][1][2][1][3][8] = 50, + [0][1][2][1][5][8] = 38, + [0][1][2][1][6][8] = 34, + [0][1][2][1][9][8] = 34, + [0][1][2][1][8][8] = 30, + [0][1][2][1][11][8] = 28, + [0][1][2][1][2][10] = 68, + [0][1][2][1][1][10] = 34, + [0][1][2][1][3][10] = 50, + [0][1][2][1][5][10] = 38, + [0][1][2][1][6][10] = 34, + [0][1][2][1][9][10] = 34, + [0][1][2][1][8][10] = 30, + [0][1][2][1][11][10] = 28, + [0][1][2][1][2][12] = 68, + [0][1][2][1][1][12] = 34, + [0][1][2][1][3][12] = 50, + [0][1][2][1][5][12] = 38, + [0][1][2][1][6][12] = 34, + [0][1][2][1][9][12] = 34, + [0][1][2][1][8][12] = 30, + [0][1][2][1][11][12] = 28, + [0][1][2][1][2][14] = 68, + [0][1][2][1][1][14] = 34, + [0][1][2][1][3][14] = 50, + [0][1][2][1][5][14] = 38, + [0][1][2][1][6][14] = 34, + [0][1][2][1][9][14] = 34, + [0][1][2][1][8][14] = 30, + [0][1][2][1][11][14] = 28, + [0][1][2][1][2][15] = 68, + [0][1][2][1][1][15] = 34, + [0][1][2][1][3][15] = 70, + [0][1][2][1][5][15] = 62, + [0][1][2][1][6][15] = 34, + [0][1][2][1][9][15] = 34, + [0][1][2][1][8][15] = 30, + [0][1][2][1][11][15] = 28, + [0][1][2][1][2][17] = 68, + [0][1][2][1][1][17] = 34, + [0][1][2][1][3][17] = 70, + [0][1][2][1][5][17] = 62, + [0][1][2][1][6][17] = 34, + [0][1][2][1][9][17] = 34, + [0][1][2][1][8][17] = 30, + [0][1][2][1][11][17] = 28, + [0][1][2][1][2][19] = 68, + [0][1][2][1][1][19] = 34, + [0][1][2][1][3][19] = 70, + [0][1][2][1][5][19] = 62, + [0][1][2][1][6][19] = 34, + [0][1][2][1][9][19] = 34, + [0][1][2][1][8][19] = 30, + [0][1][2][1][11][19] = 28, + [0][1][2][1][2][21] = 68, + [0][1][2][1][1][21] = 34, + [0][1][2][1][3][21] = 70, + [0][1][2][1][5][21] = 62, + [0][1][2][1][6][21] = 34, + [0][1][2][1][9][21] = 34, + [0][1][2][1][8][21] = 30, + [0][1][2][1][11][21] = 28, + [0][1][2][1][2][23] = 68, + [0][1][2][1][1][23] = 34, + [0][1][2][1][3][23] = 70, + [0][1][2][1][5][23] = 62, + [0][1][2][1][6][23] = 34, + [0][1][2][1][9][23] = 34, + [0][1][2][1][8][23] = 30, + [0][1][2][1][11][23] = 28, + [0][1][2][1][2][25] = 68, + [0][1][2][1][1][25] = 34, + [0][1][2][1][3][25] = 70, + [0][1][2][1][5][25] = 127, + [0][1][2][1][6][25] = 34, + [0][1][2][1][9][25] = 127, + [0][1][2][1][8][25] = 30, + [0][1][2][1][11][25] = 
28, + [0][1][2][1][2][27] = 68, + [0][1][2][1][1][27] = 34, + [0][1][2][1][3][27] = 70, + [0][1][2][1][5][27] = 127, + [0][1][2][1][6][27] = 34, + [0][1][2][1][9][27] = 127, + [0][1][2][1][8][27] = 30, + [0][1][2][1][11][27] = 28, + [0][1][2][1][2][29] = 68, + [0][1][2][1][1][29] = 34, + [0][1][2][1][3][29] = 70, + [0][1][2][1][5][29] = 127, + [0][1][2][1][6][29] = 34, + [0][1][2][1][9][29] = 127, + [0][1][2][1][8][29] = 30, + [0][1][2][1][11][29] = 28, + [0][1][2][1][2][31] = 68, + [0][1][2][1][1][31] = 34, + [0][1][2][1][3][31] = 70, + [0][1][2][1][5][31] = 62, + [0][1][2][1][6][31] = 34, + [0][1][2][1][9][31] = 34, + [0][1][2][1][8][31] = 30, + [0][1][2][1][11][31] = 28, + [0][1][2][1][2][33] = 68, + [0][1][2][1][1][33] = 34, + [0][1][2][1][3][33] = 70, + [0][1][2][1][5][33] = 62, + [0][1][2][1][6][33] = 34, + [0][1][2][1][9][33] = 34, + [0][1][2][1][8][33] = 30, + [0][1][2][1][11][33] = 28, + [0][1][2][1][2][35] = 64, + [0][1][2][1][1][35] = 34, + [0][1][2][1][3][35] = 70, + [0][1][2][1][5][35] = 62, + [0][1][2][1][6][35] = 34, + [0][1][2][1][9][35] = 34, + [0][1][2][1][8][35] = 30, + [0][1][2][1][11][35] = 28, + [0][1][2][1][2][37] = 68, + [0][1][2][1][1][37] = 127, + [0][1][2][1][3][37] = 70, + [0][1][2][1][5][37] = 62, + [0][1][2][1][6][37] = 34, + [0][1][2][1][9][37] = 68, + [0][1][2][1][8][37] = 30, + [0][1][2][1][11][37] = 127, + [0][1][2][1][2][38] = 76, + [0][1][2][1][1][38] = 4, + [0][1][2][1][3][38] = 127, + [0][1][2][1][5][38] = 76, + [0][1][2][1][6][38] = 4, + [0][1][2][1][9][38] = 76, + [0][1][2][1][8][38] = 30, + [0][1][2][1][11][38] = 28, + [0][1][2][1][2][40] = 76, + [0][1][2][1][1][40] = 4, + [0][1][2][1][3][40] = 127, + [0][1][2][1][5][40] = 76, + [0][1][2][1][6][40] = 4, + [0][1][2][1][9][40] = 76, + [0][1][2][1][8][40] = 30, + [0][1][2][1][11][40] = 28, + [0][1][2][1][2][42] = 76, + [0][1][2][1][1][42] = 4, + [0][1][2][1][3][42] = 127, + [0][1][2][1][5][42] = 76, + [0][1][2][1][6][42] = 4, + [0][1][2][1][9][42] = 76, + [0][1][2][1][8][42] = 30, + [0][1][2][1][11][42] = 28, + [0][1][2][1][2][44] = 76, + [0][1][2][1][1][44] = 4, + [0][1][2][1][3][44] = 127, + [0][1][2][1][5][44] = 76, + [0][1][2][1][6][44] = 4, + [0][1][2][1][9][44] = 76, + [0][1][2][1][8][44] = 30, + [0][1][2][1][11][44] = 28, + [0][1][2][1][2][46] = 76, + [0][1][2][1][1][46] = 4, + [0][1][2][1][3][46] = 127, + [0][1][2][1][5][46] = 76, + [0][1][2][1][6][46] = 4, + [0][1][2][1][9][46] = 76, + [0][1][2][1][8][46] = 30, + [0][1][2][1][11][46] = 28, + [1][0][2][0][2][1] = 68, + [1][0][2][0][1][1] = 64, + [1][0][2][0][3][1] = 62, + [1][0][2][0][5][1] = 64, + [1][0][2][0][6][1] = 64, + [1][0][2][0][9][1] = 64, + [1][0][2][0][8][1] = 30, + [1][0][2][0][11][1] = 52, + [1][0][2][0][2][5] = 72, + [1][0][2][0][1][5] = 64, + [1][0][2][0][3][5] = 62, + [1][0][2][0][5][5] = 64, + [1][0][2][0][6][5] = 60, + [1][0][2][0][9][5] = 64, + [1][0][2][0][8][5] = 30, + [1][0][2][0][11][5] = 52, + [1][0][2][0][2][9] = 72, + [1][0][2][0][1][9] = 64, + [1][0][2][0][3][9] = 62, + [1][0][2][0][5][9] = 64, + [1][0][2][0][6][9] = 64, + [1][0][2][0][9][9] = 64, + [1][0][2][0][8][9] = 54, + [1][0][2][0][11][9] = 52, + [1][0][2][0][2][13] = 66, + [1][0][2][0][1][13] = 64, + [1][0][2][0][3][13] = 62, + [1][0][2][0][5][13] = 64, + [1][0][2][0][6][13] = 64, + [1][0][2][0][9][13] = 64, + [1][0][2][0][8][13] = 54, + [1][0][2][0][11][13] = 52, + [1][0][2][0][2][16] = 62, + [1][0][2][0][1][16] = 64, + [1][0][2][0][3][16] = 72, + [1][0][2][0][5][16] = 62, + [1][0][2][0][6][16] = 64, + [1][0][2][0][9][16] = 64, + [1][0][2][0][8][16] = 54, + 
[1][0][2][0][11][16] = 52, + [1][0][2][0][2][20] = 72, + [1][0][2][0][1][20] = 64, + [1][0][2][0][3][20] = 72, + [1][0][2][0][5][20] = 72, + [1][0][2][0][6][20] = 64, + [1][0][2][0][9][20] = 64, + [1][0][2][0][8][20] = 54, + [1][0][2][0][11][20] = 52, + [1][0][2][0][2][24] = 72, + [1][0][2][0][1][24] = 64, + [1][0][2][0][3][24] = 72, + [1][0][2][0][5][24] = 127, + [1][0][2][0][6][24] = 64, + [1][0][2][0][9][24] = 127, + [1][0][2][0][8][24] = 54, + [1][0][2][0][11][24] = 52, + [1][0][2][0][2][28] = 72, + [1][0][2][0][1][28] = 64, + [1][0][2][0][3][28] = 72, + [1][0][2][0][5][28] = 127, + [1][0][2][0][6][28] = 64, + [1][0][2][0][9][28] = 127, + [1][0][2][0][8][28] = 54, + [1][0][2][0][11][28] = 52, + [1][0][2][0][2][32] = 72, + [1][0][2][0][1][32] = 64, + [1][0][2][0][3][32] = 72, + [1][0][2][0][5][32] = 72, + [1][0][2][0][6][32] = 64, + [1][0][2][0][9][32] = 64, + [1][0][2][0][8][32] = 54, + [1][0][2][0][11][32] = 52, + [1][0][2][0][2][36] = 72, + [1][0][2][0][1][36] = 127, + [1][0][2][0][3][36] = 72, + [1][0][2][0][5][36] = 72, + [1][0][2][0][6][36] = 64, + [1][0][2][0][9][36] = 72, + [1][0][2][0][8][36] = 54, + [1][0][2][0][11][36] = 127, + [1][0][2][0][2][39] = 72, + [1][0][2][0][1][39] = 28, + [1][0][2][0][3][39] = 127, + [1][0][2][0][5][39] = 72, + [1][0][2][0][6][39] = 28, + [1][0][2][0][9][39] = 72, + [1][0][2][0][8][39] = 54, + [1][0][2][0][11][39] = 52, + [1][0][2][0][2][43] = 72, + [1][0][2][0][1][43] = 28, + [1][0][2][0][3][43] = 127, + [1][0][2][0][5][43] = 72, + [1][0][2][0][6][43] = 28, + [1][0][2][0][9][43] = 72, + [1][0][2][0][8][43] = 54, + [1][0][2][0][11][43] = 52, + [1][1][2][0][2][1] = 58, + [1][1][2][0][1][1] = 52, + [1][1][2][0][3][1] = 50, + [1][1][2][0][5][1] = 52, + [1][1][2][0][6][1] = 52, + [1][1][2][0][9][1] = 52, + [1][1][2][0][8][1] = 18, + [1][1][2][0][11][1] = 40, + [1][1][2][0][2][5] = 72, + [1][1][2][0][1][5] = 52, + [1][1][2][0][3][5] = 50, + [1][1][2][0][5][5] = 52, + [1][1][2][0][6][5] = 46, + [1][1][2][0][9][5] = 52, + [1][1][2][0][8][5] = 18, + [1][1][2][0][11][5] = 40, + [1][1][2][0][2][9] = 72, + [1][1][2][0][1][9] = 52, + [1][1][2][0][3][9] = 50, + [1][1][2][0][5][9] = 52, + [1][1][2][0][6][9] = 52, + [1][1][2][0][9][9] = 52, + [1][1][2][0][8][9] = 42, + [1][1][2][0][11][9] = 40, + [1][1][2][0][2][13] = 58, + [1][1][2][0][1][13] = 52, + [1][1][2][0][3][13] = 50, + [1][1][2][0][5][13] = 52, + [1][1][2][0][6][13] = 52, + [1][1][2][0][9][13] = 52, + [1][1][2][0][8][13] = 42, + [1][1][2][0][11][13] = 40, + [1][1][2][0][2][16] = 56, + [1][1][2][0][1][16] = 52, + [1][1][2][0][3][16] = 72, + [1][1][2][0][5][16] = 56, + [1][1][2][0][6][16] = 52, + [1][1][2][0][9][16] = 52, + [1][1][2][0][8][16] = 42, + [1][1][2][0][11][16] = 40, + [1][1][2][0][2][20] = 72, + [1][1][2][0][1][20] = 52, + [1][1][2][0][3][20] = 72, + [1][1][2][0][5][20] = 72, + [1][1][2][0][6][20] = 52, + [1][1][2][0][9][20] = 52, + [1][1][2][0][8][20] = 42, + [1][1][2][0][11][20] = 40, + [1][1][2][0][2][24] = 72, + [1][1][2][0][1][24] = 52, + [1][1][2][0][3][24] = 72, + [1][1][2][0][5][24] = 127, + [1][1][2][0][6][24] = 52, + [1][1][2][0][9][24] = 127, + [1][1][2][0][8][24] = 42, + [1][1][2][0][11][24] = 40, + [1][1][2][0][2][28] = 72, + [1][1][2][0][1][28] = 52, + [1][1][2][0][3][28] = 72, + [1][1][2][0][5][28] = 127, + [1][1][2][0][6][28] = 52, + [1][1][2][0][9][28] = 127, + [1][1][2][0][8][28] = 42, + [1][1][2][0][11][28] = 40, + [1][1][2][0][2][32] = 68, + [1][1][2][0][1][32] = 52, + [1][1][2][0][3][32] = 72, + [1][1][2][0][5][32] = 68, + [1][1][2][0][6][32] = 52, + [1][1][2][0][9][32] = 
52, + [1][1][2][0][8][32] = 42, + [1][1][2][0][11][32] = 40, + [1][1][2][0][2][36] = 72, + [1][1][2][0][1][36] = 127, + [1][1][2][0][3][36] = 72, + [1][1][2][0][5][36] = 72, + [1][1][2][0][6][36] = 52, + [1][1][2][0][9][36] = 72, + [1][1][2][0][8][36] = 42, + [1][1][2][0][11][36] = 127, + [1][1][2][0][2][39] = 72, + [1][1][2][0][1][39] = 16, + [1][1][2][0][3][39] = 127, + [1][1][2][0][5][39] = 72, + [1][1][2][0][6][39] = 16, + [1][1][2][0][9][39] = 72, + [1][1][2][0][8][39] = 42, + [1][1][2][0][11][39] = 40, + [1][1][2][0][2][43] = 72, + [1][1][2][0][1][43] = 16, + [1][1][2][0][3][43] = 127, + [1][1][2][0][5][43] = 72, + [1][1][2][0][6][43] = 16, + [1][1][2][0][9][43] = 72, + [1][1][2][0][8][43] = 42, + [1][1][2][0][11][43] = 40, + [1][1][2][1][2][1] = 58, + [1][1][2][1][1][1] = 40, + [1][1][2][1][3][1] = 50, + [1][1][2][1][5][1] = 40, + [1][1][2][1][6][1] = 40, + [1][1][2][1][9][1] = 40, + [1][1][2][1][8][1] = 6, + [1][1][2][1][11][1] = 28, + [1][1][2][1][2][5] = 68, + [1][1][2][1][1][5] = 40, + [1][1][2][1][3][5] = 50, + [1][1][2][1][5][5] = 40, + [1][1][2][1][6][5] = 40, + [1][1][2][1][9][5] = 40, + [1][1][2][1][8][5] = 6, + [1][1][2][1][11][5] = 28, + [1][1][2][1][2][9] = 68, + [1][1][2][1][1][9] = 40, + [1][1][2][1][3][9] = 50, + [1][1][2][1][5][9] = 40, + [1][1][2][1][6][9] = 40, + [1][1][2][1][9][9] = 40, + [1][1][2][1][8][9] = 30, + [1][1][2][1][11][9] = 28, + [1][1][2][1][2][13] = 58, + [1][1][2][1][1][13] = 40, + [1][1][2][1][3][13] = 50, + [1][1][2][1][5][13] = 40, + [1][1][2][1][6][13] = 40, + [1][1][2][1][9][13] = 40, + [1][1][2][1][8][13] = 30, + [1][1][2][1][11][13] = 28, + [1][1][2][1][2][16] = 56, + [1][1][2][1][1][16] = 40, + [1][1][2][1][3][16] = 72, + [1][1][2][1][5][16] = 56, + [1][1][2][1][6][16] = 40, + [1][1][2][1][9][16] = 40, + [1][1][2][1][8][16] = 30, + [1][1][2][1][11][16] = 28, + [1][1][2][1][2][20] = 68, + [1][1][2][1][1][20] = 40, + [1][1][2][1][3][20] = 72, + [1][1][2][1][5][20] = 68, + [1][1][2][1][6][20] = 40, + [1][1][2][1][9][20] = 40, + [1][1][2][1][8][20] = 30, + [1][1][2][1][11][20] = 28, + [1][1][2][1][2][24] = 68, + [1][1][2][1][1][24] = 40, + [1][1][2][1][3][24] = 72, + [1][1][2][1][5][24] = 127, + [1][1][2][1][6][24] = 40, + [1][1][2][1][9][24] = 127, + [1][1][2][1][8][24] = 30, + [1][1][2][1][11][24] = 28, + [1][1][2][1][2][28] = 68, + [1][1][2][1][1][28] = 40, + [1][1][2][1][3][28] = 72, + [1][1][2][1][5][28] = 127, + [1][1][2][1][6][28] = 40, + [1][1][2][1][9][28] = 127, + [1][1][2][1][8][28] = 30, + [1][1][2][1][11][28] = 28, + [1][1][2][1][2][32] = 68, + [1][1][2][1][1][32] = 40, + [1][1][2][1][3][32] = 72, + [1][1][2][1][5][32] = 68, + [1][1][2][1][6][32] = 40, + [1][1][2][1][9][32] = 40, + [1][1][2][1][8][32] = 30, + [1][1][2][1][11][32] = 28, + [1][1][2][1][2][36] = 68, + [1][1][2][1][1][36] = 127, + [1][1][2][1][3][36] = 72, + [1][1][2][1][5][36] = 68, + [1][1][2][1][6][36] = 40, + [1][1][2][1][9][36] = 68, + [1][1][2][1][8][36] = 30, + [1][1][2][1][11][36] = 127, + [1][1][2][1][2][39] = 72, + [1][1][2][1][1][39] = 4, + [1][1][2][1][3][39] = 127, + [1][1][2][1][5][39] = 72, + [1][1][2][1][6][39] = 4, + [1][1][2][1][9][39] = 72, + [1][1][2][1][8][39] = 30, + [1][1][2][1][11][39] = 28, + [1][1][2][1][2][43] = 72, + [1][1][2][1][1][43] = 4, + [1][1][2][1][3][43] = 127, + [1][1][2][1][5][43] = 72, + [1][1][2][1][6][43] = 4, + [1][1][2][1][9][43] = 72, + [1][1][2][1][8][43] = 30, + [1][1][2][1][11][43] = 28, + [2][0][2][0][2][3] = 64, + [2][0][2][0][1][3] = 64, + [2][0][2][0][3][3] = 64, + [2][0][2][0][5][3] = 62, + [2][0][2][0][6][3] = 64, + 
[2][0][2][0][9][3] = 64, + [2][0][2][0][8][3] = 30, + [2][0][2][0][11][3] = 52, + [2][0][2][0][2][11] = 64, + [2][0][2][0][1][11] = 64, + [2][0][2][0][3][11] = 64, + [2][0][2][0][5][11] = 62, + [2][0][2][0][6][11] = 64, + [2][0][2][0][9][11] = 64, + [2][0][2][0][8][11] = 54, + [2][0][2][0][11][11] = 52, + [2][0][2][0][2][18] = 62, + [2][0][2][0][1][18] = 64, + [2][0][2][0][3][18] = 72, + [2][0][2][0][5][18] = 66, + [2][0][2][0][6][18] = 64, + [2][0][2][0][9][18] = 64, + [2][0][2][0][8][18] = 54, + [2][0][2][0][11][18] = 52, + [2][0][2][0][2][26] = 72, + [2][0][2][0][1][26] = 64, + [2][0][2][0][3][26] = 72, + [2][0][2][0][5][26] = 127, + [2][0][2][0][6][26] = 64, + [2][0][2][0][9][26] = 127, + [2][0][2][0][8][26] = 54, + [2][0][2][0][11][26] = 52, + [2][0][2][0][2][34] = 72, + [2][0][2][0][1][34] = 127, + [2][0][2][0][3][34] = 72, + [2][0][2][0][5][34] = 72, + [2][0][2][0][6][34] = 64, + [2][0][2][0][9][34] = 72, + [2][0][2][0][8][34] = 54, + [2][0][2][0][11][34] = 127, + [2][0][2][0][2][41] = 72, + [2][0][2][0][1][41] = 28, + [2][0][2][0][3][41] = 127, + [2][0][2][0][5][41] = 72, + [2][0][2][0][6][41] = 28, + [2][0][2][0][9][41] = 72, + [2][0][2][0][8][41] = 54, + [2][0][2][0][11][41] = 52, + [2][1][2][0][2][3] = 56, + [2][1][2][0][1][3] = 52, + [2][1][2][0][3][3] = 52, + [2][1][2][0][5][3] = 52, + [2][1][2][0][6][3] = 52, + [2][1][2][0][9][3] = 52, + [2][1][2][0][8][3] = 18, + [2][1][2][0][11][3] = 40, + [2][1][2][0][2][11] = 56, + [2][1][2][0][1][11] = 52, + [2][1][2][0][3][11] = 52, + [2][1][2][0][5][11] = 52, + [2][1][2][0][6][11] = 52, + [2][1][2][0][9][11] = 52, + [2][1][2][0][8][11] = 42, + [2][1][2][0][11][11] = 40, + [2][1][2][0][2][18] = 56, + [2][1][2][0][1][18] = 52, + [2][1][2][0][3][18] = 72, + [2][1][2][0][5][18] = 56, + [2][1][2][0][6][18] = 52, + [2][1][2][0][9][18] = 52, + [2][1][2][0][8][18] = 42, + [2][1][2][0][11][18] = 40, + [2][1][2][0][2][26] = 72, + [2][1][2][0][1][26] = 52, + [2][1][2][0][3][26] = 72, + [2][1][2][0][5][26] = 127, + [2][1][2][0][6][26] = 52, + [2][1][2][0][9][26] = 127, + [2][1][2][0][8][26] = 42, + [2][1][2][0][11][26] = 40, + [2][1][2][0][2][34] = 72, + [2][1][2][0][1][34] = 127, + [2][1][2][0][3][34] = 72, + [2][1][2][0][5][34] = 72, + [2][1][2][0][6][34] = 52, + [2][1][2][0][9][34] = 72, + [2][1][2][0][8][34] = 42, + [2][1][2][0][11][34] = 127, + [2][1][2][0][2][41] = 72, + [2][1][2][0][1][41] = 16, + [2][1][2][0][3][41] = 127, + [2][1][2][0][5][41] = 72, + [2][1][2][0][6][41] = 16, + [2][1][2][0][9][41] = 72, + [2][1][2][0][8][41] = 42, + [2][1][2][0][11][41] = 40, + [2][1][2][1][2][3] = 56, + [2][1][2][1][1][3] = 40, + [2][1][2][1][3][3] = 52, + [2][1][2][1][5][3] = 40, + [2][1][2][1][6][3] = 40, + [2][1][2][1][9][3] = 40, + [2][1][2][1][8][3] = 6, + [2][1][2][1][11][3] = 28, + [2][1][2][1][2][11] = 56, + [2][1][2][1][1][11] = 40, + [2][1][2][1][3][11] = 52, + [2][1][2][1][5][11] = 40, + [2][1][2][1][6][11] = 40, + [2][1][2][1][9][11] = 40, + [2][1][2][1][8][11] = 30, + [2][1][2][1][11][11] = 28, + [2][1][2][1][2][18] = 56, + [2][1][2][1][1][18] = 40, + [2][1][2][1][3][18] = 72, + [2][1][2][1][5][18] = 56, + [2][1][2][1][6][18] = 40, + [2][1][2][1][9][18] = 40, + [2][1][2][1][8][18] = 30, + [2][1][2][1][11][18] = 28, + [2][1][2][1][2][26] = 68, + [2][1][2][1][1][26] = 40, + [2][1][2][1][3][26] = 72, + [2][1][2][1][5][26] = 127, + [2][1][2][1][6][26] = 40, + [2][1][2][1][9][26] = 127, + [2][1][2][1][8][26] = 30, + [2][1][2][1][11][26] = 28, + [2][1][2][1][2][34] = 68, + [2][1][2][1][1][34] = 127, + [2][1][2][1][3][34] = 72, + 
[2][1][2][1][5][34] = 68, + [2][1][2][1][6][34] = 40, + [2][1][2][1][9][34] = 68, + [2][1][2][1][8][34] = 30, + [2][1][2][1][11][34] = 127, + [2][1][2][1][2][41] = 72, + [2][1][2][1][1][41] = 4, + [2][1][2][1][3][41] = 127, + [2][1][2][1][5][41] = 72, + [2][1][2][1][6][41] = 4, + [2][1][2][1][9][41] = 72, + [2][1][2][1][8][41] = 30, + [2][1][2][1][11][41] = 28, +}; + +const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { + [0][0][0][0] = 32, + [0][0][0][1] = 32, + [0][0][0][2] = 32, + [0][0][0][3] = 32, + [0][0][0][4] = 32, + [0][0][0][5] = 32, + [0][0][0][6] = 32, + [0][0][0][7] = 32, + [0][0][0][8] = 32, + [0][0][0][9] = 32, + [0][0][0][10] = 32, + [0][0][0][11] = 32, + [0][0][0][12] = 32, + [0][0][0][13] = 0, + [0][1][0][0] = 20, + [0][1][0][1] = 20, + [0][1][0][2] = 20, + [0][1][0][3] = 20, + [0][1][0][4] = 20, + [0][1][0][5] = 20, + [0][1][0][6] = 20, + [0][1][0][7] = 20, + [0][1][0][8] = 20, + [0][1][0][9] = 20, + [0][1][0][10] = 20, + [0][1][0][11] = 20, + [0][1][0][12] = 20, + [0][1][0][13] = 0, + [1][0][0][0] = 42, + [1][0][0][1] = 42, + [1][0][0][2] = 42, + [1][0][0][3] = 42, + [1][0][0][4] = 42, + [1][0][0][5] = 42, + [1][0][0][6] = 42, + [1][0][0][7] = 42, + [1][0][0][8] = 42, + [1][0][0][9] = 42, + [1][0][0][10] = 42, + [1][0][0][11] = 42, + [1][0][0][12] = 36, + [1][0][0][13] = 0, + [1][1][0][0] = 30, + [1][1][0][1] = 30, + [1][1][0][2] = 30, + [1][1][0][3] = 30, + [1][1][0][4] = 30, + [1][1][0][5] = 30, + [1][1][0][6] = 30, + [1][1][0][7] = 30, + [1][1][0][8] = 30, + [1][1][0][9] = 30, + [1][1][0][10] = 30, + [1][1][0][11] = 30, + [1][1][0][12] = 30, + [1][1][0][13] = 0, + [2][0][0][0] = 52, + [2][0][0][1] = 52, + [2][0][0][2] = 52, + [2][0][0][3] = 52, + [2][0][0][4] = 52, + [2][0][0][5] = 52, + [2][0][0][6] = 52, + [2][0][0][7] = 52, + [2][0][0][8] = 52, + [2][0][0][9] = 52, + [2][0][0][10] = 52, + [2][0][0][11] = 52, + [2][0][0][12] = 40, + [2][0][0][13] = 0, + [2][1][0][0] = 40, + [2][1][0][1] = 40, + [2][1][0][2] = 40, + [2][1][0][3] = 40, + [2][1][0][4] = 40, + [2][1][0][5] = 40, + [2][1][0][6] = 40, + [2][1][0][7] = 40, + [2][1][0][8] = 40, + [2][1][0][9] = 40, + [2][1][0][10] = 40, + [2][1][0][11] = 40, + [2][1][0][12] = 26, + [2][1][0][13] = 0, + [0][0][2][0] = 70, + [0][0][1][0] = 32, + [0][0][3][0] = 40, + [0][0][5][0] = 70, + [0][0][6][0] = 32, + [0][0][9][0] = 32, + [0][0][8][0] = 60, + [0][0][11][0] = 32, + [0][0][2][1] = 70, + [0][0][1][1] = 32, + [0][0][3][1] = 40, + [0][0][5][1] = 70, + [0][0][6][1] = 32, + [0][0][9][1] = 32, + [0][0][8][1] = 60, + [0][0][11][1] = 32, + [0][0][2][2] = 74, + [0][0][1][2] = 32, + [0][0][3][2] = 40, + [0][0][5][2] = 74, + [0][0][6][2] = 32, + [0][0][9][2] = 32, + [0][0][8][2] = 60, + [0][0][11][2] = 32, + [0][0][2][3] = 78, + [0][0][1][3] = 32, + [0][0][3][3] = 40, + [0][0][5][3] = 78, + [0][0][6][3] = 32, + [0][0][9][3] = 32, + [0][0][8][3] = 60, + [0][0][11][3] = 32, + [0][0][2][4] = 78, + [0][0][1][4] = 32, + [0][0][3][4] = 40, + [0][0][5][4] = 78, + [0][0][6][4] = 32, + [0][0][9][4] = 32, + [0][0][8][4] = 60, + [0][0][11][4] = 32, + [0][0][2][5] = 78, + [0][0][1][5] = 32, + [0][0][3][5] = 40, + [0][0][5][5] = 78, + [0][0][6][5] = 32, + [0][0][9][5] = 32, + [0][0][8][5] = 60, + [0][0][11][5] = 32, + [0][0][2][6] = 78, + [0][0][1][6] = 32, + [0][0][3][6] = 40, + [0][0][5][6] = 78, + [0][0][6][6] = 32, + [0][0][9][6] = 32, + [0][0][8][6] = 60, + [0][0][11][6] = 32, + [0][0][2][7] = 78, + [0][0][1][7] = 32, + [0][0][3][7] = 40, + [0][0][5][7] = 78, + [0][0][6][7] = 32, + 
[0][0][9][7] = 32, + [0][0][8][7] = 60, + [0][0][11][7] = 32, + [0][0][2][8] = 74, + [0][0][1][8] = 32, + [0][0][3][8] = 40, + [0][0][5][8] = 74, + [0][0][6][8] = 32, + [0][0][9][8] = 32, + [0][0][8][8] = 60, + [0][0][11][8] = 32, + [0][0][2][9] = 70, + [0][0][1][9] = 32, + [0][0][3][9] = 40, + [0][0][5][9] = 70, + [0][0][6][9] = 32, + [0][0][9][9] = 32, + [0][0][8][9] = 60, + [0][0][11][9] = 32, + [0][0][2][10] = 70, + [0][0][1][10] = 32, + [0][0][3][10] = 40, + [0][0][5][10] = 70, + [0][0][6][10] = 32, + [0][0][9][10] = 32, + [0][0][8][10] = 60, + [0][0][11][10] = 32, + [0][0][2][11] = 58, + [0][0][1][11] = 32, + [0][0][3][11] = 40, + [0][0][5][11] = 58, + [0][0][6][11] = 32, + [0][0][9][11] = 32, + [0][0][8][11] = 60, + [0][0][11][11] = 32, + [0][0][2][12] = 34, + [0][0][1][12] = 32, + [0][0][3][12] = 40, + [0][0][5][12] = 34, + [0][0][6][12] = 32, + [0][0][9][12] = 32, + [0][0][8][12] = 60, + [0][0][11][12] = 32, + [0][0][2][13] = 127, + [0][0][1][13] = 127, + [0][0][3][13] = 127, + [0][0][5][13] = 127, + [0][0][6][13] = 127, + [0][0][9][13] = 127, + [0][0][8][13] = 127, + [0][0][11][13] = 127, + [0][1][2][0] = 64, + [0][1][1][0] = 20, + [0][1][3][0] = 28, + [0][1][5][0] = 64, + [0][1][6][0] = 20, + [0][1][9][0] = 20, + [0][1][8][0] = 48, + [0][1][11][0] = 20, + [0][1][2][1] = 64, + [0][1][1][1] = 20, + [0][1][3][1] = 28, + [0][1][5][1] = 64, + [0][1][6][1] = 20, + [0][1][9][1] = 20, + [0][1][8][1] = 48, + [0][1][11][1] = 20, + [0][1][2][2] = 68, + [0][1][1][2] = 20, + [0][1][3][2] = 28, + [0][1][5][2] = 68, + [0][1][6][2] = 20, + [0][1][9][2] = 20, + [0][1][8][2] = 48, + [0][1][11][2] = 20, + [0][1][2][3] = 72, + [0][1][1][3] = 20, + [0][1][3][3] = 28, + [0][1][5][3] = 72, + [0][1][6][3] = 20, + [0][1][9][3] = 20, + [0][1][8][3] = 48, + [0][1][11][3] = 20, + [0][1][2][4] = 76, + [0][1][1][4] = 20, + [0][1][3][4] = 28, + [0][1][5][4] = 76, + [0][1][6][4] = 20, + [0][1][9][4] = 20, + [0][1][8][4] = 48, + [0][1][11][4] = 20, + [0][1][2][5] = 78, + [0][1][1][5] = 20, + [0][1][3][5] = 28, + [0][1][5][5] = 78, + [0][1][6][5] = 20, + [0][1][9][5] = 20, + [0][1][8][5] = 48, + [0][1][11][5] = 20, + [0][1][2][6] = 76, + [0][1][1][6] = 20, + [0][1][3][6] = 28, + [0][1][5][6] = 76, + [0][1][6][6] = 20, + [0][1][9][6] = 20, + [0][1][8][6] = 48, + [0][1][11][6] = 20, + [0][1][2][7] = 72, + [0][1][1][7] = 20, + [0][1][3][7] = 28, + [0][1][5][7] = 72, + [0][1][6][7] = 20, + [0][1][9][7] = 20, + [0][1][8][7] = 48, + [0][1][11][7] = 20, + [0][1][2][8] = 68, + [0][1][1][8] = 20, + [0][1][3][8] = 28, + [0][1][5][8] = 68, + [0][1][6][8] = 20, + [0][1][9][8] = 20, + [0][1][8][8] = 48, + [0][1][11][8] = 20, + [0][1][2][9] = 64, + [0][1][1][9] = 20, + [0][1][3][9] = 28, + [0][1][5][9] = 64, + [0][1][6][9] = 20, + [0][1][9][9] = 20, + [0][1][8][9] = 48, + [0][1][11][9] = 20, + [0][1][2][10] = 64, + [0][1][1][10] = 20, + [0][1][3][10] = 28, + [0][1][5][10] = 64, + [0][1][6][10] = 20, + [0][1][9][10] = 20, + [0][1][8][10] = 48, + [0][1][11][10] = 20, + [0][1][2][11] = 54, + [0][1][1][11] = 20, + [0][1][3][11] = 28, + [0][1][5][11] = 54, + [0][1][6][11] = 20, + [0][1][9][11] = 20, + [0][1][8][11] = 48, + [0][1][11][11] = 20, + [0][1][2][12] = 32, + [0][1][1][12] = 20, + [0][1][3][12] = 28, + [0][1][5][12] = 32, + [0][1][6][12] = 20, + [0][1][9][12] = 20, + [0][1][8][12] = 48, + [0][1][11][12] = 20, + [0][1][2][13] = 127, + [0][1][1][13] = 127, + [0][1][3][13] = 127, + [0][1][5][13] = 127, + [0][1][6][13] = 127, + [0][1][9][13] = 127, + [0][1][8][13] = 127, + [0][1][11][13] = 127, + [1][0][2][0] = 72, + 
[1][0][1][0] = 42, + [1][0][3][0] = 50, + [1][0][5][0] = 72, + [1][0][6][0] = 42, + [1][0][9][0] = 42, + [1][0][8][0] = 60, + [1][0][11][0] = 42, + [1][0][2][1] = 72, + [1][0][1][1] = 42, + [1][0][3][1] = 50, + [1][0][5][1] = 72, + [1][0][6][1] = 42, + [1][0][9][1] = 42, + [1][0][8][1] = 60, + [1][0][11][1] = 42, + [1][0][2][2] = 76, + [1][0][1][2] = 42, + [1][0][3][2] = 50, + [1][0][5][2] = 76, + [1][0][6][2] = 42, + [1][0][9][2] = 42, + [1][0][8][2] = 60, + [1][0][11][2] = 42, + [1][0][2][3] = 78, + [1][0][1][3] = 42, + [1][0][3][3] = 50, + [1][0][5][3] = 78, + [1][0][6][3] = 42, + [1][0][9][3] = 42, + [1][0][8][3] = 60, + [1][0][11][3] = 42, + [1][0][2][4] = 78, + [1][0][1][4] = 42, + [1][0][3][4] = 50, + [1][0][5][4] = 78, + [1][0][6][4] = 42, + [1][0][9][4] = 42, + [1][0][8][4] = 60, + [1][0][11][4] = 42, + [1][0][2][5] = 78, + [1][0][1][5] = 42, + [1][0][3][5] = 50, + [1][0][5][5] = 78, + [1][0][6][5] = 42, + [1][0][9][5] = 42, + [1][0][8][5] = 60, + [1][0][11][5] = 42, + [1][0][2][6] = 78, + [1][0][1][6] = 42, + [1][0][3][6] = 50, + [1][0][5][6] = 78, + [1][0][6][6] = 42, + [1][0][9][6] = 42, + [1][0][8][6] = 60, + [1][0][11][6] = 42, + [1][0][2][7] = 78, + [1][0][1][7] = 42, + [1][0][3][7] = 50, + [1][0][5][7] = 78, + [1][0][6][7] = 42, + [1][0][9][7] = 42, + [1][0][8][7] = 60, + [1][0][11][7] = 42, + [1][0][2][8] = 78, + [1][0][1][8] = 42, + [1][0][3][8] = 50, + [1][0][5][8] = 78, + [1][0][6][8] = 42, + [1][0][9][8] = 42, + [1][0][8][8] = 60, + [1][0][11][8] = 42, + [1][0][2][9] = 74, + [1][0][1][9] = 42, + [1][0][3][9] = 50, + [1][0][5][9] = 74, + [1][0][6][9] = 42, + [1][0][9][9] = 42, + [1][0][8][9] = 60, + [1][0][11][9] = 42, + [1][0][2][10] = 74, + [1][0][1][10] = 42, + [1][0][3][10] = 50, + [1][0][5][10] = 74, + [1][0][6][10] = 42, + [1][0][9][10] = 42, + [1][0][8][10] = 60, + [1][0][11][10] = 42, + [1][0][2][11] = 64, + [1][0][1][11] = 42, + [1][0][3][11] = 50, + [1][0][5][11] = 64, + [1][0][6][11] = 42, + [1][0][9][11] = 42, + [1][0][8][11] = 60, + [1][0][11][11] = 42, + [1][0][2][12] = 36, + [1][0][1][12] = 42, + [1][0][3][12] = 50, + [1][0][5][12] = 36, + [1][0][6][12] = 42, + [1][0][9][12] = 42, + [1][0][8][12] = 60, + [1][0][11][12] = 42, + [1][0][2][13] = 127, + [1][0][1][13] = 127, + [1][0][3][13] = 127, + [1][0][5][13] = 127, + [1][0][6][13] = 127, + [1][0][9][13] = 127, + [1][0][8][13] = 127, + [1][0][11][13] = 127, + [1][1][2][0] = 66, + [1][1][1][0] = 30, + [1][1][3][0] = 38, + [1][1][5][0] = 66, + [1][1][6][0] = 30, + [1][1][9][0] = 30, + [1][1][8][0] = 48, + [1][1][11][0] = 30, + [1][1][2][1] = 66, + [1][1][1][1] = 30, + [1][1][3][1] = 38, + [1][1][5][1] = 66, + [1][1][6][1] = 30, + [1][1][9][1] = 30, + [1][1][8][1] = 48, + [1][1][11][1] = 30, + [1][1][2][2] = 70, + [1][1][1][2] = 30, + [1][1][3][2] = 38, + [1][1][5][2] = 70, + [1][1][6][2] = 30, + [1][1][9][2] = 30, + [1][1][8][2] = 48, + [1][1][11][2] = 30, + [1][1][2][3] = 74, + [1][1][1][3] = 30, + [1][1][3][3] = 38, + [1][1][5][3] = 74, + [1][1][6][3] = 30, + [1][1][9][3] = 30, + [1][1][8][3] = 48, + [1][1][11][3] = 30, + [1][1][2][4] = 78, + [1][1][1][4] = 30, + [1][1][3][4] = 38, + [1][1][5][4] = 78, + [1][1][6][4] = 30, + [1][1][9][4] = 30, + [1][1][8][4] = 48, + [1][1][11][4] = 30, + [1][1][2][5] = 78, + [1][1][1][5] = 30, + [1][1][3][5] = 38, + [1][1][5][5] = 78, + [1][1][6][5] = 30, + [1][1][9][5] = 30, + [1][1][8][5] = 48, + [1][1][11][5] = 30, + [1][1][2][6] = 78, + [1][1][1][6] = 30, + [1][1][3][6] = 38, + [1][1][5][6] = 78, + [1][1][6][6] = 30, + [1][1][9][6] = 30, + [1][1][8][6] = 48, + 
[1][1][11][6] = 30, + [1][1][2][7] = 74, + [1][1][1][7] = 30, + [1][1][3][7] = 38, + [1][1][5][7] = 74, + [1][1][6][7] = 30, + [1][1][9][7] = 30, + [1][1][8][7] = 48, + [1][1][11][7] = 30, + [1][1][2][8] = 70, + [1][1][1][8] = 30, + [1][1][3][8] = 38, + [1][1][5][8] = 70, + [1][1][6][8] = 30, + [1][1][9][8] = 30, + [1][1][8][8] = 48, + [1][1][11][8] = 30, + [1][1][2][9] = 66, + [1][1][1][9] = 30, + [1][1][3][9] = 38, + [1][1][5][9] = 66, + [1][1][6][9] = 30, + [1][1][9][9] = 30, + [1][1][8][9] = 48, + [1][1][11][9] = 30, + [1][1][2][10] = 66, + [1][1][1][10] = 30, + [1][1][3][10] = 38, + [1][1][5][10] = 66, + [1][1][6][10] = 30, + [1][1][9][10] = 30, + [1][1][8][10] = 48, + [1][1][11][10] = 30, + [1][1][2][11] = 60, + [1][1][1][11] = 30, + [1][1][3][11] = 38, + [1][1][5][11] = 60, + [1][1][6][11] = 30, + [1][1][9][11] = 30, + [1][1][8][11] = 48, + [1][1][11][11] = 30, + [1][1][2][12] = 32, + [1][1][1][12] = 30, + [1][1][3][12] = 38, + [1][1][5][12] = 32, + [1][1][6][12] = 30, + [1][1][9][12] = 30, + [1][1][8][12] = 48, + [1][1][11][12] = 30, + [1][1][2][13] = 127, + [1][1][1][13] = 127, + [1][1][3][13] = 127, + [1][1][5][13] = 127, + [1][1][6][13] = 127, + [1][1][9][13] = 127, + [1][1][8][13] = 127, + [1][1][11][13] = 127, + [2][0][2][0] = 76, + [2][0][1][0] = 52, + [2][0][3][0] = 64, + [2][0][5][0] = 76, + [2][0][6][0] = 52, + [2][0][9][0] = 52, + [2][0][8][0] = 60, + [2][0][11][0] = 52, + [2][0][2][1] = 76, + [2][0][1][1] = 52, + [2][0][3][1] = 64, + [2][0][5][1] = 76, + [2][0][6][1] = 52, + [2][0][9][1] = 52, + [2][0][8][1] = 60, + [2][0][11][1] = 52, + [2][0][2][2] = 78, + [2][0][1][2] = 52, + [2][0][3][2] = 64, + [2][0][5][2] = 78, + [2][0][6][2] = 52, + [2][0][9][2] = 52, + [2][0][8][2] = 60, + [2][0][11][2] = 52, + [2][0][2][3] = 78, + [2][0][1][3] = 52, + [2][0][3][3] = 64, + [2][0][5][3] = 78, + [2][0][6][3] = 52, + [2][0][9][3] = 52, + [2][0][8][3] = 60, + [2][0][11][3] = 52, + [2][0][2][4] = 78, + [2][0][1][4] = 52, + [2][0][3][4] = 64, + [2][0][5][4] = 78, + [2][0][6][4] = 52, + [2][0][9][4] = 52, + [2][0][8][4] = 60, + [2][0][11][4] = 52, + [2][0][2][5] = 78, + [2][0][1][5] = 52, + [2][0][3][5] = 64, + [2][0][5][5] = 78, + [2][0][6][5] = 52, + [2][0][9][5] = 52, + [2][0][8][5] = 60, + [2][0][11][5] = 52, + [2][0][2][6] = 78, + [2][0][1][6] = 52, + [2][0][3][6] = 64, + [2][0][5][6] = 78, + [2][0][6][6] = 52, + [2][0][9][6] = 52, + [2][0][8][6] = 60, + [2][0][11][6] = 52, + [2][0][2][7] = 78, + [2][0][1][7] = 52, + [2][0][3][7] = 64, + [2][0][5][7] = 78, + [2][0][6][7] = 52, + [2][0][9][7] = 52, + [2][0][8][7] = 60, + [2][0][11][7] = 52, + [2][0][2][8] = 78, + [2][0][1][8] = 52, + [2][0][3][8] = 64, + [2][0][5][8] = 78, + [2][0][6][8] = 52, + [2][0][9][8] = 52, + [2][0][8][8] = 60, + [2][0][11][8] = 52, + [2][0][2][9] = 76, + [2][0][1][9] = 52, + [2][0][3][9] = 64, + [2][0][5][9] = 76, + [2][0][6][9] = 52, + [2][0][9][9] = 52, + [2][0][8][9] = 60, + [2][0][11][9] = 52, + [2][0][2][10] = 76, + [2][0][1][10] = 52, + [2][0][3][10] = 64, + [2][0][5][10] = 76, + [2][0][6][10] = 52, + [2][0][9][10] = 52, + [2][0][8][10] = 60, + [2][0][11][10] = 52, + [2][0][2][11] = 68, + [2][0][1][11] = 52, + [2][0][3][11] = 64, + [2][0][5][11] = 68, + [2][0][6][11] = 52, + [2][0][9][11] = 52, + [2][0][8][11] = 60, + [2][0][11][11] = 52, + [2][0][2][12] = 40, + [2][0][1][12] = 52, + [2][0][3][12] = 64, + [2][0][5][12] = 40, + [2][0][6][12] = 52, + [2][0][9][12] = 52, + [2][0][8][12] = 60, + [2][0][11][12] = 52, + [2][0][2][13] = 127, + [2][0][1][13] = 127, + [2][0][3][13] = 127, + [2][0][5][13] = 
127, + [2][0][6][13] = 127, + [2][0][9][13] = 127, + [2][0][8][13] = 127, + [2][0][11][13] = 127, + [2][1][2][0] = 68, + [2][1][1][0] = 40, + [2][1][3][0] = 52, + [2][1][5][0] = 68, + [2][1][6][0] = 40, + [2][1][9][0] = 40, + [2][1][8][0] = 48, + [2][1][11][0] = 40, + [2][1][2][1] = 68, + [2][1][1][1] = 40, + [2][1][3][1] = 52, + [2][1][5][1] = 68, + [2][1][6][1] = 40, + [2][1][9][1] = 40, + [2][1][8][1] = 48, + [2][1][11][1] = 40, + [2][1][2][2] = 72, + [2][1][1][2] = 40, + [2][1][3][2] = 52, + [2][1][5][2] = 72, + [2][1][6][2] = 40, + [2][1][9][2] = 40, + [2][1][8][2] = 48, + [2][1][11][2] = 40, + [2][1][2][3] = 76, + [2][1][1][3] = 40, + [2][1][3][3] = 52, + [2][1][5][3] = 76, + [2][1][6][3] = 40, + [2][1][9][3] = 40, + [2][1][8][3] = 48, + [2][1][11][3] = 40, + [2][1][2][4] = 78, + [2][1][1][4] = 40, + [2][1][3][4] = 52, + [2][1][5][4] = 78, + [2][1][6][4] = 40, + [2][1][9][4] = 40, + [2][1][8][4] = 48, + [2][1][11][4] = 40, + [2][1][2][5] = 78, + [2][1][1][5] = 40, + [2][1][3][5] = 52, + [2][1][5][5] = 78, + [2][1][6][5] = 40, + [2][1][9][5] = 40, + [2][1][8][5] = 48, + [2][1][11][5] = 40, + [2][1][2][6] = 78, + [2][1][1][6] = 40, + [2][1][3][6] = 52, + [2][1][5][6] = 78, + [2][1][6][6] = 40, + [2][1][9][6] = 40, + [2][1][8][6] = 48, + [2][1][11][6] = 40, + [2][1][2][7] = 78, + [2][1][1][7] = 40, + [2][1][3][7] = 52, + [2][1][5][7] = 78, + [2][1][6][7] = 40, + [2][1][9][7] = 40, + [2][1][8][7] = 48, + [2][1][11][7] = 40, + [2][1][2][8] = 74, + [2][1][1][8] = 40, + [2][1][3][8] = 52, + [2][1][5][8] = 74, + [2][1][6][8] = 40, + [2][1][9][8] = 40, + [2][1][8][8] = 48, + [2][1][11][8] = 40, + [2][1][2][9] = 70, + [2][1][1][9] = 40, + [2][1][3][9] = 52, + [2][1][5][9] = 70, + [2][1][6][9] = 40, + [2][1][9][9] = 40, + [2][1][8][9] = 48, + [2][1][11][9] = 40, + [2][1][2][10] = 70, + [2][1][1][10] = 40, + [2][1][3][10] = 52, + [2][1][5][10] = 70, + [2][1][6][10] = 40, + [2][1][9][10] = 40, + [2][1][8][10] = 48, + [2][1][11][10] = 40, + [2][1][2][11] = 48, + [2][1][1][11] = 40, + [2][1][3][11] = 52, + [2][1][5][11] = 48, + [2][1][6][11] = 40, + [2][1][9][11] = 40, + [2][1][8][11] = 48, + [2][1][11][11] = 40, + [2][1][2][12] = 26, + [2][1][1][12] = 40, + [2][1][3][12] = 52, + [2][1][5][12] = 26, + [2][1][6][12] = 40, + [2][1][9][12] = 40, + [2][1][8][12] = 48, + [2][1][11][12] = 40, + [2][1][2][13] = 127, + [2][1][1][13] = 127, + [2][1][3][13] = 127, + [2][1][5][13] = 127, + [2][1][6][13] = 127, + [2][1][9][13] = 127, + [2][1][8][13] = 127, + [2][1][11][13] = 127, +}; + +const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { + [0][0][0][0] = 22, + [0][0][0][2] = 22, + [0][0][0][4] = 22, + [0][0][0][6] = 22, + [0][0][0][8] = 24, + [0][0][0][10] = 24, + [0][0][0][12] = 24, + [0][0][0][14] = 24, + [0][0][0][15] = 24, + [0][0][0][17] = 24, + [0][0][0][19] = 24, + [0][0][0][21] = 24, + [0][0][0][23] = 24, + [0][0][0][25] = 24, + [0][0][0][27] = 24, + [0][0][0][29] = 24, + [0][0][0][31] = 24, + [0][0][0][33] = 24, + [0][0][0][35] = 24, + [0][0][0][37] = 24, + [0][0][0][38] = 28, + [0][0][0][40] = 28, + [0][0][0][42] = 28, + [0][0][0][44] = 28, + [0][0][0][46] = 28, + [0][1][0][0] = 8, + [0][1][0][2] = 8, + [0][1][0][4] = 8, + [0][1][0][6] = 8, + [0][1][0][8] = 12, + [0][1][0][10] = 12, + [0][1][0][12] = 12, + [0][1][0][14] = 12, + [0][1][0][15] = 12, + [0][1][0][17] = 12, + [0][1][0][19] = 12, + [0][1][0][21] = 12, + [0][1][0][23] = 12, + [0][1][0][25] = 12, + [0][1][0][27] = 12, + [0][1][0][29] = 12, + [0][1][0][31] = 12, + [0][1][0][33] = 12, + 
[0][1][0][35] = 12, + [0][1][0][37] = 12, + [0][1][0][38] = 16, + [0][1][0][40] = 16, + [0][1][0][42] = 16, + [0][1][0][44] = 16, + [0][1][0][46] = 16, + [1][0][0][0] = 30, + [1][0][0][2] = 30, + [1][0][0][4] = 30, + [1][0][0][6] = 30, + [1][0][0][8] = 36, + [1][0][0][10] = 36, + [1][0][0][12] = 36, + [1][0][0][14] = 36, + [1][0][0][15] = 36, + [1][0][0][17] = 36, + [1][0][0][19] = 36, + [1][0][0][21] = 36, + [1][0][0][23] = 36, + [1][0][0][25] = 36, + [1][0][0][27] = 36, + [1][0][0][29] = 36, + [1][0][0][31] = 36, + [1][0][0][33] = 36, + [1][0][0][35] = 36, + [1][0][0][37] = 36, + [1][0][0][38] = 28, + [1][0][0][40] = 28, + [1][0][0][42] = 28, + [1][0][0][44] = 28, + [1][0][0][46] = 28, + [1][1][0][0] = 18, + [1][1][0][2] = 18, + [1][1][0][4] = 18, + [1][1][0][6] = 18, + [1][1][0][8] = 22, + [1][1][0][10] = 22, + [1][1][0][12] = 22, + [1][1][0][14] = 22, + [1][1][0][15] = 22, + [1][1][0][17] = 22, + [1][1][0][19] = 22, + [1][1][0][21] = 22, + [1][1][0][23] = 22, + [1][1][0][25] = 22, + [1][1][0][27] = 22, + [1][1][0][29] = 22, + [1][1][0][31] = 22, + [1][1][0][33] = 22, + [1][1][0][35] = 22, + [1][1][0][37] = 22, + [1][1][0][38] = 16, + [1][1][0][40] = 16, + [1][1][0][42] = 16, + [1][1][0][44] = 16, + [1][1][0][46] = 16, + [2][0][0][0] = 30, + [2][0][0][2] = 30, + [2][0][0][4] = 30, + [2][0][0][6] = 30, + [2][0][0][8] = 46, + [2][0][0][10] = 46, + [2][0][0][12] = 46, + [2][0][0][14] = 46, + [2][0][0][15] = 46, + [2][0][0][17] = 46, + [2][0][0][19] = 46, + [2][0][0][21] = 46, + [2][0][0][23] = 46, + [2][0][0][25] = 46, + [2][0][0][27] = 46, + [2][0][0][29] = 46, + [2][0][0][31] = 46, + [2][0][0][33] = 46, + [2][0][0][35] = 46, + [2][0][0][37] = 46, + [2][0][0][38] = 28, + [2][0][0][40] = 28, + [2][0][0][42] = 28, + [2][0][0][44] = 28, + [2][0][0][46] = 28, + [2][1][0][0] = 18, + [2][1][0][2] = 18, + [2][1][0][4] = 18, + [2][1][0][6] = 18, + [2][1][0][8] = 32, + [2][1][0][10] = 32, + [2][1][0][12] = 32, + [2][1][0][14] = 32, + [2][1][0][15] = 32, + [2][1][0][17] = 32, + [2][1][0][19] = 32, + [2][1][0][21] = 32, + [2][1][0][23] = 32, + [2][1][0][25] = 32, + [2][1][0][27] = 32, + [2][1][0][29] = 32, + [2][1][0][31] = 32, + [2][1][0][33] = 32, + [2][1][0][35] = 32, + [2][1][0][37] = 32, + [2][1][0][38] = 16, + [2][1][0][40] = 16, + [2][1][0][42] = 16, + [2][1][0][44] = 16, + [2][1][0][46] = 16, + [0][0][2][0] = 48, + [0][0][1][0] = 24, + [0][0][3][0] = 26, + [0][0][5][0] = 22, + [0][0][6][0] = 24, + [0][0][9][0] = 24, + [0][0][8][0] = 30, + [0][0][11][0] = 24, + [0][0][2][2] = 48, + [0][0][1][2] = 24, + [0][0][3][2] = 26, + [0][0][5][2] = 22, + [0][0][6][2] = 24, + [0][0][9][2] = 24, + [0][0][8][2] = 30, + [0][0][11][2] = 24, + [0][0][2][4] = 48, + [0][0][1][4] = 24, + [0][0][3][4] = 26, + [0][0][5][4] = 22, + [0][0][6][4] = 24, + [0][0][9][4] = 24, + [0][0][8][4] = 30, + [0][0][11][4] = 24, + [0][0][2][6] = 48, + [0][0][1][6] = 24, + [0][0][3][6] = 26, + [0][0][5][6] = 22, + [0][0][6][6] = 24, + [0][0][9][6] = 24, + [0][0][8][6] = 30, + [0][0][11][6] = 24, + [0][0][2][8] = 48, + [0][0][1][8] = 24, + [0][0][3][8] = 26, + [0][0][5][8] = 48, + [0][0][6][8] = 24, + [0][0][9][8] = 24, + [0][0][8][8] = 54, + [0][0][11][8] = 24, + [0][0][2][10] = 48, + [0][0][1][10] = 24, + [0][0][3][10] = 26, + [0][0][5][10] = 48, + [0][0][6][10] = 24, + [0][0][9][10] = 24, + [0][0][8][10] = 54, + [0][0][11][10] = 24, + [0][0][2][12] = 48, + [0][0][1][12] = 24, + [0][0][3][12] = 26, + [0][0][5][12] = 48, + [0][0][6][12] = 24, + [0][0][9][12] = 24, + [0][0][8][12] = 54, + [0][0][11][12] = 24, + [0][0][2][14] = 48, + 
[0][0][1][14] = 24, + [0][0][3][14] = 26, + [0][0][5][14] = 48, + [0][0][6][14] = 24, + [0][0][9][14] = 24, + [0][0][8][14] = 54, + [0][0][11][14] = 24, + [0][0][2][15] = 48, + [0][0][1][15] = 24, + [0][0][3][15] = 44, + [0][0][5][15] = 48, + [0][0][6][15] = 24, + [0][0][9][15] = 24, + [0][0][8][15] = 54, + [0][0][11][15] = 24, + [0][0][2][17] = 48, + [0][0][1][17] = 24, + [0][0][3][17] = 44, + [0][0][5][17] = 48, + [0][0][6][17] = 24, + [0][0][9][17] = 24, + [0][0][8][17] = 54, + [0][0][11][17] = 24, + [0][0][2][19] = 48, + [0][0][1][19] = 24, + [0][0][3][19] = 44, + [0][0][5][19] = 48, + [0][0][6][19] = 24, + [0][0][9][19] = 24, + [0][0][8][19] = 54, + [0][0][11][19] = 24, + [0][0][2][21] = 48, + [0][0][1][21] = 24, + [0][0][3][21] = 44, + [0][0][5][21] = 48, + [0][0][6][21] = 24, + [0][0][9][21] = 24, + [0][0][8][21] = 54, + [0][0][11][21] = 24, + [0][0][2][23] = 48, + [0][0][1][23] = 24, + [0][0][3][23] = 44, + [0][0][5][23] = 48, + [0][0][6][23] = 24, + [0][0][9][23] = 24, + [0][0][8][23] = 54, + [0][0][11][23] = 24, + [0][0][2][25] = 48, + [0][0][1][25] = 24, + [0][0][3][25] = 44, + [0][0][5][25] = 127, + [0][0][6][25] = 24, + [0][0][9][25] = 127, + [0][0][8][25] = 54, + [0][0][11][25] = 24, + [0][0][2][27] = 48, + [0][0][1][27] = 24, + [0][0][3][27] = 44, + [0][0][5][27] = 127, + [0][0][6][27] = 24, + [0][0][9][27] = 127, + [0][0][8][27] = 54, + [0][0][11][27] = 24, + [0][0][2][29] = 48, + [0][0][1][29] = 24, + [0][0][3][29] = 44, + [0][0][5][29] = 127, + [0][0][6][29] = 24, + [0][0][9][29] = 127, + [0][0][8][29] = 54, + [0][0][11][29] = 24, + [0][0][2][31] = 48, + [0][0][1][31] = 24, + [0][0][3][31] = 44, + [0][0][5][31] = 48, + [0][0][6][31] = 24, + [0][0][9][31] = 24, + [0][0][8][31] = 54, + [0][0][11][31] = 24, + [0][0][2][33] = 48, + [0][0][1][33] = 24, + [0][0][3][33] = 44, + [0][0][5][33] = 48, + [0][0][6][33] = 24, + [0][0][9][33] = 24, + [0][0][8][33] = 54, + [0][0][11][33] = 24, + [0][0][2][35] = 48, + [0][0][1][35] = 24, + [0][0][3][35] = 44, + [0][0][5][35] = 48, + [0][0][6][35] = 24, + [0][0][9][35] = 24, + [0][0][8][35] = 54, + [0][0][11][35] = 24, + [0][0][2][37] = 48, + [0][0][1][37] = 127, + [0][0][3][37] = 44, + [0][0][5][37] = 48, + [0][0][6][37] = 24, + [0][0][9][37] = 48, + [0][0][8][37] = 54, + [0][0][11][37] = 127, + [0][0][2][38] = 76, + [0][0][1][38] = 28, + [0][0][3][38] = 127, + [0][0][5][38] = 76, + [0][0][6][38] = 28, + [0][0][9][38] = 76, + [0][0][8][38] = 54, + [0][0][11][38] = 28, + [0][0][2][40] = 76, + [0][0][1][40] = 28, + [0][0][3][40] = 127, + [0][0][5][40] = 76, + [0][0][6][40] = 28, + [0][0][9][40] = 76, + [0][0][8][40] = 54, + [0][0][11][40] = 28, + [0][0][2][42] = 76, + [0][0][1][42] = 28, + [0][0][3][42] = 127, + [0][0][5][42] = 76, + [0][0][6][42] = 28, + [0][0][9][42] = 76, + [0][0][8][42] = 54, + [0][0][11][42] = 28, + [0][0][2][44] = 76, + [0][0][1][44] = 28, + [0][0][3][44] = 127, + [0][0][5][44] = 76, + [0][0][6][44] = 28, + [0][0][9][44] = 76, + [0][0][8][44] = 54, + [0][0][11][44] = 28, + [0][0][2][46] = 76, + [0][0][1][46] = 28, + [0][0][3][46] = 127, + [0][0][5][46] = 76, + [0][0][6][46] = 28, + [0][0][9][46] = 76, + [0][0][8][46] = 54, + [0][0][11][46] = 28, + [0][1][2][0] = 36, + [0][1][1][0] = 12, + [0][1][3][0] = 14, + [0][1][5][0] = 8, + [0][1][6][0] = 12, + [0][1][9][0] = 12, + [0][1][8][0] = 18, + [0][1][11][0] = 12, + [0][1][2][2] = 36, + [0][1][1][2] = 12, + [0][1][3][2] = 14, + [0][1][5][2] = 8, + [0][1][6][2] = 12, + [0][1][9][2] = 12, + [0][1][8][2] = 18, + [0][1][11][2] = 12, + [0][1][2][4] = 36, + [0][1][1][4] = 12, + 
[0][1][3][4] = 14, + [0][1][5][4] = 8, + [0][1][6][4] = 12, + [0][1][9][4] = 12, + [0][1][8][4] = 18, + [0][1][11][4] = 12, + [0][1][2][6] = 36, + [0][1][1][6] = 12, + [0][1][3][6] = 14, + [0][1][5][6] = 8, + [0][1][6][6] = 12, + [0][1][9][6] = 12, + [0][1][8][6] = 18, + [0][1][11][6] = 12, + [0][1][2][8] = 36, + [0][1][1][8] = 12, + [0][1][3][8] = 14, + [0][1][5][8] = 36, + [0][1][6][8] = 12, + [0][1][9][8] = 12, + [0][1][8][8] = 42, + [0][1][11][8] = 12, + [0][1][2][10] = 36, + [0][1][1][10] = 12, + [0][1][3][10] = 14, + [0][1][5][10] = 36, + [0][1][6][10] = 12, + [0][1][9][10] = 12, + [0][1][8][10] = 42, + [0][1][11][10] = 12, + [0][1][2][12] = 36, + [0][1][1][12] = 12, + [0][1][3][12] = 14, + [0][1][5][12] = 36, + [0][1][6][12] = 12, + [0][1][9][12] = 12, + [0][1][8][12] = 42, + [0][1][11][12] = 12, + [0][1][2][14] = 36, + [0][1][1][14] = 12, + [0][1][3][14] = 14, + [0][1][5][14] = 36, + [0][1][6][14] = 12, + [0][1][9][14] = 12, + [0][1][8][14] = 42, + [0][1][11][14] = 12, + [0][1][2][15] = 36, + [0][1][1][15] = 12, + [0][1][3][15] = 32, + [0][1][5][15] = 36, + [0][1][6][15] = 12, + [0][1][9][15] = 12, + [0][1][8][15] = 42, + [0][1][11][15] = 12, + [0][1][2][17] = 36, + [0][1][1][17] = 12, + [0][1][3][17] = 32, + [0][1][5][17] = 36, + [0][1][6][17] = 12, + [0][1][9][17] = 12, + [0][1][8][17] = 42, + [0][1][11][17] = 12, + [0][1][2][19] = 36, + [0][1][1][19] = 12, + [0][1][3][19] = 32, + [0][1][5][19] = 36, + [0][1][6][19] = 12, + [0][1][9][19] = 12, + [0][1][8][19] = 42, + [0][1][11][19] = 12, + [0][1][2][21] = 36, + [0][1][1][21] = 12, + [0][1][3][21] = 32, + [0][1][5][21] = 36, + [0][1][6][21] = 12, + [0][1][9][21] = 12, + [0][1][8][21] = 42, + [0][1][11][21] = 12, + [0][1][2][23] = 36, + [0][1][1][23] = 12, + [0][1][3][23] = 32, + [0][1][5][23] = 36, + [0][1][6][23] = 12, + [0][1][9][23] = 12, + [0][1][8][23] = 42, + [0][1][11][23] = 12, + [0][1][2][25] = 36, + [0][1][1][25] = 12, + [0][1][3][25] = 32, + [0][1][5][25] = 127, + [0][1][6][25] = 12, + [0][1][9][25] = 127, + [0][1][8][25] = 42, + [0][1][11][25] = 12, + [0][1][2][27] = 36, + [0][1][1][27] = 12, + [0][1][3][27] = 32, + [0][1][5][27] = 127, + [0][1][6][27] = 12, + [0][1][9][27] = 127, + [0][1][8][27] = 42, + [0][1][11][27] = 12, + [0][1][2][29] = 36, + [0][1][1][29] = 12, + [0][1][3][29] = 32, + [0][1][5][29] = 127, + [0][1][6][29] = 12, + [0][1][9][29] = 127, + [0][1][8][29] = 42, + [0][1][11][29] = 12, + [0][1][2][31] = 36, + [0][1][1][31] = 12, + [0][1][3][31] = 32, + [0][1][5][31] = 36, + [0][1][6][31] = 12, + [0][1][9][31] = 12, + [0][1][8][31] = 42, + [0][1][11][31] = 12, + [0][1][2][33] = 36, + [0][1][1][33] = 12, + [0][1][3][33] = 32, + [0][1][5][33] = 36, + [0][1][6][33] = 12, + [0][1][9][33] = 12, + [0][1][8][33] = 42, + [0][1][11][33] = 12, + [0][1][2][35] = 36, + [0][1][1][35] = 12, + [0][1][3][35] = 32, + [0][1][5][35] = 36, + [0][1][6][35] = 12, + [0][1][9][35] = 12, + [0][1][8][35] = 42, + [0][1][11][35] = 12, + [0][1][2][37] = 36, + [0][1][1][37] = 127, + [0][1][3][37] = 32, + [0][1][5][37] = 36, + [0][1][6][37] = 12, + [0][1][9][37] = 36, + [0][1][8][37] = 42, + [0][1][11][37] = 127, + [0][1][2][38] = 72, + [0][1][1][38] = 16, + [0][1][3][38] = 127, + [0][1][5][38] = 72, + [0][1][6][38] = 16, + [0][1][9][38] = 76, + [0][1][8][38] = 42, + [0][1][11][38] = 16, + [0][1][2][40] = 76, + [0][1][1][40] = 16, + [0][1][3][40] = 127, + [0][1][5][40] = 76, + [0][1][6][40] = 16, + [0][1][9][40] = 76, + [0][1][8][40] = 42, + [0][1][11][40] = 16, + [0][1][2][42] = 76, + [0][1][1][42] = 16, + [0][1][3][42] = 127, + 
[0][1][5][42] = 76, + [0][1][6][42] = 16, + [0][1][9][42] = 76, + [0][1][8][42] = 42, + [0][1][11][42] = 16, + [0][1][2][44] = 76, + [0][1][1][44] = 16, + [0][1][3][44] = 127, + [0][1][5][44] = 76, + [0][1][6][44] = 16, + [0][1][9][44] = 76, + [0][1][8][44] = 42, + [0][1][11][44] = 16, + [0][1][2][46] = 76, + [0][1][1][46] = 16, + [0][1][3][46] = 127, + [0][1][5][46] = 76, + [0][1][6][46] = 16, + [0][1][9][46] = 76, + [0][1][8][46] = 42, + [0][1][11][46] = 16, + [1][0][2][0] = 62, + [1][0][1][0] = 36, + [1][0][3][0] = 36, + [1][0][5][0] = 34, + [1][0][6][0] = 36, + [1][0][9][0] = 36, + [1][0][8][0] = 30, + [1][0][11][0] = 36, + [1][0][2][2] = 62, + [1][0][1][2] = 36, + [1][0][3][2] = 36, + [1][0][5][2] = 34, + [1][0][6][2] = 36, + [1][0][9][2] = 36, + [1][0][8][2] = 30, + [1][0][11][2] = 36, + [1][0][2][4] = 62, + [1][0][1][4] = 36, + [1][0][3][4] = 36, + [1][0][5][4] = 34, + [1][0][6][4] = 36, + [1][0][9][4] = 36, + [1][0][8][4] = 30, + [1][0][11][4] = 36, + [1][0][2][6] = 62, + [1][0][1][6] = 36, + [1][0][3][6] = 36, + [1][0][5][6] = 34, + [1][0][6][6] = 36, + [1][0][9][6] = 36, + [1][0][8][6] = 30, + [1][0][11][6] = 36, + [1][0][2][8] = 62, + [1][0][1][8] = 36, + [1][0][3][8] = 36, + [1][0][5][8] = 62, + [1][0][6][8] = 36, + [1][0][9][8] = 36, + [1][0][8][8] = 54, + [1][0][11][8] = 36, + [1][0][2][10] = 62, + [1][0][1][10] = 36, + [1][0][3][10] = 36, + [1][0][5][10] = 62, + [1][0][6][10] = 36, + [1][0][9][10] = 36, + [1][0][8][10] = 54, + [1][0][11][10] = 36, + [1][0][2][12] = 62, + [1][0][1][12] = 36, + [1][0][3][12] = 36, + [1][0][5][12] = 62, + [1][0][6][12] = 36, + [1][0][9][12] = 36, + [1][0][8][12] = 54, + [1][0][11][12] = 36, + [1][0][2][14] = 62, + [1][0][1][14] = 36, + [1][0][3][14] = 36, + [1][0][5][14] = 62, + [1][0][6][14] = 36, + [1][0][9][14] = 36, + [1][0][8][14] = 54, + [1][0][11][14] = 36, + [1][0][2][15] = 62, + [1][0][1][15] = 36, + [1][0][3][15] = 58, + [1][0][5][15] = 62, + [1][0][6][15] = 36, + [1][0][9][15] = 36, + [1][0][8][15] = 54, + [1][0][11][15] = 36, + [1][0][2][17] = 62, + [1][0][1][17] = 36, + [1][0][3][17] = 58, + [1][0][5][17] = 62, + [1][0][6][17] = 36, + [1][0][9][17] = 36, + [1][0][8][17] = 54, + [1][0][11][17] = 36, + [1][0][2][19] = 62, + [1][0][1][19] = 36, + [1][0][3][19] = 58, + [1][0][5][19] = 62, + [1][0][6][19] = 36, + [1][0][9][19] = 36, + [1][0][8][19] = 54, + [1][0][11][19] = 36, + [1][0][2][21] = 62, + [1][0][1][21] = 36, + [1][0][3][21] = 58, + [1][0][5][21] = 62, + [1][0][6][21] = 36, + [1][0][9][21] = 36, + [1][0][8][21] = 54, + [1][0][11][21] = 36, + [1][0][2][23] = 62, + [1][0][1][23] = 36, + [1][0][3][23] = 58, + [1][0][5][23] = 62, + [1][0][6][23] = 36, + [1][0][9][23] = 36, + [1][0][8][23] = 54, + [1][0][11][23] = 36, + [1][0][2][25] = 62, + [1][0][1][25] = 36, + [1][0][3][25] = 58, + [1][0][5][25] = 127, + [1][0][6][25] = 36, + [1][0][9][25] = 127, + [1][0][8][25] = 54, + [1][0][11][25] = 36, + [1][0][2][27] = 62, + [1][0][1][27] = 36, + [1][0][3][27] = 58, + [1][0][5][27] = 127, + [1][0][6][27] = 36, + [1][0][9][27] = 127, + [1][0][8][27] = 54, + [1][0][11][27] = 36, + [1][0][2][29] = 62, + [1][0][1][29] = 36, + [1][0][3][29] = 58, + [1][0][5][29] = 127, + [1][0][6][29] = 36, + [1][0][9][29] = 127, + [1][0][8][29] = 54, + [1][0][11][29] = 36, + [1][0][2][31] = 62, + [1][0][1][31] = 36, + [1][0][3][31] = 58, + [1][0][5][31] = 62, + [1][0][6][31] = 36, + [1][0][9][31] = 36, + [1][0][8][31] = 54, + [1][0][11][31] = 36, + [1][0][2][33] = 62, + [1][0][1][33] = 36, + [1][0][3][33] = 58, + [1][0][5][33] = 62, + [1][0][6][33] = 36, + 
[1][0][9][33] = 36, + [1][0][8][33] = 54, + [1][0][11][33] = 36, + [1][0][2][35] = 62, + [1][0][1][35] = 36, + [1][0][3][35] = 58, + [1][0][5][35] = 62, + [1][0][6][35] = 36, + [1][0][9][35] = 36, + [1][0][8][35] = 54, + [1][0][11][35] = 36, + [1][0][2][37] = 56, + [1][0][1][37] = 62, + [1][0][3][37] = 127, + [1][0][5][37] = 58, + [1][0][6][37] = 62, + [1][0][9][37] = 36, + [1][0][8][37] = 62, + [1][0][11][37] = 54, + [1][0][2][38] = 76, + [1][0][1][38] = 28, + [1][0][3][38] = 127, + [1][0][5][38] = 76, + [1][0][6][38] = 28, + [1][0][9][38] = 76, + [1][0][8][38] = 54, + [1][0][11][38] = 28, + [1][0][2][40] = 76, + [1][0][1][40] = 28, + [1][0][3][40] = 127, + [1][0][5][40] = 76, + [1][0][6][40] = 28, + [1][0][9][40] = 76, + [1][0][8][40] = 54, + [1][0][11][40] = 28, + [1][0][2][42] = 76, + [1][0][1][42] = 28, + [1][0][3][42] = 127, + [1][0][5][42] = 76, + [1][0][6][42] = 28, + [1][0][9][42] = 76, + [1][0][8][42] = 54, + [1][0][11][42] = 28, + [1][0][2][44] = 76, + [1][0][1][44] = 28, + [1][0][3][44] = 127, + [1][0][5][44] = 76, + [1][0][6][44] = 28, + [1][0][9][44] = 76, + [1][0][8][44] = 54, + [1][0][11][44] = 28, + [1][0][2][46] = 76, + [1][0][1][46] = 28, + [1][0][3][46] = 127, + [1][0][5][46] = 76, + [1][0][6][46] = 28, + [1][0][9][46] = 76, + [1][0][8][46] = 54, + [1][0][11][46] = 28, + [1][1][2][0] = 46, + [1][1][1][0] = 22, + [1][1][3][0] = 24, + [1][1][5][0] = 18, + [1][1][6][0] = 22, + [1][1][9][0] = 22, + [1][1][8][0] = 18, + [1][1][11][0] = 22, + [1][1][2][2] = 46, + [1][1][1][2] = 22, + [1][1][3][2] = 24, + [1][1][5][2] = 18, + [1][1][6][2] = 22, + [1][1][9][2] = 22, + [1][1][8][2] = 18, + [1][1][11][2] = 22, + [1][1][2][4] = 46, + [1][1][1][4] = 22, + [1][1][3][4] = 24, + [1][1][5][4] = 18, + [1][1][6][4] = 22, + [1][1][9][4] = 22, + [1][1][8][4] = 18, + [1][1][11][4] = 22, + [1][1][2][6] = 46, + [1][1][1][6] = 22, + [1][1][3][6] = 24, + [1][1][5][6] = 18, + [1][1][6][6] = 22, + [1][1][9][6] = 22, + [1][1][8][6] = 18, + [1][1][11][6] = 22, + [1][1][2][8] = 46, + [1][1][1][8] = 22, + [1][1][3][8] = 24, + [1][1][5][8] = 46, + [1][1][6][8] = 22, + [1][1][9][8] = 22, + [1][1][8][8] = 42, + [1][1][11][8] = 22, + [1][1][2][10] = 46, + [1][1][1][10] = 22, + [1][1][3][10] = 24, + [1][1][5][10] = 46, + [1][1][6][10] = 22, + [1][1][9][10] = 22, + [1][1][8][10] = 42, + [1][1][11][10] = 22, + [1][1][2][12] = 46, + [1][1][1][12] = 22, + [1][1][3][12] = 24, + [1][1][5][12] = 46, + [1][1][6][12] = 22, + [1][1][9][12] = 22, + [1][1][8][12] = 42, + [1][1][11][12] = 22, + [1][1][2][14] = 46, + [1][1][1][14] = 22, + [1][1][3][14] = 24, + [1][1][5][14] = 46, + [1][1][6][14] = 22, + [1][1][9][14] = 22, + [1][1][8][14] = 42, + [1][1][11][14] = 22, + [1][1][2][15] = 46, + [1][1][1][15] = 22, + [1][1][3][15] = 46, + [1][1][5][15] = 46, + [1][1][6][15] = 22, + [1][1][9][15] = 22, + [1][1][8][15] = 42, + [1][1][11][15] = 22, + [1][1][2][17] = 46, + [1][1][1][17] = 22, + [1][1][3][17] = 46, + [1][1][5][17] = 46, + [1][1][6][17] = 22, + [1][1][9][17] = 22, + [1][1][8][17] = 42, + [1][1][11][17] = 22, + [1][1][2][19] = 46, + [1][1][1][19] = 22, + [1][1][3][19] = 46, + [1][1][5][19] = 46, + [1][1][6][19] = 22, + [1][1][9][19] = 22, + [1][1][8][19] = 42, + [1][1][11][19] = 22, + [1][1][2][21] = 46, + [1][1][1][21] = 22, + [1][1][3][21] = 46, + [1][1][5][21] = 46, + [1][1][6][21] = 22, + [1][1][9][21] = 22, + [1][1][8][21] = 42, + [1][1][11][21] = 22, + [1][1][2][23] = 46, + [1][1][1][23] = 22, + [1][1][3][23] = 46, + [1][1][5][23] = 46, + [1][1][6][23] = 22, + [1][1][9][23] = 22, + [1][1][8][23] = 42, + 
[1][1][11][23] = 22, + [1][1][2][25] = 46, + [1][1][1][25] = 22, + [1][1][3][25] = 46, + [1][1][5][25] = 127, + [1][1][6][25] = 22, + [1][1][9][25] = 127, + [1][1][8][25] = 42, + [1][1][11][25] = 22, + [1][1][2][27] = 46, + [1][1][1][27] = 22, + [1][1][3][27] = 46, + [1][1][5][27] = 127, + [1][1][6][27] = 22, + [1][1][9][27] = 127, + [1][1][8][27] = 42, + [1][1][11][27] = 22, + [1][1][2][29] = 46, + [1][1][1][29] = 22, + [1][1][3][29] = 46, + [1][1][5][29] = 127, + [1][1][6][29] = 22, + [1][1][9][29] = 127, + [1][1][8][29] = 42, + [1][1][11][29] = 22, + [1][1][2][31] = 46, + [1][1][1][31] = 22, + [1][1][3][31] = 46, + [1][1][5][31] = 46, + [1][1][6][31] = 22, + [1][1][9][31] = 22, + [1][1][8][31] = 42, + [1][1][11][31] = 22, + [1][1][2][33] = 46, + [1][1][1][33] = 22, + [1][1][3][33] = 46, + [1][1][5][33] = 46, + [1][1][6][33] = 22, + [1][1][9][33] = 22, + [1][1][8][33] = 42, + [1][1][11][33] = 22, + [1][1][2][35] = 46, + [1][1][1][35] = 22, + [1][1][3][35] = 46, + [1][1][5][35] = 46, + [1][1][6][35] = 22, + [1][1][9][35] = 22, + [1][1][8][35] = 42, + [1][1][11][35] = 22, + [1][1][2][37] = 46, + [1][1][1][37] = 127, + [1][1][3][37] = 46, + [1][1][5][37] = 46, + [1][1][6][37] = 22, + [1][1][9][37] = 50, + [1][1][8][37] = 42, + [1][1][11][37] = 127, + [1][1][2][38] = 74, + [1][1][1][38] = 16, + [1][1][3][38] = 127, + [1][1][5][38] = 74, + [1][1][6][38] = 16, + [1][1][9][38] = 76, + [1][1][8][38] = 42, + [1][1][11][38] = 16, + [1][1][2][40] = 76, + [1][1][1][40] = 16, + [1][1][3][40] = 127, + [1][1][5][40] = 76, + [1][1][6][40] = 16, + [1][1][9][40] = 76, + [1][1][8][40] = 42, + [1][1][11][40] = 16, + [1][1][2][42] = 76, + [1][1][1][42] = 16, + [1][1][3][42] = 127, + [1][1][5][42] = 76, + [1][1][6][42] = 16, + [1][1][9][42] = 76, + [1][1][8][42] = 42, + [1][1][11][42] = 16, + [1][1][2][44] = 76, + [1][1][1][44] = 16, + [1][1][3][44] = 127, + [1][1][5][44] = 76, + [1][1][6][44] = 16, + [1][1][9][44] = 76, + [1][1][8][44] = 42, + [1][1][11][44] = 16, + [1][1][2][46] = 76, + [1][1][1][46] = 16, + [1][1][3][46] = 127, + [1][1][5][46] = 76, + [1][1][6][46] = 16, + [1][1][9][46] = 76, + [1][1][8][46] = 42, + [1][1][11][46] = 16, + [2][0][2][0] = 74, + [2][0][1][0] = 46, + [2][0][3][0] = 50, + [2][0][5][0] = 46, + [2][0][6][0] = 46, + [2][0][9][0] = 46, + [2][0][8][0] = 30, + [2][0][11][0] = 46, + [2][0][2][2] = 74, + [2][0][1][2] = 46, + [2][0][3][2] = 50, + [2][0][5][2] = 46, + [2][0][6][2] = 46, + [2][0][9][2] = 46, + [2][0][8][2] = 30, + [2][0][11][2] = 46, + [2][0][2][4] = 74, + [2][0][1][4] = 46, + [2][0][3][4] = 50, + [2][0][5][4] = 46, + [2][0][6][4] = 46, + [2][0][9][4] = 46, + [2][0][8][4] = 30, + [2][0][11][4] = 46, + [2][0][2][6] = 74, + [2][0][1][6] = 46, + [2][0][3][6] = 50, + [2][0][5][6] = 46, + [2][0][6][6] = 46, + [2][0][9][6] = 46, + [2][0][8][6] = 30, + [2][0][11][6] = 46, + [2][0][2][8] = 74, + [2][0][1][8] = 46, + [2][0][3][8] = 50, + [2][0][5][8] = 66, + [2][0][6][8] = 46, + [2][0][9][8] = 46, + [2][0][8][8] = 54, + [2][0][11][8] = 46, + [2][0][2][10] = 74, + [2][0][1][10] = 46, + [2][0][3][10] = 50, + [2][0][5][10] = 66, + [2][0][6][10] = 46, + [2][0][9][10] = 46, + [2][0][8][10] = 54, + [2][0][11][10] = 46, + [2][0][2][12] = 74, + [2][0][1][12] = 46, + [2][0][3][12] = 50, + [2][0][5][12] = 66, + [2][0][6][12] = 46, + [2][0][9][12] = 46, + [2][0][8][12] = 54, + [2][0][11][12] = 46, + [2][0][2][14] = 74, + [2][0][1][14] = 46, + [2][0][3][14] = 50, + [2][0][5][14] = 66, + [2][0][6][14] = 46, + [2][0][9][14] = 46, + [2][0][8][14] = 54, + [2][0][11][14] = 46, + [2][0][2][15] = 
74, + [2][0][1][15] = 46, + [2][0][3][15] = 70, + [2][0][5][15] = 74, + [2][0][6][15] = 46, + [2][0][9][15] = 46, + [2][0][8][15] = 54, + [2][0][11][15] = 46, + [2][0][2][17] = 74, + [2][0][1][17] = 46, + [2][0][3][17] = 70, + [2][0][5][17] = 74, + [2][0][6][17] = 46, + [2][0][9][17] = 46, + [2][0][8][17] = 54, + [2][0][11][17] = 46, + [2][0][2][19] = 74, + [2][0][1][19] = 46, + [2][0][3][19] = 70, + [2][0][5][19] = 74, + [2][0][6][19] = 46, + [2][0][9][19] = 46, + [2][0][8][19] = 54, + [2][0][11][19] = 46, + [2][0][2][21] = 74, + [2][0][1][21] = 46, + [2][0][3][21] = 70, + [2][0][5][21] = 74, + [2][0][6][21] = 46, + [2][0][9][21] = 46, + [2][0][8][21] = 54, + [2][0][11][21] = 46, + [2][0][2][23] = 74, + [2][0][1][23] = 46, + [2][0][3][23] = 70, + [2][0][5][23] = 74, + [2][0][6][23] = 46, + [2][0][9][23] = 46, + [2][0][8][23] = 54, + [2][0][11][23] = 46, + [2][0][2][25] = 74, + [2][0][1][25] = 46, + [2][0][3][25] = 70, + [2][0][5][25] = 127, + [2][0][6][25] = 46, + [2][0][9][25] = 127, + [2][0][8][25] = 54, + [2][0][11][25] = 46, + [2][0][2][27] = 74, + [2][0][1][27] = 46, + [2][0][3][27] = 70, + [2][0][5][27] = 127, + [2][0][6][27] = 46, + [2][0][9][27] = 127, + [2][0][8][27] = 54, + [2][0][11][27] = 46, + [2][0][2][29] = 74, + [2][0][1][29] = 46, + [2][0][3][29] = 70, + [2][0][5][29] = 127, + [2][0][6][29] = 46, + [2][0][9][29] = 127, + [2][0][8][29] = 54, + [2][0][11][29] = 46, + [2][0][2][31] = 74, + [2][0][1][31] = 46, + [2][0][3][31] = 70, + [2][0][5][31] = 74, + [2][0][6][31] = 46, + [2][0][9][31] = 46, + [2][0][8][31] = 54, + [2][0][11][31] = 46, + [2][0][2][33] = 74, + [2][0][1][33] = 46, + [2][0][3][33] = 70, + [2][0][5][33] = 74, + [2][0][6][33] = 46, + [2][0][9][33] = 46, + [2][0][8][33] = 54, + [2][0][11][33] = 46, + [2][0][2][35] = 74, + [2][0][1][35] = 46, + [2][0][3][35] = 70, + [2][0][5][35] = 74, + [2][0][6][35] = 46, + [2][0][9][35] = 46, + [2][0][8][35] = 54, + [2][0][11][35] = 46, + [2][0][2][37] = 74, + [2][0][1][37] = 127, + [2][0][3][37] = 70, + [2][0][5][37] = 74, + [2][0][6][37] = 46, + [2][0][9][37] = 74, + [2][0][8][37] = 54, + [2][0][11][37] = 127, + [2][0][2][38] = 76, + [2][0][1][38] = 28, + [2][0][3][38] = 127, + [2][0][5][38] = 76, + [2][0][6][38] = 28, + [2][0][9][38] = 76, + [2][0][8][38] = 54, + [2][0][11][38] = 28, + [2][0][2][40] = 76, + [2][0][1][40] = 28, + [2][0][3][40] = 127, + [2][0][5][40] = 76, + [2][0][6][40] = 28, + [2][0][9][40] = 76, + [2][0][8][40] = 54, + [2][0][11][40] = 28, + [2][0][2][42] = 76, + [2][0][1][42] = 28, + [2][0][3][42] = 127, + [2][0][5][42] = 76, + [2][0][6][42] = 28, + [2][0][9][42] = 76, + [2][0][8][42] = 54, + [2][0][11][42] = 28, + [2][0][2][44] = 76, + [2][0][1][44] = 28, + [2][0][3][44] = 127, + [2][0][5][44] = 76, + [2][0][6][44] = 28, + [2][0][9][44] = 76, + [2][0][8][44] = 54, + [2][0][11][44] = 28, + [2][0][2][46] = 76, + [2][0][1][46] = 28, + [2][0][3][46] = 127, + [2][0][5][46] = 76, + [2][0][6][46] = 28, + [2][0][9][46] = 76, + [2][0][8][46] = 54, + [2][0][11][46] = 28, + [2][1][2][0] = 58, + [2][1][1][0] = 32, + [2][1][3][0] = 38, + [2][1][5][0] = 30, + [2][1][6][0] = 32, + [2][1][9][0] = 32, + [2][1][8][0] = 18, + [2][1][11][0] = 32, + [2][1][2][2] = 58, + [2][1][1][2] = 32, + [2][1][3][2] = 38, + [2][1][5][2] = 30, + [2][1][6][2] = 32, + [2][1][9][2] = 32, + [2][1][8][2] = 18, + [2][1][11][2] = 32, + [2][1][2][4] = 58, + [2][1][1][4] = 32, + [2][1][3][4] = 38, + [2][1][5][4] = 30, + [2][1][6][4] = 32, + [2][1][9][4] = 32, + [2][1][8][4] = 18, + [2][1][11][4] = 32, + [2][1][2][6] = 58, + [2][1][1][6] = 32, + 
[2][1][3][6] = 38, + [2][1][5][6] = 30, + [2][1][6][6] = 32, + [2][1][9][6] = 32, + [2][1][8][6] = 18, + [2][1][11][6] = 32, + [2][1][2][8] = 58, + [2][1][1][8] = 32, + [2][1][3][8] = 38, + [2][1][5][8] = 52, + [2][1][6][8] = 32, + [2][1][9][8] = 32, + [2][1][8][8] = 42, + [2][1][11][8] = 32, + [2][1][2][10] = 58, + [2][1][1][10] = 32, + [2][1][3][10] = 38, + [2][1][5][10] = 52, + [2][1][6][10] = 32, + [2][1][9][10] = 32, + [2][1][8][10] = 42, + [2][1][11][10] = 32, + [2][1][2][12] = 58, + [2][1][1][12] = 32, + [2][1][3][12] = 38, + [2][1][5][12] = 52, + [2][1][6][12] = 32, + [2][1][9][12] = 32, + [2][1][8][12] = 42, + [2][1][11][12] = 32, + [2][1][2][14] = 58, + [2][1][1][14] = 32, + [2][1][3][14] = 38, + [2][1][5][14] = 52, + [2][1][6][14] = 32, + [2][1][9][14] = 32, + [2][1][8][14] = 42, + [2][1][11][14] = 32, + [2][1][2][15] = 58, + [2][1][1][15] = 32, + [2][1][3][15] = 58, + [2][1][5][15] = 58, + [2][1][6][15] = 32, + [2][1][9][15] = 32, + [2][1][8][15] = 42, + [2][1][11][15] = 32, + [2][1][2][17] = 58, + [2][1][1][17] = 32, + [2][1][3][17] = 58, + [2][1][5][17] = 58, + [2][1][6][17] = 32, + [2][1][9][17] = 32, + [2][1][8][17] = 42, + [2][1][11][17] = 32, + [2][1][2][19] = 58, + [2][1][1][19] = 32, + [2][1][3][19] = 58, + [2][1][5][19] = 58, + [2][1][6][19] = 32, + [2][1][9][19] = 32, + [2][1][8][19] = 42, + [2][1][11][19] = 32, + [2][1][2][21] = 58, + [2][1][1][21] = 32, + [2][1][3][21] = 58, + [2][1][5][21] = 58, + [2][1][6][21] = 32, + [2][1][9][21] = 32, + [2][1][8][21] = 42, + [2][1][11][21] = 32, + [2][1][2][23] = 58, + [2][1][1][23] = 32, + [2][1][3][23] = 58, + [2][1][5][23] = 58, + [2][1][6][23] = 32, + [2][1][9][23] = 32, + [2][1][8][23] = 42, + [2][1][11][23] = 32, + [2][1][2][25] = 58, + [2][1][1][25] = 32, + [2][1][3][25] = 58, + [2][1][5][25] = 127, + [2][1][6][25] = 32, + [2][1][9][25] = 127, + [2][1][8][25] = 42, + [2][1][11][25] = 32, + [2][1][2][27] = 58, + [2][1][1][27] = 32, + [2][1][3][27] = 58, + [2][1][5][27] = 127, + [2][1][6][27] = 32, + [2][1][9][27] = 127, + [2][1][8][27] = 42, + [2][1][11][27] = 32, + [2][1][2][29] = 58, + [2][1][1][29] = 32, + [2][1][3][29] = 58, + [2][1][5][29] = 127, + [2][1][6][29] = 32, + [2][1][9][29] = 127, + [2][1][8][29] = 42, + [2][1][11][29] = 32, + [2][1][2][31] = 58, + [2][1][1][31] = 32, + [2][1][3][31] = 58, + [2][1][5][31] = 58, + [2][1][6][31] = 32, + [2][1][9][31] = 32, + [2][1][8][31] = 42, + [2][1][11][31] = 32, + [2][1][2][33] = 58, + [2][1][1][33] = 32, + [2][1][3][33] = 58, + [2][1][5][33] = 58, + [2][1][6][33] = 32, + [2][1][9][33] = 32, + [2][1][8][33] = 42, + [2][1][11][33] = 32, + [2][1][2][35] = 58, + [2][1][1][35] = 32, + [2][1][3][35] = 58, + [2][1][5][35] = 58, + [2][1][6][35] = 32, + [2][1][9][35] = 32, + [2][1][8][35] = 42, + [2][1][11][35] = 32, + [2][1][2][37] = 58, + [2][1][1][37] = 127, + [2][1][3][37] = 58, + [2][1][5][37] = 58, + [2][1][6][37] = 32, + [2][1][9][37] = 62, + [2][1][8][37] = 42, + [2][1][11][37] = 127, + [2][1][2][38] = 76, + [2][1][1][38] = 16, + [2][1][3][38] = 127, + [2][1][5][38] = 76, + [2][1][6][38] = 16, + [2][1][9][38] = 76, + [2][1][8][38] = 42, + [2][1][11][38] = 16, + [2][1][2][40] = 76, + [2][1][1][40] = 16, + [2][1][3][40] = 127, + [2][1][5][40] = 76, + [2][1][6][40] = 16, + [2][1][9][40] = 76, + [2][1][8][40] = 42, + [2][1][11][40] = 16, + [2][1][2][42] = 76, + [2][1][1][42] = 16, + [2][1][3][42] = 127, + [2][1][5][42] = 76, + [2][1][6][42] = 16, + [2][1][9][42] = 76, + [2][1][8][42] = 42, + [2][1][11][42] = 16, + [2][1][2][44] = 76, + [2][1][1][44] = 16, + [2][1][3][44] = 
127, + [2][1][5][44] = 76, + [2][1][6][44] = 16, + [2][1][9][44] = 76, + [2][1][8][44] = 42, + [2][1][11][44] = 16, + [2][1][2][46] = 76, + [2][1][1][46] = 16, + [2][1][3][46] = 127, + [2][1][5][46] = 76, + [2][1][6][46] = 16, + [2][1][9][46] = 76, + [2][1][8][46] = 42, + [2][1][11][46] = 16, +}; + +#define DECLARE_DIG_TABLE(name) \ +static const struct rtw89_phy_dig_gain_cfg name##_table = { \ + .table = name, \ + .size = ARRAY_SIZE(name) \ +} + +static const struct rtw89_reg_def rtw89_8852a_lna_gain_g[] = { + {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G0_G_MSK}, + {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G1_G_MSK}, + {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G2_G_MSK}, + {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G3_G_MSK}, + {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G4_G_MSK}, + {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G5_G_MSK}, + {R_PATH0_LNA_ERR5, B_PATH0_LNA_ERR_G6_G_MSK}, +}; + +DECLARE_DIG_TABLE(rtw89_8852a_lna_gain_g); + +static const struct rtw89_reg_def rtw89_8852a_tia_gain_g[] = { + {R_PATH0_TIA_ERR_G0, B_PATH0_TIA_ERR_G0_G_MSK}, + {R_PATH0_TIA_ERR_G1, B_PATH0_TIA_ERR_G1_G_MSK}, +}; + +DECLARE_DIG_TABLE(rtw89_8852a_tia_gain_g); + +static const struct rtw89_reg_def rtw89_8852a_lna_gain_a[] = { + {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G0_A_MSK}, + {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G1_A_MSK}, + {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G2_A_MSK}, + {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G3_A_MSK}, + {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G4_A_MSK}, + {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G5_A_MSK}, + {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G6_A_MSK}, +}; + +DECLARE_DIG_TABLE(rtw89_8852a_lna_gain_a); + +static const struct rtw89_reg_def rtw89_8852a_tia_gain_a[] = { + {R_PATH0_TIA_ERR_G0, B_PATH0_TIA_ERR_G0_A_MSK}, + {R_PATH0_TIA_ERR_G1, B_PATH0_TIA_ERR_G1_A_MSK}, +}; + +DECLARE_DIG_TABLE(rtw89_8852a_tia_gain_a); + +const struct rtw89_phy_table rtw89_8852a_phy_bb_table = { + .regs = rtw89_8852a_phy_bb_regs, + .n_regs = ARRAY_SIZE(rtw89_8852a_phy_bb_regs), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_phy_table rtw89_8852a_phy_radioa_table = { + .regs = rtw89_8852a_phy_radioa_regs, + .n_regs = ARRAY_SIZE(rtw89_8852a_phy_radioa_regs), + .rf_path = RF_PATH_A, +}; + +const struct rtw89_phy_table rtw89_8852a_phy_radiob_table = { + .regs = rtw89_8852a_phy_radiob_regs, + .n_regs = ARRAY_SIZE(rtw89_8852a_phy_radiob_regs), + .rf_path = RF_PATH_B, +}; + +const struct rtw89_phy_table rtw89_8852a_phy_nctl_table = { + .regs = rtw89_8852a_phy_nctl_regs, + .n_regs = ARRAY_SIZE(rtw89_8852a_phy_nctl_regs), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_txpwr_table rtw89_8852a_byr_table = { + .data = rtw89_8852a_txpwr_byrate, + .size = ARRAY_SIZE(rtw89_8852a_txpwr_byrate), + .load = rtw89_phy_load_txpwr_byrate, +}; + +const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg = { + .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n, + .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p, + .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n, + .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p, + .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n, + .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p, + .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n, + .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p, + .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n, + .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p, + .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n, + .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p, +}; + 
+const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table = { + .cfg_lna_g = &rtw89_8852a_lna_gain_g_table, + .cfg_tia_g = &rtw89_8852a_tia_gain_g_table, + .cfg_lna_a = &rtw89_8852a_lna_gain_a_table, + .cfg_tia_a = &rtw89_8852a_tia_gain_a_table +}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h new file mode 100644 index 00000000000000..91379650628647 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_8852A_TABLE_H__ +#define __RTW89_8852A_TABLE_H__ + +#include "core.h" + +extern const struct rtw89_phy_table rtw89_8852a_phy_bb_table; +extern const struct rtw89_phy_table rtw89_8852a_phy_radioa_table; +extern const struct rtw89_phy_table rtw89_8852a_phy_radiob_table; +extern const struct rtw89_phy_table rtw89_8852a_phy_nctl_table; +extern const struct rtw89_txpwr_table rtw89_8852a_byr_table; +extern const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table; +extern const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg; +extern const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +extern const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c new file mode 100644 index 00000000000000..097c87899ceaa0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "debug.h" +#include "sar.h" + +static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg) +{ + struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common; + enum rtw89_subband subband = rtwdev->hal.current_subband; + + if (!rtwsar->set[subband]) + return -ENODATA; + + *cfg = rtwsar->cfg[subband]; + return 0; +} + +static const +struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = { + [RTW89_SAR_SOURCE_COMMON] = { + .descr_sar_source = "RTW89_SAR_SOURCE_COMMON", + .txpwr_factor_sar = 2, + .query_sar_config = rtw89_query_sar_config_common, + }, +}; + +#define rtw89_sar_set_src(_dev, _src, _cfg_name, _cfg_data) \ + do { \ + typeof(_src) _s = (_src); \ + typeof(_dev) _d = (_dev); \ + BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \ + BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \ + lockdep_assert_held(&_d->mutex); \ + _d->sar._cfg_name = *(_cfg_data); \ + _d->sar.src = _s; \ + } while (0) + +static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg) +{ + const u8 fct_mac = rtwdev->chip->txpwr_factor_mac; + s32 cfg_mac; + + cfg_mac = fct > fct_mac ? 
+ cfg >> (fct - fct_mac) : cfg << (fct_mac - fct); + + return (s8)clamp_t(s32, cfg_mac, + RTW89_SAR_TXPWR_MAC_MIN, + RTW89_SAR_TXPWR_MAC_MAX); +} + +s8 rtw89_query_sar(struct rtw89_dev *rtwdev) +{ + const enum rtw89_sar_sources src = rtwdev->sar.src; + /* its members are protected by rtw89_sar_set_src() */ + const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + int ret; + s32 cfg; + u8 fct; + + lockdep_assert_held(&rtwdev->mutex); + + if (src == RTW89_SAR_SOURCE_NONE) + return RTW89_SAR_TXPWR_MAC_MAX; + + ret = sar_hdl->query_sar_config(rtwdev, &cfg); + if (ret) + return RTW89_SAR_TXPWR_MAC_MAX; + + fct = sar_hdl->txpwr_factor_sar; + + return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg); +} + +void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) +{ + const enum rtw89_sar_sources src = rtwdev->sar.src; + /* its members are protected by rtw89_sar_set_src() */ + const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + const u8 fct_mac = rtwdev->chip->txpwr_factor_mac; + int ret; + s32 cfg; + u8 fct; + + lockdep_assert_held(&rtwdev->mutex); + + if (src == RTW89_SAR_SOURCE_NONE) { + seq_puts(m, "no SAR is applied\n"); + return; + } + + seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source); + + ret = sar_hdl->query_sar_config(rtwdev, &cfg); + if (ret) { + seq_printf(m, "config: return code: %d\n", ret); + seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n", + RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac)); + return; + } + + fct = sar_hdl->txpwr_factor_sar; + + seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct)); +} + +static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev, + const struct rtw89_sar_cfg_common *sar) +{ + enum rtw89_sar_sources src; + int ret = 0; + + mutex_lock(&rtwdev->mutex); + + src = rtwdev->sar.src; + if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) { + rtw89_warn(rtwdev, "SAR source: %d is in use", src); + ret = -EBUSY; + goto exit; + } + + rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar); + rtw89_chip_set_txpwr(rtwdev); + +exit: + mutex_unlock(&rtwdev->mutex); + return ret; +} + +static const u8 rtw89_common_sar_subband_map[] = { + RTW89_CH_2G, + RTW89_CH_5G_BAND_1, + RTW89_CH_5G_BAND_3, + RTW89_CH_5G_BAND_4, +}; + +static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = { + { .start_freq = 2412, .end_freq = 2484, }, + { .start_freq = 5180, .end_freq = 5320, }, + { .start_freq = 5500, .end_freq = 5720, }, + { .start_freq = 5745, .end_freq = 5825, }, +}; + +static_assert(ARRAY_SIZE(rtw89_common_sar_subband_map) == + ARRAY_SIZE(rtw89_common_sar_freq_ranges)); + +const struct cfg80211_sar_capa rtw89_sar_capa = { + .type = NL80211_SAR_TYPE_POWER, + .num_freq_ranges = ARRAY_SIZE(rtw89_common_sar_freq_ranges), + .freq_ranges = rtw89_common_sar_freq_ranges, +}; + +int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_sar_cfg_common sar_common = {0}; + u8 fct; + u32 freq_start; + u32 freq_end; + u32 band; + s32 power; + u32 i, idx; + + if (sar->type != NL80211_SAR_TYPE_POWER) + return -EINVAL; + + fct = rtw89_sar_handlers[RTW89_SAR_SOURCE_COMMON].txpwr_factor_sar; + + for (i = 0; i < sar->num_sub_specs; i++) { + idx = sar->sub_specs[i].freq_range_index; + if (idx >= ARRAY_SIZE(rtw89_common_sar_freq_ranges)) + return -EINVAL; + + freq_start = rtw89_common_sar_freq_ranges[idx].start_freq; + freq_end = rtw89_common_sar_freq_ranges[idx].end_freq; + band = 
rtw89_common_sar_subband_map[idx]; + power = sar->sub_specs[i].power; + + rtw89_info(rtwdev, "On freq %u to %u, ", freq_start, freq_end); + rtw89_info(rtwdev, "set SAR power limit %d (unit: 1/%lu dBm)\n", + power, BIT(fct)); + + sar_common.set[band] = true; + sar_common.cfg[band] = power; + } + + return rtw89_apply_sar_common(rtwdev, &sar_common); +} diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h new file mode 100644 index 00000000000000..7b5484c84eb11a --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#ifndef __RTW89_SAR_H__ +#define __RTW89_SAR_H__ + +#include "core.h" + +#define RTW89_SAR_TXPWR_MAC_MAX S8_MAX +#define RTW89_SAR_TXPWR_MAC_MIN S8_MIN + +struct rtw89_sar_handler { + const char *descr_sar_source; + u8 txpwr_factor_sar; + int (*query_sar_config)(struct rtw89_dev *rtwdev, s32 *cfg); +}; + +extern const struct cfg80211_sar_capa rtw89_sar_capa; + +s8 rtw89_query_sar(struct rtw89_dev *rtwdev); +void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev); +int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c new file mode 100644 index 00000000000000..837cdc366a61a0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "cam.h" +#include "debug.h" +#include "mac.h" +#include "ps.h" +#include "ser.h" +#include "util.h" + +#define SER_RECFG_TIMEOUT 1000 + +enum ser_evt { + SER_EV_NONE, + SER_EV_STATE_IN, + SER_EV_STATE_OUT, + SER_EV_L1_RESET, /* M1 */ + SER_EV_DO_RECOVERY, /* M3 */ + SER_EV_MAC_RESET_DONE, /* M5 */ + SER_EV_L2_RESET, + SER_EV_L2_RECFG_DONE, + SER_EV_L2_RECFG_TIMEOUT, + SER_EV_M3_TIMEOUT, + SER_EV_FW_M5_TIMEOUT, + SER_EV_L0_RESET, + SER_EV_MAXX +}; + +enum ser_state { + SER_IDLE_ST, + SER_RESET_TRX_ST, + SER_DO_HCI_ST, + SER_L2_RESET_ST, + SER_ST_MAX_ST +}; + +struct ser_msg { + struct list_head list; + u8 event; +}; + +struct state_ent { + u8 state; + char *name; + void (*st_func)(struct rtw89_ser *ser, u8 event); +}; + +struct event_ent { + u8 event; + char *name; +}; + +static char *ser_ev_name(struct rtw89_ser *ser, u8 event) +{ + if (event < SER_EV_MAXX) + return ser->ev_tbl[event].name; + + return "err_ev_name"; +} + +static char *ser_st_name(struct rtw89_ser *ser) +{ + if (ser->state < SER_ST_MAX_ST) + return ser->st_tbl[ser->state].name; + + return "err_st_name"; +} + +static void ser_state_run(struct rtw89_ser *ser, u8 evt) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n", + ser_st_name(ser), ser_ev_name(ser, evt)); + + rtw89_leave_lps(rtwdev); + ser->st_tbl[ser->state].st_func(ser, evt); +} + +static void ser_state_goto(struct rtw89_ser *ser, u8 new_state) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + if (ser->state == new_state || new_state >= SER_ST_MAX_ST) + return; + ser_state_run(ser, SER_EV_STATE_OUT); + + rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n", + ser_st_name(ser), ser->st_tbl[new_state].name); + + ser->state = new_state; + ser_state_run(ser, SER_EV_STATE_IN); +} + +static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser) +{ + 
struct ser_msg *msg; + + spin_lock_irq(&ser->msg_q_lock); + msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list); + if (msg) + list_del(&msg->list); + spin_unlock_irq(&ser->msg_q_lock); + + return msg; +} + +static void rtw89_ser_hdl_work(struct work_struct *work) +{ + struct ser_msg *msg; + struct rtw89_ser *ser = container_of(work, struct rtw89_ser, + ser_hdl_work); + + while ((msg = __rtw89_ser_dequeue_msg(ser))) { + ser_state_run(ser, msg->event); + kfree(msg); + } +} + +static int ser_send_msg(struct rtw89_ser *ser, u8 event) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + struct ser_msg *msg = NULL; + + if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) + return -EIO; + + msg = kmalloc(sizeof(*msg), GFP_ATOMIC); + if (!msg) + return -ENOMEM; + + msg->event = event; + + spin_lock_irq(&ser->msg_q_lock); + list_add(&msg->list, &ser->msg_q); + spin_unlock_irq(&ser->msg_q_lock); + + ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work); + return 0; +} + +static void rtw89_ser_alarm_work(struct work_struct *work) +{ + struct rtw89_ser *ser = container_of(work, struct rtw89_ser, + ser_alarm_work.work); + + ser_send_msg(ser, ser->alarm_event); + ser->alarm_event = SER_EV_NONE; +} + +static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) + return; + + ser->alarm_event = event; + ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work, + msecs_to_jiffies(ms)); +} + +static void ser_del_alarm(struct rtw89_ser *ser) +{ + cancel_delayed_work(&ser->ser_alarm_work); + ser->alarm_event = SER_EV_NONE; +} + +/* driver function */ +static void drv_stop_tx(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + ieee80211_stop_queues(rtwdev->hw); + set_bit(RTW89_SER_DRV_STOP_TX, ser->flags); +} + +static void drv_stop_rx(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags); + set_bit(RTW89_SER_DRV_STOP_RX, ser->flags); +} + +static void drv_trx_reset(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + rtw89_hci_reset(rtwdev); +} + +static void drv_resume_tx(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags)) + return; + + ieee80211_wake_queues(rtwdev->hw); + clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags); +} + +static void drv_resume_rx(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags)) + return; + + set_bit(RTW89_FLAG_RUNNING, rtwdev->flags); + clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags); +} + +static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); + rtwvif->net_type = RTW89_NET_TYPE_NO_LINK; + rtwvif->trigger = false; +} + +static void ser_reset_mac_binding(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + + rtw89_cam_reset_keys(rtwdev); + rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM); + rtw89_for_each_rtwvif(rtwdev, rtwvif) + ser_reset_vif(rtwdev, rtwvif); +} + +/* hal function */ +static int hal_enable_dma(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + int ret; + + if 
(!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags)) + return 0; + + if (!rtwdev->hci.ops->mac_lv1_rcvy) + return -EIO; + + ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2); + if (!ret) + clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags); + + return ret; +} + +static int hal_stop_dma(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + int ret; + + if (!rtwdev->hci.ops->mac_lv1_rcvy) + return -EIO; + + ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1); + if (!ret) + set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags); + + return ret; +} + +static void hal_send_m2_event(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN); +} + +static void hal_send_m4_event(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN); +} + +/* state handler */ +static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt) +{ + switch (evt) { + case SER_EV_STATE_IN: + break; + case SER_EV_L1_RESET: + ser_state_goto(ser, SER_RESET_TRX_ST); + break; + case SER_EV_L2_RESET: + ser_state_goto(ser, SER_L2_RESET_ST); + break; + case SER_EV_STATE_OUT: + default: + break; + } +} + +static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) +{ + switch (evt) { + case SER_EV_STATE_IN: + drv_stop_tx(ser); + + if (hal_stop_dma(ser)) { + ser_state_goto(ser, SER_L2_RESET_ST); + break; + } + + drv_stop_rx(ser); + drv_trx_reset(ser); + + /* wait m3 */ + hal_send_m2_event(ser); + + /* set alarm to prevent FW response timeout */ + ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT); + break; + + case SER_EV_DO_RECOVERY: + ser_state_goto(ser, SER_DO_HCI_ST); + break; + + case SER_EV_M3_TIMEOUT: + ser_state_goto(ser, SER_L2_RESET_ST); + break; + + case SER_EV_STATE_OUT: + ser_del_alarm(ser); + hal_enable_dma(ser); + drv_resume_rx(ser); + drv_resume_tx(ser); + break; + + default: + break; + } +} + +static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt) +{ + switch (evt) { + case SER_EV_STATE_IN: + /* wait m5 */ + hal_send_m4_event(ser); + + /* prevent FW response timeout */ + ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT); + break; + + case SER_EV_FW_M5_TIMEOUT: + ser_state_goto(ser, SER_L2_RESET_ST); + break; + + case SER_EV_MAC_RESET_DONE: + ser_state_goto(ser, SER_IDLE_ST); + break; + + case SER_EV_STATE_OUT: + ser_del_alarm(ser); + break; + + default: + break; + } +} + +static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + + switch (evt) { + case SER_EV_STATE_IN: + mutex_lock(&rtwdev->mutex); + ser_reset_mac_binding(rtwdev); + rtw89_core_stop(rtwdev); + mutex_unlock(&rtwdev->mutex); + + ieee80211_restart_hw(rtwdev->hw); + ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); + break; + + case SER_EV_L2_RECFG_TIMEOUT: + rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n"); + fallthrough; + case SER_EV_L2_RECFG_DONE: + ser_state_goto(ser, SER_IDLE_ST); + break; + + case SER_EV_STATE_OUT: + ser_del_alarm(ser); + break; + + default: + break; + } +} + +static struct event_ent ser_ev_tbl[] = { + {SER_EV_NONE, "SER_EV_NONE"}, + {SER_EV_STATE_IN, "SER_EV_STATE_IN"}, + {SER_EV_STATE_OUT, "SER_EV_STATE_OUT"}, + {SER_EV_L1_RESET, "SER_EV_L1_RESET"}, + {SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"}, + {SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"}, + {SER_EV_L2_RESET, 
"SER_EV_L2_RESET"}, + {SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"}, + {SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"}, + {SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"}, + {SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"}, + {SER_EV_L0_RESET, "SER_EV_L0_RESET"}, + {SER_EV_MAXX, "SER_EV_MAX"} +}; + +static struct state_ent ser_st_tbl[] = { + {SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl}, + {SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl}, + {SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl}, + {SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl} +}; + +int rtw89_ser_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_ser *ser = &rtwdev->ser; + + memset(ser, 0, sizeof(*ser)); + INIT_LIST_HEAD(&ser->msg_q); + ser->state = SER_IDLE_ST; + ser->st_tbl = ser_st_tbl; + ser->ev_tbl = ser_ev_tbl; + + bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS); + spin_lock_init(&ser->msg_q_lock); + INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work); + INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work); + return 0; +} + +int rtw89_ser_deinit(struct rtw89_dev *rtwdev) +{ + struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser; + + set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags); + cancel_delayed_work_sync(&ser->ser_alarm_work); + cancel_work_sync(&ser->ser_hdl_work); + clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags); + return 0; +} + +void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev) +{ + ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE); +} + +int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err) +{ + u8 event = SER_EV_NONE; + + rtw89_info(rtwdev, "ser event = 0x%04x\n", err); + + switch (err) { + case MAC_AX_ERR_L1_ERR_DMAC: + case MAC_AX_ERR_L0_PROMOTE_TO_L1: + event = SER_EV_L1_RESET; /* M1 */ + break; + case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE: + event = SER_EV_DO_RECOVERY; /* M3 */ + break; + case MAC_AX_ERR_L1_RESET_RECOVERY_DONE: + event = SER_EV_MAC_RESET_DONE; /* M5 */ + break; + case MAC_AX_ERR_L0_ERR_CMAC0: + case MAC_AX_ERR_L0_ERR_CMAC1: + case MAC_AX_ERR_L0_RESET_DONE: + event = SER_EV_L0_RESET; + break; + default: + if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 || + (err >= MAC_AX_ERR_L2_ERR_AH_DMA && + err <= MAC_AX_GET_ERR_MAX)) + event = SER_EV_L2_RESET; + break; + } + + if (event == SER_EV_NONE) + return -EINVAL; + + ser_send_msg(&rtwdev->ser, event); + return 0; +} +EXPORT_SYMBOL(rtw89_ser_notify); diff --git a/drivers/net/wireless/realtek/rtw89/ser.h b/drivers/net/wireless/realtek/rtw89/ser.h new file mode 100644 index 00000000000000..6b8e62019942db --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/ser.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + * Copyright(c) 2019-2020 Realtek Corporation + */ +#ifndef __SER_H__ +#define __SER_H__ + +#include "core.h" + +int rtw89_ser_init(struct rtw89_dev *rtwdev); +int rtw89_ser_deinit(struct rtw89_dev *rtwdev); +int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err); +void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev); + +#endif /* __SER_H__*/ + diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h new file mode 100644 index 00000000000000..f1e0fe36107de5 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -0,0 +1,358 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2020 Realtek Corporation + */ + +#ifndef __RTW89_TXRX_H__ +#define __RTW89_TXRX_H__ + +#include "debug.h" + +#define DATA_RATE_MODE_CTRL_MASK GENMASK(8, 7) +#define DATA_RATE_NOT_HT_IDX_MASK GENMASK(3, 0) +#define DATA_RATE_MODE_NON_HT 0x0 +#define 
DATA_RATE_HT_IDX_MASK GENMASK(4, 0) +#define DATA_RATE_MODE_HT 0x1 +#define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4) +#define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0) +#define DATA_RATE_MODE_VHT 0x2 +#define DATA_RATE_MODE_HE 0x3 +#define GET_DATA_RATE_MODE(r) FIELD_GET(DATA_RATE_MODE_CTRL_MASK, r) +#define GET_DATA_RATE_NOT_HT_IDX(r) FIELD_GET(DATA_RATE_NOT_HT_IDX_MASK, r) +#define GET_DATA_RATE_HT_IDX(r) FIELD_GET(DATA_RATE_HT_IDX_MASK, r) +#define GET_DATA_RATE_VHT_HE_IDX(r) FIELD_GET(DATA_RATE_VHT_HE_IDX_MASK, r) +#define GET_DATA_RATE_NSS(r) FIELD_GET(DATA_RATE_VHT_HE_NSS_MASK, r) + +/* TX WD BODY DWORD 0 */ +#define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24) +#define RTW89_TXWD_BODY0_MORE_DATA BIT(23) +#define RTW89_TXWD_BODY0_WD_INFO_EN BIT(22) +#define RTW89_TXWD_BODY0_FW_DL BIT(20) +#define RTW89_TXWD_BODY0_CHANNEL_DMA GENMASK(19, 16) +#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11) +#define RTW89_TXWD_BODY0_WD_PAGE BIT(7) +#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5) + +/* TX WD BODY DWORD 1 */ +#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16) + +/* TX WD BODY DWORD 2 */ +#define RTW89_TXWD_BODY2_MACID GENMASK(30, 24) +#define RTW89_TXWD_BODY2_TID_INDICATE BIT(23) +#define RTW89_TXWD_BODY2_QSEL GENMASK(22, 17) +#define RTW89_TXWD_BODY2_TXPKT_SIZE GENMASK(13, 0) + +/* TX WD BODY DWORD 3 */ +#define RTW89_TXWD_BODY3_BK BIT(13) +#define RTW89_TXWD_BODY3_AGG_EN BIT(12) +#define RTW89_TXWD_BODY3_SW_SEQ GENMASK(11, 0) + +/* TX WD BODY DWORD 4 */ + +/* TX WD BODY DWORD 5 */ + +/* TX WD INFO DWORD 0 */ +#define RTW89_TXWD_INFO0_USE_RATE BIT(30) +#define RTW89_TXWD_INFO0_DATA_BW GENMASK(29, 28) +#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25) +#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16) +#define RTW89_TXWD_INFO0_DISDATAFB BIT(10) + +/* TX WD INFO DWORD 1 */ +#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16) +#define RTW89_TXWD_INFO1_A_CTRL_BSR BIT(14) +#define RTW89_TXWD_INFO1_MAX_AGGNUM GENMASK(7, 0) + +/* TX WD INFO DWORD 2 */ +#define RTW89_TXWD_INFO2_AMPDU_DENSITY GENMASK(20, 18) +#define RTW89_TXWD_INFO2_SEC_TYPE GENMASK(12, 9) +#define RTW89_TXWD_INFO2_SEC_HW_ENC BIT(8) +#define RTW89_TXWD_INFO2_SEC_CAM_IDX GENMASK(7, 0) + +/* TX WD INFO DWORD 3 */ + +/* TX WD INFO DWORD 4 */ +#define RTW89_TXWD_INFO4_RTS_EN BIT(27) +#define RTW89_TXWD_INFO4_HW_RTS_EN BIT(31) + +/* TX WD INFO DWORD 5 */ + +/* RX DESC helpers */ +/* Short Descriptor */ +#define RTW89_GET_RXWD_LONG_RXD(rxdesc) \ + le32_get_bits((rxdesc)->dword0, BIT(31)) +#define RTW89_GET_RXWD_DRV_INFO_SIZE(rxdesc) \ + le32_get_bits((rxdesc)->dword0, GENMASK(30, 28)) +#define RTW89_GET_RXWD_RPKT_TYPE(rxdesc) \ + le32_get_bits((rxdesc)->dword0, GENMASK(27, 24)) +#define RTW89_GET_RXWD_MAC_INFO_VALID(rxdesc) \ + le32_get_bits((rxdesc)->dword0, BIT(23)) +#define RTW89_GET_RXWD_BB_SEL(rxdesc) \ + le32_get_bits((rxdesc)->dword0, BIT(22)) +#define RTW89_GET_RXWD_HD_IV_LEN(rxdesc) \ + le32_get_bits((rxdesc)->dword0, GENMASK(21, 16)) +#define RTW89_GET_RXWD_SHIFT(rxdesc) \ + le32_get_bits((rxdesc)->dword0, GENMASK(15, 14)) +#define RTW89_GET_RXWD_PKT_SIZE(rxdesc) \ + le32_get_bits((rxdesc)->dword0, GENMASK(13, 0)) +#define RTW89_GET_RXWD_BW(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(31, 30)) +#define RTW89_GET_RXWD_GI_LTF(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(27, 25)) +#define RTW89_GET_RXWD_DATA_RATE(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(24, 16)) +#define RTW89_GET_RXWD_USER_ID(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(15, 8)) +#define RTW89_GET_RXWD_SR_EN(rxdesc) \ + 
le32_get_bits((rxdesc)->dword1, BIT(7)) +#define RTW89_GET_RXWD_PPDU_CNT(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(6, 4)) +#define RTW89_GET_RXWD_PPDU_TYPE(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(3, 0)) +#define RTW89_GET_RXWD_FREE_RUN_CNT(rxdesc) \ + le32_get_bits((rxdesc)->dword2, GENMASK(31, 0)) +#define RTW89_GET_RXWD_ICV_ERR(rxdesc) \ + le32_get_bits((rxdesc)->dword3, BIT(10)) +#define RTW89_GET_RXWD_CRC32_ERR(rxdesc) \ + le32_get_bits((rxdesc)->dword3, BIT(9)) +#define RTW89_GET_RXWD_HW_DEC(rxdesc) \ + le32_get_bits((rxdesc)->dword3, BIT(2)) +#define RTW89_GET_RXWD_SW_DEC(rxdesc) \ + le32_get_bits((rxdesc)->dword3, BIT(1)) +#define RTW89_GET_RXWD_A1_MATCH(rxdesc) \ + le32_get_bits((rxdesc)->dword3, BIT(0)) + +/* Long Descriptor */ +#define RTW89_GET_RXWD_FRAG(rxdesc) \ + le32_get_bits((rxdesc)->dword4, GENMASK(31, 28)) +#define RTW89_GET_RXWD_SEQ(rxdesc) \ + le32_get_bits((rxdesc)->dword4, GENMASK(27, 16)) +#define RTW89_GET_RXWD_TYPE(rxdesc) \ + le32_get_bits((rxdesc)->dword4, GENMASK(1, 0)) +#define RTW89_GET_RXWD_ADDR_CAM_VLD(rxdesc) \ + le32_get_bits((rxdesc)->dword5, BIT(28)) +#define RTW89_GET_RXWD_RX_PL_ID(rxdesc) \ + le32_get_bits((rxdesc)->dword5, GENMASK(27, 24)) +#define RTW89_GET_RXWD_MAC_ID(rxdesc) \ + le32_get_bits((rxdesc)->dword5, GENMASK(23, 16)) +#define RTW89_GET_RXWD_ADDR_CAM_ID(rxdesc) \ + le32_get_bits((rxdesc)->dword5, GENMASK(15, 8)) +#define RTW89_GET_RXWD_SEC_CAM_ID(rxdesc) \ + le32_get_bits((rxdesc)->dword5, GENMASK(7, 0)) + +#define RTW89_GET_RXINFO_USR_NUM(rpt) \ + le32_get_bits(*((__le32 *)rpt), GENMASK(3, 0)) +#define RTW89_GET_RXINFO_FW_DEFINE(rpt) \ + le32_get_bits(*((__le32 *)rpt), GENMASK(15, 8)) +#define RTW89_GET_RXINFO_LSIG_LEN(rpt) \ + le32_get_bits(*((__le32 *)rpt), GENMASK(27, 16)) +#define RTW89_GET_RXINFO_IS_TO_SELF(rpt) \ + le32_get_bits(*((__le32 *)rpt), BIT(28)) +#define RTW89_GET_RXINFO_RX_CNT_VLD(rpt) \ + le32_get_bits(*((__le32 *)rpt), BIT(29)) +#define RTW89_GET_RXINFO_LONG_RXD(rpt) \ + le32_get_bits(*((__le32 *)rpt), GENMASK(31, 30)) +#define RTW89_GET_RXINFO_SERVICE(rpt) \ + le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(15, 0)) +#define RTW89_GET_RXINFO_PLCP_LEN(rpt) \ + le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(23, 16)) +#define RTW89_GET_RXINFO_MAC_ID_VALID(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(0)) +#define RTW89_GET_RXINFO_DATA(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(1)) +#define RTW89_GET_RXINFO_CTRL(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(2)) +#define RTW89_GET_RXINFO_MGMT(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(3)) +#define RTW89_GET_RXINFO_BCM(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(4)) +#define RTW89_GET_RXINFO_MACID(rpt, usr) \ + le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), GENMASK(15, 8)) + +#define RTW89_GET_PHY_STS_RSSI_A(sts) \ + le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(7, 0)) +#define RTW89_GET_PHY_STS_RSSI_B(sts) \ + le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(15, 8)) +#define RTW89_GET_PHY_STS_RSSI_C(sts) \ + le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(23, 16)) +#define RTW89_GET_PHY_STS_RSSI_D(sts) \ + le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(31, 24)) +#define RTW89_GET_PHY_STS_LEN(sts) \ + le32_get_bits(*((__le32 *)sts), GENMASK(15, 8)) +#define RTW89_GET_PHY_STS_RSSI_AVG(sts) \ + le32_get_bits(*((__le32 *)sts), GENMASK(31, 24)) +#define RTW89_GET_PHY_STS_IE_TYPE(ie) \ + le32_get_bits(*((__le32 *)ie), GENMASK(4, 0)) +#define RTW89_GET_PHY_STS_IE_LEN(ie) \ 
+ le32_get_bits(*((__le32 *)ie), GENMASK(11, 5)) +#define RTW89_GET_PHY_STS_IE0_CFO(ie) \ + le32_get_bits(*((__le32 *)(ie) + 1), GENMASK(31, 20)) + +enum rtw89_tx_channel { + RTW89_TXCH_ACH0 = 0, + RTW89_TXCH_ACH1 = 1, + RTW89_TXCH_ACH2 = 2, + RTW89_TXCH_ACH3 = 3, + RTW89_TXCH_ACH4 = 4, + RTW89_TXCH_ACH5 = 5, + RTW89_TXCH_ACH6 = 6, + RTW89_TXCH_ACH7 = 7, + RTW89_TXCH_CH8 = 8, /* MGMT Band 0 */ + RTW89_TXCH_CH9 = 9, /* HI Band 0 */ + RTW89_TXCH_CH10 = 10, /* MGMT Band 1 */ + RTW89_TXCH_CH11 = 11, /* HI Band 1 */ + RTW89_TXCH_CH12 = 12, /* FW CMD */ + + /* keep last */ + RTW89_TXCH_NUM, + RTW89_TXCH_MAX = RTW89_TXCH_NUM - 1 +}; + +enum rtw89_rx_channel { + RTW89_RXCH_RXQ = 0, + RTW89_RXCH_RPQ = 1, + + /* keep last */ + RTW89_RXCH_NUM, + RTW89_RXCH_MAX = RTW89_RXCH_NUM - 1 +}; + +enum rtw89_tx_qsel { + RTW89_TX_QSEL_BE_0 = 0x00, + RTW89_TX_QSEL_BK_0 = 0x01, + RTW89_TX_QSEL_VI_0 = 0x02, + RTW89_TX_QSEL_VO_0 = 0x03, + RTW89_TX_QSEL_BE_1 = 0x04, + RTW89_TX_QSEL_BK_1 = 0x05, + RTW89_TX_QSEL_VI_1 = 0x06, + RTW89_TX_QSEL_VO_1 = 0x07, + RTW89_TX_QSEL_BE_2 = 0x08, + RTW89_TX_QSEL_BK_2 = 0x09, + RTW89_TX_QSEL_VI_2 = 0x0a, + RTW89_TX_QSEL_VO_2 = 0x0b, + RTW89_TX_QSEL_BE_3 = 0x0c, + RTW89_TX_QSEL_BK_3 = 0x0d, + RTW89_TX_QSEL_VI_3 = 0x0e, + RTW89_TX_QSEL_VO_3 = 0x0f, + RTW89_TX_QSEL_B0_BCN = 0x10, + RTW89_TX_QSEL_B0_HI = 0x11, + RTW89_TX_QSEL_B0_MGMT = 0x12, + RTW89_TX_QSEL_B0_NOPS = 0x13, + RTW89_TX_QSEL_B0_MGMT_FAST = 0x14, + /* reserved */ + /* reserved */ + /* reserved */ + RTW89_TX_QSEL_B1_BCN = 0x18, + RTW89_TX_QSEL_B1_HI = 0x19, + RTW89_TX_QSEL_B1_MGMT = 0x1a, + RTW89_TX_QSEL_B1_NOPS = 0x1b, + RTW89_TX_QSEL_B1_MGMT_FAST = 0x1c, + /* reserved */ + /* reserved */ + /* reserved */ +}; + +enum rtw89_phy_status_ie_type { + RTW89_PHYSTS_IE00_CMN_CCK = 0, + RTW89_PHYSTS_IE01_CMN_OFDM = 1, + RTW89_PHYSTS_IE02_CMN_EXT_AX = 2, + RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3, + RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4, + RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5, + RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6, + RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7, + RTW89_PHYSTS_IE08_FTR_CH = 8, + RTW89_PHYSTS_IE09_FTR_PLCP_0 = 9, + RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10, + RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11, + RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12, + RTW89_PHYSTS_IE13_DL_MU_DEF = 13, + RTW89_PHYSTS_IE14_TB_UL_CQI = 14, + RTW89_PHYSTS_IE15_TB_UL_DEF = 15, + RTW89_PHYSTS_IE16_RSVD16 = 16, + RTW89_PHYSTS_IE17_TB_UL_CTRL = 17, + RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18, + RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19, + RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20, + RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21, + RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22, + RTW89_PHYSTS_IE23_RSVD23 = 23, + RTW89_PHYSTS_IE24_DBG_OFDM_TD_PATH_A = 24, + RTW89_PHYSTS_IE25_DBG_OFDM_TD_PATH_B = 25, + RTW89_PHYSTS_IE26_DBG_OFDM_TD_PATH_C = 26, + RTW89_PHYSTS_IE27_DBG_OFDM_TD_PATH_D = 27, + RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28, + RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29, + RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30, + RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31, + + /* keep last */ + RTW89_PHYSTS_IE_NUM, + RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1 +}; + +static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid) +{ + switch (tid) { + default: + rtw89_warn(rtwdev, "Should use tag 1d: %d\n", tid); + fallthrough; + case 0: + case 3: + return RTW89_TX_QSEL_BE_0; + case 1: + case 2: + return RTW89_TX_QSEL_BK_0; + case 4: + case 5: + return RTW89_TX_QSEL_VI_0; + case 6: + case 7: + return RTW89_TX_QSEL_VO_0; + } +} + +static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev 
*rtwdev, u8 qsel) +{ + switch (qsel) { + default: + rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel); + fallthrough; + case RTW89_TX_QSEL_BE_0: + return RTW89_TXCH_ACH0; + case RTW89_TX_QSEL_BK_0: + return RTW89_TXCH_ACH1; + case RTW89_TX_QSEL_VI_0: + return RTW89_TXCH_ACH2; + case RTW89_TX_QSEL_VO_0: + return RTW89_TXCH_ACH3; + case RTW89_TX_QSEL_B0_MGMT: + return RTW89_TXCH_CH8; + case RTW89_TX_QSEL_B0_HI: + return RTW89_TXCH_CH9; + case RTW89_TX_QSEL_B1_MGMT: + return RTW89_TXCH_CH10; + case RTW89_TX_QSEL_B1_HI: + return RTW89_TXCH_CH11; + } +} + +static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid) +{ + switch (tid) { + case 3: + case 2: + case 5: + case 7: + return 1; + default: + rtw89_warn(rtwdev, "Should use tag 1d: %d\n", tid); + fallthrough; + case 0: + case 1: + case 4: + case 6: + return 0; + } +} + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h new file mode 100644 index 00000000000000..229e81009de63b --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + * Copyright(c) 2019-2020 Realtek Corporation + */ +#ifndef __RTW89_UTIL_H__ +#define __RTW89_UTIL_H__ + +#include "core.h" + +#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \ + ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \ + IEEE80211_IFACE_ITER_NORMAL, iterator, data) + +/* call this function with rtwdev->mutex is held */ +#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \ + list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list) + +#endif diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 63ce2443f13647..ff2448394a1eda 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -3501,7 +3501,6 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) cancel_delayed_work_sync(&priv->dev_poller_work); cancel_delayed_work_sync(&priv->scan_work); cancel_work_sync(&priv->work); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); wiphy_free(wiphy); @@ -3518,7 +3517,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf) cancel_delayed_work_sync(&priv->dev_poller_work); cancel_delayed_work_sync(&priv->scan_work); cancel_work_sync(&priv->work); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); rndis_unbind(usbdev, intf); diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index a48e616e0fb916..6bfaab48b507de 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -399,6 +399,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; + /* info->driver_data and info->control part of union so make copy */ + tx_params->have_key = !!info->control.hw_key; wh = (struct ieee80211_hdr *)&skb->data[0]; tx_params->sta_id = 0; diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index f4a26f16f00f44..dca81a4bbdd7f6 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE); if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) && - info->control.hw_key) { + tx_params->have_key) { if 
(rsi_is_cipher_wep(common)) ieee80211_size += 4; else @@ -214,15 +214,17 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) RSI_WIFI_DATA_Q); data_desc->header_len = ieee80211_size; - if (common->min_rate != RSI_RATE_AUTO) { + if (common->rate_config[common->band].fixed_enabled) { /* Send fixed rate */ + u16 fixed_rate = common->rate_config[common->band].fixed_hw_rate; + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); - data_desc->rate_info = cpu_to_le16(common->min_rate); + data_desc->rate_info = cpu_to_le16(fixed_rate); if (conf_is_ht40(&common->priv->hw->conf)) data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); - if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) { + if (common->vif_info[0].sgi && (fixed_rate & 0x100)) { /* Only MCS rates */ data_desc->rate_info |= cpu_to_le16(ENABLE_SHORTGI_RATE); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index b66975f5456751..e70c1c7fdf5956 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -510,7 +510,6 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, if ((vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_P2P_GO)) { rsi_send_rx_filter_frame(common, DISALLOW_BEACONS); - common->min_rate = RSI_RATE_AUTO; for (i = 0; i < common->max_stations; i++) common->stations[i].sta = NULL; } @@ -1228,20 +1227,32 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { + const unsigned int mcs_offset = ARRAY_SIZE(rsi_rates); struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; - enum nl80211_band band = hw->conf.chandef.chan->band; + int i; mutex_lock(&common->mutex); - common->fixedrate_mask[band] = 0; - if (mask->control[band].legacy == 0xfff) { - common->fixedrate_mask[band] = - (mask->control[band].ht_mcs[0] << 12); - } else { - common->fixedrate_mask[band] = - mask->control[band].legacy; + for (i = 0; i < ARRAY_SIZE(common->rate_config); i++) { + struct rsi_rate_config *cfg = &common->rate_config[i]; + u32 bm; + + bm = mask->control[i].legacy | (mask->control[i].ht_mcs[0] << mcs_offset); + if (hweight32(bm) == 1) { /* single rate */ + int rate_index = ffs(bm) - 1; + + if (rate_index < mcs_offset) + cfg->fixed_hw_rate = rsi_rates[rate_index].hw_value; + else + cfg->fixed_hw_rate = rsi_mcsrates[rate_index - mcs_offset]; + cfg->fixed_enabled = true; + } else { + cfg->configured_mask = bm; + cfg->fixed_enabled = false; + } } + mutex_unlock(&common->mutex); return 0; @@ -1378,46 +1389,6 @@ void rsi_indicate_pkt_to_os(struct rsi_common *common, ieee80211_rx_irqsafe(hw, skb); } -static void rsi_set_min_rate(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - struct rsi_common *common) -{ - u8 band = hw->conf.chandef.chan->band; - u8 ii; - u32 rate_bitmap; - bool matched = false; - - common->bitrate_mask[band] = sta->supp_rates[band]; - - rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]); - - if (rate_bitmap & 0xfff) { - /* Find out the min rate */ - for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) { - if (rate_bitmap & BIT(ii)) { - common->min_rate = rsi_rates[ii].hw_value; - matched = true; - break; - } - } - } - - common->vif_info[0].is_ht = sta->ht_cap.ht_supported; - - if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) { - for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) { - if ((rate_bitmap >> 12) & BIT(ii)) { - common->min_rate = 
rsi_mcsrates[ii]; - matched = true; - break; - } - } - } - - if (!matched) - common->min_rate = 0xffff; -} - /** * rsi_mac80211_sta_add() - This function notifies driver about a peer getting * connected. @@ -1516,9 +1487,9 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, if ((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) { - rsi_set_min_rate(hw, sta, common); + common->bitrate_mask[common->band] = sta->supp_rates[common->band]; + common->vif_info[0].is_ht = sta->ht_cap.ht_supported; if (sta->ht_cap.ht_supported) { - common->vif_info[0].is_ht = true; common->bitrate_mask[NL80211_BAND_2GHZ] = sta->supp_rates[NL80211_BAND_2GHZ]; if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || @@ -1592,7 +1563,6 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, bss->qos = sta->wme; common->bitrate_mask[NL80211_BAND_2GHZ] = 0; common->bitrate_mask[NL80211_BAND_5GHZ] = 0; - common->min_rate = 0xffff; common->vif_info[0].is_ht = false; common->vif_info[0].sgi = false; common->vif_info[0].seq_start = 0; diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index d98483298555c2..f1bf71e6c6081e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -211,9 +211,10 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST]; if (bt_pkt_type == BT_CARD_READY_IND) { rsi_dbg(INFO_ZONE, "BT Card ready recvd\n"); - if (rsi_bt_ops.attach(common, &g_proto_ops)) - rsi_dbg(ERR_ZONE, - "Failed to attach BT module\n"); + if (common->fsm_state == FSM_MAC_INIT_DONE) + rsi_attach_bt(common); + else + common->bt_defer_attach = true; } else { if (common->bt_adapter) rsi_bt_ops.recv_pkt(common->bt_adapter, @@ -278,6 +279,15 @@ void rsi_set_bt_context(void *priv, void *bt_context) } #endif +void rsi_attach_bt(struct rsi_common *common) +{ +#ifdef CONFIG_RSI_COEX + if (rsi_bt_ops.attach(common, &g_proto_ops)) + rsi_dbg(ERR_ZONE, + "Failed to attach BT module\n"); +#endif +} + /** * rsi_91x_init() - This function initializes os interface operations. * @oper_mode: One of DEV_OPMODE_*. 
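Taken together, the BT card-ready handling above and the TA-confirm handling in rsi_91x_mgmt.c below implement a deferred attach: the BT module is attached immediately only if the WLAN MAC has already finished initialising, otherwise the request is remembered and honoured once MAC init completes. A minimal sketch of the two call sites, condensed from this patch (the surrounding driver state machine is assumed):

	/* rsi_read_pkt(): BT firmware announced card-ready */
	if (common->fsm_state == FSM_MAC_INIT_DONE)
		rsi_attach_bt(common);		/* MAC init already done, attach now */
	else
		common->bt_defer_attach = true;	/* remember, attach from TA confirm */

	/* rsi_handle_ta_confirm_type(): MAC init just completed */
	if (common->bt_defer_attach)
		rsi_attach_bt(common);		/* honour the deferred request */

Note that rsi_attach_bt() itself compiles to a no-op unless CONFIG_RSI_COEX is enabled, so the deferral logic is harmless on WLAN-only builds.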
@@ -359,6 +369,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode) if (common->coex_mode > 1) { if (rsi_coex_attach(common)) { rsi_dbg(ERR_ZONE, "Failed to init coex module\n"); + rsi_kill_thread(&common->tx_thread); goto err; } } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 891fd5f0fa7657..0848f7a7e76c6a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -276,7 +276,7 @@ static void rsi_set_default_parameters(struct rsi_common *common) common->channel_width = BW_20MHZ; common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; common->channel = 1; - common->min_rate = 0xffff; + memset(&common->rate_config, 0, sizeof(common->rate_config)); common->fsm_state = FSM_CARD_NOT_READY; common->iface_down = true; common->endpoint = EP_2GHZ_20MHZ; @@ -1314,7 +1314,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, u8 band = hw->conf.chandef.chan->band; u8 num_supported_rates = 0; u8 rate_table_offset, rate_offset = 0; - u32 rate_bitmap; + u32 rate_bitmap, configured_rates; u16 *selected_rates, min_rate; bool is_ht = false, is_sgi = false; u16 frame_len = sizeof(struct rsi_auto_rate); @@ -1364,6 +1364,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, is_sgi = true; } + /* Limit to any rates administratively configured by cfg80211 */ + configured_rates = common->rate_config[band].configured_mask ?: 0xffffffff; + rate_bitmap &= configured_rates; + if (band == NL80211_BAND_2GHZ) { if ((rate_bitmap == 0) && (is_ht)) min_rate = RSI_RATE_MCS0; @@ -1389,10 +1393,13 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, num_supported_rates = jj; if (is_ht) { - for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) - selected_rates[jj++] = mcs[ii]; - num_supported_rates += ARRAY_SIZE(mcs); - rate_offset += ARRAY_SIZE(mcs); + for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) { + if (configured_rates & BIT(ii + ARRAY_SIZE(rsi_rates))) { + selected_rates[jj++] = mcs[ii]; + num_supported_rates++; + rate_offset++; + } + } } sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL); @@ -1482,7 +1489,7 @@ void rsi_inform_bss_status(struct rsi_common *common, qos_enable, aid, sta_id, vif); - if (common->min_rate == 0xffff) + if (!common->rate_config[common->band].fixed_enabled) rsi_send_auto_rate_request(common, sta, sta_id, vif); if (opmode == RSI_OPMODE_STA && !(assoc_cap & WLAN_CAPABILITY_PRIVACY) && @@ -2071,6 +2078,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, if (common->reinit_hw) { complete(&common->wlan_init_completion); } else { + if (common->bt_defer_attach) + rsi_attach_bt(common); + return rsi_mac80211_attach(common); } } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index e0c502bc427074..9f16128e4ffab1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -24,10 +24,7 @@ /* Default operating mode is wlan STA + BT */ static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL; module_param(dev_oper_mode, ushort, 0444); -MODULE_PARM_DESC(dev_oper_mode, - "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n" - "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n" - "6[AP + BT classic], 14[AP + BT classic + BT LE]"); +MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC); /** * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg. 
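Both the SDIO front end above and the USB front end below now take their dev_oper_mode help text from DEV_OPMODE_PARAM_DESC (added to rsi_hal.h further down), so the description is generated from the DEV_OPMODE_* constants instead of being maintained by hand in two places. A minimal sketch of the mechanism, using hypothetical EXAMPLE_* names whose values match the old hard-coded help text:

	#include <linux/stringify.h>

	#define EXAMPLE_MODE_WIFI	1
	#define EXAMPLE_MODE_BT		4

	/* __stringify() turns the expanded macro value into a string literal;
	 * the compiler then concatenates adjacent literals, so this expands to
	 * "1[Wi-Fi alone], 4[BT classic alone]".
	 */
	#define EXAMPLE_PARAM_DESC \
		__stringify(EXAMPLE_MODE_WIFI) "[Wi-Fi alone], " \
		__stringify(EXAMPLE_MODE_BT) "[BT classic alone]"

Because the values are stringified at the point of use, a change to any DEV_OPMODE_* definition is reflected in the module-parameter description automatically.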
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 416976f098882f..6821ea99189563 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -25,10 +25,7 @@ /* Default operating mode is wlan STA + BT */ static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL; module_param(dev_oper_mode, ushort, 0444); -MODULE_PARM_DESC(dev_oper_mode, - "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n" - "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n" - "6[AP + BT classic], 14[AP + BT classic + BT LE]"); +MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC); static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags); @@ -61,7 +58,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter, (void *)seg, (int)len, &transfer, - HZ * 5); + USB_CTRL_SET_TIMEOUT); if (status < 0) { rsi_dbg(ERR_ZONE, diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index d044a440fa0801..5b07262a974088 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -28,6 +28,17 @@ #define DEV_OPMODE_AP_BT 6 #define DEV_OPMODE_AP_BT_DUAL 14 +#define DEV_OPMODE_PARAM_DESC \ + __stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \ + __stringify(DEV_OPMODE_BT_ALONE) "[BT classic alone], " \ + __stringify(DEV_OPMODE_BT_LE_ALONE) "[BT LE alone], " \ + __stringify(DEV_OPMODE_BT_DUAL) "[BT classic + BT LE alone], " \ + __stringify(DEV_OPMODE_STA_BT) "[Wi-Fi STA + BT classic], " \ + __stringify(DEV_OPMODE_STA_BT_LE) "[Wi-Fi STA + BT LE], " \ + __stringify(DEV_OPMODE_STA_BT_DUAL) "[Wi-Fi STA + BT classic + BT LE], " \ + __stringify(DEV_OPMODE_AP_BT) "[Wi-Fi AP + BT classic], " \ + __stringify(DEV_OPMODE_AP_BT_DUAL) "[Wi-Fi AP + BT classic + BT LE]" + #define FLASH_WRITE_CHUNK_SIZE (4 * 1024) #define FLASH_SECTOR_SIZE (4 * 1024) diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 0f535850a38361..dcf8fb40698b7d 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -61,6 +61,7 @@ enum RSI_FSM_STATES { extern u32 rsi_zone_enabled; extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); +#define RSI_MAX_BANDS 2 #define RSI_MAX_VIFS 3 #define NUM_EDCA_QUEUES 4 #define IEEE80211_ADDR_LEN 6 @@ -139,6 +140,7 @@ struct skb_info { u8 internal_hdr_size; struct ieee80211_vif *vif; u8 vap_id; + bool have_key; }; enum edca_queue { @@ -229,6 +231,12 @@ struct rsi_9116_features { u32 ps_options; }; +struct rsi_rate_config { + u32 configured_mask; /* configured by mac80211 bits 0-11=legacy 12+ mcs */ + u16 fixed_hw_rate; + bool fixed_enabled; +}; + struct rsi_common { struct rsi_hw *priv; struct vif_priv vif_info[RSI_MAX_VIFS]; @@ -254,8 +262,8 @@ struct rsi_common { u8 channel_width; u16 rts_threshold; - u16 bitrate_mask[2]; - u32 fixedrate_mask[2]; + u32 bitrate_mask[RSI_MAX_BANDS]; + struct rsi_rate_config rate_config[RSI_MAX_BANDS]; u8 rf_reset; struct transmit_q_stats tx_stats; @@ -276,7 +284,6 @@ struct rsi_common { u8 mac_id; u8 radio_id; u16 rate_pwr[20]; - u16 min_rate; /* WMM algo related */ u8 selected_qnum; @@ -320,6 +327,7 @@ struct rsi_common { struct ieee80211_vif *roc_vif; bool eapol4_confirm; + bool bt_defer_attach; void *bt_adapter; struct cfg80211_scan_request *hwscan; @@ -401,5 +409,6 @@ struct rsi_host_intf_ops { enum rsi_host_intf rsi_get_host_intf(void *priv); void rsi_set_bt_context(void *priv, void *bt_context); +void rsi_attach_bt(struct rsi_common *common); 
#endif diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index 8bade5d89f12f8..10e019cddcc654 100644 --- a/drivers/net/wireless/st/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c @@ -85,8 +85,6 @@ void cw1200_unregister_bh(struct cw1200_common *priv) atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); - flush_workqueue(priv->bh_workqueue); - destroy_workqueue(priv->bh_workqueue); priv->bh_workqueue = NULL; diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index f26fc150ecd014..354a7e1c3315c6 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -488,12 +488,9 @@ static int wl1271_probe(struct spi_device *spi) spi->bits_per_word = 32; glue->reg = devm_regulator_get(&spi->dev, "vwlan"); - if (PTR_ERR(glue->reg) == -EPROBE_DEFER) - return -EPROBE_DEFER; - if (IS_ERR(glue->reg)) { - dev_err(glue->dev, "can't get regulator\n"); - return PTR_ERR(glue->reg); - } + if (IS_ERR(glue->reg)) + return dev_err_probe(glue->dev, PTR_ERR(glue->reg), + "can't get regulator\n"); ret = wlcore_probe_of(spi, glue, pdev_data); if (ret) { diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 672f5d5f3f2c7f..dad38fc0424334 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1945,8 +1945,7 @@ static int wl3501_config(struct pcmcia_device *link) goto failed; } - for (i = 0; i < 6; i++) - dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; + eth_hw_addr_set(dev, this->mac_addr); /* print probe information */ printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 097805b55c59fe..e64e4e5795186c 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -507,7 +507,7 @@ static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata, * byte data[12] * total: 16 */ -static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait) +static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len, int wait) { int err; unsigned char *request; @@ -857,7 +857,7 @@ static int zd1201_set_mac_address(struct net_device *dev, void *p) addr->sa_data, dev->addr_len, 1); if (err) return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return zd1201_mac_reset(zd); } @@ -1729,6 +1729,7 @@ static int zd1201_probe(struct usb_interface *interface, int err; short porttype; char buf[IW_ESSID_MAX_SIZE+2]; + u8 addr[ETH_ALEN]; usb = interface_to_usbdev(interface); @@ -1779,10 +1780,10 @@ static int zd1201_probe(struct usb_interface *interface, dev->watchdog_timeo = ZD1201_TX_TIMEOUT; strcpy(dev->name, "wlan%d"); - err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, - dev->dev_addr, dev->addr_len); + err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, addr, ETH_ALEN); if (err) goto err_start; + eth_hw_addr_set(dev, addr); /* Set wildcard essid to match zd->essid */ *(__le16 *)buf = cpu_to_le16(0); diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index a7ceef10bf6aef..850c26bc952470 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -65,7 +65,6 @@ static const struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, { 
USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, - { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B }, diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 77dbfc418bced7..17543be14665b7 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -71,6 +71,7 @@ config RPMSG_WWAN_CTRL config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" depends on INTEL_IOMMU + select NET_DEVLINK help This driver enables Intel M.2 WWAN Device communication. diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile index 4f9f0ae398e159..b838034bb120e5 100644 --- a/drivers/net/wwan/iosm/Makefile +++ b/drivers/net/wwan/iosm/Makefile @@ -18,6 +18,9 @@ iosm-y = \ iosm_ipc_protocol.o \ iosm_ipc_protocol_ops.o \ iosm_ipc_mux.o \ - iosm_ipc_mux_codec.o + iosm_ipc_mux_codec.o \ + iosm_ipc_devlink.o \ + iosm_ipc_flash.o \ + iosm_ipc_coredump.o obj-$(CONFIG_IOSM) := iosm.o diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c index 519361ec40dfcb..128c999e08bb93 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c @@ -8,7 +8,7 @@ #include "iosm_ipc_chnl_cfg.h" /* Max. sizes of a downlink buffers */ -#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024) +#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024) #define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024) #define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048 #define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024) @@ -60,6 +60,10 @@ static struct ipc_chnl_cfg modem_cfg[] = { { IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13, IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM }, + /* Flash Channel/Coredump Channel */ + { IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1, + IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL, + IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN }, }; int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index) diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h index 422471367f7823..e77084e7671857 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h @@ -23,6 +23,7 @@ enum ipc_channel_id { IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_CTRL_CHL_ID_6, + IPC_MEM_CTRL_CHL_ID_7, }; /** diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c new file mode 100644 index 00000000000000..9acd87724c9de4 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include "iosm_ipc_coredump.h" + +/** + * ipc_coredump_collect - To collect coredump + * @devlink: Pointer to devlink instance. 
+ * @data: Pointer to snapshot + * @entry: ID of requested snapshot + * @region_size: Region size + * + * Returns: 0 on success, error on failure + */ +int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry, + u32 region_size) +{ + int ret, bytes_to_read, bytes_read = 0, i = 0; + s32 remaining; + u8 *data_ptr; + + data_ptr = vmalloc(region_size); + if (!data_ptr) + return -ENOMEM; + + remaining = devlink->cd_file_info[entry].actual_size; + ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry); + if (ret) { + dev_err(devlink->dev, "Send coredump_get cmd failed"); + goto get_cd_fail; + } + while (remaining > 0) { + bytes_to_read = min(remaining, MAX_DATA_SIZE); + bytes_read = 0; + ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i, + bytes_to_read, &bytes_read); + if (ret) { + dev_err(devlink->dev, "CD data read failed"); + goto get_cd_fail; + } + remaining -= bytes_read; + i += bytes_read; + } + + *data = data_ptr; + + return 0; + +get_cd_fail: + vfree(data_ptr); + return ret; +} + +/** + * ipc_coredump_get_list - Get coredump list from modem + * @devlink: Pointer to devlink instance. + * @cmd: RPSI command to be sent + * + * Returns: 0 on success, error on failure + */ +int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd) +{ + u32 byte_read, num_entries, file_size; + struct iosm_cd_table *cd_table; + u8 size[MAX_SIZE_LEN], i; + char *filename; + int ret; + + cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL); + if (!cd_table) { + ret = -ENOMEM; + goto cd_init_fail; + } + + ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE); + if (ret) { + dev_err(devlink->dev, "rpsi_cmd_coredump_start failed"); + goto cd_init_fail; + } + + ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table, + MAX_CD_LIST_SIZE, &byte_read); + if (ret) { + dev_err(devlink->dev, "Coredump data is invalid"); + goto cd_init_fail; + } + + if (byte_read != MAX_CD_LIST_SIZE) + goto cd_init_fail; + + if (cmd == rpsi_cmd_coredump_start) { + num_entries = le32_to_cpu(cd_table->list.num_entries); + if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) { + ret = -EINVAL; + goto cd_init_fail; + } + + for (i = 0; i < num_entries; i++) { + file_size = le32_to_cpu(cd_table->list.entry[i].size); + filename = cd_table->list.entry[i].filename; + + if (file_size > devlink->cd_file_info[i].default_size) { + ret = -EINVAL; + goto cd_init_fail; + } + + devlink->cd_file_info[i].actual_size = file_size; + dev_dbg(devlink->dev, "file: %s actual size %d", + filename, file_size); + devlink_flash_update_status_notify(devlink->devlink_ctx, + filename, + "FILENAME", 0, 0); + snprintf(size, sizeof(size), "%d", file_size); + devlink_flash_update_status_notify(devlink->devlink_ctx, + size, "FILE SIZE", + 0, 0); + } + } + +cd_init_fail: + kfree(cd_table); + return ret; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h new file mode 100644 index 00000000000000..0809ba6642768f --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. 
+ */ + +#ifndef _IOSM_IPC_COREDUMP_H_ +#define _IOSM_IPC_COREDUMP_H_ + +#include "iosm_ipc_devlink.h" + +/* Max number of bytes to receive for Coredump list structure */ +#define MAX_CD_LIST_SIZE 0x1000 + +/* Max buffer allocated to receive coredump data */ +#define MAX_DATA_SIZE 0x00010000 + +/* Max number of file entries */ +#define MAX_NOF_ENTRY 256 + +/* Max length */ +#define MAX_SIZE_LEN 32 + +/** + * struct iosm_cd_list_entry - Structure to hold coredump file info. + * @size: Number of bytes for the entry + * @filename: Coredump filename to be generated on host + */ +struct iosm_cd_list_entry { + __le32 size; + char filename[IOSM_MAX_FILENAME_LEN]; +} __packed; + +/** + * struct iosm_cd_list - Structure to hold list of coredump files + * to be collected. + * @num_entries: Number of entries to be received + * @entry: Contains File info + */ +struct iosm_cd_list { + __le32 num_entries; + struct iosm_cd_list_entry entry[MAX_NOF_ENTRY]; +} __packed; + +/** + * struct iosm_cd_table - Common Coredump table + * @version: Version of coredump structure + * @list: Coredump list structure + */ +struct iosm_cd_table { + __le32 version; + struct iosm_cd_list list; +} __packed; + +int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry, + u32 region_size); + +int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd); + +#endif /* _IOSM_IPC_COREDUMP_H_ */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c new file mode 100644 index 00000000000000..17da85a8f33710 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_coredump.h" +#include "iosm_ipc_devlink.h" +#include "iosm_ipc_flash.h" + +/* Coredump list */ +static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = { + {"report.json", REPORT_JSON_SIZE,}, + {"coredump.fcd", COREDUMP_FCD_SIZE,}, + {"cdd.log", CDD_LOG_SIZE,}, + {"eeprom.bin", EEPROM_BIN_SIZE,}, + {"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,}, + {"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,}, +}; + +/* Get the param values for the specific param ID's */ +static int ipc_devlink_get_param(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + + if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH) + ctx->val.vu8 = ipc_devlink->param.erase_full_flash; + + return 0; +} + +/* Set the param values for the specific param ID's */ +static int ipc_devlink_set_param(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + + if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH) + ipc_devlink->param.erase_full_flash = ctx->val.vu8; + + return 0; +} + +/* Devlink param structure array */ +static const struct devlink_param iosm_devlink_params[] = { + DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH, + "erase_full_flash", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ipc_devlink_get_param, ipc_devlink_set_param, + NULL), +}; + +/* Get devlink flash component type */ +static enum iosm_flash_comp_type +ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len) +{ + enum iosm_flash_comp_type fls_type; + + if (!strncmp("PSI", comp_str, len)) + fls_type = FLASH_COMP_TYPE_PSI; + else if (!strncmp("EBL", comp_str, len)) + fls_type = FLASH_COMP_TYPE_EBL; + else if (!strncmp("FLS", 
comp_str, len)) + fls_type = FLASH_COMP_TYPE_FLS; + else + fls_type = FLASH_COMP_TYPE_INVAL; + + return fls_type; +} + +/* Function triggered on devlink flash command + * Flash update function which calls multiple functions based on + * component type specified in the flash command + */ +static int ipc_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(devlink); + enum iosm_flash_comp_type fls_type; + struct iosm_devlink_image *header; + int rc = -EINVAL; + u8 *mdm_rsp; + + header = (struct iosm_devlink_image *)params->fw->data; + + if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE || + (memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER, + IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0)) + return -EINVAL; + + mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL); + if (!mdm_rsp) + return -ENOMEM; + + fls_type = ipc_devlink_get_flash_comp_type(header->image_type, + IOSM_DEVLINK_MAX_IMG_LEN); + + switch (fls_type) { + case FLASH_COMP_TYPE_PSI: + rc = ipc_flash_boot_psi(ipc_devlink, params->fw); + break; + case FLASH_COMP_TYPE_EBL: + rc = ipc_flash_boot_ebl(ipc_devlink, params->fw); + if (rc) + break; + rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp); + if (rc) + break; + rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp); + break; + case FLASH_COMP_TYPE_FLS: + rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp); + break; + default: + devlink_flash_update_status_notify(devlink, "Invalid component", + NULL, 0, 0); + break; + } + + if (!rc) + devlink_flash_update_status_notify(devlink, "Flashing success", + header->image_type, 0, 0); + else + devlink_flash_update_status_notify(devlink, "Flashing failed", + header->image_type, 0, 0); + + kfree(mdm_rsp); + return rc; +} + +/* Call back function for devlink ops */ +static const struct devlink_ops devlink_flash_ops = { + .flash_update = ipc_devlink_flash_update, +}; + +/** + * ipc_devlink_send_cmd - Send command to Modem + * @ipc_devlink: Pointer to struct iosm_devlink + * @cmd: Command to be sent to modem + * @entry: Command entry number + * + * Returns: 0 on success and failure value on error + */ +int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry) +{ + struct iosm_rpsi_cmd rpsi_cmd; + + rpsi_cmd.param.dword = cpu_to_le32(entry); + rpsi_cmd.cmd = cpu_to_le16(cmd); + rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^ + rpsi_cmd.cmd; + + return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd, + sizeof(rpsi_cmd)); +} + +/* Function to create snapshot */ +static int ipc_devlink_coredump_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + struct iosm_coredump_file_info *cd_list = ops->priv; + u32 region_size; + int rc; + + dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name, + cd_list->entry); + region_size = cd_list->default_size; + rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry, + region_size); + if (rc) { + dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc); + goto coredump_collect_err; + } + + /* Send coredump end cmd indicating end of coredump collection */ + if (cd_list->entry == (IOSM_NOF_CD_REGION - 1)) + ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end); + + return 0; + +coredump_collect_err: + ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end); + return rc; +} + +/* To create regions for 
coredump files */ +static int ipc_devlink_create_region(struct iosm_devlink *devlink) +{ + struct devlink_region_ops *mdm_coredump; + int rc = 0; + int i; + + mdm_coredump = devlink->iosm_devlink_mdm_coredump; + for (i = 0; i < IOSM_NOF_CD_REGION; i++) { + mdm_coredump[i].name = list[i].filename; + mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot; + mdm_coredump[i].destructor = vfree; + devlink->cd_regions[i] = + devlink_region_create(devlink->devlink_ctx, + &mdm_coredump[i], MAX_SNAPSHOTS, + list[i].default_size); + + if (IS_ERR(devlink->cd_regions[i])) { + rc = PTR_ERR(devlink->cd_regions[i]); + dev_err(devlink->dev, "Devlink region fail,err %d", rc); + /* Delete previously created regions */ + for ( ; i >= 0; i--) + devlink_region_destroy(devlink->cd_regions[i]); + goto region_create_fail; + } + list[i].entry = i; + mdm_coredump[i].priv = list + i; + } +region_create_fail: + return rc; +} + +/* To Destroy devlink regions */ +static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink) +{ + u8 i; + + for (i = 0; i < IOSM_NOF_CD_REGION; i++) + devlink_region_destroy(ipc_devlink->cd_regions[i]); +} + +/** + * ipc_devlink_init - Initialize/register devlink to IOSM driver + * @ipc_imem: Pointer to struct iosm_imem + * + * Returns: Pointer to iosm_devlink on success and NULL on failure + */ +struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem) +{ + struct ipc_chnl_cfg chnl_cfg_flash = { 0 }; + struct iosm_devlink *ipc_devlink; + struct devlink *devlink_ctx; + int rc; + + devlink_ctx = devlink_alloc(&devlink_flash_ops, + sizeof(struct iosm_devlink), + ipc_imem->dev); + if (!devlink_ctx) { + dev_err(ipc_imem->dev, "devlink_alloc failed"); + goto devlink_alloc_fail; + } + + ipc_devlink = devlink_priv(devlink_ctx); + ipc_devlink->devlink_ctx = devlink_ctx; + ipc_devlink->pcie = ipc_imem->pcie; + ipc_devlink->dev = ipc_imem->dev; + + rc = devlink_params_register(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); + if (rc) { + dev_err(ipc_devlink->dev, + "devlink_params_register failed. rc %d", rc); + goto param_reg_fail; + } + + ipc_devlink->cd_file_info = list; + + rc = ipc_devlink_create_region(ipc_devlink); + if (rc) { + dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d", + rc); + goto region_create_fail; + } + + if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0) + goto chnl_get_fail; + + ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, + chnl_cfg_flash, IRQ_MOD_OFF); + + init_completion(&ipc_devlink->devlink_sio.read_sem); + skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list); + + devlink_register(devlink_ctx); + dev_dbg(ipc_devlink->dev, "iosm devlink register success"); + + return ipc_devlink; + +chnl_get_fail: + ipc_devlink_destroy_region(ipc_devlink); +region_create_fail: + devlink_params_unregister(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); +param_reg_fail: + devlink_free(devlink_ctx); +devlink_alloc_fail: + return NULL; +} + +/** + * ipc_devlink_deinit - To unintialize the devlink from IOSM driver. 
+ * @ipc_devlink: Devlink instance + */ +void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink) +{ + struct devlink *devlink_ctx = ipc_devlink->devlink_ctx; + + devlink_unregister(devlink_ctx); + ipc_devlink_destroy_region(ipc_devlink); + devlink_params_unregister(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); + if (ipc_devlink->devlink_sio.devlink_read_pend) { + complete(&ipc_devlink->devlink_sio.read_sem); + complete(&ipc_devlink->devlink_sio.channel->ul_sem); + } + if (!ipc_devlink->devlink_sio.devlink_read_pend) + skb_queue_purge(&ipc_devlink->devlink_sio.rx_list); + + ipc_imem_sys_devlink_close(ipc_devlink); + devlink_free(devlink_ctx); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h new file mode 100644 index 00000000000000..35c2d013b9ccdc --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef _IOSM_IPC_DEVLINK_H_ +#define _IOSM_IPC_DEVLINK_H_ + +#include + +#include "iosm_ipc_imem.h" +#include "iosm_ipc_imem_ops.h" +#include "iosm_ipc_pcie.h" + +/* Image ext max len */ +#define IOSM_DEVLINK_MAX_IMG_LEN 3 +/* Magic Header */ +#define IOSM_DEVLINK_MAGIC_HEADER "IOSM_DEVLINK_HEADER" +/* Magic Header len */ +#define IOSM_DEVLINK_MAGIC_HEADER_LEN 20 +/* Devlink image type */ +#define IOSM_DEVLINK_IMG_TYPE 4 +/* Reserve header size */ +#define IOSM_DEVLINK_RESERVED 34 +/* Devlink Image Header size */ +#define IOSM_DEVLINK_HDR_SIZE sizeof(struct iosm_devlink_image) +/* MAX file name length */ +#define IOSM_MAX_FILENAME_LEN 32 +/* EBL response size */ +#define IOSM_EBL_RSP_SIZE 76 +/* MAX number of regions supported */ +#define IOSM_NOF_CD_REGION 6 +/* MAX number of SNAPSHOTS supported */ +#define MAX_SNAPSHOTS 1 +/* Default Coredump file size */ +#define REPORT_JSON_SIZE 0x800 +#define COREDUMP_FCD_SIZE 0x10E00000 +#define CDD_LOG_SIZE 0x30000 +#define EEPROM_BIN_SIZE 0x10000 +#define BOOTCORE_TRC_BIN_SIZE 0x8000 +#define BOOTCORE_PREV_TRC_BIN_SIZE 0x20000 + +/** + * enum iosm_devlink_param_id - Enum type to different devlink params + * @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID + * @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required + */ + +enum iosm_devlink_param_id { + IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH, +}; + +/** + * enum iosm_rpsi_cmd_code - Enum type for RPSI command list + * @rpsi_cmd_code_ebl: Command to load ebl + * @rpsi_cmd_coredump_start: Command to get list of files and + * file size info from PSI + * @rpsi_cmd_coredump_get: Command to get the coredump data + * @rpsi_cmd_coredump_end: Command to stop receiving the coredump + */ +enum iosm_rpsi_cmd_code { + rpsi_cmd_code_ebl = 0x02, + rpsi_cmd_coredump_start = 0x10, + rpsi_cmd_coredump_get = 0x11, + rpsi_cmd_coredump_end = 0x12, +}; + +/** + * enum iosm_flash_comp_type - Enum for different flash component types + * @FLASH_COMP_TYPE_PSI: PSI flash comp type + * @FLASH_COMP_TYPE_EBL: EBL flash comp type + * @FLASH_COMP_TYPE_FLS: FLS flash comp type + * @FLASH_COMP_TYPE_INVAL: Invalid flash comp type + */ +enum iosm_flash_comp_type { + FLASH_COMP_TYPE_PSI, + FLASH_COMP_TYPE_EBL, + FLASH_COMP_TYPE_FLS, + FLASH_COMP_TYPE_INVAL, +}; + +/** + * struct iosm_devlink_sio - SIO instance + * @rx_list: Downlink skbuf list received from CP + * @read_sem: Needed for the blocking read or downlink transfer + * @channel_id: Reserved channel id for 
flashing/CD collection to RAM + * @channel: Channel instance for flashing and coredump + * @devlink_read_pend: Check if read is pending + */ +struct iosm_devlink_sio { + struct sk_buff_head rx_list; + struct completion read_sem; + int channel_id; + struct ipc_mem_channel *channel; + u32 devlink_read_pend; +}; + +/** + * struct iosm_flash_params - List of flash params required for flashing + * @erase_full_flash: To set the flashing mode + * erase_full_flash = 1; full erase + * erase_full_flash = 0; no erase + * @erase_full_flash_done: Flag to check if it is a full erase + */ +struct iosm_flash_params { + u8 erase_full_flash; + u8 erase_full_flash_done; +}; + +/** + * struct iosm_devlink_image - Structure with Fls file header info + * @magic_header: Header of the firmware image + * @image_type: Firmware image type + * @region_address: Address of the region to be flashed + * @download_region: Field to identify if it is a region + * @last_region: Field to identify if it is last region + * @reserved: Reserved field + */ +struct iosm_devlink_image { + char magic_header[IOSM_DEVLINK_MAGIC_HEADER_LEN]; + char image_type[IOSM_DEVLINK_IMG_TYPE]; + __le32 region_address; + u8 download_region; + u8 last_region; + u8 reserved[IOSM_DEVLINK_RESERVED]; +} __packed; + +/** + * struct iosm_ebl_ctx_data - EBL ctx data used during flashing + * @ebl_sw_info_version: SWID version info obtained from EBL + * @m_ebl_resp: Buffer used to read and write the ebl data + */ +struct iosm_ebl_ctx_data { + u8 ebl_sw_info_version; + u8 m_ebl_resp[IOSM_EBL_RSP_SIZE]; +}; + +/** + * struct iosm_coredump_file_info - Coredump file info + * @filename: Name of coredump file + * @default_size: Default size of coredump file + * @actual_size: Actual size of coredump file + * @entry: Index of the coredump file + */ +struct iosm_coredump_file_info { + char filename[IOSM_MAX_FILENAME_LEN]; + u32 default_size; + u32 actual_size; + u32 entry; +}; + +/** + * struct iosm_devlink - IOSM Devlink structure + * @devlink_sio: SIO instance for read/write functionality + * @pcie: Pointer to PCIe component + * @dev: Pointer to device struct + * @devlink_ctx: Pointer to devlink context + * @param: Params required for flashing + * @ebl_ctx: Data to be read and written to Modem + * @cd_file_info: coredump file info + * @iosm_devlink_mdm_coredump: region ops for coredump collection + * @cd_regions: coredump regions + */ +struct iosm_devlink { + struct iosm_devlink_sio devlink_sio; + struct iosm_pcie *pcie; + struct device *dev; + struct devlink *devlink_ctx; + struct iosm_flash_params param; + struct iosm_ebl_ctx_data ebl_ctx; + struct iosm_coredump_file_info *cd_file_info; + struct devlink_region_ops iosm_devlink_mdm_coredump[IOSM_NOF_CD_REGION]; + struct devlink_region *cd_regions[IOSM_NOF_CD_REGION]; +}; + +/** + * union iosm_rpsi_param_u - RPSI cmd param for CRC calculation + * @word: Words member used in CRC calculation + * @dword: Actual data + */ +union iosm_rpsi_param_u { + __le16 word[2]; + __le32 dword; +}; + +/** + * struct iosm_rpsi_cmd - Structure for RPSI Command + * @param: Used to calculate CRC + * @cmd: Stores the RPSI command + * @crc: Stores the CRC value + */ +struct iosm_rpsi_cmd { + union iosm_rpsi_param_u param; + __le16 cmd; + __le16 crc; +}; + +struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem); + +void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink); + +int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry); + +#endif /* _IOSM_IPC_DEVLINK_H */ diff --git 
a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c new file mode 100644 index 00000000000000..d890914aa3495f --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include "iosm_ipc_coredump.h" +#include "iosm_ipc_devlink.h" +#include "iosm_ipc_flash.h" + +/* This function will pack the data to be sent to the modem using the + * payload, payload length and pack id + */ +static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req, + u32 pack_length, u16 pack_id, + u8 *payload, u32 payload_length) +{ + u16 checksum = pack_id; + u32 i; + + if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length) + return -EINVAL; + + flash_req->pack_id = cpu_to_le16(pack_id); + flash_req->msg_length = cpu_to_le32(payload_length); + checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) + + (payload_length & IOSM_EBL_CKSM); + + for (i = 0; i < payload_length; i++) + checksum += payload[i]; + + flash_req->checksum = cpu_to_le16(checksum); + + return 0; +} + +/* validate the response received from modem and + * check the type of errors received + */ +static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp) +{ + struct iosm_ebl_error *err_info = payload_rsp; + u16 *rsp_code = hdr_rsp; + u32 i; + + if (*rsp_code == IOSM_EBL_RSP_BUFF) { + for (i = 0; i < IOSM_MAX_ERRORS; i++) { + if (!err_info->error[i].error_code) { + pr_err("EBL: error_class = %d, error_code = %d", + err_info->error[i].error_class, + err_info->error[i].error_code); + } + } + return -EINVAL; + } + + return 0; +} + +/* Send data to the modem */ +static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size, + u16 pack_id, u8 *payload, u32 payload_length) +{ + struct iosm_flash_data flash_req; + int ret; + + ret = ipc_flash_proc_format_ebl_pack(&flash_req, size, + pack_id, payload, payload_length); + if (ret) { + dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d", + pack_id); + goto ipc_free_payload; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req, + IOSM_EBL_HEAD_SIZE); + if (ret) { + dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x", + pack_id); + goto ipc_free_payload; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length); + if (ret) { + dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x", + pack_id); + } + +ipc_free_payload: + return ret; +} + +/** + * ipc_flash_link_establish - Flash link establishment + * @ipc_imem: Pointer to struct iosm_imem + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_link_establish(struct iosm_imem *ipc_imem) +{ + u8 ler_data[IOSM_LER_RSP_SIZE]; + u32 bytes_read; + + /* Allocate channel for flashing/cd collection */ + ipc_imem->ipc_devlink->devlink_sio.channel = + ipc_imem_sys_devlink_open(ipc_imem); + + if (!ipc_imem->ipc_devlink->devlink_sio.channel) + goto chl_open_fail; + + if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data, + IOSM_LER_RSP_SIZE, &bytes_read)) + goto devlink_read_fail; + + if (bytes_read != IOSM_LER_RSP_SIZE) + goto devlink_read_fail; + + return 0; + +devlink_read_fail: + ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink); +chl_open_fail: + return -EIO; +} + +/* Receive data from the modem */ +static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size, + u8 *mdm_rsp) +{ + u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE]; + u32 bytes_read; + int ret; + + ret = 
ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr, + IOSM_EBL_HEAD_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed", + IOSM_EBL_HEAD_SIZE); + goto ipc_flash_recv_err; + } + + if (bytes_read != IOSM_EBL_HEAD_SIZE) { + ret = -EINVAL; + goto ipc_flash_recv_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed", + size); + goto ipc_flash_recv_err; + } + + if (bytes_read != size) { + ret = -EINVAL; + goto ipc_flash_recv_err; + } + + ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp); + +ipc_flash_recv_err: + return ret; +} + +/* Function to send command to modem and receive response */ +static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id, + u8 *payload, u32 payload_length, u8 *mdm_rsp) +{ + size_t frame_len = IOSM_EBL_DW_PACK_SIZE; + int ret; + + if (pack_id == FLASH_SET_PROT_CONF) + frame_len = IOSM_EBL_W_PACK_SIZE; + + ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload, + payload_length); + if (ret) + goto ipc_flash_send_rcv; + + ret = ipc_flash_receive_data(ipc_devlink, + frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp); + +ipc_flash_send_rcv: + return ret; +} + +/** + * ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash + * @ipc_devlink: Pointer to devlink structure + * @mdm_rsp: Pointer to modem response buffer + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink, + u8 *mdm_rsp) +{ + ipc_devlink->ebl_ctx.ebl_sw_info_version = + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER]; + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED; + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED; + + if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] & + IOSM_CAP_USE_EXT_CAP) { + if (ipc_devlink->param.erase_full_flash) + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &= + ~((u8)IOSM_EXT_CAP_ERASE_ALL); + else + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &= + ~((u8)IOSM_EXT_CAP_COMMIT_ALL); + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] = + IOSM_CAP_USE_EXT_CAP; + } + + /* Write back the EBL capability to modem + * Request Set Protcnf command + */ + return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF, + ipc_devlink->ebl_ctx.m_ebl_resp, + IOSM_EBL_RSP_SIZE, mdm_rsp); +} + +/* Read the SWID type and SWID value from the EBL */ +int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + struct iosm_flash_msg_control cmd_msg; + struct iosm_swid_table *swid; + char ebl_swid[IOSM_SWID_STR]; + int ret; + + if (ipc_devlink->ebl_ctx.ebl_sw_info_version != + IOSM_EXT_CAP_SWID_OOS_PACK) + return -EINVAL; + + cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ); + cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE); + cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG); + cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL, + (u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp); + if (ret) + goto ipc_swid_err; + + cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp)); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ, + (u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp); + if (ret) + goto ipc_swid_err; + + swid = (struct iosm_swid_table *)mdm_rsp; + dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val, + swid->rf_engine_id_val); + + snprintf(ebl_swid, sizeof(ebl_swid), 
"SWID: %x, RF_ENGINE_ID: %x", + swid->sw_id_val, swid->rf_engine_id_val); + + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid, + NULL, 0, 0); +ipc_swid_err: + return ret; +} + +/* Function to check if full erase or conditional erase was successful */ +static int ipc_flash_erase_check(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + int ret, count = 0; + u16 mdm_rsp_data; + + /* Request Flash Erase Check */ + do { + mdm_rsp_data = IOSM_MDM_SEND_DATA; + ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK, + (u8 *)&mdm_rsp_data, + IOSM_MDM_SEND_2, mdm_rsp); + if (ret) + goto ipc_erase_chk_err; + + mdm_rsp_data = *((u16 *)mdm_rsp); + if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) { + dev_err(ipc_devlink->dev, + "Flash Erase Check resp wrong 0x%04X", + mdm_rsp_data); + ret = -EINVAL; + goto ipc_erase_chk_err; + } + count++; + msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL); + } while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) && + (count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT / + IOSM_FLASH_ERASE_CHECK_INTERVAL))); + + if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) { + dev_err(ipc_devlink->dev, "Modem erase check timeout failure!"); + ret = -ETIMEDOUT; + } + +ipc_erase_chk_err: + return ret; +} + +/* Full erase function which will erase the nand flash through EBL command */ +static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + u32 erase_address = IOSM_ERASE_START_ADDR; + struct iosm_flash_msg_control cmd_msg; + u32 erase_length = IOSM_ERASE_LEN; + int ret; + + dev_dbg(ipc_devlink->dev, "Erase full nand flash"); + cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE); + cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH); + cmd_msg.length = cpu_to_le32(erase_length); + cmd_msg.arguments = cpu_to_le32(erase_address); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL, + (unsigned char *)&cmd_msg, + IOSM_MDM_SEND_16, mdm_rsp); + if (ret) + goto ipc_flash_erase_err; + + ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG; + ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp); + +ipc_flash_erase_err: + return ret; +} + +/* Logic for flashing all the Loadmaps available for individual fls file */ +static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp) +{ + u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE; + struct iosm_devlink_image *fls_data; + __le32 reg_info[2]; /* 0th position region address, 1st position size */ + u32 nand_address; + char *file_ptr; + int ret; + + fls_data = (struct iosm_devlink_image *)fw->data; + file_ptr = (void *)(fls_data + 1); + nand_address = le32_to_cpu(fls_data->region_address); + reg_info[0] = cpu_to_le32(nand_address); + + if (!ipc_devlink->param.erase_full_flash_done) { + reg_info[1] = cpu_to_le32(nand_address + rest_len - 2); + ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START, + (u8 *)reg_info, IOSM_MDM_SEND_8, + mdm_rsp); + if (ret) + goto dl_region_fail; + + ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp); + if (ret) + goto dl_region_fail; + } + + /* Request Flash Set Address */ + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS, + (u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp); + if (ret) + goto dl_region_fail; + + /* Request Flash Write Raw Image */ + ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE, + FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len, + IOSM_MDM_SEND_4); + if (ret) + goto dl_region_fail; + + do { + raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? 
IOSM_FLS_BUF_SIZE : + rest_len; + ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr, + raw_len); + if (ret) { + dev_err(ipc_devlink->dev, "Image write failed"); + goto dl_region_fail; + } + file_ptr += raw_len; + rest_len -= raw_len; + } while (rest_len); + + ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE, + mdm_rsp); + +dl_region_fail: + return ret; +} + +/** + * ipc_flash_send_fls - Inject Modem subsystem fls file to device + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * @mdm_rsp: Pointer to modem response buffer + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp) +{ + u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE; + struct iosm_devlink_image *fls_data; + u16 flash_cmd; + int ret; + + fls_data = (struct iosm_devlink_image *)fw->data; + if (ipc_devlink->param.erase_full_flash) { + ipc_devlink->param.erase_full_flash = false; + ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp); + if (ret) + goto ipc_flash_err; + } + + /* Request Sec Start */ + if (!fls_data->download_region) { + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START, + (u8 *)fw->data + + IOSM_DEVLINK_HDR_SIZE, fw_size, + mdm_rsp); + if (ret) + goto ipc_flash_err; + } else { + /* Download regions */ + ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp); + if (ret) + goto ipc_flash_err; + + if (fls_data->last_region) { + /* Request Sec End */ + flash_cmd = IOSM_MDM_SEND_DATA; + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END, + (u8 *)&flash_cmd, + IOSM_MDM_SEND_2, mdm_rsp); + } + } + +ipc_flash_err: + return ret; +} + +/** + * ipc_flash_boot_psi - Inject PSI image + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink, + const struct firmware *fw) +{ + u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE; + u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2]; + u8 *psi_code; + int ret; + + dev_dbg(ipc_devlink->dev, "Boot transfer PSI"); + psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size, + GFP_KERNEL); + if (!psi_code) + return -ENOMEM; + + ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size); + if (ret) { + dev_err(ipc_devlink->dev, "RPSI Image write failed"); + goto ipc_flash_psi_free; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, + IOSM_LER_ACK_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed"); + goto ipc_flash_psi_free; + } + + if (bytes_read != IOSM_LER_ACK_SIZE) { + ret = -EINVAL; + goto ipc_flash_psi_free; + } + + snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0], + read_data[1]); + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, + psi_ack_byte, "PSI ACK", 0, 0); + + if (read_data[0] == 0x00 && read_data[1] == 0xCD) { + dev_dbg(ipc_devlink->dev, "Coredump detected"); + ret = ipc_coredump_get_list(ipc_devlink, + rpsi_cmd_coredump_start); + if (ret) + dev_err(ipc_devlink->dev, "Failed to get cd list"); + } + +ipc_flash_psi_free: + kfree(psi_code); + return ret; +} + +/** + * ipc_flash_boot_ebl - Inject EBL image + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink, + const struct firmware *fw) +{ + u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE; + u8 read_data[2]; + u32 bytes_read; 
+ int ret; + + if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) != + IPC_MEM_EXEC_STAGE_PSI) { + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, + "Invalid execution stage", + NULL, 0, 0); + return -EINVAL; + } + + dev_dbg(ipc_devlink->dev, "Boot transfer EBL"); + ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl, + IOSM_RPSI_LOAD_SIZE); + if (ret) { + dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size, + sizeof(ebl_size)); + if (ret) { + dev_err(ipc_devlink->dev, "EBL length write failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, + (u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE, + ebl_size); + if (ret) { + dev_err(ipc_devlink->dev, "EBL data transfer failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, + ipc_devlink->ebl_ctx.m_ebl_resp, + IOSM_EBL_RSP_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL response read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_EBL_RSP_SIZE) + ret = -EINVAL; + +ipc_flash_ebl_err: + return ret; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.h b/drivers/net/wwan/iosm/iosm_ipc_flash.h new file mode 100644 index 00000000000000..132d59d60fbe75 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_flash.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. 
+ */ + +#ifndef _IOSM_IPC_FLASH_H +#define _IOSM_IPC_FLASH_H + +/* Buffer size used to read the fls image */ +#define IOSM_FLS_BUF_SIZE 0x00100000 +/* Full erase start address */ +#define IOSM_ERASE_START_ADDR 0x00000000 +/* Erase length for NAND flash */ +#define IOSM_ERASE_LEN 0xFFFFFFFF +/* EBL response Header size */ +#define IOSM_EBL_HEAD_SIZE 8 +/* EBL payload size */ +#define IOSM_EBL_W_PAYL_SIZE 2048 +/* Total EBL pack size */ +#define IOSM_EBL_W_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_W_PAYL_SIZE) +/* EBL payload size */ +#define IOSM_EBL_DW_PAYL_SIZE 16384 +/* Total EBL pack size */ +#define IOSM_EBL_DW_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_DW_PAYL_SIZE) +/* EBL name size */ +#define IOSM_EBL_NAME 32 +/* Maximum supported error types */ +#define IOSM_MAX_ERRORS 8 +/* Read size for RPSI/EBL response */ +#define IOSM_READ_SIZE 2 +/* Link establishment response ack size */ +#define IOSM_LER_ACK_SIZE 2 +/* PSI ACK len */ +#define IOSM_PSI_ACK 8 +/* SWID capability for packed swid type */ +#define IOSM_EXT_CAP_SWID_OOS_PACK 0x02 +/* EBL error response buffer */ +#define IOSM_EBL_RSP_BUFF 0x0041 +/* SWID string length */ +#define IOSM_SWID_STR 64 +/* Load EBL command size */ +#define IOSM_RPSI_LOAD_SIZE 0 +/* EBL payload checksum */ +#define IOSM_EBL_CKSM 0x0000FFFF +/* SWID msg len and argument */ +#define IOSM_MSG_LEN_ARG 0 +/* Data to be sent to modem */ +#define IOSM_MDM_SEND_DATA 0x0000 +/* Data received from modem as part of erase check */ +#define IOSM_MDM_ERASE_RSP 0x0001 +/* Bit shift to calculate Checksum */ +#define IOSM_EBL_PAYL_SHIFT 16 +/* Flag To be set */ +#define IOSM_SET_FLAG 1 +/* Set flash erase check timeout to 100 msec */ +#define IOSM_FLASH_ERASE_CHECK_TIMEOUT 100 +/* Set flash erase check interval to 20 msec */ +#define IOSM_FLASH_ERASE_CHECK_INTERVAL 20 +/* Link establishment response ack size */ +#define IOSM_LER_RSP_SIZE 60 + +/** + * enum iosm_flash_package_type - Enum for the flashing operations + * @FLASH_SET_PROT_CONF: Write EBL capabilities + * @FLASH_SEC_START: Start writing the secpack + * @FLASH_SEC_END: Validate secpack end + * @FLASH_SET_ADDRESS: Set the address for flashing + * @FLASH_ERASE_START: Start erase before flashing + * @FLASH_ERASE_CHECK: Validate the erase functionality + * @FLASH_OOS_CONTROL: Retrieve data based on oos actions + * @FLASH_OOS_DATA_READ: Read data from EBL + * @FLASH_WRITE_IMAGE_RAW: Write the raw image to flash + */ +enum iosm_flash_package_type { + FLASH_SET_PROT_CONF = 0x0086, + FLASH_SEC_START = 0x0204, + FLASH_SEC_END, + FLASH_SET_ADDRESS = 0x0802, + FLASH_ERASE_START = 0x0805, + FLASH_ERASE_CHECK, + FLASH_OOS_CONTROL = 0x080C, + FLASH_OOS_DATA_READ = 0x080E, + FLASH_WRITE_IMAGE_RAW, +}; + +/** + * enum iosm_out_of_session_action - Actions possible over the + * OutOfSession command interface + * @FLASH_OOSC_ACTION_READ: Read data according to its type + * @FLASH_OOSC_ACTION_ERASE: Erase data according to its type + */ +enum iosm_out_of_session_action { + FLASH_OOSC_ACTION_READ = 2, + FLASH_OOSC_ACTION_ERASE = 3, +}; + +/** + * enum iosm_out_of_session_type - Data types that can be handled over the + * Out Of Session command Interface + * @FLASH_OOSC_TYPE_ALL_FLASH: The whole flash area + * @FLASH_OOSC_TYPE_SWID_TABLE: Read the swid table from the target + */ +enum iosm_out_of_session_type { + FLASH_OOSC_TYPE_ALL_FLASH = 8, + FLASH_OOSC_TYPE_SWID_TABLE = 16, +}; + +/** + * enum iosm_ebl_caps - EBL capability settings + * @IOSM_CAP_NOT_ENHANCED: If capability not supported + * @IOSM_CAP_USE_EXT_CAP: To be set if 
extended capability is set + * @IOSM_EXT_CAP_ERASE_ALL: Set Erase all capability + * @IOSM_EXT_CAP_COMMIT_ALL: Set the commit all capability + */ +enum iosm_ebl_caps { + IOSM_CAP_NOT_ENHANCED = 0x00, + IOSM_CAP_USE_EXT_CAP = 0x01, + IOSM_EXT_CAP_ERASE_ALL = 0x08, + IOSM_EXT_CAP_COMMIT_ALL = 0x20, +}; + +/** + * enum iosm_ebl_rsp - EBL response field + * @EBL_CAPS_FLAG: EBL capability flag + * @EBL_SKIP_ERASE: EBL skip erase flag + * @EBL_SKIP_CRC: EBL skip wr_pack crc + * @EBL_EXT_CAPS_HANDLED: EBL extended capability handled flag + * @EBL_OOS_CONFIG: EBL oos configuration + * @EBL_RSP_SW_INFO_VER: EBL SW info version + */ +enum iosm_ebl_rsp { + EBL_CAPS_FLAG = 50, + EBL_SKIP_ERASE = 54, + EBL_SKIP_CRC = 55, + EBL_EXT_CAPS_HANDLED = 57, + EBL_OOS_CONFIG = 64, + EBL_RSP_SW_INFO_VER = 70, +}; + +/** + * enum iosm_mdm_send_recv_data - Data to send to modem + * @IOSM_MDM_SEND_2: Send 2 bytes of payload + * @IOSM_MDM_SEND_4: Send 4 bytes of payload + * @IOSM_MDM_SEND_8: Send 8 bytes of payload + * @IOSM_MDM_SEND_16: Send 16 bytes of payload + */ +enum iosm_mdm_send_recv_data { + IOSM_MDM_SEND_2 = 2, + IOSM_MDM_SEND_4 = 4, + IOSM_MDM_SEND_8 = 8, + IOSM_MDM_SEND_16 = 16, +}; + +/** + * struct iosm_ebl_one_error - Structure containing error details + * @error_class: Error type- standard, security and text error + * @error_code: Specific error from error type + */ +struct iosm_ebl_one_error { + u16 error_class; + u16 error_code; +}; + +/** + * struct iosm_ebl_error- Structure with max error type supported + * @error: Array of one_error structure with max errors + */ +struct iosm_ebl_error { + struct iosm_ebl_one_error error[IOSM_MAX_ERRORS]; +}; + +/** + * struct iosm_swid_table - SWID table data for modem + * @number_of_data_sets: Number of swid types + * @sw_id_type: SWID type - SWID + * @sw_id_val: SWID value + * @rf_engine_id_type: RF engine ID type - RF_ENGINE_ID + * @rf_engine_id_val: RF engine ID value + */ +struct iosm_swid_table { + u32 number_of_data_sets; + char sw_id_type[IOSM_EBL_NAME]; + u32 sw_id_val; + char rf_engine_id_type[IOSM_EBL_NAME]; + u32 rf_engine_id_val; +}; + +/** + * struct iosm_flash_msg_control - Data sent to modem + * @action: Action to be performed + * @type: Type of action + * @length: Length of the action + * @arguments: Argument value sent to modem + */ +struct iosm_flash_msg_control { + __le32 action; + __le32 type; + __le32 length; + __le32 arguments; +}; + +/** + * struct iosm_flash_data - Header Data to be sent to modem + * @checksum: Checksum value calculated for the payload data + * @pack_id: Flash Action type + * @msg_length: Payload length + */ +struct iosm_flash_data { + __le16 checksum; + __le16 pack_id; + __le32 msg_length; +}; + +int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink, + const struct firmware *fw); + +int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink, + const struct firmware *fw); + +int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink, + u8 *mdm_rsp); + +int ipc_flash_link_establish(struct iosm_imem *ipc_imem); + +int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp); + +int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp); +#endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index 9f00e36b7f7909..cff3b43ca4d7d3 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -6,6 +6,8 @@ #include #include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_devlink.h" +#include 
"iosm_ipc_flash.h" #include "iosm_ipc_imem.h" #include "iosm_ipc_port.h" @@ -263,9 +265,12 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem, switch (pipe->channel->ctype) { case IPC_CTYPE_CTRL: port_id = pipe->channel->channel_id; - - /* Pass the packet to the wwan layer. */ - wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb); + if (port_id == IPC_MEM_CTRL_CHL_ID_7) + ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, + skb); + else + wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, + skb); break; case IPC_CTYPE_WWAN: @@ -399,19 +404,8 @@ static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; - if (ipc_imem->flash_channel_id < 0) { - ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL; - dev_err(ipc_imem->dev, "Missing flash app:%d", - ipc_imem->flash_channel_id); - return; - } - + channel = ipc_imem->ipc_devlink->devlink_sio.channel; ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio); - - /* Wake up the flash app to continue or to terminate depending - * on the CP ROM exit code. - */ - channel = &ipc_imem->channels[ipc_imem->flash_channel_id]; complete(&channel->ul_sem); } @@ -482,8 +476,8 @@ static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer) container_of(hr_timer, struct iosm_imem, startup_timer); if (ktime_to_ns(ipc_imem->hrtimer_period)) { - hrtimer_forward(&ipc_imem->startup_timer, ktime_get(), - ipc_imem->hrtimer_period); + hrtimer_forward_now(&ipc_imem->startup_timer, + ipc_imem->hrtimer_period); result = HRTIMER_RESTART; } @@ -572,7 +566,7 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) enum ipc_phase old_phase, phase; bool retry_allocation = false; bool ul_pending = false; - int ch_id, i; + int i; if (irq != IMEM_IRQ_DONT_CARE) ipc_imem->ev_irq_pending[irq] = false; @@ -696,11 +690,8 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) if ((phase == IPC_P_PSI || phase == IPC_P_EBL) && ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING && ipc_mmio_get_ipc_state(ipc_imem->mmio) == - IPC_MEM_DEVICE_IPC_RUNNING && - ipc_imem->flash_channel_id >= 0) { - /* Wake up the flash app to open the pipes. */ - ch_id = ipc_imem->flash_channel_id; - complete(&ipc_imem->channels[ch_id].ul_sem); + IPC_MEM_DEVICE_IPC_RUNNING) { + complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem); } /* Reset the expected CP state. */ @@ -1176,6 +1167,9 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) ipc_port_deinit(ipc_imem->ipc_port); } + if (ipc_imem->ipc_devlink) + ipc_devlink_deinit(ipc_imem->ipc_devlink); + ipc_imem_device_ipc_uninit(ipc_imem); ipc_imem_channel_reset(ipc_imem); @@ -1258,6 +1252,7 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, void __iomem *mmio, struct device *dev) { struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL); + enum ipc_mem_exec_stage stage; if (!ipc_imem) return NULL; @@ -1272,9 +1267,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, ipc_imem->cp_version = 0; ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP; - /* Reset the flash channel id. 
*/ - ipc_imem->flash_channel_id = -1; - /* Reset the max number of configured channels */ ipc_imem->nr_of_channels = 0; @@ -1328,8 +1320,21 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, goto imem_config_fail; } - return ipc_imem; + stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (stage == IPC_MEM_EXEC_STAGE_BOOT) { + /* Alloc and Register devlink */ + ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem); + if (!ipc_imem->ipc_devlink) { + dev_err(ipc_imem->dev, "Devlink register failed"); + goto imem_config_fail; + } + if (ipc_flash_link_establish(ipc_imem)) + goto devlink_channel_fail; + } + return ipc_imem; +devlink_channel_fail: + ipc_devlink_deinit(ipc_imem->ipc_devlink); imem_config_fail: hrtimer_cancel(&ipc_imem->td_alloc_timer); hrtimer_cancel(&ipc_imem->fast_update_timer); @@ -1361,3 +1366,51 @@ void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend) { ipc_imem->td_update_timer_suspended = suspend; } + +/* Verify the CP execution state, copy the chip info, + * change the execution phase to ROM + */ +static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem, + int arg, void *msg, + size_t msgsize) +{ + enum ipc_mem_exec_stage stage; + struct sk_buff *skb; + int rc = -EINVAL; + size_t size; + + /* Test the CP execution state. */ + stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (stage != IPC_MEM_EXEC_STAGE_BOOT) { + dev_err(ipc_imem->dev, + "Execution_stage: expected BOOT, received = %X", stage); + goto trigger_chip_info_fail; + } + /* Allocate a new sk buf for the chip info. */ + size = ipc_imem->mmio->chip_info_size; + if (size > IOSM_CHIP_INFO_SIZE_MAX) + goto trigger_chip_info_fail; + + skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size); + if (!skb) { + dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory"); + rc = -ENOMEM; + goto trigger_chip_info_fail; + } + /* Copy the chip info characters into the ipc_skb. */ + ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size); + /* First change to the ROM boot phase. */ + dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage); + ipc_imem->phase = ipc_imem_phase_update(ipc_imem); + ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb); + rc = 0; +trigger_chip_info_fail: + return rc; +} + +int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem) +{ + return ipc_task_queue_send_task(ipc_imem, + ipc_imem_devlink_trigger_chip_info_cb, + 0, NULL, 0, true); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index dc65b0712261f1..6be6708b4eec86 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -69,7 +69,7 @@ struct ipc_chnl_cfg; #define IMEM_IRQ_DONT_CARE (-1) -#define IPC_MEM_MAX_CHANNELS 7 +#define IPC_MEM_MAX_CHANNELS 8 #define IPC_MEM_MUX_IP_SESSION_ENTRIES 8 @@ -98,6 +98,7 @@ struct ipc_chnl_cfg; #define IPC_MEM_DL_ETH_OFFSET 16 #define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb)) +#define IOSM_CHIP_INFO_SIZE_MAX 100 #define FULLY_FUNCTIONAL 0 @@ -304,9 +305,9 @@ enum ipc_phase { * @ipc_port: IPC PORT data structure pointer * @pcie: IPC PCIe * @dev: Pointer to device structure - * @flash_channel_id: Reserved channel id for flashing to RAM. * @ipc_requested_state: Expected IPC state on CP. * @channels: Channel list with UL/DL pipe pairs. 
+ * @ipc_devlink: IPC Devlink data structure pointer * @ipc_status: local ipc_status * @nr_of_channels: number of configured channels * @startup_timer: startup timer for NAND support. @@ -349,9 +350,9 @@ struct iosm_imem { struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS]; struct iosm_pcie *pcie; struct device *dev; - int flash_channel_id; enum ipc_mem_device_ipc_state ipc_requested_state; struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS]; + struct iosm_devlink *ipc_devlink; u32 ipc_status; u32 nr_of_channels; struct hrtimer startup_timer; @@ -575,4 +576,15 @@ void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem); */ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype, struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation); + +/** + * ipc_imem_devlink_trigger_chip_info - Inform devlink that the chip + * information are available if the + * flashing to RAM interworking shall be + * executed. + * @ipc_imem: Pointer to imem structure + * + * Returns: 0 on success, -1 on failure + */ +int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem); #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 0a472ce773700d..b885a657023598 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -6,6 +6,7 @@ #include #include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_devlink.h" #include "iosm_ipc_imem.h" #include "iosm_ipc_imem_ops.h" #include "iosm_ipc_port.h" @@ -331,3 +332,319 @@ int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb) out: return ret; } + +/* Open a SIO link to CP and return the channel instance */ +struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem) +{ + struct ipc_mem_channel *channel; + enum ipc_phase phase; + int channel_id; + + phase = ipc_imem_phase_update(ipc_imem); + switch (phase) { + case IPC_P_OFF: + case IPC_P_ROM: + /* Get a channel id as flash id and reserve it. */ + channel_id = ipc_imem_channel_alloc(ipc_imem, + IPC_MEM_CTRL_CHL_ID_7, + IPC_CTYPE_CTRL); + + if (channel_id < 0) { + dev_err(ipc_imem->dev, + "reservation of a flash channel id failed"); + goto error; + } + + ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id; + channel = &ipc_imem->channels[channel_id]; + + /* Enqueue chip info data to be read */ + if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) { + dev_err(ipc_imem->dev, "Enqueue of chip info failed"); + channel->state = IMEM_CHANNEL_FREE; + goto error; + } + + return channel; + + case IPC_P_PSI: + case IPC_P_EBL: + ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio); + if (ipc_imem->cp_version == -1) { + dev_err(ipc_imem->dev, "invalid CP version"); + goto error; + } + + channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id; + return ipc_imem_channel_open(ipc_imem, channel_id, + IPC_HP_CDEV_OPEN); + + default: + /* CP is in the wrong state (e.g. CRASH or CD_READY) */ + dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase); + } +error: + return NULL; +} + +/* Release a SIO channel link to CP. 
*/ +void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink) +{ + struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem; + int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT; + enum ipc_mem_exec_stage exec_stage; + struct ipc_mem_channel *channel; + enum ipc_phase curr_phase; + int status = 0; + u32 tail = 0; + + channel = ipc_imem->ipc_devlink->devlink_sio.channel; + curr_phase = ipc_imem->phase; + /* Increase the total wait time to boot_check_timeout */ + do { + exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (exec_stage == IPC_MEM_EXEC_STAGE_RUN || + exec_stage == IPC_MEM_EXEC_STAGE_PSI) + break; + msleep(20); + boot_check_timeout -= 20; + } while (boot_check_timeout > 0); + + /* If there are any pending TDs then wait for Timeout/Completion before + * closing pipe. + */ + if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) { + status = wait_for_completion_interruptible_timeout + (&ipc_imem->ul_pend_sem, + msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT)); + if (status == 0) { + dev_dbg(ipc_imem->dev, + "Data Timeout on UL-Pipe:%d Head:%d Tail:%d", + channel->ul_pipe.pipe_nr, + channel->ul_pipe.old_head, + channel->ul_pipe.old_tail); + } + } + + ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, + &channel->dl_pipe, NULL, &tail); + + if (tail != channel->dl_pipe.old_tail) { + status = wait_for_completion_interruptible_timeout + (&ipc_imem->dl_pend_sem, + msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT)); + if (status == 0) { + dev_dbg(ipc_imem->dev, + "Data Timeout on DL-Pipe:%d Head:%d Tail:%d", + channel->dl_pipe.pipe_nr, + channel->dl_pipe.old_head, + channel->dl_pipe.old_tail); + } + } + + /* Due to the wait for completion in messages, there is a small window + * between closing the pipe and marking the channel as closed. In this + * small window there could be an HP update from the Host Driver. Hence update + * the channel state to CLOSING to avoid unnecessary interrupts + * towards CP. + */ + channel->state = IMEM_CHANNEL_CLOSING; + /* Release the pipe resources */ + ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe); + ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe); +} + +void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, + struct sk_buff *skb) +{ + skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb); + complete(&ipc_devlink->devlink_sio.read_sem); +} + +/* PSI transfer */ +static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem, + struct ipc_mem_channel *channel, + unsigned char *buf, int count) +{ + int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT; + enum ipc_mem_exec_stage exec_stage; + + dma_addr_t mapping = 0; + int ret; + + ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping, + DMA_TO_DEVICE); + if (ret) + goto pcie_addr_map_fail; + + /* Save the PSI information for the CP ROM driver on the doorbell + * scratchpad.
+ */ + ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count); + ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT); + + ret = wait_for_completion_interruptible_timeout + (&channel->ul_sem, + msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT)); + + if (ret <= 0) { + dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d", + ret); + goto psi_transfer_fail; + } + /* If the PSI download fails, return the CP boot ROM exit code */ + if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT && + ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) { + ret = (-1) * ((int)ipc_imem->rom_exit_code); + goto psi_transfer_fail; + } + + dev_dbg(ipc_imem->dev, "PSI image successfully downloaded"); + + /* Wait psi_start_timeout milliseconds until the CP PSI image is + * running and updates the execution_stage field with + * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage. + */ + do { + exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + + if (exec_stage == IPC_MEM_EXEC_STAGE_PSI) + break; + + msleep(20); + psi_start_timeout -= 20; + } while (psi_start_timeout > 0); + + if (exec_stage != IPC_MEM_EXEC_STAGE_PSI) + goto psi_transfer_fail; /* Unknown status of CP PSI process. */ + + ipc_imem->phase = IPC_P_PSI; + + /* Enter the PSI phase. */ + dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage); + + /* Request the RUNNING state from CP and wait until it is reached + * or a timeout occurs. + */ + ipc_imem_ipc_init_check(ipc_imem); + + ret = wait_for_completion_interruptible_timeout + (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT)); + if (ret <= 0) { + dev_err(ipc_imem->dev, + "Failed PSI RUNNING state on CP, Error-%d", ret); + goto psi_transfer_fail; + } + + if (ipc_mmio_get_ipc_state(ipc_imem->mmio) != + IPC_MEM_DEVICE_IPC_RUNNING) { + dev_err(ipc_imem->dev, + "ch[%d] %s: unexpected CP IPC state %d, not RUNNING", + channel->channel_id, + ipc_imem_phase_get_string(ipc_imem->phase), + ipc_mmio_get_ipc_state(ipc_imem->mmio)); + + goto psi_transfer_fail; + } + + /* Create the flash channel for the transfer of the images. */ + if (!ipc_imem_sys_devlink_open(ipc_imem)) { + dev_err(ipc_imem->dev, "can't open flash_channel"); + goto psi_transfer_fail; + } + + ret = 0; +psi_transfer_fail: + ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE); +pcie_addr_map_fail: + return ret; +} + +int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, + unsigned char *buf, int count) +{ + struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem; + struct ipc_mem_channel *channel; + struct sk_buff *skb; + dma_addr_t mapping; + int ret; + + channel = ipc_imem->ipc_devlink->devlink_sio.channel; + + /* In the ROM phase the PSI image is passed to CP via a specific + * shared memory area and the doorbell scratchpad directly. + */ + if (ipc_imem->phase == IPC_P_ROM) { + ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count); + /* If the PSI transfer fails then send the crash + * signature. + */ + if (ret > 0) + ipc_imem_msg_send_feature_set(ipc_imem, + IPC_MEM_INBAND_CRASH_SIG, + false); + goto out; + } + + /* Allocate skb memory for the uplink buffer. */ + skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping, + DMA_TO_DEVICE, 0); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + memcpy(skb_put(skb, count), buf, count); + + IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED; + + /* Add skb to the uplink skbuf accumulator. */ + skb_queue_tail(&channel->ul_list, skb); + + /* Inform the IPC tasklet to pass uplink IP packets to CP.
*/ + if (!ipc_imem_call_cdev_write(ipc_imem)) { + ret = wait_for_completion_interruptible(&channel->ul_sem); + + if (ret < 0) { + dev_err(ipc_imem->dev, + "ch[%d] no CP confirmation, status = %d", + channel->channel_id, ret); + ipc_pcie_kfree_skb(ipc_devlink->pcie, skb); + goto out; + } + } + ret = 0; +out: + return ret; +} + +int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data, + u32 bytes_to_read, u32 *bytes_read) +{ + struct sk_buff *skb = NULL; + int rc = 0; + + /* Check if an skb is available in rx_list, otherwise wait for one */ + devlink->devlink_sio.devlink_read_pend = 1; + while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) { + if (!wait_for_completion_interruptible_timeout + (&devlink->devlink_sio.read_sem, + msecs_to_jiffies(IPC_READ_TIMEOUT))) { + dev_err(devlink->dev, "Read timed out"); + rc = -ETIMEDOUT; + goto devlink_read_fail; + } + } + devlink->devlink_sio.devlink_read_pend = 0; + if (bytes_to_read < skb->len) { + dev_err(devlink->dev, "Invalid size, expected len %d", skb->len); + rc = -EINVAL; + goto devlink_read_fail; + } + *bytes_read = skb->len; + memcpy(data, skb->data, skb->len); + +devlink_read_fail: + ipc_pcie_kfree_skb(devlink->pcie, skb); + return rc; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index 2007fe23e9a567..f0c88ac5643c62 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -9,7 +9,7 @@ #include "iosm_ipc_mux_codec.h" /* Maximum wait time for blocking read */ -#define IPC_READ_TIMEOUT 500 +#define IPC_READ_TIMEOUT 3000 /* The delay in ms for deferring the unregister */ #define SIO_UNREGISTER_DEFER_DELAY_MS 1 @@ -98,4 +98,51 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id, */ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, enum ipc_mux_protocol mux_type); + +/** + * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP + * @ipc_imem: iosm_imem instance + * + * Return: channel instance on success, NULL for failure + */ +struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem); + +/** + * ipc_imem_sys_devlink_close - Release a Flash/CD channel link to CP + * @ipc_devlink: Pointer to ipc_devlink data-struct + * + */ +void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink); + +/** + * ipc_imem_sys_devlink_notify_rx - Receive downlink characters from CP, + * the downlink skbuf is added at the end of the + * downlink or rx list + * @ipc_devlink: Pointer to ipc_devlink data-struct + * @skb: Pointer to sk buffer + */ +void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, + struct sk_buff *skb); + +/** + * ipc_imem_sys_devlink_read - Copy the rx data and free the skbuf + * @ipc_devlink: Devlink instance + * @data: Buffer to read the data from modem + * @bytes_to_read: Size of destination buffer + * @bytes_read: Number of bytes read + * + * Return: 0 on success and failure value on error + */ +int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data, + u32 bytes_to_read, u32 *bytes_read); + +/** + * ipc_imem_sys_devlink_write - Route the uplink buffer to CP + * @ipc_devlink: Devlink_sio instance + * @buf: Pointer to buffer + * @count: Number of data bytes to write + * Return: 0 on success and failure value on error + */ +int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, + unsigned char *buf, int count); #endif diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index
c58996c1e23093..fe8e21ad8ed9f0 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -494,6 +494,9 @@ static const struct net_device_ops xenvif_netdev_ops = { struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, unsigned int handle) { + static const u8 dummy_addr[ETH_ALEN] = { + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + }; int err; struct net_device *dev; struct xenvif *vif; @@ -551,8 +554,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, * stolen by an Ethernet bridge for STP purposes. * (FE:FF:FF:FF:FF:FF) */ - eth_broadcast_addr(dev->dev_addr); - dev->dev_addr[0] &= ~0x01; + eth_hw_addr_set(dev, dummy_addr); netif_carrier_off(dev); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 32d5bc4919d8cc..0f7fd159f0f29a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1474,7 +1474,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; RING_IDX rsp_prod, req_prod; - int err = -ENOMEM; + int err; err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), &tx_ring_ref, 1, &addr); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fc41ba95f81d09..911f43986a8c9f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2161,6 +2161,7 @@ static int talk_to_netback(struct xenbus_device *dev, unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; + u8 addr[ETH_ALEN]; info->netdev->irq = 0; @@ -2174,11 +2175,12 @@ static int talk_to_netback(struct xenbus_device *dev, "feature-split-event-channels", 0); /* Read mac addr. */ - err = xen_net_read_mac(dev, info->netdev->dev_addr); + err = xen_net_read_mac(dev, addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out_unlocked; } + eth_hw_addr_set(info->netdev, addr); info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, "feature-xdp-headroom", 0); diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 051c43a2a52f86..f78670bf41e01c 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -335,7 +335,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client) return r; } - dev_dbg(dev, "I2C driver loaded\n"); return 0; } diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c index 86f593c73ed6b7..067295124eb9ea 100644 --- a/drivers/nfc/microread/i2c.c +++ b/drivers/nfc/microread/i2c.c @@ -237,8 +237,6 @@ static int microread_i2c_probe(struct i2c_client *client, struct microread_i2c_phy *phy; int r; - dev_dbg(&client->dev, "client %p\n", client); - phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy), GFP_KERNEL); if (!phy) @@ -262,8 +260,6 @@ static int microread_i2c_probe(struct i2c_client *client, if (r < 0) goto err_irq; - nfc_info(&client->dev, "Probed\n"); - return 0; err_irq: diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index 8edf761a6b2abb..e2a77a5fc88710 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -23,13 +23,9 @@ static int microread_mei_probe(struct mei_cl_device *cldev, struct nfc_mei_phy *phy; int r; - pr_info("Probing NFC microread\n"); - phy = nfc_mei_phy_alloc(cldev); - if (!phy) { - pr_err("Cannot allocate memory for microread mei phy.\n"); + if (!phy) return -ENOMEM; - } r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, 
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index edac56b01fd13e..e83f65596a88a0 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -76,10 +76,8 @@ static struct sk_buff *alloc_lc_skb(struct nfcmrvl_private *priv, uint8_t plen) struct nci_data_hdr *hdr; skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL); - if (!skb) { - pr_err("no memory for data\n"); + if (!skb) return NULL; - } hdr = skb_put(skb, NCI_DATA_HDR_SIZE); hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL; diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index e6bf8cfe3aa7c9..673eb5e9b8876f 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -128,7 +128,6 @@ static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb) static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) { struct pn533_i2c_phy *phy = data; - struct i2c_client *client; struct sk_buff *skb = NULL; int r; @@ -137,9 +136,6 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) return IRQ_NONE; } - client = phy->i2c_dev; - dev_dbg(&client->dev, "IRQ\n"); - if (phy->hard_fault != 0) return IRQ_HANDLED; @@ -160,7 +156,7 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) return IRQ_HANDLED; } -static struct pn533_phy_ops i2c_phy_ops = { +static const struct pn533_phy_ops i2c_phy_ops = { .send_frame = pn533_i2c_send_frame, .send_ack = pn533_i2c_send_ack, .abort_cmd = pn533_i2c_abort_cmd, diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 2f3f3fe9a0baa8..787bcbd290f7b4 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -1235,8 +1235,6 @@ static void pn533_listen_mode_timer(struct timer_list *t) { struct pn533 *dev = from_timer(dev, t, listen_timer); - dev_dbg(dev->dev, "Listen mode timeout\n"); - dev->cancel_listen = 1; pn533_poll_next_mod(dev); @@ -2173,7 +2171,7 @@ void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status) } if (skb == NULL) { - pr_err("NULL Frame -> link is dead\n"); + dev_err(dev->dev, "NULL Frame -> link is dead\n"); goto sched_wq; } @@ -2735,7 +2733,7 @@ EXPORT_SYMBOL_GPL(pn533_finalize_setup); struct pn533 *pn53x_common_init(u32 device_type, enum pn533_protocol_type protocol_type, void *phy, - struct pn533_phy_ops *phy_ops, + const struct pn533_phy_ops *phy_ops, struct pn533_frame_ops *fops, struct device *dev) { diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h index 5f94f38a2a08d2..09e35b8693f5a6 100644 --- a/drivers/nfc/pn533/pn533.h +++ b/drivers/nfc/pn533/pn533.h @@ -177,7 +177,7 @@ struct pn533 { struct device *dev; void *phy; - struct pn533_phy_ops *phy_ops; + const struct pn533_phy_ops *phy_ops; }; typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg, @@ -232,7 +232,7 @@ struct pn533_phy_ops { struct pn533 *pn53x_common_init(u32 device_type, enum pn533_protocol_type protocol_type, void *phy, - struct pn533_phy_ops *phy_ops, + const struct pn533_phy_ops *phy_ops, struct pn533_frame_ops *fops, struct device *dev); diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c index 7bdaf826307064..2caf997f9bc94a 100644 --- a/drivers/nfc/pn533/uart.c +++ b/drivers/nfc/pn533/uart.c @@ -123,7 +123,7 @@ static int pn532_dev_down(struct pn533 *dev) return 0; } -static struct pn533_phy_ops uart_phy_ops = { +static const struct pn533_phy_ops uart_phy_ops = { .send_frame = pn532_uart_send_frame, .send_ack = pn532_uart_send_ack, .abort_cmd = pn532_uart_abort_cmd, @@ -224,7 +224,7 @@ static int 
pn532_receive_buf(struct serdev_device *serdev, return i; } -static struct serdev_device_ops pn532_serdev_ops = { +static const struct serdev_device_ops pn532_serdev_ops = { .receive_buf = pn532_receive_buf, .write_wakeup = serdev_device_write_wakeup, }; diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index bd7f7478d18921..6f71ac72012ea5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -429,7 +429,7 @@ static void pn533_send_complete(struct urb *urb) } } -static struct pn533_phy_ops usb_phy_ops = { +static const struct pn533_phy_ops usb_phy_ops = { .send_frame = pn533_usb_send_frame, .send_ack = pn533_usb_send_ack, .abort_cmd = pn533_usb_abort_cmd, diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c index 5c10aac085a415..c493f2dbd0e211 100644 --- a/drivers/nfc/pn544/mei.c +++ b/drivers/nfc/pn544/mei.c @@ -22,13 +22,9 @@ static int pn544_mei_probe(struct mei_cl_device *cldev, struct nfc_mei_phy *phy; int r; - pr_info("Probing NFC pn544\n"); - phy = nfc_mei_phy_alloc(cldev); - if (!phy) { - pr_err("Cannot allocate memory for pn544 mei phy.\n"); + if (!phy) return -ENOMEM; - } r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, @@ -46,8 +42,6 @@ static void pn544_mei_remove(struct mei_cl_device *cldev) { struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); - pr_info("Removing pn544\n"); - pn544_hci_remove(phy->hdev); nfc_mei_phy_free(phy); diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index 1af7a1e632cf21..c20fdbac51c507 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -357,6 +357,7 @@ s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo) int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info) { + struct device *dev = &fw_info->ndev->nfc_dev->dev; struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo; int ret; @@ -364,8 +365,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info) ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo); if (ret < 0) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Failed to get bootinfo, ret=%02x\n", ret); + dev_err(dev, "Failed to get bootinfo, ret=%02x\n", ret); goto err; } @@ -373,8 +373,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info) ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr); if (ret < 0) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Unknown hardware version\n"); + dev_err(dev, "Unknown hardware version\n"); goto err; } @@ -409,6 +408,7 @@ bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) { + struct device *dev = &fw_info->ndev->nfc_dev->dev; struct s3fwrn5_fw_image *fw = &fw_info->fw; u8 hash_data[SHA1_DIGEST_SIZE]; struct crypto_shash *tfm; @@ -421,8 +421,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) tfm = crypto_alloc_shash("sha1", 0, 0); if (IS_ERR(tfm)) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Cannot allocate shash (code=%pe)\n", tfm); + dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm); return PTR_ERR(tfm); } @@ -430,21 +429,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) crypto_free_shash(tfm); if (ret) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Cannot compute hash (code=%d)\n", ret); + dev_err(dev, "Cannot compute hash (code=%d)\n", ret); return ret; } /* Firmware update process */ - dev_info(&fw_info->ndev->nfc_dev->dev, - "Firmware update: %s\n", fw_info->fw_name); + dev_info(dev, "Firmware update: %s\n", 
fw_info->fw_name); ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data, SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size); if (ret < 0) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Unable to enter update mode\n"); + dev_err(dev, "Unable to enter update mode\n"); return ret; } @@ -452,21 +448,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) ret = s3fwrn5_fw_update_sector(fw_info, fw_info->base_addr + off, fw->image + off); if (ret < 0) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Firmware update error (code=%d)\n", ret); + dev_err(dev, "Firmware update error (code=%d)\n", ret); return ret; } } ret = s3fwrn5_fw_complete_update_mode(fw_info); if (ret < 0) { - dev_err(&fw_info->ndev->nfc_dev->dev, - "Unable to complete update mode\n"); + dev_err(dev, "Unable to complete update mode\n"); return ret; } - dev_info(&fw_info->ndev->nfc_dev->dev, - "Firmware update: success\n"); + dev_info(dev, "Firmware update: success\n"); return ret; } diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c index e374e670b36b19..ca6828f55ba0e9 100644 --- a/drivers/nfc/s3fwrn5/nci.c +++ b/drivers/nfc/s3fwrn5/nci.c @@ -47,6 +47,7 @@ const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = { int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) { + struct device *dev = &info->ndev->nfc_dev->dev; const struct firmware *fw; struct nci_prop_fw_cfg_cmd fw_cfg; struct nci_prop_set_rfreg_cmd set_rfreg; @@ -55,7 +56,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) int i, len; int ret; - ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev); + ret = request_firmware(&fw, fw_name, dev); if (ret < 0) return ret; @@ -77,13 +78,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) /* Start rfreg configuration */ - dev_info(&info->ndev->nfc_dev->dev, - "rfreg configuration update: %s\n", fw_name); + dev_info(dev, "rfreg configuration update: %s\n", fw_name); ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL); if (ret < 0) { - dev_err(&info->ndev->nfc_dev->dev, - "Unable to start rfreg update\n"); + dev_err(dev, "Unable to start rfreg update\n"); goto out; } @@ -97,8 +96,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG, len+1, (__u8 *)&set_rfreg); if (ret < 0) { - dev_err(&info->ndev->nfc_dev->dev, - "rfreg update error (code=%d)\n", ret); + dev_err(dev, "rfreg update error (code=%d)\n", ret); goto out; } set_rfreg.index++; @@ -110,13 +108,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG, sizeof(stop_rfreg), (__u8 *)&stop_rfreg); if (ret < 0) { - dev_err(&info->ndev->nfc_dev->dev, - "Unable to stop rfreg update\n"); + dev_err(dev, "Unable to stop rfreg update\n"); goto out; } - dev_info(&info->ndev->nfc_dev->dev, - "rfreg configuration update: success\n"); + dev_info(dev, "rfreg configuration update: success\n"); out: release_firmware(fw); return ret; diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index ccf6152ebb9f26..cbd968f013c70d 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -157,7 +157,6 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy, static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) { struct st_nci_i2c_phy *phy = phy_id; - struct i2c_client *client; struct sk_buff *skb = NULL; int r; @@ -166,9 +165,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) 
return IRQ_NONE; } - client = phy->i2c_dev; - dev_dbg(&client->dev, "IRQ\n"); - if (phy->ndlc->hard_fault) return IRQ_HANDLED; diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index e9dc313b333e29..755460a73c0dce 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -239,8 +239,6 @@ static void ndlc_t1_timeout(struct timer_list *t) { struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer); - pr_debug("\n"); - schedule_work(&ndlc->sm_work); } @@ -248,8 +246,6 @@ static void ndlc_t2_timeout(struct timer_list *t) { struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer); - pr_debug("\n"); - schedule_work(&ndlc->sm_work); } diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 5fd89f72969d91..7764b1a4c3cf88 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -638,8 +638,6 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx, { struct st_nci_info *info = nci_get_drvdata(ndev); - pr_debug("\n"); - switch (se_idx) { case ST_NCI_ESE_HOST_ID: info->se_info.cb = cb; @@ -671,8 +669,6 @@ static void st_nci_se_wt_timeout(struct timer_list *t) u8 param = 0x01; struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer); - pr_debug("\n"); - info->se_info.bwi_active = false; if (!info->se_info.xch_error) { @@ -692,8 +688,6 @@ static void st_nci_se_activation_timeout(struct timer_list *t) struct st_nci_info *info = from_timer(info, t, se_info.se_active_timer); - pr_debug("\n"); - info->se_info.se_active = false; complete(&info->se_info.req_completion); diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 0875b773fb413a..4e723992e74c24 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -169,7 +169,6 @@ static int st_nci_spi_read(struct st_nci_spi_phy *phy, static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) { struct st_nci_spi_phy *phy = phy_id; - struct spi_device *dev; struct sk_buff *skb = NULL; int r; @@ -178,9 +177,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) return IRQ_NONE; } - dev = phy->spi_dev; - dev_dbg(&dev->dev, "IRQ\n"); - if (phy->ndlc->hard_fault) return IRQ_HANDLED; diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 279d88128b2e4a..f126ce96a7df3a 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -421,7 +421,6 @@ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy, static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; - struct i2c_client *client; int r; @@ -430,9 +429,6 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id) return IRQ_NONE; } - client = phy->i2c_dev; - dev_dbg(&client->dev, "IRQ\n"); - if (phy->hard_fault != 0) return IRQ_HANDLED; diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c8bdf078d11152..a43fc4117fa570 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -257,8 +257,6 @@ static void st21nfca_se_wt_timeout(struct timer_list *t) struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer); - pr_debug("\n"); - info->se_info.bwi_active = false; if (!info->se_info.xch_error) { @@ -278,8 +276,6 @@ static void st21nfca_se_activation_timeout(struct timer_list *t) struct st21nfca_hci_info *info = from_timer(info, t, se_info.se_active_timer); - pr_debug("\n"); - info->se_info.se_active = false; complete(&info->se_info.req_completion); diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index 8890fcd59c39eb..29ca9c328df2b4 100644 --- 
a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -2170,8 +2170,6 @@ static int trf7970a_suspend(struct device *dev) struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); - dev_dbg(dev, "Suspend\n"); - mutex_lock(&trf->lock); trf7970a_shutdown(trf); @@ -2187,8 +2185,6 @@ static int trf7970a_resume(struct device *dev) struct trf7970a *trf = spi_get_drvdata(spi); int ret; - dev_dbg(dev, "Resume\n"); - mutex_lock(&trf->lock); ret = trf7970a_startup(trf); @@ -2206,8 +2202,6 @@ static int trf7970a_pm_runtime_suspend(struct device *dev) struct trf7970a *trf = spi_get_drvdata(spi); int ret; - dev_dbg(dev, "Runtime suspend\n"); - mutex_lock(&trf->lock); ret = trf7970a_power_down(trf); @@ -2223,8 +2217,6 @@ static int trf7970a_pm_runtime_resume(struct device *dev) struct trf7970a *trf = spi_get_drvdata(spi); int ret; - dev_dbg(dev, "Runtime resume\n"); - ret = trf7970a_power_up(trf); if (!ret) pm_runtime_mark_last_busy(dev); diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 3dfeae8912dfc4..80b5fd44ab1c7f 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -70,10 +70,6 @@ config OF_IRQ def_bool y depends on !SPARC && IRQ_DOMAIN -config OF_NET - depends on NETDEVICES - def_bool y - config OF_RESERVED_MEM def_bool OF_EARLY_FLATTREE diff --git a/drivers/of/Makefile b/drivers/of/Makefile index c13b982084a3a9..e0360a44306e29 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o obj-$(CONFIG_OF_PROMTREE) += pdt.o obj-$(CONFIG_OF_ADDRESS) += address.o obj-$(CONFIG_OF_IRQ) += irq.o -obj-$(CONFIG_OF_NET) += of_net.o obj-$(CONFIG_OF_UNITTEST) += unittest.o obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o obj-$(CONFIG_OF_RESOLVE) += resolver.o diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c index d2d0ed4b27c8fe..f650e19a315cc2 100644 --- a/drivers/pcmcia/pcmcia_cis.c +++ b/drivers/pcmcia/pcmcia_cis.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -398,7 +399,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple, void *priv) { struct net_device *dev = priv; - int i; if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID) return -EINVAL; @@ -412,8 +412,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple, dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n"); return -EINVAL; } - for (i = 0; i < 6; i++) - dev->dev_addr[i] = tuple->TupleData[i+2]; + eth_hw_addr_set(dev, &tuple->TupleData[2]); return 0; } diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c index b1adaecc26f848..bbfad209c890fc 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c @@ -183,7 +183,7 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, { struct mdio_device *mdiodev = usb3->mdiodev; - return mdiobus_write(mdiodev->bus, mdiodev->addr, reg, value); + return mdiodev_write(mdiodev, reg, value); } static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev) diff --git a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c index 4c7d11d2b37849..9e7434a0d3e007 100644 --- a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c @@ -29,14 +29,12 @@ static int ns2_pci_phy_init(struct phy *p) int rc; /* select the AFE 100MHz block page */ - rc = mdiobus_write(mdiodev->bus, mdiodev->addr, - BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK); + rc = 
mdiodev_write(mdiodev, BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK); if (rc) goto err; /* set the 100 MHz reference clock amplitude to 2.05 v */ - rc = mdiobus_write(mdiodev->bus, mdiodev->addr, - PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V); + rc = mdiodev_write(mdiodev, PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V); if (rc) goto err; diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h deleted file mode 100644 index ac524cf0f31fbf..00000000000000 --- a/drivers/ptp/idt8a340_reg.h +++ /dev/null @@ -1,720 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* idt8a340_reg.h - * - * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019 - * https://github.com/richardcochran/regen - * - * Hand modified to include some HW registers. - * Based on 4.8.0, SCSR rev C commit a03c7ae5 - */ -#ifndef HAVE_IDT8A340_REG -#define HAVE_IDT8A340_REG - -#define PAGE_ADDR_BASE 0x0000 -#define PAGE_ADDR 0x00fc - -#define HW_REVISION 0x8180 -#define REV_ID 0x007a - -#define HW_DPLL_0 (0x8a00) -#define HW_DPLL_1 (0x8b00) -#define HW_DPLL_2 (0x8c00) -#define HW_DPLL_3 (0x8d00) -#define HW_DPLL_4 (0x8e00) -#define HW_DPLL_5 (0x8f00) -#define HW_DPLL_6 (0x9000) -#define HW_DPLL_7 (0x9100) - -#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080) -#define HW_DPLL_TOD_CTRL_1 (0x089) -#define HW_DPLL_TOD_CTRL_2 (0x08A) -#define HW_DPLL_TOD_OVR__0 (0x098) -#define HW_DPLL_TOD_OUT_0__0 (0x0B0) - -#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740) -#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741) -#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742) -#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743) -#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744) -#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745) -#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746) -#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747) -#define HW_Q8_CH_SYNC_CTRL_0 (0xa748) -#define HW_Q8_CH_SYNC_CTRL_1 (0xa749) -#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a) -#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b) -#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c) -#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d) -#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e) -#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f) - -#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14 -#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15 -#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16 -#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17 - -#define SYNCTRL1_MASTER_SYNC_RST BIT(7) -#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5) -#define SYNCTRL1_TOD_SYNC_TRIG BIT(4) -#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3) -#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2) -#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1) -#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0) - -#define HW_Q8_CTRL_SPARE (0xa7d4) -#define HW_Q11_CTRL_SPARE (0xa7ec) - -/** - * Select FOD5 as sync_trigger for Q8 divider. - * Transition from logic zero to one - * sets trigger to sync Q8 divider. - * - * Unused when FOD4 is driving Q8 divider (normal operation). - */ -#define Q9_TO_Q8_SYNC_TRIG BIT(1) - -/** - * Enable FOD5 as driver for clock and sync for Q8 divider. - * Enable fanout buffer for FOD5. - * - * Unused when FOD4 is driving Q8 divider (normal operation). - */ -#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) - -/** - * Select FOD6 as sync_trigger for Q11 divider. - * Transition from logic zero to one - * sets trigger to sync Q11 divider. - * - * Unused when FOD7 is driving Q11 divider (normal operation). - */ -#define Q10_TO_Q11_SYNC_TRIG BIT(1) - -/** - * Enable FOD6 as driver for clock and sync for Q11 divider. - * Enable fanout buffer for FOD6. - * - * Unused when FOD7 is driving Q11 divider (normal operation). 
- */ -#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) - -#define RESET_CTRL 0xc000 -#define SM_RESET 0x0012 -#define SM_RESET_CMD 0x5A - -#define GENERAL_STATUS 0xc014 -#define BOOT_STATUS 0x0000 -#define HW_REV_ID 0x000A -#define BOND_ID 0x000B -#define HW_CSR_ID 0x000C -#define HW_IRQ_ID 0x000E - -#define MAJ_REL 0x0010 -#define MIN_REL 0x0011 -#define HOTFIX_REL 0x0012 - -#define PIPELINE_ID 0x0014 -#define BUILD_ID 0x0018 - -#define JTAG_DEVICE_ID 0x001c -#define PRODUCT_ID 0x001e - -#define OTP_SCSR_CONFIG_SELECT 0x0022 - -#define STATUS 0xc03c -#define DPLL_SYS_STATUS 0x0020 -#define DPLL_SYS_APLL_STATUS 0x0021 -#define USER_GPIO0_TO_7_STATUS 0x008a -#define USER_GPIO8_TO_15_STATUS 0x008b - -#define GPIO_USER_CONTROL 0xc160 -#define GPIO0_TO_7_OUT 0x0000 -#define GPIO8_TO_15_OUT 0x0001 - -#define STICKY_STATUS_CLEAR 0xc164 - -#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c - -#define ALERT_CFG 0xc188 - -#define SYS_DPLL_XO 0xc194 - -#define SYS_APLL 0xc19c - -#define INPUT_0 0xc1b0 - -#define INPUT_1 0xc1c0 - -#define INPUT_2 0xc1d0 - -#define INPUT_3 0xc200 - -#define INPUT_4 0xc210 - -#define INPUT_5 0xc220 - -#define INPUT_6 0xc230 - -#define INPUT_7 0xc240 - -#define INPUT_8 0xc250 - -#define INPUT_9 0xc260 - -#define INPUT_10 0xc280 - -#define INPUT_11 0xc290 - -#define INPUT_12 0xc2a0 - -#define INPUT_13 0xc2b0 - -#define INPUT_14 0xc2c0 - -#define INPUT_15 0xc2d0 - -#define REF_MON_0 0xc2e0 - -#define REF_MON_1 0xc2ec - -#define REF_MON_2 0xc300 - -#define REF_MON_3 0xc30c - -#define REF_MON_4 0xc318 - -#define REF_MON_5 0xc324 - -#define REF_MON_6 0xc330 - -#define REF_MON_7 0xc33c - -#define REF_MON_8 0xc348 - -#define REF_MON_9 0xc354 - -#define REF_MON_10 0xc360 - -#define REF_MON_11 0xc36c - -#define REF_MON_12 0xc380 - -#define REF_MON_13 0xc38c - -#define REF_MON_14 0xc398 - -#define REF_MON_15 0xc3a4 - -#define DPLL_0 0xc3b0 -#define DPLL_CTRL_REG_0 0x0002 -#define DPLL_CTRL_REG_1 0x0003 -#define DPLL_CTRL_REG_2 0x0004 -#define DPLL_TOD_SYNC_CFG 0x0031 -#define DPLL_COMBO_SLAVE_CFG_0 0x0032 -#define DPLL_COMBO_SLAVE_CFG_1 0x0033 -#define DPLL_SLAVE_REF_CFG 0x0034 -#define DPLL_REF_MODE 0x0035 -#define DPLL_PHASE_MEASUREMENT_CFG 0x0036 -#define DPLL_MODE 0x0037 - -#define DPLL_1 0xc400 - -#define DPLL_2 0xc438 - -#define DPLL_3 0xc480 - -#define DPLL_4 0xc4b8 - -#define DPLL_5 0xc500 - -#define DPLL_6 0xc538 - -#define DPLL_7 0xc580 - -#define SYS_DPLL 0xc5b8 - -#define DPLL_CTRL_0 0xc600 -#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001 -#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a - -#define DPLL_CTRL_1 0xc63c - -#define DPLL_CTRL_2 0xc680 - -#define DPLL_CTRL_3 0xc6bc - -#define DPLL_CTRL_4 0xc700 - -#define DPLL_CTRL_5 0xc73c - -#define DPLL_CTRL_6 0xc780 - -#define DPLL_CTRL_7 0xc7bc - -#define SYS_DPLL_CTRL 0xc800 - -#define DPLL_PHASE_0 0xc818 - -/* Signed 42-bit FFO in units of 2^(-53) */ -#define DPLL_WR_PHASE 0x0000 - -#define DPLL_PHASE_1 0xc81c - -#define DPLL_PHASE_2 0xc820 - -#define DPLL_PHASE_3 0xc824 - -#define DPLL_PHASE_4 0xc828 - -#define DPLL_PHASE_5 0xc82c - -#define DPLL_PHASE_6 0xc830 - -#define DPLL_PHASE_7 0xc834 - -#define DPLL_FREQ_0 0xc838 - -/* Signed 42-bit FFO in units of 2^(-53) */ -#define DPLL_WR_FREQ 0x0000 - -#define DPLL_FREQ_1 0xc840 - -#define DPLL_FREQ_2 0xc848 - -#define DPLL_FREQ_3 0xc850 - -#define DPLL_FREQ_4 0xc858 - -#define DPLL_FREQ_5 0xc860 - -#define DPLL_FREQ_6 0xc868 - -#define DPLL_FREQ_7 0xc870 - -#define DPLL_PHASE_PULL_IN_0 0xc880 -#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */ -#define 
PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */ -#define PULL_IN_CTRL 0x0007 - -#define DPLL_PHASE_PULL_IN_1 0xc888 - -#define DPLL_PHASE_PULL_IN_2 0xc890 - -#define DPLL_PHASE_PULL_IN_3 0xc898 - -#define DPLL_PHASE_PULL_IN_4 0xc8a0 - -#define DPLL_PHASE_PULL_IN_5 0xc8a8 - -#define DPLL_PHASE_PULL_IN_6 0xc8b0 - -#define DPLL_PHASE_PULL_IN_7 0xc8b8 - -#define GPIO_CFG 0xc8c0 -#define GPIO_CFG_GBL 0x0000 - -#define GPIO_0 0xc8c2 -#define GPIO_DCO_INC_DEC 0x0000 -#define GPIO_OUT_CTRL_0 0x0001 -#define GPIO_OUT_CTRL_1 0x0002 -#define GPIO_TOD_TRIG 0x0003 -#define GPIO_DPLL_INDICATOR 0x0004 -#define GPIO_LOS_INDICATOR 0x0005 -#define GPIO_REF_INPUT_DSQ_0 0x0006 -#define GPIO_REF_INPUT_DSQ_1 0x0007 -#define GPIO_REF_INPUT_DSQ_2 0x0008 -#define GPIO_REF_INPUT_DSQ_3 0x0009 -#define GPIO_MAN_CLK_SEL_0 0x000a -#define GPIO_MAN_CLK_SEL_1 0x000b -#define GPIO_MAN_CLK_SEL_2 0x000c -#define GPIO_SLAVE 0x000d -#define GPIO_ALERT_OUT_CFG 0x000e -#define GPIO_TOD_NOTIFICATION_CFG 0x000f -#define GPIO_CTRL 0x0010 - -#define GPIO_1 0xc8d4 - -#define GPIO_2 0xc8e6 - -#define GPIO_3 0xc900 - -#define GPIO_4 0xc912 - -#define GPIO_5 0xc924 - -#define GPIO_6 0xc936 - -#define GPIO_7 0xc948 - -#define GPIO_8 0xc95a - -#define GPIO_9 0xc980 - -#define GPIO_10 0xc992 - -#define GPIO_11 0xc9a4 - -#define GPIO_12 0xc9b6 - -#define GPIO_13 0xc9c8 - -#define GPIO_14 0xc9da - -#define GPIO_15 0xca00 - -#define OUT_DIV_MUX 0xca12 - -#define OUTPUT_0 0xca14 -/* FOD frequency output divider value */ -#define OUT_DIV 0x0000 -#define OUT_DUTY_CYCLE_HIGH 0x0004 -#define OUT_CTRL_0 0x0008 -#define OUT_CTRL_1 0x0009 -/* Phase adjustment in FOD cycles */ -#define OUT_PHASE_ADJ 0x000c - -#define OUTPUT_1 0xca24 - -#define OUTPUT_2 0xca34 - -#define OUTPUT_3 0xca44 - -#define OUTPUT_4 0xca54 - -#define OUTPUT_5 0xca64 - -#define OUTPUT_6 0xca80 - -#define OUTPUT_7 0xca90 - -#define OUTPUT_8 0xcaa0 - -#define OUTPUT_9 0xcab0 - -#define OUTPUT_10 0xcac0 - -#define OUTPUT_11 0xcad0 - -#define SERIAL 0xcae0 - -#define PWM_ENCODER_0 0xcb00 - -#define PWM_ENCODER_1 0xcb08 - -#define PWM_ENCODER_2 0xcb10 - -#define PWM_ENCODER_3 0xcb18 - -#define PWM_ENCODER_4 0xcb20 - -#define PWM_ENCODER_5 0xcb28 - -#define PWM_ENCODER_6 0xcb30 - -#define PWM_ENCODER_7 0xcb38 - -#define PWM_DECODER_0 0xcb40 - -#define PWM_DECODER_1 0xcb48 - -#define PWM_DECODER_2 0xcb50 - -#define PWM_DECODER_3 0xcb58 - -#define PWM_DECODER_4 0xcb60 - -#define PWM_DECODER_5 0xcb68 - -#define PWM_DECODER_6 0xcb70 - -#define PWM_DECODER_7 0xcb80 - -#define PWM_DECODER_8 0xcb88 - -#define PWM_DECODER_9 0xcb90 - -#define PWM_DECODER_10 0xcb98 - -#define PWM_DECODER_11 0xcba0 - -#define PWM_DECODER_12 0xcba8 - -#define PWM_DECODER_13 0xcbb0 - -#define PWM_DECODER_14 0xcbb8 - -#define PWM_DECODER_15 0xcbc0 - -#define PWM_USER_DATA 0xcbc8 - -#define TOD_0 0xcbcc - -/* Enable TOD counter, output channel sync and even-PPS mode */ -#define TOD_CFG 0x0000 - -#define TOD_1 0xcbce - -#define TOD_2 0xcbd0 - -#define TOD_3 0xcbd2 - - -#define TOD_WRITE_0 0xcc00 -/* 8-bit subns, 32-bit ns, 48-bit seconds */ -#define TOD_WRITE 0x0000 -/* Counter increments after TOD write is completed */ -#define TOD_WRITE_COUNTER 0x000c -/* TOD write trigger configuration */ -#define TOD_WRITE_SELECT_CFG_0 0x000d -/* TOD write trigger selection */ -#define TOD_WRITE_CMD 0x000f - -#define TOD_WRITE_1 0xcc10 - -#define TOD_WRITE_2 0xcc20 - -#define TOD_WRITE_3 0xcc30 - -#define TOD_READ_PRIMARY_0 0xcc40 -/* 8-bit subns, 32-bit ns, 48-bit seconds */ -#define TOD_READ_PRIMARY 0x0000 -/* Counter 
increments after TOD write is completed */ -#define TOD_READ_PRIMARY_COUNTER 0x000b -/* Read trigger configuration */ -#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c -/* Read trigger selection */ -#define TOD_READ_PRIMARY_CMD 0x000e - -#define TOD_READ_PRIMARY_1 0xcc50 - -#define TOD_READ_PRIMARY_2 0xcc60 - -#define TOD_READ_PRIMARY_3 0xcc80 - -#define TOD_READ_SECONDARY_0 0xcc90 - -#define TOD_READ_SECONDARY_1 0xcca0 - -#define TOD_READ_SECONDARY_2 0xccb0 - -#define TOD_READ_SECONDARY_3 0xccc0 - -#define OUTPUT_TDC_CFG 0xccd0 - -#define OUTPUT_TDC_0 0xcd00 - -#define OUTPUT_TDC_1 0xcd08 - -#define OUTPUT_TDC_2 0xcd10 - -#define OUTPUT_TDC_3 0xcd18 - -#define INPUT_TDC 0xcd20 - -#define SCRATCH 0xcf50 - -#define EEPROM 0xcf68 - -#define OTP 0xcf70 - -#define BYTE 0xcf80 - -/* Bit definitions for the MAJ_REL register */ -#define MAJOR_SHIFT (1) -#define MAJOR_MASK (0x7f) -#define PR_BUILD BIT(0) - -/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */ -#define GPIO0_LEVEL BIT(0) -#define GPIO1_LEVEL BIT(1) -#define GPIO2_LEVEL BIT(2) -#define GPIO3_LEVEL BIT(3) -#define GPIO4_LEVEL BIT(4) -#define GPIO5_LEVEL BIT(5) -#define GPIO6_LEVEL BIT(6) -#define GPIO7_LEVEL BIT(7) - -/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */ -#define GPIO8_LEVEL BIT(0) -#define GPIO9_LEVEL BIT(1) -#define GPIO10_LEVEL BIT(2) -#define GPIO11_LEVEL BIT(3) -#define GPIO12_LEVEL BIT(4) -#define GPIO13_LEVEL BIT(5) -#define GPIO14_LEVEL BIT(6) -#define GPIO15_LEVEL BIT(7) - -/* Bit definitions for the GPIO0_TO_7_OUT register */ -#define GPIO0_DRIVE_LEVEL BIT(0) -#define GPIO1_DRIVE_LEVEL BIT(1) -#define GPIO2_DRIVE_LEVEL BIT(2) -#define GPIO3_DRIVE_LEVEL BIT(3) -#define GPIO4_DRIVE_LEVEL BIT(4) -#define GPIO5_DRIVE_LEVEL BIT(5) -#define GPIO6_DRIVE_LEVEL BIT(6) -#define GPIO7_DRIVE_LEVEL BIT(7) - -/* Bit definitions for the GPIO8_TO_15_OUT register */ -#define GPIO8_DRIVE_LEVEL BIT(0) -#define GPIO9_DRIVE_LEVEL BIT(1) -#define GPIO10_DRIVE_LEVEL BIT(2) -#define GPIO11_DRIVE_LEVEL BIT(3) -#define GPIO12_DRIVE_LEVEL BIT(4) -#define GPIO13_DRIVE_LEVEL BIT(5) -#define GPIO14_DRIVE_LEVEL BIT(6) -#define GPIO15_DRIVE_LEVEL BIT(7) - -/* Bit definitions for the DPLL_TOD_SYNC_CFG register */ -#define TOD_SYNC_SOURCE_SHIFT (1) -#define TOD_SYNC_SOURCE_MASK (0x3) -#define TOD_SYNC_EN BIT(0) - -/* Bit definitions for the DPLL_MODE register */ -#define WRITE_TIMER_MODE BIT(6) -#define PLL_MODE_SHIFT (3) -#define PLL_MODE_MASK (0x7) -#define STATE_MODE_SHIFT (0) -#define STATE_MODE_MASK (0x7) - -/* Bit definitions for the GPIO_CFG_GBL register */ -#define SUPPLY_MODE_SHIFT (0) -#define SUPPLY_MODE_MASK (0x3) - -/* Bit definitions for the GPIO_DCO_INC_DEC register */ -#define INCDEC_DPLL_INDEX_SHIFT (0) -#define INCDEC_DPLL_INDEX_MASK (0x7) - -/* Bit definitions for the GPIO_OUT_CTRL_0 register */ -#define CTRL_OUT_0 BIT(0) -#define CTRL_OUT_1 BIT(1) -#define CTRL_OUT_2 BIT(2) -#define CTRL_OUT_3 BIT(3) -#define CTRL_OUT_4 BIT(4) -#define CTRL_OUT_5 BIT(5) -#define CTRL_OUT_6 BIT(6) -#define CTRL_OUT_7 BIT(7) - -/* Bit definitions for the GPIO_OUT_CTRL_1 register */ -#define CTRL_OUT_8 BIT(0) -#define CTRL_OUT_9 BIT(1) -#define CTRL_OUT_10 BIT(2) -#define CTRL_OUT_11 BIT(3) -#define CTRL_OUT_12 BIT(4) -#define CTRL_OUT_13 BIT(5) -#define CTRL_OUT_14 BIT(6) -#define CTRL_OUT_15 BIT(7) - -/* Bit definitions for the GPIO_TOD_TRIG register */ -#define TOD_TRIG_0 BIT(0) -#define TOD_TRIG_1 BIT(1) -#define TOD_TRIG_2 BIT(2) -#define TOD_TRIG_3 BIT(3) - -/* Bit definitions for the GPIO_DPLL_INDICATOR register */ 
-#define IND_DPLL_INDEX_SHIFT (0) -#define IND_DPLL_INDEX_MASK (0x7) - -/* Bit definitions for the GPIO_LOS_INDICATOR register */ -#define REFMON_INDEX_SHIFT (0) -#define REFMON_INDEX_MASK (0xf) -/* Active level of LOS indicator, 0=low 1=high */ -#define ACTIVE_LEVEL BIT(4) - -/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */ -#define DSQ_INP_0 BIT(0) -#define DSQ_INP_1 BIT(1) -#define DSQ_INP_2 BIT(2) -#define DSQ_INP_3 BIT(3) -#define DSQ_INP_4 BIT(4) -#define DSQ_INP_5 BIT(5) -#define DSQ_INP_6 BIT(6) -#define DSQ_INP_7 BIT(7) - -/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */ -#define DSQ_INP_8 BIT(0) -#define DSQ_INP_9 BIT(1) -#define DSQ_INP_10 BIT(2) -#define DSQ_INP_11 BIT(3) -#define DSQ_INP_12 BIT(4) -#define DSQ_INP_13 BIT(5) -#define DSQ_INP_14 BIT(6) -#define DSQ_INP_15 BIT(7) - -/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */ -#define DSQ_DPLL_0 BIT(0) -#define DSQ_DPLL_1 BIT(1) -#define DSQ_DPLL_2 BIT(2) -#define DSQ_DPLL_3 BIT(3) -#define DSQ_DPLL_4 BIT(4) -#define DSQ_DPLL_5 BIT(5) -#define DSQ_DPLL_6 BIT(6) -#define DSQ_DPLL_7 BIT(7) - -/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */ -#define DSQ_DPLL_SYS BIT(0) -#define GPIO_DSQ_LEVEL BIT(1) - -/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */ -#define DPLL_TOD_SHIFT (0) -#define DPLL_TOD_MASK (0x3) -#define TOD_READ_SECONDARY BIT(2) -#define GPIO_ASSERT_LEVEL BIT(3) - -/* Bit definitions for the GPIO_CTRL register */ -#define GPIO_FUNCTION_EN BIT(0) -#define GPIO_CMOS_OD_MODE BIT(1) -#define GPIO_CONTROL_DIR BIT(2) -#define GPIO_PU_PD_MODE BIT(3) -#define GPIO_FUNCTION_SHIFT (4) -#define GPIO_FUNCTION_MASK (0xf) - -/* Bit definitions for the OUT_CTRL_1 register */ -#define OUT_SYNC_DISABLE BIT(7) -#define SQUELCH_VALUE BIT(6) -#define SQUELCH_DISABLE BIT(5) -#define PAD_VDDO_SHIFT (2) -#define PAD_VDDO_MASK (0x7) -#define PAD_CMOSDRV_SHIFT (0) -#define PAD_CMOSDRV_MASK (0x3) - -/* Bit definitions for the TOD_CFG register */ -#define TOD_EVEN_PPS_MODE BIT(2) -#define TOD_OUT_SYNC_ENABLE BIT(1) -#define TOD_ENABLE BIT(0) - -/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */ -#define WR_PWM_DECODER_INDEX_SHIFT (4) -#define WR_PWM_DECODER_INDEX_MASK (0xf) -#define WR_REF_INDEX_SHIFT (0) -#define WR_REF_INDEX_MASK (0xf) - -/* Bit definitions for the TOD_WRITE_CMD register */ -#define TOD_WRITE_SELECTION_SHIFT (0) -#define TOD_WRITE_SELECTION_MASK (0xf) -/* 4.8.7 */ -#define TOD_WRITE_TYPE_SHIFT (4) -#define TOD_WRITE_TYPE_MASK (0x3) - -/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */ -#define RD_PWM_DECODER_INDEX_SHIFT (4) -#define RD_PWM_DECODER_INDEX_MASK (0xf) -#define RD_REF_INDEX_SHIFT (0) -#define RD_REF_INDEX_MASK (0xf) - -/* Bit definitions for the TOD_READ_PRIMARY_CMD register */ -#define TOD_READ_TRIGGER_MODE BIT(4) -#define TOD_READ_TRIGGER_SHIFT (0) -#define TOD_READ_TRIGGER_MASK (0xf) - -/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */ -#define COMBO_MASTER_HOLD BIT(0) - -/* Bit definitions for DPLL_SYS_STATUS register */ -#define DPLL_SYS_STATE_MASK (0xf) - -/* Bit definitions for SYS_APLL_STATUS register */ -#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0) -#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0 -#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1 - -#endif diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index f9b2d66b04433e..0e4bc8b9329dd2 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -284,11 +284,11 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, /* Create 
a posix clock and link it to the device. */ err = posix_clock_register(&ptp->clock, &ptp->dev); if (err) { - if (ptp->pps_source) - pps_unregister_source(ptp->pps_source); + if (ptp->pps_source) + pps_unregister_source(ptp->pps_source); if (ptp->kworker) - kthread_destroy_worker(ptp->kworker); + kthread_destroy_worker(ptp->kworker); put_device(&ptp->dev); diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index fa636951169e58..6bc5791a7ec5bb 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -6,7 +6,7 @@ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company. */ #include -#include +#include #include #include #include @@ -14,6 +14,10 @@ #include #include #include +#include +#include +#include +#include #include "ptp_private.h" #include "ptp_clockmatrix.h" @@ -32,16 +36,43 @@ static char *firmware; module_param(firmware, charp, 0); #define SETTIME_CORRECTION (0) +#define EXTTS_PERIOD_MS (95) -static int contains_full_configuration(const struct firmware *fw) +static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm); + +static inline int idtcm_read(struct idtcm *idtcm, + u16 module, + u16 regaddr, + u8 *buf, + u16 count) +{ + return regmap_bulk_read(idtcm->regmap, module + regaddr, buf, count); +} + +static inline int idtcm_write(struct idtcm *idtcm, + u16 module, + u16 regaddr, + u8 *buf, + u16 count) +{ + return regmap_bulk_write(idtcm->regmap, module + regaddr, buf, count); +} + +static int contains_full_configuration(struct idtcm *idtcm, + const struct firmware *fw) { - s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES; struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data; + u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH); + s32 full_count; s32 count = 0; u16 regaddr; u8 loaddr; s32 len; + /* 4 bytes skipped every 0x80 */ + full_count = (scratch - GPIO_USER_CONTROL) - + ((scratch >> 7) - (GPIO_USER_CONTROL >> 7)) * 4; + /* If the firmware contains 'full configuration' SM_RESET can be used * to ensure proper configuration. 
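/*
 * Illustrative sketch, not part of the patch: the page-skip arithmetic used
 * by contains_full_configuration() above, as a stand-alone program so the
 * numbers can be checked.  The register addresses are the pre-5.2.0 values
 * from the deleted idt8a340_reg.h (GPIO_USER_CONTROL 0xc160, SCRATCH 0xcf50).
 */
#include <stdio.h>

int main(void)
{
	unsigned int gpio_user_control = 0xc160;
	unsigned int scratch = 0xcf50;
	int full_count;

	/* Writable span, minus the 4 reserved bytes at the end of every
	 * 128-byte page crossed between the two addresses.
	 */
	full_count = (scratch - gpio_user_control) -
		     ((scratch >> 7) - (gpio_user_control >> 7)) * 4;

	printf("full configuration byte count: %d\n", full_count); /* 3456 */
	return 0;
}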
* @@ -57,7 +88,7 @@ static int contains_full_configuration(const struct firmware *fw) rec++; /* Top (status registers) and bottom are read-only */ - if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH) + if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch) continue; /* Page size 128, last 4 bytes of page skipped */ @@ -152,132 +183,17 @@ static int idtcm_strverscmp(const char *version1, const char *version2) return 0; } -static int idtcm_xfer_read(struct idtcm *idtcm, - u8 regaddr, - u8 *buf, - u16 count) -{ - struct i2c_client *client = idtcm->client; - struct i2c_msg msg[2]; - int cnt; - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 1; - msg[0].buf = ®addr; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].len = count; - msg[1].buf = buf; - - cnt = i2c_transfer(client->adapter, msg, 2); - - if (cnt < 0) { - dev_err(&client->dev, - "i2c_transfer failed at %d in %s, at addr: %04x!", - __LINE__, __func__, regaddr); - return cnt; - } else if (cnt != 2) { - dev_err(&client->dev, - "i2c_transfer sent only %d of %d messages", cnt, 2); - return -EIO; - } - - return 0; -} - -static int idtcm_xfer_write(struct idtcm *idtcm, - u8 regaddr, - u8 *buf, - u16 count) -{ - struct i2c_client *client = idtcm->client; - /* we add 1 byte for device register */ - u8 msg[IDTCM_MAX_WRITE_COUNT + 1]; - int cnt; - - if (count > IDTCM_MAX_WRITE_COUNT) - return -EINVAL; - - msg[0] = regaddr; - memcpy(&msg[1], buf, count); - - cnt = i2c_master_send(client, msg, count + 1); - - if (cnt < 0) { - dev_err(&client->dev, - "i2c_master_send failed at %d in %s, at addr: %04x!", - __LINE__, __func__, regaddr); - return cnt; - } - - return 0; -} - -static int idtcm_page_offset(struct idtcm *idtcm, u8 val) -{ - u8 buf[4]; - int err; - - if (idtcm->page_offset == val) - return 0; - - buf[0] = 0x0; - buf[1] = val; - buf[2] = 0x10; - buf[3] = 0x20; - - err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf)); - if (err) { - idtcm->page_offset = 0xff; - dev_err(&idtcm->client->dev, "failed to set page offset"); - } else { - idtcm->page_offset = val; - } - - return err; -} - -static int _idtcm_rdwr(struct idtcm *idtcm, - u16 regaddr, - u8 *buf, - u16 count, - bool write) +static enum fw_version idtcm_fw_version(const char *version) { - u8 hi; - u8 lo; - int err; - - hi = (regaddr >> 8) & 0xff; - lo = regaddr & 0xff; + enum fw_version ver = V_DEFAULT; - err = idtcm_page_offset(idtcm, hi); - if (err) - return err; - - if (write) - return idtcm_xfer_write(idtcm, lo, buf, count); - - return idtcm_xfer_read(idtcm, lo, buf, count); -} + if (idtcm_strverscmp(version, "4.8.7") >= 0) + ver = V487; -static int idtcm_read(struct idtcm *idtcm, - u16 module, - u16 regaddr, - u8 *buf, - u16 count) -{ - return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false); -} + if (idtcm_strverscmp(version, "5.2.0") >= 0) + ver = V520; -static int idtcm_write(struct idtcm *idtcm, - u16 module, - u16 regaddr, - u8 *buf, - u16 count) -{ - return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true); + return ver; } static int clear_boot_status(struct idtcm *idtcm) @@ -318,11 +234,82 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm) } while (i); - dev_warn(&idtcm->client->dev, "%s timed out", __func__); + dev_warn(idtcm->dev, "%s timed out", __func__); return -EBUSY; } +static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel, + enum scsr_read_trig_sel trig, u8 ref) +{ + struct idtcm *idtcm = channel->idtcm; + u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD); + u8 
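/*
 * Illustrative sketch, not part of the patch: the firmware-version bucketing
 * done by idtcm_fw_version() above, with a simplified numeric compare
 * standing in for idtcm_strverscmp().  The ordering V_DEFAULT < V487 < V520
 * is the one implied by the "fw_ver < V487" checks later in the driver.
 */
#include <stdio.h>

enum fw_version { V_DEFAULT = 0, V487, V520 };

static enum fw_version fw_version(int major, int minor, int hotfix)
{
	long v = major * 10000L + minor * 100L + hotfix;
	enum fw_version ver = V_DEFAULT;

	if (v >= 40807)			/* "4.8.7" and newer */
		ver = V487;
	if (v >= 50200)			/* "5.2.0" and newer */
		ver = V520;

	return ver;
}

int main(void)
{
	printf("4.8.5 -> %d, 4.8.7 -> %d, 5.2.0 -> %d\n",
	       fw_version(4, 8, 5), fw_version(4, 8, 7), fw_version(5, 2, 0));
	return 0;
}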
val; + int err; + + if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) { + err = idtcm_read(idtcm, channel->tod_read_primary, + TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val)); + if (err) + return err; + + val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT); + val |= (ref << WR_REF_INDEX_SHIFT); + + err = idtcm_write(idtcm, channel->tod_read_primary, + TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val)); + if (err) + return err; + } + + err = idtcm_read(idtcm, channel->tod_read_primary, + tod_read_cmd, &val, sizeof(val)); + if (err) + return err; + + val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT); + val |= (trig << TOD_READ_TRIGGER_SHIFT); + val &= ~TOD_READ_TRIGGER_MODE; /* single shot */ + + err = idtcm_write(idtcm, channel->tod_read_primary, + tod_read_cmd, &val, sizeof(val)); + return err; +} + +static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref, + bool enable) +{ + struct idtcm *idtcm = channel->idtcm; + u8 old_mask = idtcm->extts_mask; + u8 mask = 1 << todn; + int err = 0; + + if (todn >= MAX_TOD) + return -EINVAL; + + if (enable) { + if (ref > 0xF) /* E_REF_CLK15 */ + return -EINVAL; + if (idtcm->extts_mask & mask) + return 0; + err = _idtcm_set_scsr_read_trig(&idtcm->channel[todn], + SCSR_TOD_READ_TRIG_SEL_REFCLK, + ref); + if (err == 0) { + idtcm->extts_mask |= mask; + idtcm->event_channel[todn] = channel; + idtcm->channel[todn].refn = ref; + } + } else + idtcm->extts_mask &= ~mask; + + if (old_mask == 0 && idtcm->extts_mask) + schedule_delayed_work(&idtcm->extts_work, + msecs_to_jiffies(EXTTS_PERIOD_MS)); + + return err; +} + static int read_sys_apll_status(struct idtcm *idtcm, u8 *status) { return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status, @@ -359,7 +346,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm) } else if (dpll == DPLL_STATE_FREERUN || dpll == DPLL_STATE_HOLDOVER || dpll == DPLL_STATE_OPEN_LOOP) { - dev_warn(&idtcm->client->dev, + dev_warn(idtcm->dev, "No wait state: DPLL_SYS_STATE %d", dpll); return -EPERM; } @@ -367,7 +354,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm) msleep(LOCK_POLL_INTERVAL_MS); } while (time_is_after_jiffies(timeout)); - dev_warn(&idtcm->client->dev, + dev_warn(idtcm->dev, "%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d", LOCK_TIMEOUT_MS, apll, dpll); @@ -377,50 +364,36 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm) static void wait_for_chip_ready(struct idtcm *idtcm) { if (wait_for_boot_status_ready(idtcm)) - dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0"); + dev_warn(idtcm->dev, "BOOT_STATUS != 0xA0"); if (wait_for_sys_apll_dpll_lock(idtcm)) - dev_warn(&idtcm->client->dev, + dev_warn(idtcm->dev, "Continuing while SYS APLL/DPLL is not locked"); } static int _idtcm_gettime(struct idtcm_channel *channel, - struct timespec64 *ts) + struct timespec64 *ts, u8 timeout) { struct idtcm *idtcm = channel->idtcm; + u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD); u8 buf[TOD_BYTE_COUNT]; - u8 timeout = 10; u8 trigger; int err; - err = idtcm_read(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger)); - if (err) - return err; - - trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT); - trigger |= (1 << TOD_READ_TRIGGER_SHIFT); - trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */ - - err = idtcm_write(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger)); - if (err) - return err; - /* wait trigger to be 0 */ - while (trigger & TOD_READ_TRIGGER_MASK) { + do { + if 
(timeout-- == 0) + return -EIO; + if (idtcm->calculate_overhead_flag) idtcm->start_time = ktime_get_raw(); err = idtcm_read(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY_CMD, &trigger, + tod_read_cmd, &trigger, sizeof(trigger)); if (err) return err; - - if (--timeout == 0) - return -EIO; - } + } while (trigger & TOD_READ_TRIGGER_MASK); err = idtcm_read(idtcm, channel->tod_read_primary, TOD_READ_PRIMARY, buf, sizeof(buf)); @@ -432,6 +405,79 @@ static int _idtcm_gettime(struct idtcm_channel *channel, return err; } +static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn) +{ + struct idtcm_channel *ptp_channel, *extts_channel; + struct ptp_clock_event event; + struct timespec64 ts; + u32 dco_delay = 0; + int err; + + extts_channel = &idtcm->channel[todn]; + ptp_channel = idtcm->event_channel[todn]; + if (extts_channel == ptp_channel) + dco_delay = ptp_channel->dco_delay; + + err = _idtcm_gettime(extts_channel, &ts, 1); + if (err == 0) { + event.type = PTP_CLOCK_EXTTS; + event.index = todn; + event.timestamp = timespec64_to_ns(&ts) - dco_delay; + ptp_clock_event(ptp_channel->ptp_clock, &event); + } + return err; +} + +static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel, + u8 extts_mask, bool enable) +{ + struct idtcm *idtcm = channel->idtcm; + int i, err; + + for (i = 0; i < MAX_TOD; i++) { + u8 mask = 1 << i; + u8 refn = idtcm->channel[i].refn; + + if (extts_mask & mask) { + /* check extts before disabling it */ + if (enable == false) { + err = idtcm_extts_check_channel(idtcm, i); + /* trigger happened so we won't re-enable it */ + if (err == 0) + extts_mask &= ~mask; + } + (void)idtcm_enable_extts(channel, i, refn, enable); + } + } + + return extts_mask; +} + +static int _idtcm_gettime_immediate(struct idtcm_channel *channel, + struct timespec64 *ts) +{ + struct idtcm *idtcm = channel->idtcm; + u8 extts_mask = 0; + int err; + + /* Disable extts */ + if (idtcm->extts_mask) { + extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask, + false); + } + + err = _idtcm_set_scsr_read_trig(channel, + SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0); + if (err == 0) + err = _idtcm_gettime(channel, ts, 10); + + /* Re-enable extts */ + if (extts_mask) + idtcm_enable_extts_mask(channel, extts_mask, true); + + return err; +} + static int _sync_pll_output(struct idtcm *idtcm, u8 pll, u8 sync_src, @@ -559,35 +605,10 @@ static int _sync_pll_output(struct idtcm *idtcm, return err; } -static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src) -{ - int err = 0; - - switch (tod_addr) { - case TOD_0: - *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS; - break; - case TOD_1: - *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS; - break; - case TOD_2: - *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS; - break; - case TOD_3: - *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS; - break; - default: - err = -EINVAL; - } - - return err; -} - static int idtcm_sync_pps_output(struct idtcm_channel *channel) { struct idtcm *idtcm = channel->idtcm; u8 pll; - u8 sync_src; u8 qn; u8 qn_plus_1; int err = 0; @@ -596,10 +617,6 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel) u8 temp; u16 output_mask = channel->output_mask; - err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src); - if (err) - return err; - err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp)); if (err) @@ -655,8 +672,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel) } if (qn != 0 || qn_plus_1 != 0) - err = _sync_pll_output(idtcm, pll, sync_src, qn, - qn_plus_1); + err = _sync_pll_output(idtcm, pll, channel->sync_src, + qn, 
qn_plus_1); if (err) return err; @@ -666,8 +683,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel) } static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel, - struct timespec64 const *ts, - enum hw_tod_write_trig_sel wr_trig) + struct timespec64 const *ts, + enum hw_tod_write_trig_sel wr_trig) { struct idtcm *idtcm = channel->idtcm; u8 buf[TOD_BYTE_COUNT]; @@ -784,7 +801,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel, break; if (++count > 20) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "Timed out waiting for the write counter"); return -EIO; } @@ -793,46 +810,46 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel, return 0; } -static int get_output_base_addr(u8 outn) +static int get_output_base_addr(enum fw_version ver, u8 outn) { int base; switch (outn) { case 0: - base = OUTPUT_0; + base = IDTCM_FW_REG(ver, V520, OUTPUT_0); break; case 1: - base = OUTPUT_1; + base = IDTCM_FW_REG(ver, V520, OUTPUT_1); break; case 2: - base = OUTPUT_2; + base = IDTCM_FW_REG(ver, V520, OUTPUT_2); break; case 3: - base = OUTPUT_3; + base = IDTCM_FW_REG(ver, V520, OUTPUT_3); break; case 4: - base = OUTPUT_4; + base = IDTCM_FW_REG(ver, V520, OUTPUT_4); break; case 5: - base = OUTPUT_5; + base = IDTCM_FW_REG(ver, V520, OUTPUT_5); break; case 6: - base = OUTPUT_6; + base = IDTCM_FW_REG(ver, V520, OUTPUT_6); break; case 7: - base = OUTPUT_7; + base = IDTCM_FW_REG(ver, V520, OUTPUT_7); break; case 8: - base = OUTPUT_8; + base = IDTCM_FW_REG(ver, V520, OUTPUT_8); break; case 9: - base = OUTPUT_9; + base = IDTCM_FW_REG(ver, V520, OUTPUT_9); break; case 10: - base = OUTPUT_10; + base = IDTCM_FW_REG(ver, V520, OUTPUT_10); break; case 11: - base = OUTPUT_11; + base = IDTCM_FW_REG(ver, V520, OUTPUT_11); break; default: base = -EINVAL; @@ -849,7 +866,7 @@ static int _idtcm_settime_deprecated(struct idtcm_channel *channel, err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB); if (err) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "%s: Set HW ToD failed", __func__); return err; } @@ -929,9 +946,9 @@ static int idtcm_start_phase_pull_in(struct idtcm_channel *channel) return err; } -static int idtcm_do_phase_pull_in(struct idtcm_channel *channel, - s32 offset_ns, - u32 max_ffo_ppb) +static int do_phase_pull_in_fw(struct idtcm_channel *channel, + s32 offset_ns, + u32 max_ffo_ppb) { int err; @@ -1000,7 +1017,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta) s64 now; if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) { - err = idtcm_do_phase_pull_in(channel, delta, 0); + err = channel->do_phase_pull_in(channel, delta, 0); } else { idtcm->calculate_overhead_flag = 1; @@ -1008,7 +1025,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta) if (err) return err; - err = _idtcm_gettime(channel, &ts); + err = _idtcm_gettime_immediate(channel, &ts); if (err) return err; @@ -1032,7 +1049,9 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm) clear_boot_status(idtcm); - err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte)); + err = idtcm_write(idtcm, RESET_CTRL, + IDTCM_FW_REG(idtcm->fw_ver, V520, SM_RESET), + &byte, sizeof(byte)); if (!err) { for (i = 0; i < 30; i++) { @@ -1040,14 +1059,14 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm) read_boot_status(idtcm, &status); if (status == 0xA0) { - dev_dbg(&idtcm->client->dev, + dev_dbg(idtcm->dev, "SM_RESET completed in %d ms", i * 100); break; } } if (!status) - 
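/*
 * Illustrative sketch, not part of the patch: get_output_base_addr() and the
 * other register look-ups above now go through IDTCM_FW_REG(fw_ver, V520, REG).
 * The macro lives in ptp_clockmatrix.h and is not visible in this hunk; it
 * presumably selects between the legacy offset and a _V520 offset roughly as
 * below.  The OUTPUT_0_V520 value here is a made-up placeholder, not a real
 * register address.
 */
#include <stdio.h>

enum fw_version { V_DEFAULT = 0, V487, V520 };

#define IDTCM_FW_REG_SKETCH(fw, ver, reg) \
	(((fw) < (ver)) ? (reg) : (reg##_##ver))

#define OUTPUT_0	0xca14	/* pre-5.2.0 value from the deleted header */
#define OUTPUT_0_V520	0xca20	/* placeholder, for illustration only */

int main(void)
{
	printf("pre-5.2.0 base: 0x%x, 5.2.0+ base: 0x%x\n",
	       IDTCM_FW_REG_SKETCH(V487, V520, OUTPUT_0),
	       IDTCM_FW_REG_SKETCH(V520, V520, OUTPUT_0));
	return 0;
}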
dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "Timed out waiting for CM_RESET to complete"); } @@ -1144,12 +1163,12 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val) static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll) { if (index >= MAX_TOD) { - dev_err(&idtcm->client->dev, "ToD%d not supported", index); + dev_err(idtcm->dev, "ToD%d not supported", index); return -EINVAL; } if (pll >= MAX_PLL) { - dev_err(&idtcm->client->dev, "Pll%d not supported", pll); + dev_err(idtcm->dev, "Pll%d not supported", pll); return -EINVAL; } @@ -1167,7 +1186,7 @@ static int check_and_set_masks(struct idtcm *idtcm, switch (regaddr) { case TOD_MASK_ADDR: if ((val & 0xf0) || !(val & 0x0f)) { - dev_err(&idtcm->client->dev, "Invalid TOD mask 0x%02x", val); + dev_err(idtcm->dev, "Invalid TOD mask 0x%02x", val); err = -EINVAL; } else { idtcm->tod_mask = val; @@ -1198,13 +1217,13 @@ static void display_pll_and_masks(struct idtcm *idtcm) u8 i; u8 mask; - dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x", idtcm->tod_mask); + dev_dbg(idtcm->dev, "tod_mask = 0x%02x", idtcm->tod_mask); for (i = 0; i < MAX_TOD; i++) { mask = 1 << i; if (mask & idtcm->tod_mask) - dev_dbg(&idtcm->client->dev, + dev_dbg(idtcm->dev, "TOD%d pll = %d output_mask = 0x%04x", i, idtcm->channel[i].pll, idtcm->channel[i].output_mask); @@ -1214,6 +1233,7 @@ static void display_pll_and_masks(struct idtcm *idtcm) static int idtcm_load_firmware(struct idtcm *idtcm, struct device *dev) { + u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH); char fname[128] = FW_FILENAME; const struct firmware *fw; struct idtcm_fwrc *rec; @@ -1226,25 +1246,25 @@ static int idtcm_load_firmware(struct idtcm *idtcm, if (firmware) /* module parameter */ snprintf(fname, sizeof(fname), "%s", firmware); - dev_dbg(&idtcm->client->dev, "requesting firmware '%s'", fname); + dev_info(idtcm->dev, "requesting firmware '%s'", fname); err = request_firmware(&fw, fname, dev); if (err) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "Failed at line %d in %s!", __LINE__, __func__); return err; } - dev_dbg(&idtcm->client->dev, "firmware size %zu bytes", fw->size); + dev_dbg(idtcm->dev, "firmware size %zu bytes", fw->size); rec = (struct idtcm_fwrc *) fw->data; - if (contains_full_configuration(fw)) + if (contains_full_configuration(idtcm, fw)) idtcm_state_machine_reset(idtcm); for (len = fw->size; len > 0; len -= sizeof(*rec)) { if (rec->reserved) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "bad firmware, reserved field non-zero"); err = -EINVAL; } else { @@ -1263,7 +1283,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm, err = 0; /* Top (status registers) and bottom are read-only */ - if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH) + if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch) continue; /* Page size 128, last 4 bytes of page skipped */ @@ -1292,10 +1312,10 @@ static int idtcm_output_enable(struct idtcm_channel *channel, int err; u8 val; - base = get_output_base_addr(outn); + base = get_output_base_addr(idtcm->fw_ver, outn); if (!(base > 0)) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "%s - Unsupported out%d", __func__, outn); return base; } @@ -1337,8 +1357,8 @@ static int idtcm_output_mask_enable(struct idtcm_channel *channel, } static int idtcm_perout_enable(struct idtcm_channel *channel, - bool enable, - struct ptp_perout_request *perout) + struct ptp_perout_request *perout, + bool enable) { struct idtcm *idtcm = channel->idtcm; unsigned int flags = perout->flags; @@ -1351,7 +1371,7 @@ 
static int idtcm_perout_enable(struct idtcm_channel *channel, err = idtcm_output_enable(channel, enable, perout->index); if (err) { - dev_err(&idtcm->client->dev, "Unable to set output enable"); + dev_err(idtcm->dev, "Unable to set output enable"); return err; } @@ -1360,534 +1380,600 @@ static int idtcm_perout_enable(struct idtcm_channel *channel, } static int idtcm_get_pll_mode(struct idtcm_channel *channel, - enum pll_mode *pll_mode) + enum pll_mode *mode) { struct idtcm *idtcm = channel->idtcm; int err; u8 dpll_mode; - err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE, + err = idtcm_read(idtcm, channel->dpll_n, + IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE), &dpll_mode, sizeof(dpll_mode)); if (err) return err; - *pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK; + *mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK; return 0; } static int idtcm_set_pll_mode(struct idtcm_channel *channel, - enum pll_mode pll_mode) + enum pll_mode mode) { struct idtcm *idtcm = channel->idtcm; int err; u8 dpll_mode; - err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE, + err = idtcm_read(idtcm, channel->dpll_n, + IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE), &dpll_mode, sizeof(dpll_mode)); if (err) return err; dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT); - dpll_mode |= (pll_mode << PLL_MODE_SHIFT); - - channel->pll_mode = pll_mode; + dpll_mode |= (mode << PLL_MODE_SHIFT); - err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE, + err = idtcm_write(idtcm, channel->dpll_n, + IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE), &dpll_mode, sizeof(dpll_mode)); - if (err) - return err; - - return 0; + return err; } -/* PTP Hardware Clock interface */ - -/* - * Maximum absolute value for write phase offset in picoseconds - * - * Destination signed register is 32-bit register in resolution of 50ps - * - * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 - */ -static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns) +static int idtcm_get_manual_reference(struct idtcm_channel *channel, + enum manual_reference *ref) { struct idtcm *idtcm = channel->idtcm; + u8 dpll_manu_ref_cfg; int err; - u8 i; - u8 buf[4] = {0}; - s32 phase_50ps; - s64 offset_ps; - - if (channel->pll_mode != PLL_MODE_WRITE_PHASE) { - err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE); - if (err) - return err; - } - - offset_ps = (s64)delta_ns * 1000; - - /* - * Check for 32-bit signed max * 50: - * - * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 - */ - if (offset_ps > MAX_ABS_WRITE_PHASE_PICOSECONDS) - offset_ps = MAX_ABS_WRITE_PHASE_PICOSECONDS; - else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS) - offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS; - phase_50ps = div_s64(offset_ps, 50); + err = idtcm_read(idtcm, channel->dpll_ctrl_n, + DPLL_CTRL_DPLL_MANU_REF_CFG, + &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg)); + if (err) + return err; - for (i = 0; i < 4; i++) { - buf[i] = phase_50ps & 0xff; - phase_50ps >>= 8; - } + dpll_manu_ref_cfg &= (MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT); - err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE, - buf, sizeof(buf)); + *ref = dpll_manu_ref_cfg >> MANUAL_REFERENCE_SHIFT; - return err; + return 0; } -static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm) +static int idtcm_set_manual_reference(struct idtcm_channel *channel, + enum manual_reference ref) { struct idtcm *idtcm = channel->idtcm; - u8 i; + u8 dpll_manu_ref_cfg; int err; - u8 buf[6] = {0}; - s64 fcw; - - if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) { - err = 
idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY); - if (err) - return err; - } - - /* - * Frequency Control Word unit is: 1.11 * 10^-10 ppm - * - * adjfreq: - * ppb * 10^9 - * FCW = ---------- - * 111 - * - * adjfine: - * ppm_16 * 5^12 - * FCW = ------------- - * 111 * 2^4 - */ - /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */ - fcw = scaled_ppm * 244140625ULL; + err = idtcm_read(idtcm, channel->dpll_ctrl_n, + DPLL_CTRL_DPLL_MANU_REF_CFG, + &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg)); + if (err) + return err; - fcw = div_s64(fcw, 1776); + dpll_manu_ref_cfg &= ~(MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT); - for (i = 0; i < 6; i++) { - buf[i] = fcw & 0xff; - fcw >>= 8; - } + dpll_manu_ref_cfg |= (ref << MANUAL_REFERENCE_SHIFT); - err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ, - buf, sizeof(buf)); + err = idtcm_write(idtcm, channel->dpll_ctrl_n, + DPLL_CTRL_DPLL_MANU_REF_CFG, + &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg)); return err; } -static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; int err; - mutex_lock(&idtcm->reg_lock); + err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY); - err = _idtcm_gettime(channel, ts); if (err) - dev_err(&idtcm->client->dev, "Failed at line %d in %s!", - __LINE__, __func__); - - mutex_unlock(&idtcm->reg_lock); + dev_err(idtcm->dev, "Failed to set pll mode to write frequency"); + else + channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY; return err; } -static int idtcm_settime_deprecated(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +static int configure_dpll_mode_write_phase(struct idtcm_channel *channel) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; int err; - mutex_lock(&idtcm->reg_lock); + err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE); - err = _idtcm_settime_deprecated(channel, ts); if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - - mutex_unlock(&idtcm->reg_lock); + dev_err(idtcm->dev, "Failed to set pll mode to write phase"); + else + channel->mode = PTP_PLL_MODE_WRITE_PHASE; return err; } -static int idtcm_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +static int configure_manual_reference_write_frequency(struct idtcm_channel *channel) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; int err; - mutex_lock(&idtcm->reg_lock); + err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY); - err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - - mutex_unlock(&idtcm->reg_lock); + dev_err(idtcm->dev, "Failed to set manual reference to write frequency"); + else + channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY; return err; } -static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta) +static int configure_manual_reference_write_phase(struct idtcm_channel *channel) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; int err; - mutex_lock(&idtcm->reg_lock); + err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE); - err = _idtcm_adjtime_deprecated(channel, delta); if (err) - 
dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - - mutex_unlock(&idtcm->reg_lock); + dev_err(idtcm->dev, "Failed to set manual reference to write phase"); + else + channel->mode = PTP_PLL_MODE_WRITE_PHASE; return err; } -static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) +static int idtcm_stop_phase_pull_in(struct idtcm_channel *channel) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); - struct idtcm *idtcm = channel->idtcm; - struct timespec64 ts; - enum scsr_tod_write_type_sel type; int err; - if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) { - err = idtcm_do_phase_pull_in(channel, delta, 0); - if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - return err; - } - - if (delta >= 0) { - ts = ns_to_timespec64(delta); - type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS; - } else { - ts = ns_to_timespec64(-delta); - type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS; - } - - mutex_lock(&idtcm->reg_lock); - - err = _idtcm_settime(channel, &ts, type); + err = _idtcm_adjfine(channel, channel->current_freq_scaled_ppm); if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); + return err; - mutex_unlock(&idtcm->reg_lock); + channel->phase_pull_in = false; - return err; + return 0; } -static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta) +static long idtcm_work_handler(struct ptp_clock_info *ptp) { struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; - int err; - mutex_lock(&idtcm->reg_lock); + mutex_lock(idtcm->lock); - err = _idtcm_adjphase(channel, delta); - if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); + (void)idtcm_stop_phase_pull_in(channel); - mutex_unlock(&idtcm->reg_lock); + mutex_unlock(idtcm->lock); - return err; + /* Return a negative value here to not reschedule */ + return -1; } -static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb) { - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); - struct idtcm *idtcm = channel->idtcm; - int err; + /* ppb = scaled_ppm * 125 / 2^13 */ + /* scaled_ppm = ppb * 2^13 / 125 */ - mutex_lock(&idtcm->reg_lock); + s64 max_scaled_ppm = div_s64((s64)PHASE_PULL_IN_MAX_PPB << 13, 125); + s64 scaled_ppm = div_s64((s64)phase_pull_in_ppb << 13, 125); - err = _idtcm_adjfine(channel, scaled_ppm); - if (err) - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); + current_ppm += scaled_ppm; - mutex_unlock(&idtcm->reg_lock); + if (current_ppm > max_scaled_ppm) + current_ppm = max_scaled_ppm; + else if (current_ppm < -max_scaled_ppm) + current_ppm = -max_scaled_ppm; - return err; + return current_ppm; } -static int idtcm_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +static int do_phase_pull_in_sw(struct idtcm_channel *channel, + s32 delta_ns, + u32 max_ffo_ppb) { + s32 current_ppm = channel->current_freq_scaled_ppm; + u32 duration_ms = MSEC_PER_SEC; + s32 delta_ppm; + s32 ppb; int err; - struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); - switch (rq->type) { - case PTP_CLK_REQ_PEROUT: - if (!on) { - err = idtcm_perout_enable(channel, false, &rq->perout); - if (err) - dev_err(&channel->idtcm->client->dev, - "Failed at line %d in %s!", - __LINE__, __func__); - return err; - } + /* If the ToD correction is less than 
PHASE_PULL_IN_MIN_THRESHOLD_NS, + * skip. The error introduced by the ToD adjustment procedure would + * be bigger than the required ToD correction + */ + if (abs(delta_ns) < PHASE_PULL_IN_MIN_THRESHOLD_NS) + return 0; - /* Only accept a 1-PPS aligned to the second. */ - if (rq->perout.start.nsec || rq->perout.period.sec != 1 || - rq->perout.period.nsec) - return -ERANGE; + if (max_ffo_ppb == 0) + max_ffo_ppb = PHASE_PULL_IN_MAX_PPB; - err = idtcm_perout_enable(channel, true, &rq->perout); - if (err) - dev_err(&channel->idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - return err; - default: - break; + /* For most cases, keep phase pull-in duration 1 second */ + ppb = delta_ns; + while (abs(ppb) > max_ffo_ppb) { + duration_ms *= 2; + ppb /= 2; } - return -EOPNOTSUPP; + delta_ppm = phase_pull_in_scaled_ppm(current_ppm, ppb); + + err = _idtcm_adjfine(channel, delta_ppm); + + if (err) + return err; + + /* schedule the worker to cancel phase pull-in */ + ptp_schedule_worker(channel->ptp_clock, + msecs_to_jiffies(duration_ms) - 1); + + channel->phase_pull_in = true; + + return 0; } -static int _enable_pll_tod_sync(struct idtcm *idtcm, - u8 pll, - u8 sync_src, - u8 qn, - u8 qn_plus_1) +static int initialize_operating_mode_with_manual_reference(struct idtcm_channel *channel, + enum manual_reference ref) { - int err; - u8 val; - u16 dpll; - u16 out0 = 0, out1 = 0; + struct idtcm *idtcm = channel->idtcm; - if (qn == 0 && qn_plus_1 == 0) - return 0; + channel->mode = PTP_PLL_MODE_UNSUPPORTED; + channel->configure_write_frequency = configure_manual_reference_write_frequency; + channel->configure_write_phase = configure_manual_reference_write_phase; + channel->do_phase_pull_in = do_phase_pull_in_sw; - switch (pll) { - case 0: - dpll = DPLL_0; - if (qn) - out0 = OUTPUT_0; - if (qn_plus_1) - out1 = OUTPUT_1; - break; - case 1: - dpll = DPLL_1; - if (qn) - out0 = OUTPUT_2; - if (qn_plus_1) - out1 = OUTPUT_3; - break; - case 2: - dpll = DPLL_2; - if (qn) - out0 = OUTPUT_4; - if (qn_plus_1) - out1 = OUTPUT_5; - break; - case 3: - dpll = DPLL_3; - if (qn) - out0 = OUTPUT_6; - if (qn_plus_1) - out1 = OUTPUT_7; - break; - case 4: - dpll = DPLL_4; - if (qn) - out0 = OUTPUT_8; + switch (ref) { + case MANU_REF_WRITE_PHASE: + channel->mode = PTP_PLL_MODE_WRITE_PHASE; break; - case 5: - dpll = DPLL_5; - if (qn) - out0 = OUTPUT_9; - if (qn_plus_1) - out1 = OUTPUT_8; + case MANU_REF_WRITE_FREQUENCY: + channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY; break; - case 6: - dpll = DPLL_6; - if (qn) - out0 = OUTPUT_10; - if (qn_plus_1) - out1 = OUTPUT_11; + default: + dev_warn(idtcm->dev, + "Unsupported MANUAL_REFERENCE: 0x%02x", ref); + } + + return 0; +} + +static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel, + enum pll_mode mode) +{ + struct idtcm *idtcm = channel->idtcm; + int err = 0; + + channel->mode = PTP_PLL_MODE_UNSUPPORTED; + channel->configure_write_frequency = configure_dpll_mode_write_frequency; + channel->configure_write_phase = configure_dpll_mode_write_phase; + channel->do_phase_pull_in = do_phase_pull_in_fw; + + switch (mode) { + case PLL_MODE_WRITE_PHASE: + channel->mode = PTP_PLL_MODE_WRITE_PHASE; break; - case 7: - dpll = DPLL_7; - if (qn) - out0 = OUTPUT_11; + case PLL_MODE_WRITE_FREQUENCY: + channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY; break; default: - return -EINVAL; + dev_err(idtcm->dev, + "Unsupported PLL_MODE: 0x%02x", mode); + err = -EINVAL; } - /* - * Enable OUTPUT OUT_SYNC. 
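/*
 * Illustrative sketch, not part of the patch: the rate/duration trade-off made
 * by do_phase_pull_in_sw() above.  PHASE_PULL_IN_MAX_PPB is defined in
 * ptp_clockmatrix.h and is not visible in this hunk, so the 100000 ppb cap
 * below is an assumed value used purely for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long max_ffo_ppb = 100000;	/* assumed cap, illustration only */
	long delta_ns = 1500000;	/* ToD error to pull in: 1.5 ms */
	unsigned int duration_ms = 1000;
	long ppb = delta_ns;		/* delta_ns over 1 s is delta_ns ppb */
	long scaled_ppm;

	/* Halve the frequency offset (and double the duration) until it fits */
	while (labs(ppb) > max_ffo_ppb) {
		duration_ms *= 2;
		ppb /= 2;
	}

	/* scaled_ppm = ppb * 2^13 / 125, per the driver comment */
	scaled_ppm = ppb * 8192 / 125;

	printf("pull in %ld ns: %ld ppb (%ld scaled_ppm) for %u ms\n",
	       delta_ns, ppb, scaled_ppm, duration_ms);
	return 0;
}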
- */ - if (out0) { - err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val)); - if (err) - return err; + return err; +} - val &= ~OUT_SYNC_DISABLE; +static int initialize_dco_operating_mode(struct idtcm_channel *channel) +{ + enum manual_reference ref = MANU_REF_XO_DPLL; + enum pll_mode mode = PLL_MODE_DISABLED; + struct idtcm *idtcm = channel->idtcm; + int err; - err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val)); - if (err) + channel->mode = PTP_PLL_MODE_UNSUPPORTED; + + err = idtcm_get_pll_mode(channel, &mode); + if (err) { + dev_err(idtcm->dev, "Unable to read pll mode!"); + return err; + } + + if (mode == PLL_MODE_PLL) { + err = idtcm_get_manual_reference(channel, &ref); + if (err) { + dev_err(idtcm->dev, "Unable to read manual reference!"); return err; + } + err = initialize_operating_mode_with_manual_reference(channel, ref); + } else { + err = initialize_operating_mode_with_pll_mode(channel, mode); } - if (out1) { - err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val)); + if (channel->mode == PTP_PLL_MODE_WRITE_PHASE) + channel->configure_write_frequency(channel); + + return err; +} + +/* PTP Hardware Clock interface */ + +/** + * Maximum absolute value for write phase offset in picoseconds + * + * @channel: channel + * @delta_ns: delta in nanoseconds + * + * Destination signed register is 32-bit register in resolution of 50ps + * + * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 + */ +static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns) +{ + struct idtcm *idtcm = channel->idtcm; + int err; + u8 i; + u8 buf[4] = {0}; + s32 phase_50ps; + s64 offset_ps; + + if (channel->mode != PTP_PLL_MODE_WRITE_PHASE) { + err = channel->configure_write_phase(channel); if (err) return err; + } - val &= ~OUT_SYNC_DISABLE; + offset_ps = (s64)delta_ns * 1000; - err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val)); + /* + * Check for 32-bit signed max * 50: + * + * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 + */ + if (offset_ps > MAX_ABS_WRITE_PHASE_PICOSECONDS) + offset_ps = MAX_ABS_WRITE_PHASE_PICOSECONDS; + else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS) + offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS; + + phase_50ps = div_s64(offset_ps, 50); + + for (i = 0; i < 4; i++) { + buf[i] = phase_50ps & 0xff; + phase_50ps >>= 8; + } + + err = idtcm_write(idtcm, channel->dpll_phase, DPLL_WR_PHASE, + buf, sizeof(buf)); + + return err; +} + +static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm) +{ + struct idtcm *idtcm = channel->idtcm; + u8 i; + int err; + u8 buf[6] = {0}; + s64 fcw; + + if (channel->mode != PTP_PLL_MODE_WRITE_FREQUENCY) { + err = channel->configure_write_frequency(channel); if (err) return err; } - /* enable dpll sync tod pps, must be set before dpll_mode */ - err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val)); + /* + * Frequency Control Word unit is: 1.11 * 10^-10 ppm + * + * adjfreq: + * ppb * 10^9 + * FCW = ---------- + * 111 + * + * adjfine: + * ppm_16 * 5^12 + * FCW = ------------- + * 111 * 2^4 + */ + + /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */ + fcw = scaled_ppm * 244140625ULL; + + fcw = div_s64(fcw, 1776); + + for (i = 0; i < 6; i++) { + buf[i] = fcw & 0xff; + fcw >>= 8; + } + + err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ, + buf, sizeof(buf)); + + return err; +} + +static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = 
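/*
 * Illustrative sketch, not part of the patch: the frequency control word
 * computation from _idtcm_adjfine() above, reproduced as a stand-alone
 * program.  Per the driver comment, one FCW LSB is 1.11e-10 ppm and the
 * adjfine input is ppm scaled by 2^16, hence FCW = scaled_ppm * 5^12 / 1776.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long scaled_ppm = 65536;	/* exactly +1 ppm */
	int64_t fcw = (int64_t)scaled_ppm * 244140625LL;	/* * 5^12 */
	uint8_t buf[6];
	int i;

	fcw /= 1776;			/* / (111 * 2^4) */

	/* Little-endian 48-bit register image, as written to DPLL_WR_FREQ */
	for (i = 0; i < 6; i++) {
		buf[i] = (uint8_t)(fcw & 0xff);
		fcw >>= 8;
	}

	printf("FCW bytes:");
	for (i = 0; i < 6; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}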
channel->idtcm; + int err; + + mutex_lock(idtcm->lock); + err = _idtcm_gettime_immediate(channel, ts); + mutex_unlock(idtcm->lock); + if (err) - return err; + dev_err(idtcm->dev, "Failed at line %d in %s!", + __LINE__, __func__); + + return err; +} + +static int idtcm_settime_deprecated(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err; - val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT); - val |= (sync_src << TOD_SYNC_SOURCE_SHIFT); - val |= TOD_SYNC_EN; + mutex_lock(idtcm->lock); + err = _idtcm_settime_deprecated(channel, ts); + mutex_unlock(idtcm->lock); + + if (err) + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); - return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val)); + return err; } -static int idtcm_enable_tod_sync(struct idtcm_channel *channel) +static int idtcm_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); struct idtcm *idtcm = channel->idtcm; - u8 pll; - u8 sync_src; - u8 qn; - u8 qn_plus_1; - u8 cfg; - int err = 0; - u16 output_mask = channel->output_mask; - u8 out8_mux = 0; - u8 out11_mux = 0; - u8 temp; + int err; + + mutex_lock(idtcm->lock); + err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); + mutex_unlock(idtcm->lock); - /* - * set tod_out_sync_enable to 0. - */ - err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); if (err) - return err; + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); - cfg &= ~TOD_OUT_SYNC_ENABLE; + return err; +} + +static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err; + + mutex_lock(idtcm->lock); + err = _idtcm_adjtime_deprecated(channel, delta); + mutex_unlock(idtcm->lock); - err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); if (err) - return err; + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); - switch (channel->tod_n) { - case TOD_0: - sync_src = 0; - break; - case TOD_1: - sync_src = 1; - break; - case TOD_2: - sync_src = 2; - break; - case TOD_3: - sync_src = 3; - break; - default: - return -EINVAL; + return err; +} + +static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + struct timespec64 ts; + enum scsr_tod_write_type_sel type; + int err; + + if (channel->phase_pull_in == true) + return 0; + + mutex_lock(idtcm->lock); + + if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) { + err = channel->do_phase_pull_in(channel, delta, 0); + } else { + if (delta >= 0) { + ts = ns_to_timespec64(delta); + type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS; + } else { + ts = ns_to_timespec64(-delta); + type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS; + } + err = _idtcm_settime(channel, &ts, type); } - err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp)); + mutex_unlock(idtcm->lock); + if (err) - return err; + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); - if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == - Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) - out8_mux = 1; + return err; +} + +static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta) +{ + struct 
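/*
 * Illustrative sketch, not part of the patch: the nanosecond to 50 ps
 * register conversion and clamping performed by _idtcm_adjphase() above.
 * The clamp restates the driver comment: the destination register is a
 * signed 32-bit value in 50 ps units, so |offset| is limited to
 * 0x7fffffff * 50 ps.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int64_t max_abs_ps = 2147483647LL * 50;	/* 107374182350 ps */
	int32_t delta_ns = -250;			/* requested phase step */
	int64_t offset_ps = (int64_t)delta_ns * 1000;
	int32_t phase_50ps;

	if (offset_ps > max_abs_ps)
		offset_ps = max_abs_ps;
	else if (offset_ps < -max_abs_ps)
		offset_ps = -max_abs_ps;

	phase_50ps = (int32_t)(offset_ps / 50);		/* register units */

	printf("%d ns -> %d units of 50 ps\n", delta_ns, phase_50ps);
	return 0;
}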
idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err; + + mutex_lock(idtcm->lock); + err = _idtcm_adjphase(channel, delta); + mutex_unlock(idtcm->lock); - err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, &temp, sizeof(temp)); if (err) - return err; + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); - if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == - Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) - out11_mux = 1; + return err; +} - for (pll = 0; pll < 8; pll++) { - qn = 0; - qn_plus_1 = 0; +static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err; - if (pll < 4) { - /* First 4 pll has 2 outputs */ - qn = output_mask & 0x1; - output_mask = output_mask >> 1; - qn_plus_1 = output_mask & 0x1; - output_mask = output_mask >> 1; - } else if (pll == 4) { - if (out8_mux == 0) { - qn = output_mask & 0x1; - output_mask = output_mask >> 1; - } - } else if (pll == 5) { - if (out8_mux) { - qn_plus_1 = output_mask & 0x1; - output_mask = output_mask >> 1; - } - qn = output_mask & 0x1; - output_mask = output_mask >> 1; - } else if (pll == 6) { - qn = output_mask & 0x1; - output_mask = output_mask >> 1; - if (out11_mux) { - qn_plus_1 = output_mask & 0x1; - output_mask = output_mask >> 1; - } - } else if (pll == 7) { - if (out11_mux == 0) { - qn = output_mask & 0x1; - output_mask = output_mask >> 1; - } - } + if (channel->phase_pull_in == true) + return 0; - if (qn != 0 || qn_plus_1 != 0) - err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn, - qn_plus_1); - if (err) - return err; + if (scaled_ppm == channel->current_freq_scaled_ppm) + return 0; + + mutex_lock(idtcm->lock); + err = _idtcm_adjfine(channel, scaled_ppm); + mutex_unlock(idtcm->lock); + + if (err) + dev_err(idtcm->dev, + "Failed at line %d in %s!", __LINE__, __func__); + else + channel->current_freq_scaled_ppm = scaled_ppm; + + return err; +} + +static int idtcm_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err = -EOPNOTSUPP; + + mutex_lock(idtcm->lock); + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + if (!on) + err = idtcm_perout_enable(channel, &rq->perout, false); + /* Only accept a 1-PPS aligned to the second. */ + else if (rq->perout.start.nsec || rq->perout.period.sec != 1 || + rq->perout.period.nsec) + err = -ERANGE; + else + err = idtcm_perout_enable(channel, &rq->perout, true); + break; + case PTP_CLK_REQ_EXTTS: + err = idtcm_enable_extts(channel, rq->extts.index, + rq->extts.rsv[0], on); + break; + default: + break; } + mutex_unlock(idtcm->lock); + + if (err) + dev_err(channel->idtcm->dev, + "Failed in %s with err %d!", __func__, err); + return err; } @@ -1895,23 +1981,31 @@ static int idtcm_enable_tod(struct idtcm_channel *channel) { struct idtcm *idtcm = channel->idtcm; struct timespec64 ts = {0, 0}; + u16 tod_cfg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_CFG); u8 cfg; int err; + /* STEELAI-366 - Temporary workaround for ts2phc compatibility */ + if (0) { + err = idtcm_output_mask_enable(channel, false); + if (err) + return err; + } + /* * Start the TOD clock ticking. 
*/ - err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); + err = idtcm_read(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg)); if (err) return err; cfg |= TOD_ENABLE; - err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); + err = idtcm_write(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg)); if (err) return err; - if (idtcm->deprecated) + if (idtcm->fw_ver < V487) return _idtcm_settime_deprecated(channel, &ts); else return _idtcm_settime(channel, &ts, @@ -1939,12 +2033,9 @@ static void idtcm_set_version_info(struct idtcm *idtcm) snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u", major, minor, hotfix); - if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) - idtcm->deprecated = 0; - else - idtcm->deprecated = 1; + idtcm->fw_ver = idtcm_fw_version(idtcm->version); - dev_info(&idtcm->client->dev, + dev_info(idtcm->dev, "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d", major, minor, hotfix, product_id, hw_rev_id, config_select); @@ -1954,28 +2045,33 @@ static const struct ptp_clock_info idtcm_caps = { .owner = THIS_MODULE, .max_adj = 244000, .n_per_out = 12, + .n_ext_ts = MAX_TOD, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, .adjtime = &idtcm_adjtime, .gettime64 = &idtcm_gettime, .settime64 = &idtcm_settime, .enable = &idtcm_enable, + .do_aux_work = &idtcm_work_handler, }; static const struct ptp_clock_info idtcm_caps_deprecated = { .owner = THIS_MODULE, .max_adj = 244000, .n_per_out = 12, + .n_ext_ts = MAX_TOD, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, .adjtime = &idtcm_adjtime_deprecated, .gettime64 = &idtcm_gettime, .settime64 = &idtcm_settime_deprecated, .enable = &idtcm_enable, + .do_aux_work = &idtcm_work_handler, }; static int configure_channel_pll(struct idtcm_channel *channel) { + struct idtcm *idtcm = channel->idtcm; int err = 0; switch (channel->pll) { @@ -1997,7 +2093,7 @@ static int configure_channel_pll(struct idtcm_channel *channel) break; case 2: channel->dpll_freq = DPLL_FREQ_2; - channel->dpll_n = DPLL_2; + channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_2); channel->hw_dpll_n = HW_DPLL_2; channel->dpll_phase = DPLL_PHASE_2; channel->dpll_ctrl_n = DPLL_CTRL_2; @@ -2013,7 +2109,7 @@ static int configure_channel_pll(struct idtcm_channel *channel) break; case 4: channel->dpll_freq = DPLL_FREQ_4; - channel->dpll_n = DPLL_4; + channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_4); channel->hw_dpll_n = HW_DPLL_4; channel->dpll_phase = DPLL_PHASE_4; channel->dpll_ctrl_n = DPLL_CTRL_4; @@ -2029,7 +2125,7 @@ static int configure_channel_pll(struct idtcm_channel *channel) break; case 6: channel->dpll_freq = DPLL_FREQ_6; - channel->dpll_n = DPLL_6; + channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_6); channel->hw_dpll_n = HW_DPLL_6; channel->dpll_phase = DPLL_PHASE_6; channel->dpll_ctrl_n = DPLL_CTRL_6; @@ -2050,50 +2146,104 @@ static int configure_channel_pll(struct idtcm_channel *channel) return err; } -static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) +/* + * Compensate for the PTP DCO input-to-output delay. + * This delay is 18 FOD cycles. 
+ */ +static u32 idtcm_get_dco_delay(struct idtcm_channel *channel) { - struct idtcm_channel *channel; + struct idtcm *idtcm = channel->idtcm; + u8 mbuf[8] = {0}; + u8 nbuf[2] = {0}; + u32 fodFreq; int err; + u64 m; + u16 n; - if (!(index < MAX_TOD)) - return -EINVAL; - - channel = &idtcm->channel[index]; + err = idtcm_read(idtcm, channel->dpll_ctrl_n, + DPLL_CTRL_DPLL_FOD_FREQ, mbuf, 6); + if (err) + return 0; - /* Set pll addresses */ - err = configure_channel_pll(channel); + err = idtcm_read(idtcm, channel->dpll_ctrl_n, + DPLL_CTRL_DPLL_FOD_FREQ + 6, nbuf, 2); if (err) - return err; + return 0; + + m = get_unaligned_le64(mbuf); + n = get_unaligned_le16(nbuf); + + if (n == 0) + n = 1; + + fodFreq = (u32)div_u64(m, n); + if (fodFreq >= 500000000) + return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq); + + return 0; +} + +static int configure_channel_tod(struct idtcm_channel *channel, u32 index) +{ + enum fw_version fw_ver = channel->idtcm->fw_ver; /* Set tod addresses */ switch (index) { case 0: - channel->tod_read_primary = TOD_READ_PRIMARY_0; - channel->tod_write = TOD_WRITE_0; - channel->tod_n = TOD_0; + channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0); + channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0); + channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0); + channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS; break; case 1: - channel->tod_read_primary = TOD_READ_PRIMARY_1; - channel->tod_write = TOD_WRITE_1; - channel->tod_n = TOD_1; + channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1); + channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1); + channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1); + channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS; break; case 2: - channel->tod_read_primary = TOD_READ_PRIMARY_2; - channel->tod_write = TOD_WRITE_2; - channel->tod_n = TOD_2; + channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2); + channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2); + channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2); + channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS; break; case 3: - channel->tod_read_primary = TOD_READ_PRIMARY_3; - channel->tod_write = TOD_WRITE_3; - channel->tod_n = TOD_3; + channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3); + channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3); + channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3); + channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS; break; default: return -EINVAL; } + return 0; +} + +static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) +{ + struct idtcm_channel *channel; + int err; + + if (!(index < MAX_TOD)) + return -EINVAL; + + channel = &idtcm->channel[index]; + channel->idtcm = idtcm; + channel->current_freq_scaled_ppm = 0; + + /* Set pll addresses */ + err = configure_channel_pll(channel); + if (err) + return err; + + /* Set tod addresses */ + err = configure_channel_tod(channel, index); + if (err) + return err; - if (idtcm->deprecated) + if (idtcm->fw_ver < V487) channel->caps = idtcm_caps_deprecated; else channel->caps = idtcm_caps; @@ -2101,30 +2251,19 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) snprintf(channel->caps.name, sizeof(channel->caps.name), "IDT CM TOD%u", index); - if (!idtcm->deprecated) { - err = idtcm_enable_tod_sync(channel); - if (err) { - dev_err(&idtcm->client->dev, - "Failed at line %d in %s!", __LINE__, __func__); - return err; - } - } - - /* Sync pll mode with hardware */ - err = idtcm_get_pll_mode(channel, 
&channel->pll_mode); - if (err) { - dev_err(&idtcm->client->dev, - "Error: %s - Unable to read pll mode", __func__); + err = initialize_dco_operating_mode(channel); + if (err) return err; - } err = idtcm_enable_tod(channel); if (err) { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "Failed at line %d in %s!", __LINE__, __func__); return err; } + channel->dco_delay = idtcm_get_dco_delay(channel); + channel->ptp_clock = ptp_clock_register(&channel->caps, NULL); if (IS_ERR(channel->ptp_clock)) { @@ -2136,12 +2275,59 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) if (!channel->ptp_clock) return -ENOTSUPP; - dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d", + dev_info(idtcm->dev, "PLL%d registered as ptp%d", index, channel->ptp_clock->index); return 0; } +static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index) +{ + struct idtcm_channel *channel; + int err; + + if (!(index < MAX_TOD)) + return -EINVAL; + + channel = &idtcm->channel[index]; + channel->idtcm = idtcm; + + /* Set tod addresses */ + err = configure_channel_tod(channel, index); + if (err) + return err; + + channel->idtcm = idtcm; + + return 0; +} + +static void idtcm_extts_check(struct work_struct *work) +{ + struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work); + int err, i; + + if (idtcm->extts_mask == 0) + return; + + mutex_lock(idtcm->lock); + for (i = 0; i < MAX_TOD; i++) { + u8 mask = 1 << i; + + if (idtcm->extts_mask & mask) { + err = idtcm_extts_check_channel(idtcm, i); + /* trigger clears itself, so clear the mask */ + if (err == 0) + idtcm->extts_mask &= ~mask; + } + } + + if (idtcm->extts_mask) + schedule_delayed_work(&idtcm->extts_work, + msecs_to_jiffies(EXTTS_PERIOD_MS)); + mutex_unlock(idtcm->lock); +} + static void ptp_clock_unregister_all(struct idtcm *idtcm) { u8 i; @@ -2149,7 +2335,6 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm) for (i = 0; i < MAX_TOD; i++) { channel = &idtcm->channel[i]; - if (channel->ptp_clock) ptp_clock_unregister(channel->ptp_clock); } @@ -2158,6 +2343,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm) static void set_default_masks(struct idtcm *idtcm) { idtcm->tod_mask = DEFAULT_TOD_MASK; + idtcm->extts_mask = 0; idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL; idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL; @@ -2170,158 +2356,86 @@ static void set_default_masks(struct idtcm *idtcm) idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3; } -static int idtcm_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int idtcm_probe(struct platform_device *pdev) { + struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent); struct idtcm *idtcm; int err; u8 i; - /* Unused for now */ - (void)id; - - idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL); + idtcm = devm_kzalloc(&pdev->dev, sizeof(struct idtcm), GFP_KERNEL); if (!idtcm) return -ENOMEM; - idtcm->client = client; - idtcm->page_offset = 0xff; + idtcm->dev = &pdev->dev; + idtcm->mfd = pdev->dev.parent; + idtcm->lock = &ddata->lock; + idtcm->regmap = ddata->regmap; idtcm->calculate_overhead_flag = 0; + INIT_DELAYED_WORK(&idtcm->extts_work, idtcm_extts_check); + set_default_masks(idtcm); - mutex_init(&idtcm->reg_lock); - mutex_lock(&idtcm->reg_lock); + mutex_lock(idtcm->lock); idtcm_set_version_info(idtcm); - err = idtcm_load_firmware(idtcm, &client->dev); + err = idtcm_load_firmware(idtcm, &pdev->dev); + if (err) - dev_warn(&idtcm->client->dev, "loading firmware failed with %d", err); + dev_warn(idtcm->dev, 
"loading firmware failed with %d", err); wait_for_chip_ready(idtcm); if (idtcm->tod_mask) { for (i = 0; i < MAX_TOD; i++) { - if (idtcm->tod_mask & (1 << i)) { + if (idtcm->tod_mask & (1 << i)) err = idtcm_enable_channel(idtcm, i); - if (err) { - dev_err(&idtcm->client->dev, - "idtcm_enable_channel %d failed!", i); - break; - } + else + err = idtcm_enable_extts_channel(idtcm, i); + if (err) { + dev_err(idtcm->dev, + "idtcm_enable_channel %d failed!", i); + break; } } } else { - dev_err(&idtcm->client->dev, + dev_err(idtcm->dev, "no PLLs flagged as PHCs, nothing to do"); err = -ENODEV; } - mutex_unlock(&idtcm->reg_lock); + mutex_unlock(idtcm->lock); if (err) { ptp_clock_unregister_all(idtcm); return err; } - i2c_set_clientdata(client, idtcm); + platform_set_drvdata(pdev, idtcm); return 0; } -static int idtcm_remove(struct i2c_client *client) +static int idtcm_remove(struct platform_device *pdev) { - struct idtcm *idtcm = i2c_get_clientdata(client); + struct idtcm *idtcm = platform_get_drvdata(pdev); ptp_clock_unregister_all(idtcm); - mutex_destroy(&idtcm->reg_lock); + cancel_delayed_work_sync(&idtcm->extts_work); return 0; } -#ifdef CONFIG_OF -static const struct of_device_id idtcm_dt_id[] = { - { .compatible = "idt,8a34000" }, - { .compatible = "idt,8a34001" }, - { .compatible = "idt,8a34002" }, - { .compatible = "idt,8a34003" }, - { .compatible = "idt,8a34004" }, - { .compatible = "idt,8a34005" }, - { .compatible = "idt,8a34006" }, - { .compatible = "idt,8a34007" }, - { .compatible = "idt,8a34008" }, - { .compatible = "idt,8a34009" }, - { .compatible = "idt,8a34010" }, - { .compatible = "idt,8a34011" }, - { .compatible = "idt,8a34012" }, - { .compatible = "idt,8a34013" }, - { .compatible = "idt,8a34014" }, - { .compatible = "idt,8a34015" }, - { .compatible = "idt,8a34016" }, - { .compatible = "idt,8a34017" }, - { .compatible = "idt,8a34018" }, - { .compatible = "idt,8a34019" }, - { .compatible = "idt,8a34040" }, - { .compatible = "idt,8a34041" }, - { .compatible = "idt,8a34042" }, - { .compatible = "idt,8a34043" }, - { .compatible = "idt,8a34044" }, - { .compatible = "idt,8a34045" }, - { .compatible = "idt,8a34046" }, - { .compatible = "idt,8a34047" }, - { .compatible = "idt,8a34048" }, - { .compatible = "idt,8a34049" }, - {}, -}; -MODULE_DEVICE_TABLE(of, idtcm_dt_id); -#endif - -static const struct i2c_device_id idtcm_i2c_id[] = { - { "8a34000" }, - { "8a34001" }, - { "8a34002" }, - { "8a34003" }, - { "8a34004" }, - { "8a34005" }, - { "8a34006" }, - { "8a34007" }, - { "8a34008" }, - { "8a34009" }, - { "8a34010" }, - { "8a34011" }, - { "8a34012" }, - { "8a34013" }, - { "8a34014" }, - { "8a34015" }, - { "8a34016" }, - { "8a34017" }, - { "8a34018" }, - { "8a34019" }, - { "8a34040" }, - { "8a34041" }, - { "8a34042" }, - { "8a34043" }, - { "8a34044" }, - { "8a34045" }, - { "8a34046" }, - { "8a34047" }, - { "8a34048" }, - { "8a34049" }, - {}, -}; -MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id); - -static struct i2c_driver idtcm_driver = { +static struct platform_driver idtcm_driver = { .driver = { - .of_match_table = of_match_ptr(idtcm_dt_id), - .name = "idtcm", + .name = "8a3400x-phc", }, - .probe = idtcm_probe, - .remove = idtcm_remove, - .id_table = idtcm_i2c_id, + .probe = idtcm_probe, + .remove = idtcm_remove, }; -module_i2c_driver(idtcm_driver); +module_platform_driver(idtcm_driver); diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index fb323271063e26..0f3059ae1fffa7 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -9,8 +9,8 @@ #define 
PTP_IDTCLOCKMATRIX_H #include - -#include "idt8a340_reg.h" +#include +#include #define FW_FILENAME "idtcm.bin" #define MAX_TOD (4) @@ -44,7 +44,6 @@ #define DEFAULT_TOD2_PTP_PLL (2) #define DEFAULT_TOD3_PTP_PLL (3) -#define POST_SM_RESET_DELAY_MS (3000) #define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000) #define PHASE_PULL_IN_THRESHOLD_NS (15000) #define TOD_WRITE_OVERHEAD_COUNT_MAX (2) @@ -57,66 +56,26 @@ #define IDTCM_MAX_WRITE_COUNT (512) -#define FULL_FW_CFG_BYTES (SCRATCH - GPIO_USER_CONTROL) -#define FULL_FW_CFG_SKIPPED_BYTES (((SCRATCH >> 7) \ - - (GPIO_USER_CONTROL >> 7)) \ - * 4) /* 4 bytes skipped every 0x80 */ - -/* Values of DPLL_N.DPLL_MODE.PLL_MODE */ -enum pll_mode { - PLL_MODE_MIN = 0, - PLL_MODE_NORMAL = PLL_MODE_MIN, - PLL_MODE_WRITE_PHASE = 1, - PLL_MODE_WRITE_FREQUENCY = 2, - PLL_MODE_GPIO_INC_DEC = 3, - PLL_MODE_SYNTHESIS = 4, - PLL_MODE_PHASE_MEASUREMENT = 5, - PLL_MODE_DISABLED = 6, - PLL_MODE_MAX = PLL_MODE_DISABLED, -}; - -enum hw_tod_write_trig_sel { - HW_TOD_WR_TRIG_SEL_MIN = 0, - HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN, - HW_TOD_WR_TRIG_SEL_RESERVED = 1, - HW_TOD_WR_TRIG_SEL_TOD_PPS = 2, - HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3, - HW_TOD_WR_TRIG_SEL_PWM_PPS = 4, - HW_TOD_WR_TRIG_SEL_GPIO = 5, - HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6, - WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC, -}; - -/* 4.8.7 only */ -enum scsr_tod_write_trig_sel { - SCSR_TOD_WR_TRIG_SEL_DISABLE = 0, - SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1, - SCSR_TOD_WR_TRIG_SEL_REFCLK = 2, - SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3, - SCSR_TOD_WR_TRIG_SEL_TODPPS = 4, - SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5, - SCSR_TOD_WR_TRIG_SEL_GPIO = 6, - SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO, -}; +#define PHASE_PULL_IN_MAX_PPB (144000) +#define PHASE_PULL_IN_MIN_THRESHOLD_NS (2) -/* 4.8.7 only */ -enum scsr_tod_write_type_sel { - SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0, - SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1, - SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2, - SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS, +/* + * Return register address based on passed in firmware version + */ +#define IDTCM_FW_REG(FW, VER, REG) (((FW) < (VER)) ? 
(REG) : (REG##_##VER)) +enum fw_version { + V_DEFAULT = 0, + V487 = 1, + V520 = 2, }; -/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */ -enum dpll_state { - DPLL_STATE_MIN = 0, - DPLL_STATE_FREERUN = DPLL_STATE_MIN, - DPLL_STATE_LOCKACQ = 1, - DPLL_STATE_LOCKREC = 2, - DPLL_STATE_LOCKED = 3, - DPLL_STATE_HOLDOVER = 4, - DPLL_STATE_OPEN_LOOP = 5, - DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP, +/* PTP PLL Mode */ +enum ptp_pll_mode { + PTP_PLL_MODE_MIN = 0, + PTP_PLL_MODE_WRITE_FREQUENCY = PTP_PLL_MODE_MIN, + PTP_PLL_MODE_WRITE_PHASE, + PTP_PLL_MODE_UNSUPPORTED, + PTP_PLL_MODE_MAX = PTP_PLL_MODE_UNSUPPORTED, }; struct idtcm; @@ -134,26 +93,40 @@ struct idtcm_channel { u16 tod_write; u16 tod_n; u16 hw_dpll_n; - enum pll_mode pll_mode; + u8 sync_src; + enum ptp_pll_mode mode; + int (*configure_write_frequency)(struct idtcm_channel *channel); + int (*configure_write_phase)(struct idtcm_channel *channel); + int (*do_phase_pull_in)(struct idtcm_channel *channel, + s32 offset_ns, u32 max_ffo_ppb); + s32 current_freq_scaled_ppm; + bool phase_pull_in; + u32 dco_delay; + /* last input trigger for extts */ + u8 refn; u8 pll; u16 output_mask; }; struct idtcm { struct idtcm_channel channel[MAX_TOD]; - struct i2c_client *client; - u8 page_offset; + struct device *dev; u8 tod_mask; char version[16]; - u8 deprecated; - + enum fw_version fw_ver; + /* Polls for external time stamps */ + u8 extts_mask; + struct delayed_work extts_work; + /* Remember the ptp channel to report extts */ + struct idtcm_channel *event_channel[MAX_TOD]; + /* Mutex to protect operations from being interrupted */ + struct mutex *lock; + struct device *mfd; + struct regmap *regmap; /* Overhead calculation for adjtime */ u8 calculate_overhead_flag; s64 tod_write_overhead_ns; ktime_t start_time; - - /* Protects I2C read/modify/write registers from concurrent access */ - struct mutex reg_lock; }; struct idtcm_fwrc { diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index caf9b37c5eb1e0..34f943c8c9fd84 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -72,7 +73,7 @@ struct tod_reg { u32 status; u32 uart_polarity; u32 version; - u32 correction_sec; + u32 adj_sec; u32 __pad0[3]; u32 uart_baud; u32 __pad1[3]; @@ -124,6 +125,55 @@ struct img_reg { u32 version; }; +struct gpio_reg { + u32 gpio1; + u32 __pad0; + u32 gpio2; + u32 __pad1; +}; + +struct irig_master_reg { + u32 ctrl; + u32 status; + u32 __pad0; + u32 version; + u32 adj_sec; + u32 mode_ctrl; +}; + +#define IRIG_M_CTRL_ENABLE BIT(0) + +struct irig_slave_reg { + u32 ctrl; + u32 status; + u32 __pad0; + u32 version; + u32 adj_sec; + u32 mode_ctrl; +}; + +#define IRIG_S_CTRL_ENABLE BIT(0) + +struct dcf_master_reg { + u32 ctrl; + u32 status; + u32 __pad0; + u32 version; + u32 adj_sec; +}; + +#define DCF_M_CTRL_ENABLE BIT(0) + +struct dcf_slave_reg { + u32 ctrl; + u32 status; + u32 __pad0; + u32 version; + u32 adj_sec; +}; + +#define DCF_S_CTRL_ENABLE BIT(0) + struct ptp_ocp_flash_info { const char *name; int pci_offset; @@ -131,11 +181,17 @@ struct ptp_ocp_flash_info { void *data; }; -struct ptp_ocp_ext_info { +struct ptp_ocp_i2c_info { const char *name; + unsigned long fixed_rate; + size_t data_size; + void *data; +}; + +struct ptp_ocp_ext_info { int index; irqreturn_t (*irq_fcn)(int irq, void *priv); - int (*enable)(void *priv, bool enable); + int (*enable)(void *priv, u32 req, bool enable); }; struct ptp_ocp_ext_src { @@ -153,9 +209,17 @@ struct ptp_ocp { struct tod_reg __iomem *tod; 
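/*
 * A minimal sketch of how the IDTCM_FW_REG() macro added to
 * ptp_clockmatrix.h above resolves a register offset by firmware
 * version: firmware older than the named version keeps the legacy
 * offset, newer firmware gets the version-suffixed one.  The two
 * offsets below are hypothetical placeholders, not the real
 * idt8a340 register map.
 */
#define EXAMPLE_TOD_CFG		0x0100	/* hypothetical legacy offset */
#define EXAMPLE_TOD_CFG_V520	0x0110	/* hypothetical 5.2.0+ offset */

/*
 * IDTCM_FW_REG(fw, V520, EXAMPLE_TOD_CFG) expands to
 *	((fw) < (V520)) ? (EXAMPLE_TOD_CFG) : (EXAMPLE_TOD_CFG_V520)
 * so V_DEFAULT and V487 firmware keep the old offset while V520 and
 * newer use the _V520 variant.
 */
static u16 example_tod_cfg(enum fw_version fw)
{
	return IDTCM_FW_REG(fw, V520, EXAMPLE_TOD_CFG);
}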
struct pps_reg __iomem *pps_to_ext; struct pps_reg __iomem *pps_to_clk; + struct gpio_reg __iomem *pps_select; + struct gpio_reg __iomem *sma; + struct irig_master_reg __iomem *irig_out; + struct irig_slave_reg __iomem *irig_in; + struct dcf_master_reg __iomem *dcf_out; + struct dcf_slave_reg __iomem *dcf_in; + struct tod_reg __iomem *nmea_out; struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; + struct ptp_ocp_ext_src *ts2; struct img_reg __iomem *image; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; @@ -163,16 +227,25 @@ struct ptp_ocp { struct platform_device *spi_flash; struct clk_hw *i2c_clk; struct timer_list watchdog; + struct dentry *debug_root; time64_t gnss_lost; int id; int n_irqs; int gnss_port; + int gnss2_port; int mac_port; /* miniature atomic clock */ + int nmea_port; u8 serial[6]; - int flash_start; bool has_serial; + u32 pps_req_map; + int flash_start; + u32 utc_tai_offset; + u32 ts_window_adjust; }; +#define OCP_REQ_TIMESTAMP BIT(0) +#define OCP_REQ_PPS BIT(1) + struct ocp_resource { unsigned long offset; int size; @@ -180,6 +253,7 @@ struct ocp_resource { int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r); void *extra; unsigned long bp_offset; + const char * const name; }; static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r); @@ -189,7 +263,7 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r); static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r); static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r); static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv); -static int ptp_ocp_ts_enable(void *priv, bool enable); +static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable); #define bp_assign_entry(bp, res, val) ({ \ uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \ @@ -197,7 +271,7 @@ static int ptp_ocp_ts_enable(void *priv, bool enable); }) #define OCP_RES_LOCATION(member) \ - .bp_offset = offsetof(struct ptp_ocp, member) + .name = #member, .bp_offset = offsetof(struct ptp_ocp, member) #define OCP_MEM_RESOURCE(member) \ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem @@ -215,16 +289,17 @@ static int ptp_ocp_ts_enable(void *priv, bool enable); OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext /* This is the MSI vector mapping used. 
- * 0: N/C + * 0: TS3 (and PPS) * 1: TS0 * 2: TS1 - * 3: GPS - * 4: GPS2 (n/c) + * 3: GNSS + * 4: GNSS2 * 5: MAC - * 6: SPI IMU (inertial measurement unit) - * 7: I2C oscillator - * 8: HWICAP + * 6: TS2 + * 7: I2C controller + * 8: HWICAP (notused) * 9: SPI Flash + * 10: NMEA */ static struct ocp_resource ocp_fb_resource[] = { @@ -236,7 +311,7 @@ static struct ocp_resource ocp_fb_resource[] = { OCP_EXT_RESOURCE(ts0), .offset = 0x01010000, .size = 0x10000, .irq_vec = 1, .extra = &(struct ptp_ocp_ext_info) { - .name = "ts0", .index = 0, + .index = 0, .irq_fcn = ptp_ocp_ts_irq, .enable = ptp_ocp_ts_enable, }, @@ -245,7 +320,25 @@ static struct ocp_resource ocp_fb_resource[] = { OCP_EXT_RESOURCE(ts1), .offset = 0x01020000, .size = 0x10000, .irq_vec = 2, .extra = &(struct ptp_ocp_ext_info) { - .name = "ts1", .index = 1, + .index = 1, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(ts2), + .offset = 0x01060000, .size = 0x10000, .irq_vec = 6, + .extra = &(struct ptp_ocp_ext_info) { + .index = 2, + .irq_fcn = ptp_ocp_ts_irq, + .enable = ptp_ocp_ts_enable, + }, + }, + { + OCP_EXT_RESOURCE(pps), + .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0, + .extra = &(struct ptp_ocp_ext_info) { + .index = 3, .irq_fcn = ptp_ocp_ts_irq, .enable = ptp_ocp_ts_enable, }, @@ -262,22 +355,62 @@ static struct ocp_resource ocp_fb_resource[] = { OCP_MEM_RESOURCE(tod), .offset = 0x01050000, .size = 0x10000, }, + { + OCP_MEM_RESOURCE(irig_in), + .offset = 0x01070000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(irig_out), + .offset = 0x01080000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(dcf_in), + .offset = 0x01090000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(dcf_out), + .offset = 0x010A0000, .size = 0x10000, + }, + { + OCP_MEM_RESOURCE(nmea_out), + .offset = 0x010B0000, .size = 0x10000, + }, { OCP_MEM_RESOURCE(image), .offset = 0x00020000, .size = 0x1000, }, + { + OCP_MEM_RESOURCE(pps_select), + .offset = 0x00130000, .size = 0x1000, + }, + { + OCP_MEM_RESOURCE(sma), + .offset = 0x00140000, .size = 0x1000, + }, { OCP_I2C_RESOURCE(i2c_ctrl), .offset = 0x00150000, .size = 0x10000, .irq_vec = 7, + .extra = &(struct ptp_ocp_i2c_info) { + .name = "xiic-i2c", + .fixed_rate = 50000000, + }, }, { OCP_SERIAL_RESOURCE(gnss_port), .offset = 0x00160000 + 0x1000, .irq_vec = 3, }, + { + OCP_SERIAL_RESOURCE(gnss2_port), + .offset = 0x00170000 + 0x1000, .irq_vec = 4, + }, { OCP_SERIAL_RESOURCE(mac_port), .offset = 0x00180000 + 0x1000, .irq_vec = 5, }, + { + OCP_SERIAL_RESOURCE(nmea_port), + .offset = 0x00190000 + 0x1000, .irq_vec = 10, + }, { OCP_SPI_RESOURCE(spi_flash), .offset = 0x00310000, .size = 0x10000, .irq_vec = 9, @@ -309,10 +442,12 @@ MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id); static DEFINE_MUTEX(ptp_ocp_lock); static DEFINE_IDR(ptp_ocp_idr); -static struct { +struct ocp_selector { const char *name; int value; -} ptp_ocp_clock[] = { +}; + +static struct ocp_selector ptp_ocp_clock[] = { { .name = "NONE", .value = 0 }, { .name = "TOD", .value = 1 }, { .name = "IRIG", .value = 2 }, @@ -322,33 +457,71 @@ static struct { { .name = "DCF", .value = 6 }, { .name = "REGS", .value = 0xfe }, { .name = "EXT", .value = 0xff }, + { } +}; + +static struct ocp_selector ptp_ocp_sma_in[] = { + { .name = "10Mhz", .value = 0x00 }, + { .name = "PPS1", .value = 0x01 }, + { .name = "PPS2", .value = 0x02 }, + { .name = "TS1", .value = 0x04 }, + { .name = "TS2", .value = 0x08 }, + { .name = "IRIG", .value = 0x10 }, + { .name = "DCF", .value = 0x20 }, + { } +}; + +static struct ocp_selector 
ptp_ocp_sma_out[] = { + { .name = "10Mhz", .value = 0x00 }, + { .name = "PHC", .value = 0x01 }, + { .name = "MAC", .value = 0x02 }, + { .name = "GNSS", .value = 0x04 }, + { .name = "GNSS2", .value = 0x08 }, + { .name = "IRIG", .value = 0x10 }, + { .name = "DCF", .value = 0x20 }, + { } }; static const char * -ptp_ocp_clock_name_from_val(int val) +ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val) { int i; - for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) - if (ptp_ocp_clock[i].value == val) - return ptp_ocp_clock[i].name; + for (i = 0; tbl[i].name; i++) + if (tbl[i].value == val) + return tbl[i].name; return NULL; } static int -ptp_ocp_clock_val_from_name(const char *name) +ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name) { - const char *clk; + const char *select; int i; - for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) { - clk = ptp_ocp_clock[i].name; - if (!strncasecmp(name, clk, strlen(clk))) - return ptp_ocp_clock[i].value; + for (i = 0; tbl[i].name; i++) { + select = tbl[i].name; + if (!strncasecmp(name, select, strlen(select))) + return tbl[i].value; } return -EINVAL; } +static ssize_t +ptp_ocp_select_table_show(struct ocp_selector *tbl, char *buf) +{ + ssize_t count; + int i; + + count = 0; + for (i = 0; tbl[i].name; i++) + count += sysfs_emit_at(buf, count, "%s ", tbl[i].name); + if (count) + count--; + count += sysfs_emit_at(buf, count, "\n"); + return count; +} + static int __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts, struct ptp_system_timestamp *sts) @@ -356,10 +529,9 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts, u32 ctrl, time_sec, time_ns; int i; - ctrl = ioread32(&bp->reg->ctrl); - ctrl |= OCP_CTRL_READ_TIME_REQ; - ptp_read_system_prets(sts); + + ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); for (i = 0; i < 100; i++) { @@ -369,6 +541,12 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts, } ptp_read_system_postts(sts); + if (sts && bp->ts_window_adjust) { + s64 ns = timespec64_to_ns(&sts->post_ts); + + sts->post_ts = ns_to_timespec64(ns - bp->ts_window_adjust); + } + time_ns = ioread32(&bp->reg->time_ns); time_sec = ioread32(&bp->reg->time_sec); @@ -408,8 +586,7 @@ __ptp_ocp_settime_locked(struct ptp_ocp *bp, const struct timespec64 *ts) iowrite32(time_ns, &bp->reg->adjust_ns); iowrite32(time_sec, &bp->reg->adjust_sec); - ctrl = ioread32(&bp->reg->ctrl); - ctrl |= OCP_CTRL_ADJUST_TIME; + ctrl = OCP_CTRL_ADJUST_TIME | OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); /* restore clock selection */ @@ -422,9 +599,6 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info); unsigned long flags; - if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC) - return 0; - spin_lock_irqsave(&bp->lock, flags); __ptp_ocp_settime_locked(bp, ts); spin_unlock_irqrestore(&bp->lock, flags); @@ -432,26 +606,39 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) return 0; } +static void +__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) +{ + u32 select, ctrl; + + select = ioread32(&bp->reg->select); + iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select); + + iowrite32(adj_val, &bp->reg->offset_ns); + iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns); + + ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE; + iowrite32(ctrl, &bp->reg->ctrl); + + /* restore clock selection */ + iowrite32(select >> 16, &bp->reg->select); +} + static int 
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns) { struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info); - struct timespec64 ts; unsigned long flags; - int err; + u32 adj_ns, sign; - if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC) - return 0; + sign = delta_ns < 0 ? BIT(31) : 0; + adj_ns = sign ? -delta_ns : delta_ns; spin_lock_irqsave(&bp->lock, flags); - err = __ptp_ocp_gettime_locked(bp, &ts, NULL); - if (likely(!err)) { - timespec64_add_ns(&ts, delta_ns); - __ptp_ocp_settime_locked(bp, &ts); - } + __ptp_ocp_adjtime_locked(bp, sign | adj_ns); spin_unlock_irqrestore(&bp->lock, flags); - return err; + return 0; } static int @@ -464,7 +651,7 @@ ptp_ocp_null_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) } static int -ptp_ocp_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns) +ptp_ocp_null_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns) { return -EOPNOTSUPP; } @@ -475,10 +662,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, { struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info); struct ptp_ocp_ext_src *ext = NULL; + u32 req; int err; switch (rq->type) { case PTP_CLK_REQ_EXTTS: + req = OCP_REQ_TIMESTAMP; switch (rq->extts.index) { case 0: ext = bp->ts0; @@ -486,18 +675,33 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, case 1: ext = bp->ts1; break; + case 2: + ext = bp->ts2; + break; + case 3: + ext = bp->pps; + break; } break; case PTP_CLK_REQ_PPS: + req = OCP_REQ_PPS; ext = bp->pps; break; + case PTP_CLK_REQ_PEROUT: + if (on && + (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0)) + return -EINVAL; + /* This is a request for 1PPS on an output SMA. + * Allow, but assume manual configuration. + */ + return 0; default: return -EOPNOTSUPP; } err = -ENXIO; if (ext) - err = ext->info->enable(ext, on); + err = ext->info->enable(ext, req, on); return err; } @@ -510,10 +714,11 @@ static const struct ptp_clock_info ptp_ocp_clock_info = { .settime64 = ptp_ocp_settime, .adjtime = ptp_ocp_adjtime, .adjfine = ptp_ocp_null_adjfine, - .adjphase = ptp_ocp_adjphase, + .adjphase = ptp_ocp_null_adjphase, .enable = ptp_ocp_enable, .pps = true, - .n_ext_ts = 2, + .n_ext_ts = 4, + .n_per_out = 1, }; static void @@ -526,8 +731,7 @@ __ptp_ocp_clear_drift_locked(struct ptp_ocp *bp) iowrite32(0, &bp->reg->drift_ns); - ctrl = ioread32(&bp->reg->ctrl); - ctrl |= OCP_CTRL_ADJUST_DRIFT; + ctrl = OCP_CTRL_ADJUST_DRIFT | OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); /* restore clock selection */ @@ -559,6 +763,28 @@ ptp_ocp_watchdog(struct timer_list *t) mod_timer(&bp->watchdog, jiffies + HZ); } +static void +ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp) +{ + ktime_t start, end; + ktime_t delay; + u32 ctrl; + + ctrl = ioread32(&bp->reg->ctrl); + ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE; + + iowrite32(ctrl, &bp->reg->ctrl); + + start = ktime_get_ns(); + + ctrl = ioread32(&bp->reg->ctrl); + + end = ktime_get_ns(); + + delay = end - start; + bp->ts_window_adjust = (delay >> 5) * 3; +} + static int ptp_ocp_init_clock(struct ptp_ocp *bp) { @@ -566,9 +792,7 @@ ptp_ocp_init_clock(struct ptp_ocp *bp) bool sync; u32 ctrl; - /* make sure clock is enabled */ - ctrl = ioread32(&bp->reg->ctrl); - ctrl |= OCP_CTRL_ENABLE; + ctrl = OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); /* NO DRIFT Correction */ @@ -587,22 +811,57 @@ ptp_ocp_init_clock(struct ptp_ocp *bp) return -ENODEV; } + ptp_ocp_estimate_pci_timing(bp); + sync = ioread32(&bp->reg->status) & 
OCP_STATUS_IN_SYNC; if (!sync) { - ktime_get_real_ts64(&ts); + ktime_get_clocktai_ts64(&ts); ptp_ocp_settime(&bp->ptp_info, &ts); } - if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL)) - dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n", - ts.tv_sec, ts.tv_nsec, - sync ? "in-sync" : "UNSYNCED"); - timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0); - mod_timer(&bp->watchdog, jiffies + HZ); + /* If there is a clock supervisor, then enable the watchdog */ + if (bp->pps_to_clk) { + timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0); + mod_timer(&bp->watchdog, jiffies + HZ); + } return 0; } +static void +ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + bp->utc_tai_offset = val; + + if (bp->irig_out) + iowrite32(val, &bp->irig_out->adj_sec); + if (bp->dcf_out) + iowrite32(val, &bp->dcf_out->adj_sec); + if (bp->nmea_out) + iowrite32(val, &bp->nmea_out->adj_sec); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +static void +ptp_ocp_tod_init(struct ptp_ocp *bp) +{ + u32 ctrl, reg; + + ctrl = ioread32(&bp->tod->ctrl); + ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE; + ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B); + iowrite32(ctrl, &bp->tod->ctrl); + + reg = ioread32(&bp->tod->utc_status); + if (reg & TOD_STATUS_UTC_VALID) + ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK); +} + static void ptp_ocp_tod_info(struct ptp_ocp *bp) { @@ -620,11 +879,6 @@ ptp_ocp_tod_info(struct ptp_ocp *bp) dev_info(&bp->pdev->dev, "TOD Version %d.%d.%d\n", version >> 24, (version >> 16) & 0xff, version & 0xffff); - ctrl = ioread32(&bp->tod->ctrl); - ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE; - ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B); - iowrite32(ctrl, &bp->tod->ctrl); - ctrl = ioread32(&bp->tod->ctrl); idx = ctrl & TOD_CTRL_PROTOCOL ? 
4 : 0; idx += (ctrl >> 16) & 3; @@ -639,7 +893,7 @@ ptp_ocp_tod_info(struct ptp_ocp *bp) reg = ioread32(&bp->tod->status); dev_info(&bp->pdev->dev, "status: %x\n", reg); - reg = ioread32(&bp->tod->correction_sec); + reg = ioread32(&bp->tod->adj_sec); dev_info(&bp->pdev->dev, "correction: %d\n", reg); reg = ioread32(&bp->tod->utc_status); @@ -695,6 +949,9 @@ ptp_ocp_get_serial_number(struct ptp_ocp *bp) struct device *dev; int err; + if (!bp->i2c_ctrl) + return; + dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild); if (!dev) { dev_err(&bp->pdev->dev, "Can't find I2C adapter\n"); @@ -720,21 +977,6 @@ ptp_ocp_get_serial_number(struct ptp_ocp *bp) put_device(dev); } -static void -ptp_ocp_info(struct ptp_ocp *bp) -{ - u32 version, select; - - version = ioread32(&bp->reg->version); - select = ioread32(&bp->reg->select); - dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n", - version >> 24, (version >> 16) & 0xff, version & 0xffff, - ptp_ocp_clock_name_from_val(select >> 16), - ptp_clock_index(bp->ptp)); - - ptp_ocp_tod_info(bp); -} - static struct device * ptp_ocp_find_flash(struct ptp_ocp *bp) { @@ -910,18 +1152,6 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r) unsigned long start; int id; - /* XXX hack to work around old FPGA */ - if (bp->n_irqs < 10) { - dev_err(&bp->pdev->dev, "FPGA does not have SPI devices\n"); - return 0; - } - - if (r->irq_vec > bp->n_irqs) { - dev_err(&bp->pdev->dev, "spi device irq %d out of range\n", - r->irq_vec); - return 0; - } - start = pci_resource_start(pdev, 0) + r->offset; ptp_ocp_set_mem_resource(&res[0], start, r->size); ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec)); @@ -944,41 +1174,41 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r) static struct platform_device * ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id) { + struct ptp_ocp_i2c_info *info; struct resource res[2]; unsigned long start; + info = r->extra; start = pci_resource_start(pdev, 0) + r->offset; ptp_ocp_set_mem_resource(&res[0], start, r->size); ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec)); - return platform_device_register_resndata(&pdev->dev, "xiic-i2c", - id, res, 2, NULL, 0); + return platform_device_register_resndata(&pdev->dev, info->name, + id, res, 2, + info->data, info->data_size); } static int ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r) { struct pci_dev *pdev = bp->pdev; + struct ptp_ocp_i2c_info *info; struct platform_device *p; struct clk_hw *clk; char buf[32]; int id; - if (r->irq_vec > bp->n_irqs) { - dev_err(&bp->pdev->dev, "i2c device irq %d out of range\n", - r->irq_vec); - return 0; - } - + info = r->extra; id = pci_dev_id(bp->pdev); sprintf(buf, "AXI.%d", id); - clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, 50000000); + clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, + info->fixed_rate); if (IS_ERR(clk)) return PTR_ERR(clk); bp->i2c_clk = clk; - sprintf(buf, "xiic-i2c.%d", id); + sprintf(buf, "%s.%d", info->name, id); devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf); p = ptp_ocp_i2c_bus(bp->pdev, r, id); if (IS_ERR(p)) @@ -997,26 +1227,51 @@ ptp_ocp_ts_irq(int irq, void *priv) struct ptp_clock_event ev; u32 sec, nsec; + if (ext == ext->bp->pps) { + if (ext->bp->pps_req_map & OCP_REQ_PPS) { + ev.type = PTP_CLOCK_PPS; + ptp_clock_event(ext->bp->ptp, &ev); + } + + if ((ext->bp->pps_req_map & ~OCP_REQ_PPS) == 0) + goto out; + } + /* XXX should fix API - this converts s/ns -> ts -> s/ns */ 
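/*
 * A stripped-down sketch of the pps_req_map bookkeeping used above: the
 * TS3/PPS timestamper is shared by the PTP_CLK_REQ_PPS path and external
 * timestamp index 3, so each request sets a bit in the map and the
 * hardware enable is only touched when the map transitions between empty
 * and non-empty.  The names below model OCP_REQ_TIMESTAMP/OCP_REQ_PPS but
 * are illustrative, not the driver's actual code.
 */
#define EXAMPLE_REQ_TIMESTAMP	0x01	/* models OCP_REQ_TIMESTAMP */
#define EXAMPLE_REQ_PPS		0x02	/* models OCP_REQ_PPS */

/* Returns true when the shared hardware enable actually needs to change. */
static bool example_shared_enable(u32 *req_map, u32 req, bool enable)
{
	u32 old_map = *req_map;

	if (enable)
		*req_map |= req;
	else
		*req_map &= ~req;

	return !!old_map != !!*req_map;
}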
sec = ioread32(®->time_sec); nsec = ioread32(®->time_ns); ev.type = PTP_CLOCK_EXTTS; ev.index = ext->info->index; - ev.timestamp = sec * 1000000000ULL + nsec; + ev.timestamp = sec * NSEC_PER_SEC + nsec; ptp_clock_event(ext->bp->ptp, &ev); +out: iowrite32(1, ®->intr); /* write 1 to ack */ return IRQ_HANDLED; } static int -ptp_ocp_ts_enable(void *priv, bool enable) +ptp_ocp_ts_enable(void *priv, u32 req, bool enable) { struct ptp_ocp_ext_src *ext = priv; struct ts_reg __iomem *reg = ext->mem; + struct ptp_ocp *bp = ext->bp; + + if (ext == bp->pps) { + u32 old_map = bp->pps_req_map; + + if (enable) + bp->pps_req_map |= req; + else + bp->pps_req_map &= ~req; + + /* if no state change, just return */ + if ((!!old_map ^ !!bp->pps_req_map) == 0) + return 0; + } if (enable) { iowrite32(1, ®->enable); @@ -1033,7 +1288,7 @@ ptp_ocp_ts_enable(void *priv, bool enable) static void ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext) { - ext->info->enable(ext, false); + ext->info->enable(ext, ~0, false); pci_free_irq(ext->bp->pdev, ext->irq_vec, ext); kfree(ext); } @@ -1059,7 +1314,7 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r) ext->irq_vec = r->irq_vec; err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL, - ext, "ocp%d.%s", bp->id, ext->info->name); + ext, "ocp%d.%s", bp->id, r->name); if (err) { dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec); goto out; @@ -1101,12 +1356,6 @@ ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r) { int port; - if (r->irq_vec > bp->n_irqs) { - dev_err(&bp->pdev->dev, "serial device irq %d out of range\n", - r->irq_vec); - return 0; - } - port = ptp_ocp_serial_line(bp, r); if (port < 0) return port; @@ -1130,15 +1379,40 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r) return 0; } +static void +ptp_ocp_nmea_out_init(struct ptp_ocp *bp) +{ + if (!bp->nmea_out) + return; + + iowrite32(0, &bp->nmea_out->ctrl); /* disable */ + iowrite32(7, &bp->nmea_out->uart_baud); /* 115200 */ + iowrite32(1, &bp->nmea_out->ctrl); /* enable */ +} + /* FB specific board initializers; last "resource" registered. */ static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) { bp->flash_start = 1024 * 4096; + ptp_ocp_tod_init(bp); + ptp_ocp_nmea_out_init(bp); + return ptp_ocp_init_clock(bp); } +static bool +ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r) +{ + bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs; + + if (!allow) + dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n", + r->irq_vec, r->name); + return allow; +} + static int ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data) { @@ -1147,49 +1421,505 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data) table = (struct ocp_resource *)driver_data; for (r = table; r->setup; r++) { + if (!ptp_ocp_allow_irq(bp, r)) + continue; err = r->setup(bp, r); - if (err) + if (err) { + dev_err(&bp->pdev->dev, + "Could not register %s: err %d\n", + r->name, err); break; + } } return err; } -static ssize_t -serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) +static void +ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable) { - struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 ctrl; + bool on; + + ctrl = ioread32(reg); + on = ctrl & bit; + if (on ^ enable) { + ctrl &= ~bit; + ctrl |= enable ? 
bit : 0; + iowrite32(ctrl, reg); + } +} - if (!bp->has_serial) - ptp_ocp_get_serial_number(bp); +static void +ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->irig_out->ctrl, + IRIG_M_CTRL_ENABLE, enable); +} - return sysfs_emit(buf, "%pM\n", bp->serial); +static void +ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->irig_in->ctrl, + IRIG_S_CTRL_ENABLE, enable); } -static DEVICE_ATTR_RO(serialnum); -static ssize_t -gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf) +static void +ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable) { - struct ptp_ocp *bp = dev_get_drvdata(dev); - ssize_t ret; + return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl, + DCF_M_CTRL_ENABLE, enable); +} - if (bp->gnss_lost) - ret = sysfs_emit(buf, "LOST @ %ptT\n", &bp->gnss_lost); - else - ret = sysfs_emit(buf, "SYNC\n"); +static void +ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl, + DCF_S_CTRL_ENABLE, enable); +} - return ret; +static void +__handle_signal_outputs(struct ptp_ocp *bp, u32 val) +{ + ptp_ocp_irig_out(bp, val & 0x00100010); + ptp_ocp_dcf_out(bp, val & 0x00200020); } -static DEVICE_ATTR_RO(gnss_sync); -static ssize_t -clock_source_show(struct device *dev, struct device_attribute *attr, char *buf) +static void +__handle_signal_inputs(struct ptp_ocp *bp, u32 val) +{ + ptp_ocp_irig_in(bp, val & 0x00100010); + ptp_ocp_dcf_in(bp, val & 0x00200020); +} + +/* + * ANT0 == gps (in) + * ANT1 == sma1 (in) + * ANT2 == sma2 (in) + * ANT3 == sma3 (out) + * ANT4 == sma4 (out) + */ + +enum ptp_ocp_sma_mode { + SMA_MODE_IN, + SMA_MODE_OUT, +}; + +static struct ptp_ocp_sma_connector { + enum ptp_ocp_sma_mode mode; + bool fixed_mode; + u16 default_out_idx; +} ptp_ocp_sma_map[4] = { + { + .mode = SMA_MODE_IN, + .fixed_mode = true, + }, + { + .mode = SMA_MODE_IN, + .fixed_mode = true, + }, + { + .mode = SMA_MODE_OUT, + .fixed_mode = true, + .default_out_idx = 0, /* 10Mhz */ + }, + { + .mode = SMA_MODE_OUT, + .fixed_mode = true, + .default_out_idx = 1, /* PHC */ + }, +}; + +static ssize_t +ptp_ocp_show_output(u32 val, char *buf, int default_idx) +{ + const char *name; + ssize_t count; + + count = sysfs_emit(buf, "OUT: "); + name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val); + if (!name) + name = ptp_ocp_sma_out[default_idx].name; + count += sysfs_emit_at(buf, count, "%s\n", name); + return count; +} + +static ssize_t +ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in) +{ + const char *name; + ssize_t count; + int i; + + count = sysfs_emit(buf, "IN: "); + for (i = 0; i < ARRAY_SIZE(ptp_ocp_sma_in); i++) { + if (val & ptp_ocp_sma_in[i].value) { + name = ptp_ocp_sma_in[i].name; + count += sysfs_emit_at(buf, count, "%s ", name); + } + } + if (!val && zero_in) + count += sysfs_emit_at(buf, count, "%s ", zero_in); + if (count) + count--; + count += sysfs_emit_at(buf, count, "\n"); + return count; +} + +static int +sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode) +{ + struct ocp_selector *tbl[] = { ptp_ocp_sma_in, ptp_ocp_sma_out }; + int idx, count, dir; + char **argv; + int ret; + + argv = argv_split(GFP_KERNEL, buf, &count); + if (!argv) + return -ENOMEM; + + ret = -EINVAL; + if (!count) + goto out; + + idx = 0; + dir = *mode == SMA_MODE_IN ? 0 : 1; + if (!strcasecmp("IN:", argv[idx])) { + dir = 0; + idx++; + } + if (!strcasecmp("OUT:", argv[0])) { + dir = 1; + idx++; + } + *mode = dir == 0 ? 
SMA_MODE_IN : SMA_MODE_OUT; + + ret = 0; + for (; idx < count; idx++) + ret |= ptp_ocp_select_val_from_name(tbl[dir], argv[idx]); + if (ret < 0) + ret = -EINVAL; + +out: + argv_free(argv); + return ret; +} + +static ssize_t +ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf, + const char *zero_in) +{ + struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1]; + + if (sma->mode == SMA_MODE_IN) + return ptp_ocp_show_inputs(val, buf, zero_in); + + return ptp_ocp_show_output(val, buf, sma->default_out_idx); +} + +static ssize_t +sma1_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + + val = ioread32(&bp->sma->gpio1) & 0x3f; + return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name); +} + +static ssize_t +sma2_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + + val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f; + return ptp_ocp_sma_show(bp, 2, val, buf, NULL); +} + +static ssize_t +sma3_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + + val = ioread32(&bp->sma->gpio2) & 0x3f; + return ptp_ocp_sma_show(bp, 3, val, buf, NULL); +} + +static ssize_t +sma4_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + + val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f; + return ptp_ocp_sma_show(bp, 4, val, buf, NULL); +} + +static void +ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift) +{ + unsigned long flags; + u32 gpio, mask; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + gpio = ioread32(&bp->sma->gpio2); + gpio = (gpio & mask) | (val << shift); + + __handle_signal_outputs(bp, gpio); + + iowrite32(gpio, &bp->sma->gpio2); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +static void +ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift) +{ + unsigned long flags; + u32 gpio, mask; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + gpio = ioread32(&bp->sma->gpio1); + gpio = (gpio & mask) | (val << shift); + + __handle_signal_inputs(bp, gpio); + + iowrite32(gpio, &bp->sma->gpio1); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +static ssize_t +ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift) +{ + struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1]; + enum ptp_ocp_sma_mode mode; + int val; + + mode = sma->mode; + val = sma_parse_inputs(buf, &mode); + if (val < 0) + return val; + + if (mode != sma->mode && sma->fixed_mode) + return -EOPNOTSUPP; + + if (mode != sma->mode) { + pr_err("Mode changes not supported yet.\n"); + return -EOPNOTSUPP; + } + + if (sma->mode == SMA_MODE_IN) + ptp_ocp_sma_store_inputs(bp, val, shift); + else + ptp_ocp_sma_store_output(bp, val, shift); + + return 0; +} + +static ssize_t +sma1_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + + err = ptp_ocp_sma_store(bp, buf, 1, 0); + return err ? err : count; +} + +static ssize_t +sma2_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + + err = ptp_ocp_sma_store(bp, buf, 2, 16); + return err ? 
err : count; +} + +static ssize_t +sma3_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + + err = ptp_ocp_sma_store(bp, buf, 3, 0); + return err ? err : count; +} + +static ssize_t +sma4_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + + err = ptp_ocp_sma_store(bp, buf, 4, 16); + return err ? err : count; +} +static DEVICE_ATTR_RW(sma1); +static DEVICE_ATTR_RW(sma2); +static DEVICE_ATTR_RW(sma3); +static DEVICE_ATTR_RW(sma4); + +static ssize_t +available_sma_inputs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ptp_ocp_select_table_show(ptp_ocp_sma_in, buf); +} +static DEVICE_ATTR_RO(available_sma_inputs); + +static ssize_t +available_sma_outputs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ptp_ocp_select_table_show(ptp_ocp_sma_out, buf); +} +static DEVICE_ATTR_RO(available_sma_outputs); + +static ssize_t +serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + + if (!bp->has_serial) + ptp_ocp_get_serial_number(bp); + + return sysfs_emit(buf, "%pM\n", bp->serial); +} +static DEVICE_ATTR_RO(serialnum); + +static ssize_t +gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + ssize_t ret; + + if (bp->gnss_lost) + ret = sysfs_emit(buf, "LOST @ %ptT\n", &bp->gnss_lost); + else + ret = sysfs_emit(buf, "SYNC\n"); + + return ret; +} +static DEVICE_ATTR_RO(gnss_sync); + +static ssize_t +utc_tai_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", bp->utc_tai_offset); +} + +static ssize_t +utc_tai_offset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + u32 val; + + err = kstrtou32(buf, 0, &val); + if (err) + return err; + + ptp_ocp_utc_distribute(bp, val); + + return count; +} +static DEVICE_ATTR_RW(utc_tai_offset); + +static ssize_t +ts_window_adjust_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", bp->ts_window_adjust); +} + +static ssize_t +ts_window_adjust_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + int err; + u32 val; + + err = kstrtou32(buf, 0, &val); + if (err) + return err; + + bp->ts_window_adjust = val; + + return count; +} +static DEVICE_ATTR_RW(ts_window_adjust); + +static ssize_t +irig_b_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + u32 val; + + val = ioread32(&bp->irig_out->ctrl); + val = (val >> 16) & 0x07; + return sysfs_emit(buf, "%d\n", val); +} + +static ssize_t +irig_b_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_ocp *bp = dev_get_drvdata(dev); + unsigned long flags; + int err; + u32 reg; + u8 val; + + err = kstrtou8(buf, 0, &val); + if (err) + return err; + if (val > 7) + return -EINVAL; + + reg = ((val & 0x7) << 16); + + spin_lock_irqsave(&bp->lock, flags); + iowrite32(0, &bp->irig_out->ctrl); /* disable */ + iowrite32(reg, 
&bp->irig_out->ctrl); /* change mode */ + iowrite32(reg | IRIG_M_CTRL_ENABLE, &bp->irig_out->ctrl); + spin_unlock_irqrestore(&bp->lock, flags); + + return count; +} +static DEVICE_ATTR_RW(irig_b_mode); + +static ssize_t +clock_source_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ptp_ocp *bp = dev_get_drvdata(dev); const char *p; u32 select; select = ioread32(&bp->reg->select); - p = ptp_ocp_clock_name_from_val(select >> 16); + p = ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16); return sysfs_emit(buf, "%s\n", p); } @@ -1202,7 +1932,7 @@ clock_source_store(struct device *dev, struct device_attribute *attr, unsigned long flags; int val; - val = ptp_ocp_clock_val_from_name(buf); + val = ptp_ocp_select_val_from_name(ptp_ocp_clock, buf); if (val < 0) return val; @@ -1218,19 +1948,7 @@ static ssize_t available_clock_sources_show(struct device *dev, struct device_attribute *attr, char *buf) { - const char *clk; - ssize_t count; - int i; - - count = 0; - for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) { - clk = ptp_ocp_clock[i].name; - count += sysfs_emit_at(buf, count, "%s ", clk); - } - if (count) - count--; - count += sysfs_emit_at(buf, count, "\n"); - return count; + return ptp_ocp_select_table_show(ptp_ocp_clock, buf); } static DEVICE_ATTR_RO(available_clock_sources); @@ -1239,10 +1957,258 @@ static struct attribute *timecard_attrs[] = { &dev_attr_gnss_sync.attr, &dev_attr_clock_source.attr, &dev_attr_available_clock_sources.attr, + &dev_attr_sma1.attr, + &dev_attr_sma2.attr, + &dev_attr_sma3.attr, + &dev_attr_sma4.attr, + &dev_attr_available_sma_inputs.attr, + &dev_attr_available_sma_outputs.attr, + &dev_attr_irig_b_mode.attr, + &dev_attr_utc_tai_offset.attr, + &dev_attr_ts_window_adjust.attr, NULL, }; ATTRIBUTE_GROUPS(timecard); +static const char * +gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def) +{ + const char *ans; + + if (gpio & (1 << bit)) + ans = pri; + else if (gpio & (1 << (bit + 16))) + ans = sec; + else + ans = def; + return ans; +} + +static void +gpio_multi_map(char *buf, u32 gpio, u32 bit, + const char *pri, const char *sec, const char *def) +{ + char *ans = buf; + + strcpy(ans, def); + if (gpio & (1 << bit)) + ans += sprintf(ans, "%s ", pri); + if (gpio & (1 << (bit + 16))) + ans += sprintf(ans, "%s ", sec); +} + +static int +ptp_ocp_summary_show(struct seq_file *s, void *data) +{ + struct device *dev = s->private; + struct ptp_system_timestamp sts; + u32 sma_in, sma_out, ctrl, val; + struct ts_reg __iomem *ts_reg; + struct timespec64 ts; + struct ptp_ocp *bp; + const char *src; + bool on, map; + char *buf; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + + bp = dev_get_drvdata(dev); + sma_in = ioread32(&bp->sma->gpio1); + sma_out = ioread32(&bp->sma->gpio2); + + seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp)); + + sma1_show(dev, NULL, buf); + seq_printf(s, " sma1: %s", buf); + + sma2_show(dev, NULL, buf); + seq_printf(s, " sma2: %s", buf); + + sma3_show(dev, NULL, buf); + seq_printf(s, " sma3: %s", buf); + + sma4_show(dev, NULL, buf); + seq_printf(s, " sma4: %s", buf); + + if (bp->ts0) { + ts_reg = bp->ts0->mem; + on = ioread32(&ts_reg->enable); + src = "GNSS"; + seq_printf(s, "%7s: %s, src: %s\n", "TS0", + on ? " ON" : "OFF", src); + } + + if (bp->ts1) { + ts_reg = bp->ts1->mem; + on = ioread32(&ts_reg->enable); + src = gpio_map(sma_in, 2, "sma1", "sma2", "----"); + seq_printf(s, "%7s: %s, src: %s\n", "TS1", + on ? 
" ON" : "OFF", src); + } + + if (bp->ts2) { + ts_reg = bp->ts2->mem; + on = ioread32(&ts_reg->enable); + src = gpio_map(sma_in, 3, "sma1", "sma2", "----"); + seq_printf(s, "%7s: %s, src: %s\n", "TS2", + on ? " ON" : "OFF", src); + } + + if (bp->pps) { + ts_reg = bp->pps->mem; + src = "PHC"; + on = ioread32(&ts_reg->enable); + map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP); + seq_printf(s, "%7s: %s, src: %s\n", "TS3", + on && map ? " ON" : "OFF", src); + + map = !!(bp->pps_req_map & OCP_REQ_PPS); + seq_printf(s, "%7s: %s, src: %s\n", "PPS", + on && map ? " ON" : "OFF", src); + } + + if (bp->irig_out) { + ctrl = ioread32(&bp->irig_out->ctrl); + on = ctrl & IRIG_M_CTRL_ENABLE; + val = ioread32(&bp->irig_out->status); + gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----"); + seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG", + on ? " ON" : "OFF", val, (ctrl >> 16), buf); + } + + if (bp->irig_in) { + on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE; + val = ioread32(&bp->irig_in->status); + src = gpio_map(sma_in, 4, "sma1", "sma2", "----"); + seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in", + on ? " ON" : "OFF", val, src); + } + + if (bp->dcf_out) { + on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE; + val = ioread32(&bp->dcf_out->status); + gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----"); + seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF", + on ? " ON" : "OFF", val, buf); + } + + if (bp->dcf_in) { + on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE; + val = ioread32(&bp->dcf_in->status); + src = gpio_map(sma_in, 5, "sma1", "sma2", "----"); + seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in", + on ? " ON" : "OFF", val, src); + } + + if (bp->nmea_out) { + on = ioread32(&bp->nmea_out->ctrl) & 1; + val = ioread32(&bp->nmea_out->status); + seq_printf(s, "%7s: %s, error: %d\n", "NMEA", + on ? " ON" : "OFF", val); + } + + /* compute src for PPS1, used below. */ + if (bp->pps_select) { + val = ioread32(&bp->pps_select->gpio1); + if (val & 0x01) + src = gpio_map(sma_in, 0, "sma1", "sma2", "----"); + else if (val & 0x02) + src = "MAC"; + else if (val & 0x04) + src = "GNSS"; + else + src = "----"; + } else { + src = "?"; + } + + /* assumes automatic switchover/selection */ + val = ioread32(&bp->reg->select); + switch (val >> 16) { + case 0: + sprintf(buf, "----"); + break; + case 2: + sprintf(buf, "IRIG"); + break; + case 3: + sprintf(buf, "%s via PPS1", src); + break; + case 6: + sprintf(buf, "DCF"); + break; + default: + strcpy(buf, "unknown"); + break; + } + val = ioread32(&bp->reg->status); + seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf, + val & OCP_STATUS_IN_SYNC ? 
"sync" : "unsynced"); + + /* reuses PPS1 src from earlier */ + seq_printf(s, "MAC PPS1 src: %s\n", src); + + src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2"); + seq_printf(s, "MAC PPS2 src: %s\n", src); + + if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) { + struct timespec64 sys_ts; + s64 pre_ns, post_ns, ns; + + pre_ns = timespec64_to_ns(&sts.pre_ts); + post_ns = timespec64_to_ns(&sts.post_ts); + ns = (pre_ns + post_ns) / 2; + ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC; + sys_ts = ns_to_timespec64(ns); + + seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC", + ts.tv_sec, ts.tv_nsec, &ts); + seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS", + sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts, + bp->utc_tai_offset); + seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "", + timespec64_to_ns(&ts) - ns, + post_ns - pre_ns); + } + + free_page((unsigned long)buf); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary); + +static struct dentry *ptp_ocp_debugfs_root; + +static void +ptp_ocp_debugfs_add_device(struct ptp_ocp *bp) +{ + struct dentry *d; + + d = debugfs_create_dir(dev_name(&bp->dev), ptp_ocp_debugfs_root); + bp->debug_root = d; + debugfs_create_file("summary", 0444, bp->debug_root, + &bp->dev, &ptp_ocp_summary_fops); +} + +static void +ptp_ocp_debugfs_remove_device(struct ptp_ocp *bp) +{ + debugfs_remove_recursive(bp->debug_root); +} + +static void +ptp_ocp_debugfs_init(void) +{ + ptp_ocp_debugfs_root = debugfs_create_dir("timecard", NULL); +} + +static void +ptp_ocp_debugfs_fini(void) +{ + debugfs_remove_recursive(ptp_ocp_debugfs_root); +} + static void ptp_ocp_dev_release(struct device *dev) { @@ -1270,7 +2236,9 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) bp->ptp_info = ptp_ocp_clock_info; spin_lock_init(&bp->lock); bp->gnss_port = -1; + bp->gnss2_port = -1; bp->mac_port = -1; + bp->nmea_port = -1; bp->pdev = pdev; device_initialize(&bp->dev); @@ -1332,10 +2300,18 @@ ptp_ocp_complete(struct ptp_ocp *bp) sprintf(buf, "ttyS%d", bp->gnss_port); ptp_ocp_link_child(bp, buf, "ttyGNSS"); } + if (bp->gnss2_port != -1) { + sprintf(buf, "ttyS%d", bp->gnss2_port); + ptp_ocp_link_child(bp, buf, "ttyGNSS2"); + } if (bp->mac_port != -1) { sprintf(buf, "ttyS%d", bp->mac_port); ptp_ocp_link_child(bp, buf, "ttyMAC"); } + if (bp->nmea_port != -1) { + sprintf(buf, "ttyS%d", bp->nmea_port); + ptp_ocp_link_child(bp, buf, "ttyNMEA"); + } sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp)); ptp_ocp_link_child(bp, buf, "ptp"); @@ -1346,13 +2322,53 @@ ptp_ocp_complete(struct ptp_ocp *bp) if (device_add_groups(&bp->dev, timecard_groups)) pr_err("device add groups failed\n"); + ptp_ocp_debugfs_add_device(bp); + return 0; } static void -ptp_ocp_resource_summary(struct ptp_ocp *bp) +ptp_ocp_phc_info(struct ptp_ocp *bp) { + struct timespec64 ts; + u32 version, select; + bool sync; + + version = ioread32(&bp->reg->version); + select = ioread32(&bp->reg->select); + dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n", + version >> 24, (version >> 16) & 0xff, version & 0xffff, + ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16), + ptp_clock_index(bp->ptp)); + + sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC; + if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL)) + dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n", + ts.tv_sec, ts.tv_nsec, + sync ? 
"in-sync" : "UNSYNCED"); +} + +static void +ptp_ocp_serial_info(struct device *dev, const char *name, int port, int baud) +{ + if (port != -1) + dev_info(dev, "%5s: /dev/ttyS%-2d @ %6d\n", name, port, baud); +} + +static void +ptp_ocp_info(struct ptp_ocp *bp) +{ + static int nmea_baud[] = { + 1200, 2400, 4800, 9600, 19200, 38400, + 57600, 115200, 230400, 460800, 921600, + 1000000, 2000000 + }; struct device *dev = &bp->pdev->dev; + u32 reg; + + ptp_ocp_phc_info(bp); + if (bp->tod) + ptp_ocp_tod_info(bp); if (bp->image) { u32 ver = ioread32(&bp->image->version); @@ -1365,10 +2381,17 @@ ptp_ocp_resource_summary(struct ptp_ocp *bp) dev_info(dev, "golden image, version %d\n", ver >> 16); } - if (bp->gnss_port != -1) - dev_info(dev, "GNSS @ /dev/ttyS%d 115200\n", bp->gnss_port); - if (bp->mac_port != -1) - dev_info(dev, "MAC @ /dev/ttyS%d 57600\n", bp->mac_port); + ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200); + ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200); + ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600); + if (bp->nmea_out && bp->nmea_port != -1) { + int baud = -1; + + reg = ioread32(&bp->nmea_out->uart_baud); + if (reg < ARRAY_SIZE(nmea_baud)) + baud = nmea_baud[reg]; + ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud); + } } static void @@ -1386,6 +2409,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp) static void ptp_ocp_detach(struct ptp_ocp *bp) { + ptp_ocp_debugfs_remove_device(bp); ptp_ocp_detach_sysfs(bp); if (timer_pending(&bp->watchdog)) del_timer_sync(&bp->watchdog); @@ -1393,12 +2417,18 @@ ptp_ocp_detach(struct ptp_ocp *bp) ptp_ocp_unregister_ext(bp->ts0); if (bp->ts1) ptp_ocp_unregister_ext(bp->ts1); + if (bp->ts2) + ptp_ocp_unregister_ext(bp->ts2); if (bp->pps) ptp_ocp_unregister_ext(bp->pps); if (bp->gnss_port != -1) serial8250_unregister_port(bp->gnss_port); + if (bp->gnss2_port != -1) + serial8250_unregister_port(bp->gnss2_port); if (bp->mac_port != -1) serial8250_unregister_port(bp->mac_port); + if (bp->nmea_port != -1) + serial8250_unregister_port(bp->nmea_port); if (bp->spi_flash) platform_device_unregister(bp->spi_flash); if (bp->i2c_ctrl) @@ -1425,10 +2455,6 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; } - err = devlink_register(devlink); - if (err) - goto out_free; - err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_device\n"); @@ -1445,7 +2471,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) * allow this - if not all of the IRQ's are returned, skip the * extra devices and just register the clock. 
*/ - err = pci_alloc_irq_vectors(pdev, 1, 10, PCI_IRQ_MSI | PCI_IRQ_MSIX); + err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (err < 0) { dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err); goto out; @@ -1470,8 +2496,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out; ptp_ocp_info(bp); - ptp_ocp_resource_summary(bp); - + devlink_register(devlink); return 0; out: @@ -1480,10 +2505,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id) out_disable: pci_disable_device(pdev); out_unregister: - devlink_unregister(devlink); -out_free: devlink_free(devlink); - return err; } @@ -1493,11 +2515,11 @@ ptp_ocp_remove(struct pci_dev *pdev) struct ptp_ocp *bp = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(bp); + devlink_unregister(devlink); ptp_ocp_detach(bp); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); - devlink_unregister(devlink); devlink_free(devlink); } @@ -1554,6 +2576,8 @@ ptp_ocp_init(void) const char *what; int err; + ptp_ocp_debugfs_init(); + what = "timecard class"; err = class_register(&timecard_class); if (err) @@ -1576,6 +2600,7 @@ ptp_ocp_init(void) out_notifier: class_unregister(&timecard_class); out: + ptp_ocp_debugfs_fini(); pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err); return err; } @@ -1586,6 +2611,7 @@ ptp_ocp_fini(void) bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier); pci_unregister_driver(&ptp_ocp_driver); class_unregister(&timecard_class); + ptp_ocp_debugfs_fini(); } module_init(ptp_ocp_init); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 20efafe4789735..efbb5e5eca05f5 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -24,19 +24,6 @@ #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) static struct kmem_cache *qdio_q_cache; -static struct kmem_cache *qdio_aob_cache; - -struct qaob *qdio_allocate_aob(void) -{ - return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC); -} -EXPORT_SYMBOL_GPL(qdio_allocate_aob); - -void qdio_release_aob(struct qaob *aob) -{ - kmem_cache_free(qdio_aob_cache, aob); -} -EXPORT_SYMBOL_GPL(qdio_release_aob); /** * qdio_free_buffers() - free qdio buffers @@ -447,39 +434,22 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr) int __init qdio_setup_init(void) { - int rc; - qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), 256, 0, NULL); if (!qdio_q_cache) return -ENOMEM; - qdio_aob_cache = kmem_cache_create("qdio_aob", - sizeof(struct qaob), - sizeof(struct qaob), - 0, - NULL); - if (!qdio_aob_cache) { - rc = -ENOMEM; - goto free_qdio_q_cache; - } - /* Check for OSA/FCP thin interrupts (bit 67). */ DBF_EVENT("thinint:%1d", (css_general_characteristics.aif_osa) ? 1 : 0); /* Check for QEBSM support in general (bit 58). 
*/ DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm); - rc = 0; -out: - return rc; -free_qdio_q_cache: - kmem_cache_destroy(qdio_q_cache); - goto out; + + return 0; } void qdio_setup_exit(void) { - kmem_cache_destroy(qdio_aob_cache); kmem_cache_destroy(qdio_q_cache); } diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 06281a0a055210..de2423c72b022d 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -182,7 +182,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *); static void ctcmpc_chx_resend(fsm_instance *, int, void *); static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); -/** +/* * Check return code of a preceding ccw_device call, halt_IO etc... * * ch : The channel, the error belongs to. @@ -223,7 +223,7 @@ void ctcm_purge_skb_queue(struct sk_buff_head *q) } } -/** +/* * NOP action for statemachines */ static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) @@ -234,7 +234,7 @@ static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) * Actions for channel - statemachines. */ -/** +/* * Normal data has been send. Free the corresponding * skb (it's in io_queue), reset dev->tbusy and * revert to idle state. @@ -322,7 +322,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) ctcm_clear_busy_do(dev); } -/** +/* * Initial data is sent. * Notify device statemachine that we are up and * running. @@ -344,7 +344,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); } -/** +/* * Got normal data, check for sanity, queue it up, allocate new buffer * trigger bottom half, and initiate next read. * @@ -421,7 +421,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) ctcm_ccw_check_rc(ch, rc, "normal RX"); } -/** +/* * Initialize connection by sending a __u16 of value 0. * * fi An instance of a channel statemachine. @@ -497,7 +497,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) } } -/** +/* * Got initial data, check it. If OK, * notify device statemachine that we are up and * running. @@ -538,7 +538,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) } } -/** +/* * Set channel into extended mode. * * fi An instance of a channel statemachine. @@ -578,7 +578,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) ch->retry = 0; } -/** +/* * Setup channel. * * fi An instance of a channel statemachine. @@ -641,7 +641,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) } } -/** +/* * Shutdown a channel. * * fi An instance of a channel statemachine. @@ -682,7 +682,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) } } -/** +/* * Cleanup helper for chx_fail and chx_stopped * cleanup channels queue and notify interface statemachine. * @@ -728,7 +728,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, } } -/** +/* * A channel has successfully been halted. * Cleanup it's queue and notify interface statemachine. * @@ -741,7 +741,7 @@ static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg); } -/** +/* * A stop command from device statemachine arrived and we are in * not operational mode. Set state to stopped. * @@ -754,7 +754,7 @@ static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_STOPPED); } -/** +/* * A machine check for no path, not operational status or gone device has * happened. 
* Cleanup queue and notify interface statemachine. @@ -768,7 +768,7 @@ static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg); } -/** +/* * Handle error during setup of channel. * * fi An instance of a channel statemachine. @@ -817,7 +817,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) } } -/** +/* * Restart a channel after an error. * * fi An instance of a channel statemachine. @@ -858,7 +858,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) } } -/** +/* * Handle error during RX initial handshake (exchange of * 0-length block header) * @@ -893,7 +893,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) } } -/** +/* * Notify device statemachine if we gave up initialization * of RX channel. * @@ -914,7 +914,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } -/** +/* * Handle RX Unit check remote reset (remote disconnected) * * fi An instance of a channel statemachine. @@ -946,7 +946,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) ccw_device_halt(ch2->cdev, 0); } -/** +/* * Handle error during TX channel initialization. * * fi An instance of a channel statemachine. @@ -978,7 +978,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) } } -/** +/* * Handle TX timeout by retrying operation. * * fi An instance of a channel statemachine. @@ -1050,7 +1050,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) return; } -/** +/* * Handle fatal errors during an I/O command. * * fi An instance of a channel statemachine. @@ -1198,7 +1198,7 @@ int ch_fsm_len = ARRAY_SIZE(ch_fsm); * Actions for mpc channel statemachine. */ -/** +/* * Normal data has been send. Free the corresponding * skb (it's in io_queue), reset dev->tbusy and * revert to idle state. @@ -1361,7 +1361,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) return; } -/** +/* * Got normal data, check for sanity, queue it up, allocate new buffer * trigger bottom half, and initiate next read. * @@ -1464,7 +1464,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) } -/** +/* * Initialize connection by sending a __u16 of value 0. * * fi An instance of a channel statemachine. @@ -1516,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) return; } -/** +/* * Got initial data, check it. If OK, * notify device statemachine that we are up and * running. @@ -2043,7 +2043,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm); * Actions for interface - statemachine. */ -/** +/* * Startup channels by sending CTC_EVENT_START to each channel. * * fi An instance of an interface statemachine. @@ -2068,7 +2068,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg) } } -/** +/* * Shutdown channels by sending CTC_EVENT_STOP to each channel. * * fi An instance of an interface statemachine. @@ -2122,7 +2122,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) DEV_EVENT_START, dev); } -/** +/* * Called from channel statemachine * when a channel is up and running. * @@ -2183,7 +2183,7 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) } } -/** +/* * Called from device statemachine * when a channel has been shutdown. 
* diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index fd705429708e80..5ea7eeb07002b5 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -55,7 +55,7 @@ /* Some common global variables */ -/** +/* * The root device for ctcm group devices */ static struct device *ctcm_root_dev; @@ -65,7 +65,7 @@ static struct device *ctcm_root_dev; */ struct channel *channels; -/** +/* * Unpack a just received skb and hand it over to * upper layers. * @@ -180,7 +180,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) } } -/** +/* * Release a specific channel in the channel list. * * ch Pointer to channel struct to be released. @@ -192,7 +192,7 @@ static void channel_free(struct channel *ch) fsm_newstate(ch->fsm, CTC_STATE_IDLE); } -/** +/* * Remove a specific channel in the channel list. * * ch Pointer to channel struct to be released. @@ -240,7 +240,7 @@ static void channel_remove(struct channel *ch) chid, ok ? "OK" : "failed"); } -/** +/* * Get a specific channel from the channel list. * * type Type of channel we are interested in. @@ -300,7 +300,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) } -/** +/* * Check sense of a unit check. * * ch The channel, the sense code belongs to. @@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch) * Interface API for upper network layers */ -/** +/* * Open an interface. * Called from generic network layer when ifconfig up is run. * @@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev) return 0; } -/** +/* * Close an interface. * Called from generic network layer when ifconfig down is run. * @@ -451,7 +451,7 @@ int ctcm_close(struct net_device *dev) } -/** +/* * Transmit a packet. * This is a helper function for ctcm_tx(). * @@ -822,7 +822,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) return rc; } -/** +/* * Start transmission of a packet. * Called from generic network device layer. * @@ -975,7 +975,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) } -/** +/* * Sets MTU of an interface. * * dev Pointer to interface struct. @@ -1007,7 +1007,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) return 0; } -/** +/* * Returns interface statistics of a device. * * dev Pointer to interface struct. @@ -1144,7 +1144,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) return dev; } -/** +/* * Main IRQ handler. * * cdev The ccw_device the interrupt is for. @@ -1257,7 +1257,7 @@ static const struct device_type ctcm_devtype = { .groups = ctcm_attr_groups, }; -/** +/* * Add ctcm specific attributes. * Add ctcm private data. * @@ -1293,7 +1293,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev) return 0; } -/** +/* * Add a new channel to the list of channels. * Keeps the channel list sorted. * @@ -1343,7 +1343,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev)); ch->type = type; - /** + /* * "static" ccws are used in the following way: * * ccw[0..2] (Channel program for generic I/O): @@ -1471,7 +1471,7 @@ static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id) return type; } -/** +/* * * Setup an interface. * @@ -1595,7 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) return result; } -/** +/* * Shutdown an interface. * * cgdev Device to be shut down. 
@@ -1738,7 +1738,7 @@ static void print_banner(void) pr_info("CTCM driver initialized\n"); } -/** +/* * Initialize module. * This is called just after the module is loaded. * diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index f0436f555c62a5..88abfb5e8045c6 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -1016,7 +1016,7 @@ void mpc_channel_action(struct channel *ch, int direction, int action) CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); } -/** +/* * Unpack a just received skb and hand it over to * upper layers. * special MPC version of unpack_skb. @@ -1211,7 +1211,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) __func__, dev->name, ch, ch->id); } -/** +/* * tasklet helper for mpc's skb unpacking. * * ch The channel to work on. @@ -1320,7 +1320,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) * CTCM_PROTO_MPC only */ -/** +/* * NOP action for statemachines */ static void mpc_action_nop(fsm_instance *fi, int event, void *arg) @@ -1426,7 +1426,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) } } -/** +/* * Handle mpc group action timeout. * MPC Group Station FSM action * CTCM_PROTO_MPC only diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index eb07862bd36a03..98c4864932d221 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * A generic FSM based on fsm used in isdn4linux * */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 26cc943d203404..5f7e28de8b1504 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -555,7 +555,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_disable; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) goto err_resource; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 440219bcaa2bed..2a6479740600ef 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -40,18 +40,18 @@ #error Cannot compile lcs.c without some net devices switched on. #endif -/** +/* * initialization string for output */ static char version[] __initdata = "LCS driver"; -/** +/* * the root device for lcs group devices */ static struct device *lcs_root_dev; -/** +/* * Some prototypes. */ static void lcs_tasklet(unsigned long); @@ -62,14 +62,14 @@ static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); #endif /* CONFIG_IP_MULTICAST */ static int lcs_recovery(void *ptr); -/** +/* * Debug Facility Stuff */ static char debug_buffer[255]; static debug_info_t *lcs_dbf_setup; static debug_info_t *lcs_dbf_trace; -/** +/* * LCS Debug Facility functions */ static void @@ -96,7 +96,7 @@ lcs_register_debug_facility(void) return 0; } -/** +/* * Allocate io buffers. */ static int @@ -123,7 +123,7 @@ lcs_alloc_channel(struct lcs_channel *channel) return 0; } -/** +/* * Free io buffers. */ static void @@ -151,7 +151,7 @@ lcs_cleanup_channel(struct lcs_channel *channel) lcs_free_channel(channel); } -/** +/* * LCS free memory for card and channels. */ static void @@ -162,7 +162,7 @@ lcs_free_card(struct lcs_card *card) kfree(card); } -/** +/* * LCS alloc memory for card and channels */ static struct lcs_card * @@ -402,7 +402,7 @@ lcs_do_start_thread(struct lcs_card *card, unsigned long thread) return rc; } -/** +/* * Initialize channels,card and state machines. 
*/ static void @@ -451,7 +451,8 @@ static void lcs_clear_multicast_list(struct lcs_card *card) spin_unlock_irqrestore(&card->ipm_lock, flags); #endif } -/** + +/* * Cleanup channels,card and state machines. */ static void @@ -468,7 +469,7 @@ lcs_cleanup_card(struct lcs_card *card) lcs_cleanup_channel(&card->read); } -/** +/* * Start channel. */ static int @@ -517,7 +518,7 @@ lcs_clear_channel(struct lcs_channel *channel) } -/** +/* * Stop channel. */ static int @@ -545,7 +546,7 @@ lcs_stop_channel(struct lcs_channel *channel) return 0; } -/** +/* * start read and write channel */ static int @@ -565,7 +566,7 @@ lcs_start_channels(struct lcs_card *card) return rc; } -/** +/* * stop read and write channel */ static int @@ -577,7 +578,7 @@ lcs_stop_channels(struct lcs_card *card) return 0; } -/** +/* * Get empty buffer. */ static struct lcs_buffer * @@ -610,7 +611,7 @@ lcs_get_buffer(struct lcs_channel *channel) return buffer; } -/** +/* * Resume channel program if the channel is suspended. */ static int @@ -636,7 +637,7 @@ __lcs_resume_channel(struct lcs_channel *channel) } -/** +/* * Make a buffer ready for processing. */ static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) @@ -678,7 +679,7 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) return rc; } -/** +/* * Mark the buffer as processed. Take care of the suspend bit * of the previous buffer. This function is called from * interrupt context, so the lock must not be taken. @@ -712,7 +713,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) return __lcs_resume_channel(channel); } -/** +/* * Put a processed buffer back to state empty. */ static void @@ -728,7 +729,7 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); } -/** +/* * Get buffer for a lan command. */ static struct lcs_buffer * @@ -785,7 +786,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd) return reply; } -/** +/* * Notifier function for lancmd replies. Called from read irq. */ static void @@ -813,7 +814,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) spin_unlock(&card->lock); } -/** +/* * Emit buffer of a lan command. */ static void @@ -877,7 +878,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, return rc ? 
-EIO : 0; } -/** +/* * LCS startup command */ static int @@ -895,7 +896,7 @@ lcs_send_startup(struct lcs_card *card, __u8 initiator) return lcs_send_lancmd(card, buffer, NULL); } -/** +/* * LCS shutdown command */ static int @@ -912,7 +913,7 @@ lcs_send_shutdown(struct lcs_card *card) return lcs_send_lancmd(card, buffer, NULL); } -/** +/* * LCS lanstat command */ static void @@ -939,7 +940,7 @@ lcs_send_lanstat(struct lcs_card *card) return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); } -/** +/* * send stoplan command */ static int @@ -958,7 +959,7 @@ lcs_send_stoplan(struct lcs_card *card, __u8 initiator) return lcs_send_lancmd(card, buffer, NULL); } -/** +/* * send startlan command */ static void @@ -986,7 +987,7 @@ lcs_send_startlan(struct lcs_card *card, __u8 initiator) } #ifdef CONFIG_IP_MULTICAST -/** +/* * send setipm command (Multicast) */ static int @@ -1010,7 +1011,7 @@ lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) return lcs_send_lancmd(card, buffer, NULL); } -/** +/* * send delipm command (Multicast) */ static int @@ -1034,7 +1035,7 @@ lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) return lcs_send_lancmd(card, buffer, NULL); } -/** +/* * check if multicast is supported by LCS */ static void @@ -1074,7 +1075,7 @@ lcs_check_multicast_support(struct lcs_card *card) return -EOPNOTSUPP; } -/** +/* * set or del multicast address on LCS card */ static void @@ -1129,7 +1130,7 @@ lcs_fix_multicast_list(struct lcs_card *card) spin_unlock_irqrestore(&card->ipm_lock, flags); } -/** +/* * get mac address for the relevant Multicast address */ static void @@ -1139,7 +1140,7 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) ip_eth_mc_map(ipm, mac); } -/** +/* * function called by net device to handle multicast address relevant things */ static void lcs_remove_mc_addresses(struct lcs_card *card, @@ -1260,7 +1261,7 @@ lcs_register_mc_addresses(void *data) } #endif /* CONFIG_IP_MULTICAST */ -/** +/* * function called by net device to * handle multicast address relevant things */ @@ -1355,7 +1356,7 @@ lcs_schedule_recovery(struct lcs_card *card) schedule_work(&card->kernel_thread_starter); } -/** +/* * IRQ Handler for LCS channels */ static void @@ -1439,7 +1440,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) tasklet_schedule(&channel->irq_tasklet); } -/** +/* * Tasklet for IRQ handler */ static void @@ -1476,7 +1477,7 @@ lcs_tasklet(unsigned long data) wake_up(&channel->wait_q); } -/** +/* * Finish current tx buffer and make it ready for transmit. */ static void @@ -1490,7 +1491,7 @@ __lcs_emit_txbuffer(struct lcs_card *card) card->tx_emitted++; } -/** +/* * Callback for finished tx buffers. 
*/ static void @@ -1515,7 +1516,7 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) spin_unlock(&card->lock); } -/** +/* * Packet transmit function called by network stack */ static int @@ -1593,7 +1594,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) return rc; } -/** +/* * send startlan and lanstat command to make LCS device ready */ static int @@ -1648,7 +1649,7 @@ lcs_startlan(struct lcs_card *card) return rc; } -/** +/* * LCS detect function * setup channels and make them I/O ready */ @@ -1680,7 +1681,7 @@ lcs_detect(struct lcs_card *card) return rc; } -/** +/* * LCS Stop card */ static int @@ -1705,7 +1706,7 @@ lcs_stopcard(struct lcs_card *card) return rc; } -/** +/* * Kernel Thread helper functions for LGW initiated commands */ static void @@ -1721,7 +1722,7 @@ lcs_start_kernel_thread(struct work_struct *work) #endif } -/** +/* * Process control frames. */ static void @@ -1748,7 +1749,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) lcs_notify_lancmd_waiters(card, cmd); } -/** +/* * Unpack network packet. */ static void @@ -1779,7 +1780,7 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) netif_rx(skb); } -/** +/* * LCS main routine to get packets and lancmd replies from the buffers */ static void @@ -1829,7 +1830,7 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) lcs_ready_buffer(&card->read, buffer); } -/** +/* * get network statistics for ifconfig and other user programs */ static struct net_device_stats * @@ -1842,7 +1843,7 @@ lcs_getstats(struct net_device *dev) return &card->stats; } -/** +/* * stop lcs device * This function will be called by user doing ifconfig xxx down */ @@ -1866,7 +1867,7 @@ lcs_stop_device(struct net_device *dev) return rc; } -/** +/* * start lcs device and make it runnable * This function will be called by user doing ifconfig xxx up */ @@ -1892,7 +1893,7 @@ lcs_open_device(struct net_device *dev) return rc; } -/** +/* * show function for portno called by cat or similar things */ static ssize_t @@ -1908,7 +1909,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%d\n", card->portno); } -/** +/* * store the value which is piped to file portno */ static ssize_t @@ -2033,7 +2034,7 @@ static const struct device_type lcs_devtype = { .groups = lcs_attr_groups, }; -/** +/* * lcs_probe_device is called on establishing a new ccwgroup_device. */ static int @@ -2077,7 +2078,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev) return register_netdev(card->dev); } -/** +/* * lcs_new_device will be called by setting the group device online. */ static const struct net_device_ops lcs_netdev_ops = { @@ -2161,7 +2162,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) card->dev->ml_priv = card; card->dev->netdev_ops = &lcs_netdev_ops; card->dev->dev_port = card->portno; - memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); + eth_hw_addr_set(card->dev, card->mac); #ifdef CONFIG_IP_MULTICAST if (!lcs_check_multicast_support(card)) card->dev->netdev_ops = &lcs_mc_netdev_ops; @@ -2199,7 +2200,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) return -ENODEV; } -/** +/* * lcs_shutdown_device, called when setting the group device offline. 
*/ static int @@ -2240,7 +2241,7 @@ lcs_shutdown_device(struct ccwgroup_device *ccwgdev) return __lcs_shutdown_device(ccwgdev, 0); } -/** +/* * drive lcs recovery after startup and startlan initiated by Lan Gateway */ static int @@ -2271,7 +2272,7 @@ lcs_recovery(void *ptr) return 0; } -/** +/* * lcs_remove_device, free buffers and card */ static void @@ -2315,7 +2316,7 @@ static struct ccw_driver lcs_ccw_driver = { .int_class = IRQIO_LCS, }; -/** +/* * LCS ccwgroup driver registration */ static struct ccwgroup_driver lcs_group_driver = { @@ -2351,7 +2352,7 @@ static const struct attribute_group *lcs_drv_attr_groups[] = { NULL, }; -/** +/* * LCS Module/Kernel initialization function */ static int @@ -2389,7 +2390,7 @@ __init lcs_init_module(void) } -/** +/* * LCS module cleanup function */ static void diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 5a0c2f07a3a252..981e7b1c6b9622 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -58,7 +58,7 @@ MODULE_AUTHOR ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); -/** +/* * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" @@ -107,7 +107,7 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); debug_sprintf_event(iucv_dbf_trace, level, text ); \ } while (0) -/** +/* * some more debug stuff */ #define PRINTK_HEADER " iucv: " /* for debugging */ @@ -118,7 +118,7 @@ static struct device_driver netiucv_driver = { .bus = &iucv_bus, }; -/** +/* * Per connection profiling data */ struct connection_profile { @@ -133,7 +133,7 @@ struct connection_profile { unsigned long tx_max_pending; }; -/** +/* * Representation of one iucv connection */ struct iucv_connection { @@ -154,13 +154,13 @@ struct iucv_connection { char userdata[17]; }; -/** +/* * Linked list of all connection structs. */ static LIST_HEAD(iucv_connection_list); static DEFINE_RWLOCK(iucv_connection_rwlock); -/** +/* * Representation of event-data for the * connection state machine. */ @@ -169,7 +169,7 @@ struct iucv_event { void *data; }; -/** +/* * Private part of the network device structure */ struct netiucv_priv { @@ -180,7 +180,7 @@ struct netiucv_priv { struct device *dev; }; -/** +/* * Link level header for a packet. */ struct ll_header { @@ -195,7 +195,7 @@ struct ll_header { #define NETIUCV_QUEUELEN_DEFAULT 50 #define NETIUCV_TIMEOUT_5SEC 5000 -/** +/* * Compatibility macros for busy handling * of network devices. */ @@ -223,7 +223,7 @@ static u8 iucvMagic_ebcdic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; -/** +/* * Convert an iucv userId to its printable * form (strip whitespace at end). * @@ -262,7 +262,7 @@ static char *netiucv_printuser(struct iucv_connection *conn) return netiucv_printname(conn->userid, 8); } -/** +/* * States of the interface statemachine. */ enum dev_states { @@ -270,7 +270,7 @@ enum dev_states { DEV_STATE_STARTWAIT, DEV_STATE_STOPWAIT, DEV_STATE_RUNNING, - /** + /* * MUST be always the last element!! */ NR_DEV_STATES @@ -283,7 +283,7 @@ static const char *dev_state_names[] = { "Running", }; -/** +/* * Events of the interface statemachine. */ enum dev_events { @@ -291,7 +291,7 @@ enum dev_events { DEV_EVENT_STOP, DEV_EVENT_CONUP, DEV_EVENT_CONDOWN, - /** + /* * MUST be always the last element!! 
*/ NR_DEV_EVENTS @@ -304,11 +304,11 @@ static const char *dev_event_names[] = { "Connection down", }; -/** +/* * Events of the connection statemachine */ enum conn_events { - /** + /* * Events, representing callbacks from * lowlevel iucv layer) */ @@ -320,23 +320,23 @@ enum conn_events { CONN_EVENT_RX, CONN_EVENT_TXDONE, - /** + /* * Events, representing errors return codes from * calls to lowlevel iucv layer */ - /** + /* * Event, representing timer expiry. */ CONN_EVENT_TIMER, - /** + /* * Events, representing commands from upper levels. */ CONN_EVENT_START, CONN_EVENT_STOP, - /** + /* * MUST be always the last element!! */ NR_CONN_EVENTS, @@ -357,55 +357,55 @@ static const char *conn_event_names[] = { "Stop", }; -/** +/* * States of the connection statemachine. */ enum conn_states { - /** + /* * Connection not assigned to any device, * initial state, invalid */ CONN_STATE_INVALID, - /** + /* * Userid assigned but not operating */ CONN_STATE_STOPPED, - /** + /* * Connection registered, * no connection request sent yet, * no connection request received */ CONN_STATE_STARTWAIT, - /** + /* * Connection registered and connection request sent, * no acknowledge and no connection request received yet. */ CONN_STATE_SETUPWAIT, - /** + /* * Connection up and running idle */ CONN_STATE_IDLE, - /** + /* * Data sent, awaiting CONN_EVENT_TXDONE */ CONN_STATE_TX, - /** + /* * Error during registration. */ CONN_STATE_REGERR, - /** + /* * Error during registration. */ CONN_STATE_CONNERR, - /** + /* * MUST be always the last element!! */ NR_CONN_STATES, @@ -424,7 +424,7 @@ static const char *conn_state_names[] = { }; -/** +/* * Debug Facility Stuff */ static debug_info_t *iucv_dbf_setup = NULL; @@ -556,7 +556,7 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser) fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); } -/** +/* * NOP action for statemachines */ static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) @@ -567,7 +567,7 @@ static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) * Actions of the connection statemachine */ -/** +/* * netiucv_unpack_skb * @conn: The connection where this skb has been received. * @pskb: The received skb. @@ -993,7 +993,7 @@ static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); * Actions for interface - statemachine. */ -/** +/* * dev_action_start * @fi: An instance of an interface statemachine. * @event: The event, just happened. @@ -1012,7 +1012,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg) fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); } -/** +/* * Shutdown connection by sending CONN_EVENT_STOP to it. * * @param fi An instance of an interface statemachine. @@ -1034,7 +1034,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg) fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); } -/** +/* * Called from connection statemachine * when a connection is up and running. * @@ -1067,7 +1067,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) } } -/** +/* * Called from connection statemachine * when a connection has been shutdown. * @@ -1107,7 +1107,7 @@ static const fsm_node dev_fsm[] = { static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); -/** +/* * Transmit a packet. * This is a helper function for netiucv_tx(). 
* @@ -1144,7 +1144,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, spin_unlock_irqrestore(&conn->collect_lock, saveflags); } else { struct sk_buff *nskb = skb; - /** + /* * Copy the skb to a new allocated skb in lowmem only if the * data is located above 2G in memory or tailroom is < 2. */ @@ -1164,7 +1164,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, } copied = 1; } - /** + /* * skb now is below 2G and has enough room. Add headers. */ header.next = nskb->len + NETIUCV_HDRLEN; @@ -1194,7 +1194,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, if (copied) dev_kfree_skb(nskb); else { - /** + /* * Remove our headers. They get added * again on retransmit. */ @@ -1217,7 +1217,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, * Interface API for upper network layers */ -/** +/* * Open an interface. * Called from generic network layer when ifconfig up is run. * @@ -1233,7 +1233,7 @@ static int netiucv_open(struct net_device *dev) return 0; } -/** +/* * Close an interface. * Called from generic network layer when ifconfig down is run. * @@ -1249,7 +1249,7 @@ static int netiucv_close(struct net_device *dev) return 0; } -/** +/* * Start transmission of a packet. * Called from generic network device layer. * @@ -1266,7 +1266,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) int rc; IUCV_DBF_TEXT(trace, 4, __func__); - /** + /* * Some sanity checks ... */ if (skb == NULL) { @@ -1282,7 +1282,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - /** + /* * If connection is not running, try to restart it * and throw away packet. */ @@ -1304,7 +1304,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; } -/** +/* * netiucv_stats * @dev: Pointer to interface struct. * @@ -1745,7 +1745,7 @@ static void netiucv_unregister_device(struct device *dev) device_unregister(dev); } -/** +/* * Allocate and initialize a new connection structure. * Add it to the list of netiucv connections; */ @@ -1802,7 +1802,7 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, return NULL; } -/** +/* * Release a connection structure and remove it from the * list of netiucv connections. */ @@ -1826,7 +1826,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn) kfree_skb(conn->tx_buff); } -/** +/* * Release everything of a net device. */ static void netiucv_free_netdevice(struct net_device *dev) @@ -1848,7 +1848,7 @@ static void netiucv_free_netdevice(struct net_device *dev) } } -/** +/* * Initialize a net device. (Called from kernel in alloc_netdev()) */ static const struct net_device_ops netiucv_netdev_ops = { @@ -1873,7 +1873,7 @@ static void netiucv_setup_netdevice(struct net_device *dev) dev->netdev_ops = &netiucv_netdev_ops; } -/** +/* * Allocate and initialize everything of a net device. 
*/ static struct net_device *netiucv_init_netdevice(char *username, char *userdata) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a5aa0bdc61d695..20dca4c0384a44 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -545,7 +545,6 @@ static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue) struct qeth_qdio_info { atomic_t state; /* input */ - int no_in_queues; struct qeth_qdio_q *in_q; struct qeth_qdio_q *c_q; struct qeth_qdio_buffer_pool in_buf_pool; @@ -771,8 +770,6 @@ struct qeth_discipline { void (*remove) (struct ccwgroup_device *); int (*set_online)(struct qeth_card *card, bool carrier_ok); void (*set_offline)(struct qeth_card *card); - int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, - void __user *data, int cmd); int (*control_event_handler)(struct qeth_card *card, struct qeth_ipa_cmd *cmd); }; @@ -1087,6 +1084,7 @@ int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); +__printf(3, 4) void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e9807d2996a9d7..26c55f67289f01 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(qeth_dbf); static struct kmem_cache *qeth_core_header_cache; static struct kmem_cache *qeth_qdio_outbuf_cache; +static struct kmem_cache *qeth_qaob_cache; static struct device *qeth_core_root_dev; static struct dentry *qeth_debugfs_root; @@ -354,8 +355,8 @@ static int qeth_cq_init(struct qeth_card *card) qdio_reset_buffers(card->qdio.c_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); card->qdio.c_q->next_buf_to_init = 127; - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, - card->qdio.no_in_queues - 1, 0, 127, NULL); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127, + NULL); if (rc) { QETH_CARD_TEXT_(card, 2, "1err%d", rc); goto out; @@ -375,21 +376,16 @@ static int qeth_alloc_cq(struct qeth_card *card) dev_err(&card->gdev->dev, "Failed to create completion queue\n"); return -ENOMEM; } - - card->qdio.no_in_queues = 2; } else { QETH_CARD_TEXT(card, 2, "nocq"); card->qdio.c_q = NULL; - card->qdio.no_in_queues = 1; } - QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues); return 0; } static void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { - --card->qdio.no_in_queues; qeth_free_qdio_queue(card->qdio.c_q); card->qdio.c_q = NULL; } @@ -1338,7 +1334,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf) { if (buf->aob) - qdio_release_aob(buf->aob); + kmem_cache_free(qeth_qaob_cache, buf->aob); kmem_cache_free(qeth_qdio_outbuf_cache, buf); } @@ -1458,7 +1454,6 @@ static void qeth_init_qdio_info(struct qeth_card *card) card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; /* inbound */ - card->qdio.no_in_queues = 1; card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; if (IS_IQD(card)) card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; @@ -1930,9 +1925,9 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, * @card: qeth_card structure pointer * @iob: qeth_cmd_buffer pointer * @reply_cb: 
callback function pointer - * @cb_card: pointer to the qeth_card structure - * @cb_reply: pointer to the qeth_reply structure - * @cb_cmd: pointer to the original iob for non-IPA + * cb_card: pointer to the qeth_card structure + * cb_reply: pointer to the qeth_reply structure + * cb_cmd: pointer to the original iob for non-IPA * commands, or to the qeth_ipa_cmd structure * for the IPA commands. * @reply_param: private pointer passed to the callback @@ -2629,7 +2624,7 @@ static void qeth_free_qdio_queues(struct qeth_card *card) qeth_free_cq(card); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) - dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + consume_skb(card->qdio.in_q->bufs[j].rx_skb); } qeth_free_qdio_queue(card->qdio.in_q); card->qdio.in_q = NULL; @@ -3039,7 +3034,7 @@ static int qeth_send_ipa_cmd_cb(struct qeth_card *card, return (cmd->hdr.return_code) ? -EIO : 0; } -/** +/* * qeth_send_ipa_cmd() - send an IPA command * * See qeth_send_control_data() for explanation of the arguments. @@ -3554,7 +3549,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, !qeth_iqd_is_mcast_queue(card, queue) && count == 1) { if (!buf->aob) - buf->aob = qdio_allocate_aob(); + buf->aob = kmem_cache_zalloc(qeth_qaob_cache, + GFP_ATOMIC); if (buf->aob) { struct qeth_qaob_priv1 *priv; @@ -3780,7 +3776,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_schedule_recovery(card); } -/** +/* * Note: Function assumes that we have 4 outbound queues. */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) @@ -3877,12 +3873,14 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, /** * qeth_add_hw_header() - add a HW header to an skb. + * @queue: TX queue that the skb will be placed on. * @skb: skb that the HW header should be added to. * @hdr: double pointer to a qeth_hdr. When returning with >= 0, * it contains a valid pointer to a qeth_hdr. * @hdr_len: length of the HW header. * @proto_len: length of protocol headers that need to be in same page as the * HW header. + * @elements: returns the required number of buffer elements for this skb. * * Returns the pushed length. If the header can't be pushed on * (eg. 
because it would cross a page boundary), it is allocated from @@ -4375,7 +4373,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) return -EADDRNOTAVAIL; - ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); + eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr); return 0; } @@ -5046,7 +5044,7 @@ int qeth_vm_request_mac(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "badmac"); QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); } else { - ether_addr_copy(card->dev->dev_addr, response->mac); + eth_hw_addr_set(card->dev, response->mac); } out: @@ -5139,6 +5137,7 @@ static int qeth_qdio_establish(struct qeth_card *card) struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; struct qeth_qib_parms *qib_parms = NULL; struct qdio_initialize init_data; + unsigned int no_input_qs = 1; unsigned int i; int rc = 0; @@ -5153,8 +5152,10 @@ static int qeth_qdio_establish(struct qeth_card *card) } in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; - if (card->options.cq == QETH_CQ_ENABLED) + if (card->options.cq == QETH_CQ_ENABLED) { in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; + no_input_qs++; + } for (i = 0; i < card->qdio.no_out_queues; i++) out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; @@ -5164,7 +5165,7 @@ static int qeth_qdio_establish(struct qeth_card *card) QDIO_QETH_QFMT; init_data.qib_param_field_format = 0; init_data.qib_param_field = (void *)qib_parms; - init_data.no_input_qs = card->qdio.no_in_queues; + init_data.no_input_qs = no_input_qs; init_data.no_output_qs = card->qdio.no_out_queues; init_data.input_handler = qeth_qdio_input_handler; init_data.output_handler = qeth_qdio_output_handler; @@ -5604,7 +5605,7 @@ static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, if (uses_frags) napi_free_frags(napi); else - dev_kfree_skb_any(skb); + kfree_skb(skb); return; } @@ -5795,7 +5796,7 @@ static int qeth_extract_skb(struct qeth_card *card, if (uses_frags) napi_free_frags(napi); else - dev_kfree_skb_any(skb); + kfree_skb(skb); QETH_CARD_STAT_INC(card, rx_length_errors); } @@ -6600,10 +6601,7 @@ int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *d rc = qeth_query_oat_command(card, data); break; default: - if (card->discipline->do_ioctl) - rc = card->discipline->do_ioctl(dev, rq, data, cmd); - else - rc = -EOPNOTSUPP; + rc = -EOPNOTSUPP; } if (rc) QETH_CARD_TEXT_(card, 2, "ioce%x", rc); @@ -7177,6 +7175,16 @@ static int __init qeth_core_init(void) rc = -ENOMEM; goto cqslab_err; } + + qeth_qaob_cache = kmem_cache_create("qeth_qaob", + sizeof(struct qaob), + sizeof(struct qaob), + 0, NULL); + if (!qeth_qaob_cache) { + rc = -ENOMEM; + goto qaob_err; + } + rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; @@ -7189,6 +7197,8 @@ static int __init qeth_core_init(void) ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: + kmem_cache_destroy(qeth_qaob_cache); +qaob_err: kmem_cache_destroy(qeth_qdio_outbuf_cache); cqslab_err: kmem_cache_destroy(qeth_core_header_cache); @@ -7207,6 +7217,7 @@ static void __exit qeth_core_exit(void) qeth_clear_dbf_list(); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); + kmem_cache_destroy(qeth_qaob_cache); kmem_cache_destroy(qeth_qdio_outbuf_cache); kmem_cache_destroy(qeth_core_header_cache); root_device_unregister(qeth_core_root_dev); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index dc6c00768d9195..0347fc1847863a 100644 
--- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -71,7 +71,7 @@ static int qeth_l2_send_setdelmac_cb(struct qeth_card *card, return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code); } -static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, +static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac, enum qeth_ipa_cmds ipacmd) { struct qeth_ipa_cmd *cmd; @@ -88,7 +88,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL); } -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac) { int rc; @@ -121,11 +121,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) QETH_CARD_TEXT(card, 2, "L2Wmac"); rc = qeth_l2_send_setdelmac(card, mac, cmd); if (rc == -EADDRINUSE) - QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n", - CARD_DEVID(card)); + QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n", + ether_addr_to_u64(mac), CARD_DEVID(card)); else if (rc) - QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n", - CARD_DEVID(card), rc); + QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n", + ether_addr_to_u64(mac), CARD_DEVID(card), rc); return rc; } @@ -138,8 +138,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) QETH_CARD_TEXT(card, 2, "L2Rmac"); rc = qeth_l2_send_setdelmac(card, mac, cmd); if (rc) - QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n", - CARD_DEVID(card), rc); + QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n", + ether_addr_to_u64(mac), CARD_DEVID(card), rc); return rc; } @@ -377,7 +377,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) if (rc) return rc; ether_addr_copy(old_addr, dev->dev_addr); - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); if (card->info.dev_addr_is_registered) qeth_l2_remove_mac(card, old_addr); @@ -661,13 +661,13 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code, card->dev, &info.info, NULL); QETH_CARD_TEXT(card, 4, "andelmac"); QETH_CARD_TEXT_(card, 4, - "mc%012lx", ether_addr_to_u64(ntfy_mac)); + "mc%012llx", ether_addr_to_u64(ntfy_mac)); } else { call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, card->dev, &info.info, NULL); QETH_CARD_TEXT(card, 4, "anaddmac"); QETH_CARD_TEXT_(card, 4, - "mc%012lx", ether_addr_to_u64(ntfy_mac)); + "mc%012llx", ether_addr_to_u64(ntfy_mac)); } } @@ -765,8 +765,8 @@ static void qeth_l2_br2dev_worker(struct work_struct *work) int err = 0; kfree(br2dev_event_work); - QETH_CARD_TEXT_(card, 4, "b2dw%04x", event); - QETH_CARD_TEXT_(card, 4, "ma%012lx", ether_addr_to_u64(addr)); + QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event); + QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr)); rcu_read_lock(); /* Verify preconditions are still valid: */ @@ -795,7 +795,7 @@ static void qeth_l2_br2dev_worker(struct work_struct *work) if (err) { QETH_CARD_TEXT(card, 2, "b2derris"); QETH_CARD_TEXT_(card, 2, - "err%02x%03d", event, + "err%02lx%03d", event, lowerdev->ifindex); } } @@ -813,7 +813,7 @@ static void qeth_l2_br2dev_worker(struct work_struct *work) break; } if (err) - QETH_CARD_TEXT_(card, 2, "b2derr%02x", event); + QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event); } unlock: @@ -878,7 +878,7 @@ static int qeth_l2_switchdev_event(struct notifier_block *unused, while 
(lowerdev) { if (qeth_l2_must_learn(lowerdev, dstdev)) { card = lowerdev->ml_priv; - QETH_CARD_TEXT_(card, 4, "b2dqw%03x", event); + QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event); rc = qeth_l2_br2dev_queue_work(brdev, lowerdev, dstdev, event, fdb_info->addr); @@ -2430,7 +2430,6 @@ const struct qeth_discipline qeth_l2_discipline = { .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, .set_offline = qeth_l2_set_offline, - .do_ioctl = NULL, .control_event_handler = qeth_l2_control_event, }; EXPORT_SYMBOL_GPL(qeth_l2_discipline); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6fd3e288f05955..48a886f7af6217 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -492,7 +492,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) * IP address takeover related functions */ -/** +/* * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. * * Caller must hold ip_lock. @@ -913,8 +913,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr)) return -EADDRNOTAVAIL; - ether_addr_copy(card->dev->dev_addr, - cmd->data.create_destroy_addr.mac_addr); + eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr); return 0; } @@ -1512,7 +1511,8 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) return rc; } -static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) +static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { struct qeth_card *card = dev->ml_priv; struct qeth_arp_cache_entry arp_entry; @@ -1553,7 +1553,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __use rc = qeth_l3_arp_flush_cache(card); break; default: - rc = -EOPNOTSUPP; + rc = qeth_siocdevprivate(dev, rq, data, cmd); } return rc; } @@ -1842,7 +1842,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_eth_ioctl = qeth_do_ioctl, - .ndo_siocdevprivate = qeth_siocdevprivate, + .ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, .ndo_tx_timeout = qeth_tx_timeout, @@ -1858,7 +1858,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_eth_ioctl = qeth_do_ioctl, - .ndo_siocdevprivate = qeth_siocdevprivate, + .ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, .ndo_tx_timeout = qeth_tx_timeout, @@ -2072,7 +2072,6 @@ const struct qeth_discipline qeth_l3_discipline = { .remove = qeth_l3_remove_device, .set_online = qeth_l3_set_online, .set_offline = qeth_l3_set_offline, - .do_ioctl = qeth_l3_do_ioctl, .control_event_handler = qeth_l3_control_event, }; EXPORT_SYMBOL_GPL(qeth_l3_discipline); diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c index 747af96dd15c13..e8bc8d9e458314 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c @@ -22,9 +22,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, u32 task_retry_id, u8 fcp_cmd_payload[32]) { - struct e4_fcoe_task_context *ctx = task_params->context; + struct fcoe_task_context *ctx = task_params->context; const u8 val_byte = 
ctx->ystorm_ag_context.byte0; - struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; @@ -115,9 +115,9 @@ int init_initiator_midpath_unsolicited_fcoe_task( struct scsi_sgl_task_params *rx_sgl_task_params, u8 fw_to_place_fc_header) { - struct e4_fcoe_task_context *ctx = task_params->context; + struct fcoe_task_context *ctx = task_params->context; const u8 val_byte = ctx->ystorm_ag_context.byte0; - struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h index 1ee31a5f063b17..7125e484bf932a 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h @@ -10,7 +10,7 @@ struct fcoe_task_params { /* Output parameter [set/filled by the HSI function] */ - struct e4_fcoe_task_context *context; + struct fcoe_task_context *context; /* Output parameter [set/filled by the HSI function] */ struct fcoe_wqe *sqe; diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index ba94413fe2ead1..631a15969d21b6 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -141,7 +141,7 @@ struct qedf_ioreq { struct completion tm_done; struct completion abts_done; struct completion cleanup_done; - struct e4_fcoe_task_context *task; + struct fcoe_task_context *task; struct fcoe_task_params *task_params; struct scsi_sgl_task_params *sgl_task_params; int idx; @@ -503,7 +503,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, unsigned int timer_msec); extern int qedf_init_mp_req(struct qedf_ioreq *io_req); extern void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe); + struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe); extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport); extern void qedf_ring_doorbell(struct qedf_rport *fcport); extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 625e58ccb8c89f..1ff5bc314fc0a8 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -16,7 +16,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, struct qedf_ioreq *els_req; struct qedf_mp_req *mp_req; struct fc_frame_header *fc_hdr; - struct e4_fcoe_task_context *task; + struct fcoe_task_context *task; int rc = 0; uint32_t did, sid; uint16_t xid; diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 3404782988d582..b649f835d436b6 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -584,7 +584,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, } static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, - struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx, + struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { enum fcoe_task_type task_type; @@ -602,7 +602,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, /* Note init_initiator_rw_fcoe_task memsets the task context */ io_req->task = task_ctx; - memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); + memset(task_ctx, 
0, sizeof(struct fcoe_task_context)); memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); @@ -674,7 +674,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, } void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) + struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_rport *fcport = io_req->fcport; @@ -692,7 +692,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req, memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); - memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); /* Setup the task from io_req for easy reference */ @@ -850,7 +850,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) struct Scsi_Host *host = sc_cmd->device->host; struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); - struct e4_fcoe_task_context *task_ctx; + struct fcoe_task_context *task_ctx; u16 xid; struct fcoe_wqe *sqe; u16 sqe_idx; @@ -2293,7 +2293,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, uint8_t tm_flags) { struct qedf_ioreq *io_req; - struct e4_fcoe_task_context *task; + struct fcoe_task_context *task; struct qedf_ctx *qedf = fcport->qedf; struct fc_lport *lport = qedf->lport; int rc = 0; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 42d0d941dba5c3..0da32fd3302ea9 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2170,7 +2170,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp) struct qedf_ctx *qedf = fp->qedf; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block_e4 *sb = sb_info->sb_virt; + struct status_block *sb = sb_info->sb_virt; u16 prod_idx; /* Get the pointer to the global CQ this completion is on */ @@ -2197,7 +2197,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp) { struct qedf_ctx *qedf = fp->qedf; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block_e4 *sb = sb_info->sb_virt; + struct status_block *sb = sb_info->sb_virt; struct global_queue *que; u16 prod_idx; struct fcoe_cqe *cqe; @@ -2688,12 +2688,12 @@ void qedf_fp_io_handler(struct work_struct *work) static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedf->pdev->dev, - sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); + sizeof(struct status_block), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDF_ERR(&qedf->dbg_ctx, @@ -3416,7 +3416,9 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) qedf->devlink = qed_ops->common->devlink_register(qedf->cdev); if (IS_ERR(qedf->devlink)) { QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n"); + rc = PTR_ERR(qedf->devlink); qedf->devlink = NULL; + goto err2; } } diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 42f5afb60055c2..8deb2001dc2ff9 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -136,7 +136,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused) { struct 
qedi_fastpath *fp = NULL; struct qed_sb_info *sb_info = NULL; - struct status_block_e4 *sb = NULL; + struct status_block *sb = NULL; struct global_queue *que = NULL; int id; u16 prod_idx; @@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused) sb_info = fp->sb_info; sb = sb_info->sb_virt; prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] & - STATUS_BLOCK_E4_PROD_INDEX_MASK); + STATUS_BLOCK_PROD_INDEX_MASK); seq_printf(s, "SB PROD IDX: %d\n", prod_idx); que = qedi->global_queues[fp->sb_id]; seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index d01cd829ef975f..84a4204a2cb472 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -85,7 +85,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; - struct e4_iscsi_task_context *task_ctx; + struct iscsi_task_context *task_ctx; struct iscsi_text_rsp *resp_hdr_ptr; struct iscsi_text_response_hdr *cqe_text_response; struct qedi_cmd *cmd; @@ -261,7 +261,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi, { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; - struct e4_iscsi_task_context *task_ctx; + struct iscsi_task_context *task_ctx; struct iscsi_login_rsp *resp_hdr_ptr; struct iscsi_login_response_hdr *cqe_login_response; struct qedi_cmd *cmd; @@ -970,7 +970,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_login_req *login_hdr; struct scsi_sge *resp_sge = NULL; @@ -990,9 +990,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1073,7 +1073,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct iscsi_logout *logout_hdr = NULL; struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_cmd *qedi_cmd; @@ -1091,9 +1091,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1434,7 +1434,7 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask, struct iscsi_tmf_request_hdr tmf_pdu_header; struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct iscsi_tm *tmf_hdr; struct qedi_cmd *qedi_cmd; struct qedi_cmd *cmd; @@ -1454,9 +1454,9 @@ static int send_iscsi_tmf(struct qedi_conn 
*qedi_conn, struct iscsi_task *mtask, return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1548,7 +1548,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_text *text_hdr; struct scsi_sge *req_sge = NULL; @@ -1570,9 +1570,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1649,7 +1649,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct iscsi_nopout *nopout_hdr; struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; @@ -1669,9 +1669,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1991,7 +1991,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) struct iscsi_task_params task_params; struct iscsi_conn_params conn_params; struct scsi_initiator_cmd_params cmd_params; - struct e4_iscsi_task_context *fw_task_ctx; + struct iscsi_task_context *fw_task_ctx; struct iscsi_cls_conn *cls_conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; @@ -2014,9 +2014,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) return -ENOMEM; fw_task_ctx = - (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); + memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); cmd->task_id = tid; diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c index 52772904ef5d86..642556a1ce1cc7 100644 --- a/drivers/scsi/qedi/qedi_fw_api.c +++ b/drivers/scsi/qedi/qedi_fw_api.c @@ -202,7 +202,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, struct data_hdr *pdu_header, enum iscsi_task_type task_type) { - struct e4_iscsi_task_context *context; + struct iscsi_task_context *context; u32 val; u16 index; u8 val_byte; @@ -224,7 +224,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, cpu_to_le16(task_params->conn_icid); SET_FIELD(context->ustorm_ag_context.flags1, - E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); context->ustorm_st_context.task_type = task_type; 
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; @@ -254,7 +254,7 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, static void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, - struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, + struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, u32 remaining_recv_len, u32 expected_data_transfer_len, u8 num_sges, bool tx_dif_conn_err_en) { @@ -266,12 +266,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, ustorm_st_cxt->exp_data_transfer_len = val; SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); SET_FIELD(ustorm_ag_cxt->flags2, - E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, + USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, tx_dif_conn_err_en ? 1 : 0); } static -void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context, +void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, struct iscsi_conn_params *conn_params, enum iscsi_task_type task_type, u32 task_size, @@ -470,7 +470,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, } } -static void set_local_completion_context(struct e4_iscsi_task_context *context) +static void set_local_completion_context(struct iscsi_task_context *context) { SET_FIELD(context->ystorm_st_context.state.flags, YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); @@ -487,7 +487,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params, struct scsi_dif_task_params *dif_task_params) { u32 exp_data_transfer_len = conn_params->max_burst_length; - struct e4_iscsi_task_context *cxt; + struct iscsi_task_context *cxt; bool slow_io = false; u32 task_size, val; u8 num_sges = 0; @@ -615,7 +615,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct e4_iscsi_task_context *cxt; + struct iscsi_task_context *cxt; cxt = task_params->context; @@ -657,7 +657,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_sgl_task_params, struct scsi_sgl_task_params *rx_sgl_task_params) { - struct e4_iscsi_task_context *cxt; + struct iscsi_task_context *cxt; cxt = task_params->context; @@ -703,7 +703,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct e4_iscsi_task_context *cxt; + struct iscsi_task_context *cxt; cxt = task_params->context; @@ -758,7 +758,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct e4_iscsi_task_context *cxt; + struct iscsi_task_context *cxt; cxt = task_params->context; diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h index 10f19f0af0a32b..df2d471a7b5106 100644 --- a/drivers/scsi/qedi/qedi_fw_iscsi.h +++ b/drivers/scsi/qedi/qedi_fw_iscsi.h @@ -10,7 +10,7 @@ #include "qedi_fw_scsi.h" struct iscsi_task_params { - struct e4_iscsi_task_context *context; + struct iscsi_task_context *context; struct iscsi_wqe *sqe; u32 tx_io_size; u32 rx_io_size; diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index a31c5de74754c7..a282860da0aa08 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -182,7 +182,7 @@ struct qedi_cmd { struct scsi_cmnd *scsi_cmd; struct 
scatterlist *sg; struct qedi_io_bdt io_tbl; - struct e4_iscsi_task_context request; + struct iscsi_task_context request; unsigned char *sense_buffer; dma_addr_t sense_buffer_dma; u16 task_id; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e6dc0b495a829f..1dec814d8788c6 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -351,12 +351,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi) static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedi->pdev->dev, - sizeof(struct status_block_e4), &sb_phys, + sizeof(struct status_block), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDI_ERR(&qedi->dbg_ctx, @@ -865,7 +865,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; - qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; + qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT; + qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT; qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { @@ -1259,7 +1260,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp) { struct qedi_ctx *qedi = fp->qedi; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block_e4 *sb = sb_info->sb_virt; + struct status_block *sb = sb_info->sb_virt; struct qedi_percpu_s *p = NULL; struct global_queue *que; u16 prod_idx; @@ -1315,7 +1316,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp) struct qedi_ctx *qedi = fp->qedi; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block_e4 *sb = sb_info->sb_virt; + struct status_block *sb = sb_info->sb_virt; u16 prod_idx; barrier(); diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index 4df32bc4c7a6ec..07d52cafbb3133 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -24,6 +24,7 @@ config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" depends on FSL_MC_BUS select SOC_BUS + select DIMLIB help Driver for the DPAA2 DPIO object. 
A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h index e13fd3ac193940..2fbcb78cdaaf13 100644 --- a/drivers/soc/fsl/dpio/dpio-cmd.h +++ b/drivers/soc/fsl/dpio/dpio-cmd.h @@ -46,6 +46,9 @@ struct dpio_rsp_get_attr { __le64 qbman_portal_ci_addr; /* cmd word 3 */ __le32 qbman_version; + __le32 pad1; + /* cmd word 4 */ + __le32 clk; }; struct dpio_stashing_dest { diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index 7f397b4ad878da..dd948889eeab8b 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -162,6 +162,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_get_attr; } desc.qman_version = dpio_attrs.qbman_version; + desc.qman_clk = dpio_attrs.clk; err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); if (err) { diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c index 7351f303055063..3fd0d08402877c 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "dpio.h" @@ -28,6 +29,14 @@ struct dpaa2_io { spinlock_t lock_notifications; struct list_head notifications; struct device *dev; + + /* Net DIM */ + struct dim rx_dim; + /* protect against concurrent Net DIM updates */ + spinlock_t dim_lock; + u16 event_ctr; + u64 bytes; + u64 frames; }; struct dpaa2_io_store { @@ -100,6 +109,17 @@ struct dpaa2_io *dpaa2_io_service_select(int cpu) } EXPORT_SYMBOL_GPL(dpaa2_io_service_select); +static void dpaa2_io_dim_work(struct work_struct *w) +{ + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim); + + dpaa2_io_set_irq_coalescing(d, moder.usec); + dim->state = DIM_START_MEASURE; +} + /** * dpaa2_io_create() - create a dpaa2_io object. * @desc: the dpaa2_io descriptor @@ -114,6 +134,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, struct device *dev) { struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); + u32 qman_256_cycles_per_ns; if (!obj) return NULL; @@ -127,7 +148,15 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, obj->dpio_desc = *desc; obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena; obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh; + obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk; obj->swp_desc.qman_version = obj->dpio_desc.qman_version; + + /* Compute how many 256 QBMAN cycles fit into one ns. This is because + * the interrupt timeout period register needs to be specified in QBMAN + * clock cycles in increments of 256. 
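The comment above captures the unit mismatch being bridged here: the portal's interrupt timeout register counts in increments of 256 QBMAN clock cycles, while callers specify the holdoff in microseconds, so dpaa2_io_create() first derives how long 256 cycles last from the clock rate now reported through the new clk attribute. A minimal userspace sketch of that arithmetic, assuming a 500 MHz QBMAN clock and a 100 us holdoff (both values are illustrative, not taken from this series):

/* Standalone illustration of the holdoff <-> ITP arithmetic used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t qman_clk = 500000000;	/* assumed portal clock, in Hz */
	uint32_t irq_holdoff = 100;	/* desired holdoff, in usecs */
	uint32_t qman_256_cycles_per_ns, itp, max_holdoff;

	/* same formula as dpaa2_io_create(): duration of 256 cycles, in ns */
	qman_256_cycles_per_ns = 256000 / (qman_clk / 1000000);

	/* same formula as qbman_swp_set_irq_coalescing(): usecs -> ITP units */
	itp = (irq_holdoff * 1000) / qman_256_cycles_per_ns;

	/* largest holdoff that still fits the 4096-unit ITP register */
	max_holdoff = (qman_256_cycles_per_ns * 4096) / 1000;

	printf("256 cycles = %u ns, ITP = %u, max holdoff = %u us\n",
	       qman_256_cycles_per_ns, itp, max_holdoff);
	return 0;
}

For those assumed numbers this prints 512 ns per 256 cycles, an ITP value of 195 and a maximum programmable holdoff of 2097 us, which matches the 4096-unit range check enforced in qbman_swp_set_irq_coalescing() further down.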
+ */ + qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000); + obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns; obj->swp = qbman_swp_init(&obj->swp_desc); if (!obj->swp) { @@ -138,6 +167,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, INIT_LIST_HEAD(&obj->node); spin_lock_init(&obj->lock_mgmt_cmd); spin_lock_init(&obj->lock_notifications); + spin_lock_init(&obj->dim_lock); INIT_LIST_HEAD(&obj->notifications); /* For now only enable DQRR interrupts */ @@ -155,6 +185,12 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, obj->dev = dev; + memset(&obj->rx_dim, 0, sizeof(obj->rx_dim)); + INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work); + obj->event_ctr = 0; + obj->bytes = 0; + obj->frames = 0; + return obj; } @@ -194,6 +230,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj) struct qbman_swp *swp; u32 status; + obj->event_ctr++; + swp = obj->swp; status = qbman_swp_interrupt_read_status(swp); if (!status) @@ -779,3 +817,82 @@ int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num) return 0; } EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count); + +/** + * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. + */ +int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff) +{ + struct qbman_swp *swp = d->swp; + + return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1, + irq_holdoff); +} +EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing); + +/** + * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ +void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff) +{ + struct qbman_swp *swp = d->swp; + + qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff); +} +EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing); + +/** + * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing + * @d: the given DPIO object + * @use_adaptive_rx_coalesce: adaptive coalescing state + */ +void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d, + int use_adaptive_rx_coalesce) +{ + d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce; +} +EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing); + +/** + * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state + * @d: the given DPIO object + * + * Return 1 when adaptive coalescing is enabled on the DPIO object and 0 + * otherwise. 
+ */ +int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d) +{ + return d->swp->use_adaptive_rx_coalesce; +} +EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing); + +/** + * dpaa2_io_update_net_dim() - Update Net DIM + * @d: the given DPIO object + * @frames: how many frames have been dequeued by the user since the last call + * @bytes: how many bytes have been dequeued by the user since the last call + */ +void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes) +{ + struct dim_sample dim_sample = {}; + + if (!d->swp->use_adaptive_rx_coalesce) + return; + + spin_lock(&d->dim_lock); + + d->bytes += bytes; + d->frames += frames; + + dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample); + net_dim(&d->rx_dim, dim_sample); + + spin_unlock(&d->dim_lock); +} +EXPORT_SYMBOL(dpaa2_io_update_net_dim); diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c index af74c597a67562..8ed606ffaac5e2 100644 --- a/drivers/soc/fsl/dpio/dpio.c +++ b/drivers/soc/fsl/dpio/dpio.c @@ -162,6 +162,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io, attr->qbman_portal_ci_offset = le64_to_cpu(dpio_rsp->qbman_portal_ci_addr); attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version); + attr->clk = le32_to_cpu(dpio_rsp->clk); return 0; } diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h index da06f725809805..7fda44f0d7f4da 100644 --- a/drivers/soc/fsl/dpio/dpio.h +++ b/drivers/soc/fsl/dpio/dpio.h @@ -59,6 +59,7 @@ int dpio_disable(struct fsl_mc_io *mc_io, * @num_priorities: Number of priorities for the notification channel (1-8); * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' * @qbman_version: QBMAN version + * @clk: QBMAN clock frequency value in Hz */ struct dpio_attr { int id; @@ -68,6 +69,7 @@ struct dpio_attr { enum dpio_channel_mode channel_mode; u8 num_priorities; u32 qbman_version; + u32 clk; }; int dpio_get_attributes(struct fsl_mc_io *mc_io, diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index f13da4d7d1c526..3474bf5f88d5d9 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -29,6 +29,7 @@ #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980 #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0 #define QBMAN_CINH_SWP_DQPI 0xa00 +#define QBMAN_CINH_SWP_DQRR_ITR 0xa80 #define QBMAN_CINH_SWP_DCAP 0xac0 #define QBMAN_CINH_SWP_SDQCR 0xb00 #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40 @@ -38,6 +39,7 @@ #define QBMAN_CINH_SWP_IER 0xe40 #define QBMAN_CINH_SWP_ISDR 0xe80 #define QBMAN_CINH_SWP_IIR 0xec0 +#define QBMAN_CINH_SWP_ITPR 0xf40 /* CENA register offsets */ #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6)) @@ -355,6 +357,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) & p->eqcr.pi_ci_mask; p->eqcr.available = p->eqcr.pi_ring_size; + /* Initialize the software portal with a irq timeout period of 0us */ + qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0); + return p; } @@ -1796,3 +1801,56 @@ u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a) { return le32_to_cpu(a->fill); } + +/** + * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values + * @p: the software portal object + * @irq_threshold: interrupt threshold + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. 
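The dpaa2_io_set_irq_coalescing(), dpaa2_io_set_adaptive_coalescing() and dpaa2_io_update_net_dim() helpers exported above are meant to be driven by a DPIO consumer such as an Ethernet driver. A hypothetical sketch of that call pattern, assuming the prototypes are declared in the public dpaa2-io header (not part of this hunk); the example_* names are invented and the code is only meaningful in-tree:

#include <soc/fsl/dpaa2-io.h>	/* dpaa2_io_* prototypes assumed here */

struct example_channel {	/* made-up per-channel consumer state */
	struct dpaa2_io *dpio;
	u64 frames;
	u64 bytes;
};

/* end of a NAPI-style poll: report what was dequeued so Net DIM can adapt */
static void example_poll_done(struct example_channel *ch)
{
	dpaa2_io_update_net_dim(ch->dpio, ch->frames, ch->bytes);
	ch->frames = 0;
	ch->bytes = 0;
}

/* ethtool -C rx-usecs N / adaptive-rx on|off style plumbing */
static int example_set_rx_coalesce(struct example_channel *ch,
				   u32 rx_usecs, bool adaptive_rx)
{
	dpaa2_io_set_adaptive_coalescing(ch->dpio, adaptive_rx);
	if (adaptive_rx)
		return 0;
	return dpaa2_io_set_irq_coalescing(ch->dpio, rx_usecs);
}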
+ */ +int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold, + u32 irq_holdoff) +{ + u32 itp, max_holdoff; + + /* Convert irq_holdoff value from usecs to 256 QBMAN clock cycles + * increments. This depends on the QBMAN internal frequency. + */ + itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns; + if (itp > 4096) { + max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000; + pr_err("irq_holdoff must be <= %uus\n", max_holdoff); + return -EINVAL; + } + + if (irq_threshold >= p->dqrr.dqrr_size) { + pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1); + return -EINVAL; + } + + p->irq_threshold = irq_threshold; + p->irq_holdoff = irq_holdoff; + + qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold); + qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp); + + return 0; +} + +/** + * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @p: the software portal object + * @irq_threshold: interrupt threshold (an IRQ is generated when there are more + * DQRR entries in the portal than the threshold) + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ +void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold, + u32 *irq_holdoff) +{ + if (irq_threshold) + *irq_threshold = p->irq_threshold; + if (irq_holdoff) + *irq_holdoff = p->irq_holdoff; +} diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h index c7c2225b7d914d..b23883dd2725f6 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.h +++ b/drivers/soc/fsl/dpio/qbman-portal.h @@ -24,6 +24,8 @@ struct qbman_swp_desc { void *cena_bar; /* Cache-enabled portal base address */ void __iomem *cinh_bar; /* Cache-inhibited portal base address */ u32 qman_version; + u32 qman_clk; + u32 qman_256_cycles_per_ns; }; #define QBMAN_SWP_INTERRUPT_EQRI 0x01 @@ -156,6 +158,11 @@ struct qbman_swp { } eqcr; spinlock_t access_spinlock; + + /* Interrupt coalescing */ + u32 irq_threshold; + u32 irq_holdoff; + int use_adaptive_rx_coalesce; }; /* Function pointers */ @@ -648,4 +655,10 @@ static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) return qbman_swp_dqrr_next_ptr(s); } +int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold, + u32 irq_holdoff); + +void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold, + u32 *irq_holdoff); + #endif /* __FSL_QBMAN_PORTAL_H */ diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 5d24c1b6663b73..d5785c0d06b0f9 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -409,7 +409,7 @@ int cvm_oct_common_init(struct net_device *dev) struct octeon_ethernet *priv = netdev_priv(dev); int ret; - ret = of_get_mac_address(priv->of_node, dev->dev_addr); + ret = of_get_ethdev_address(priv->of_node, dev); if (ret) eth_hw_addr_random(dev); diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 8fcdf89da8aa9c..1dc849378a0fec 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -4614,14 +4614,9 @@ static int qlge_probe(struct pci_dev *pdev, goto netdev_free; } - err = devlink_register(devlink); - if (err) - goto netdev_free; - err = qlge_health_create_reporters(qdev); - if (err) - goto devlink_unregister; + goto netdev_free; /* Start up the timer to trigger EEH if * the bus goes dead @@ -4632,10 +4627,9 @@ static int qlge_probe(struct pci_dev *pdev, qlge_display_dev_info(ndev); atomic_set(&qdev->lb_count, 0); cards_found++; + 
devlink_register(devlink); return 0; -devlink_unregister: - devlink_unregister(devlink); netdev_free: free_netdev(ndev); devlink_free: @@ -4660,13 +4654,13 @@ static void qlge_remove(struct pci_dev *pdev) struct net_device *ndev = qdev->ndev; struct devlink *devlink = priv_to_devlink(qdev); + devlink_unregister(devlink); del_timer_sync(&qdev->timer); qlge_cancel_all_work_sync(qdev); unregister_netdev(ndev); qlge_release_all(pdev); pci_disable_device(pdev); devlink_health_reporter_destroy(qdev->reporter); - devlink_unregister(devlink); devlink_free(devlink); free_netdev(ndev); } diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index 0b468f5d55bc5d..068ed8417e5a9e 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -267,6 +267,8 @@ static const struct net_device_ops pn_netdev_ops = { static void pn_net_setup(struct net_device *dev) { + const u8 addr = PN_MEDIA_USB; + dev->features = 0; dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; @@ -274,8 +276,9 @@ static void pn_net_setup(struct net_device *dev) dev->min_mtu = PHONET_MIN_MTU; dev->max_mtu = PHONET_MAX_MTU; dev->hard_header_len = 1; - dev->dev_addr[0] = PN_MEDIA_USB; dev->addr_len = 1; + dev_addr_set(dev, &addr); + dev->tx_queue_len = 1; dev->netdev_ops = &pn_netdev_ops; diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 01a848adf5903c..3163b313a470a8 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -15,7 +15,7 @@ struct mlx5_vdpa_direct_mr { u64 start; u64 end; u32 perm; - struct mlx5_core_mkey mr; + u32 mr; struct sg_table sg_head; int log_size; int nsg; @@ -25,7 +25,7 @@ struct mlx5_vdpa_direct_mr { }; struct mlx5_vdpa_mr { - struct mlx5_core_mkey mkey; + u32 mkey; /* list of direct MRs descendants of this indirect mr */ struct list_head head; @@ -99,9 +99,9 @@ int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn); void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn); int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev); void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev); -int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in, +int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in, int inlen); -int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey); +int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey); int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, bool *change_map); int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index ff010c6d0cd39e..a639b9208d4148 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -88,7 +88,7 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) { - mlx5_vdpa_destroy_mkey(mvdev, &mr->mr); + mlx5_vdpa_destroy_mkey(mvdev, mr->mr); } static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) @@ -162,7 +162,7 @@ static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, v } if (preve == dmr->start) { - klm->key = cpu_to_be32(dmr->mr.key); + klm->key = cpu_to_be32(dmr->mr); klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start)); preve 
= dmr->end; } else { @@ -217,7 +217,7 @@ static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey) { - mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey); + mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey); } static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr, @@ -449,7 +449,7 @@ static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { - mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey); + mlx5_vdpa_destroy_mkey(mvdev, mr->mkey); } static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src) diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c index 15e266d0e27a56..9800f9bec225a5 100644 --- a/drivers/vdpa/mlx5/core/resources.c +++ b/drivers/vdpa/mlx5/core/resources.c @@ -198,12 +198,11 @@ void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn) mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in); } -int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in, +int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in, int inlen) { u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {}; u32 mkey_index; - void *mkc; int err; MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); @@ -213,22 +212,18 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mk if (err) return err; - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index); - mkey->iova = MLX5_GET64(mkc, mkc, start_addr); - mkey->size = MLX5_GET64(mkc, mkc, len); - mkey->key |= mlx5_idx_to_mkey(mkey_index); - mkey->pd = MLX5_GET(mkc, mkc, pd); + *mkey |= mlx5_idx_to_mkey(mkey_index); return 0; } -int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey) +int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey) { u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {}; MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid); MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); - MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey)); return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in); } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index bd56de7484dcb1..5c7d2a953dbd60 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -865,7 +865,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); - MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); + MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index db0e099c23991a..b30a1bc74fc7e9 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -238,27 +238,26 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads 
including * TX/RX Checksum offloading and TSO for non-tunnelled packets. */ -#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 -#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 -#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 -#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 -#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 -#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 -#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 -#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 -#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 -#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 -#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 -#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 -#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 -#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 -#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000 -#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000 - -/* Define below the capability flags that are not offloads */ -#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 +#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0) +#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1) +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3) +#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4) +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5) +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6) +/* used to negotiate communicating link speeds in Mbps */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) +#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18) +#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19) +#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20) +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21) +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22) +#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23) +#define VIRTCHNL_VF_OFFLOAD_USO BIT(25) +#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) +#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) + #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 37f36dad18bd4e..a241dcf50f39f7 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -123,6 +123,8 @@ struct device; */ unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node); +unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node); void bitmap_free(const unsigned long *bitmap); /* Managed variants of the above. 
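The virtchnl.h hunk above is purely notational: every flag that remains keeps its numeric value and is simply written as BIT(n), i.e. 1 shifted left by n, and VIRTCHNL_VF_CAP_ADV_LINK_SPEED moves into numeric order with a comment instead of living in a separate block. A standalone spot check of that equivalence, with BIT() defined locally since linux/bits.h is kernel-only:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))	/* local stand-in for the kernel macro */

int main(void)
{
	assert(BIT(3)  == 0x00000008UL);	/* VIRTCHNL_VF_OFFLOAD_RSS_AQ */
	assert(BIT(7)  == 0x00000080UL);	/* VIRTCHNL_VF_CAP_ADV_LINK_SPEED */
	assert(BIT(16) == 0x00010000UL);	/* VIRTCHNL_VF_OFFLOAD_VLAN */
	assert(BIT(28) == 0x10000000UL);	/* VIRTCHNL_VF_OFFLOAD_FDIR_PF */
	puts("all retained flag values unchanged");
	return 0;
}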
*/ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 2746fd8042162c..3536ab432b30cb 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -517,6 +517,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define cgroup_bpf_enabled(atype) (0) #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3db6f6c95489e7..2be6dfd68df995 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -48,6 +48,7 @@ extern struct idr btf_idr; extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj; +typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); @@ -142,7 +143,8 @@ struct bpf_map_ops { int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee); - int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + int (*map_for_each_callback)(struct bpf_map *map, + bpf_callback_t callback_fn, void *callback_ctx, u64 flags); /* BTF name and id of struct allocated by map_alloc */ @@ -166,6 +168,7 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; + u64 map_extra; /* any per-map-type extra fields */ u32 map_flags; int spin_lock_off; /* >=0 valid offset, <0 error */ int timer_off; /* >=0 valid offset, <0 error */ @@ -173,15 +176,15 @@ struct bpf_map { int numa_node; u32 btf_key_type_id; u32 btf_value_type_id; + u32 btf_vmlinux_value_type_id; struct btf *btf; #ifdef CONFIG_MEMCG_KMEM struct mem_cgroup *memcg; #endif char name[BPF_OBJ_NAME_LEN]; - u32 btf_vmlinux_value_type_id; bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ - /* 22 bytes hole */ + /* 14 bytes hole */ /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. @@ -511,7 +514,7 @@ struct bpf_verifier_ops { const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); - bool (*check_kfunc_call)(u32 kfunc_btf_id); + bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner); }; struct bpf_prog_offload_ops { @@ -875,6 +878,7 @@ struct bpf_prog_aux { void *jit_data; /* JIT specific data. 
arch dependent */ struct bpf_jit_poke_descriptor *poke_tab; struct bpf_kfunc_desc_tab *kfunc_tab; + struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; struct bpf_ksym ksym; const struct bpf_prog_ops *ops; @@ -884,6 +888,7 @@ struct bpf_prog_aux { struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ + u32 verified_insns; struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY @@ -998,6 +1003,10 @@ bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, + struct bpf_prog *prog, + const struct btf_func_model *model, + void *image, void *image_end); static inline bool bpf_try_module_get(const void *data, struct module *owner) { if (owner == BPF_MODULE_OWNER) @@ -1012,6 +1021,22 @@ static inline void bpf_module_put(const void *data, struct module *owner) else module_put(owner); } + +#ifdef CONFIG_NET +/* Define it here to avoid the use of forward declaration */ +struct bpf_dummy_ops_state { + int val; +}; + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *cb); + int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, + char a3, unsigned long a4); +}; + +int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +#endif #else static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) { @@ -1092,6 +1117,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); +const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); @@ -1639,10 +1665,33 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id); +bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner); bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); + +static inline bool bpf_tracing_ctx_access(int off, int size, + enum bpf_access_type type) +{ + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + return true; +} + +static inline bool bpf_tracing_btf_ctx_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (!bpf_tracing_ctx_access(off, size, type)) + return false; + return btf_ctx_access(off, size, type, prog, info); +} + int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, @@ -1860,7 +1909,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, return -ENOTSUPP; } -static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) +static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, + struct module *owner) { return false; } @@ -2091,6 +2141,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; extern const struct bpf_func_proto 
bpf_skc_to_tcp_timewait_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; extern const struct bpf_func_proto bpf_copy_from_user_proto; extern const struct bpf_func_proto bpf_snprintf_btf_proto; extern const struct bpf_func_proto bpf_snprintf_proto; @@ -2105,6 +2156,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto; extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; extern const struct bpf_func_proto bpf_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_sk_getsockopt_proto; +extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2220,6 +2272,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id); +#define MAX_BPRINTF_VARARGS 12 + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 **bin_buf, u32 num_args); void bpf_bprintf_cleanup(void); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index bbe1eefa4c8a90..48a91c51c015d7 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -125,6 +125,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops) BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint) BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5424124dbe365a..c8a78e830fcab0 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, const struct bpf_prog *tgt_prog, u32 btf_id, struct bpf_attach_target_info *tgt_info); +void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab); + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 546e27fc6d4624..46e1757d06a35d 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -3,6 +3,7 @@ #ifndef _LINUX_BPFPTR_H #define _LINUX_BPFPTR_H +#include #include typedef sockptr_t bpfptr_t; diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index c2c2147dfeb8cb..747fad26403366 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -32,6 +32,7 @@ #define PHY_ID_BCM72113 0x35905310 #define PHY_ID_BCM72116 0x35905350 +#define PHY_ID_BCM72165 0x35905340 #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7255 0xae025120 #define PHY_ID_BCM7260 0xae025190 @@ -49,6 +50,7 @@ #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 +#define PHY_ID_BCM7712 0x35905330 #define PHY_ID_BCM_CYGNUS 0xae025200 #define PHY_ID_BCM_OMEGA 0xae025100 @@ -66,6 +68,7 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008 #define PHY_BRCM_EN_MASTER_MODE 0x00000010 +#define PHY_BRCM_IDDQ_SUSPEND 0x00000020 /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) @@ -83,6 +86,7 @@ #define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */ #define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* 
TOP_MISC expansion register select */ #define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ #define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ @@ -233,6 +237,7 @@ #define MII_BCM54XX_EXP_EXP08 0x0F08 #define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 #define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100 #define MII_BCM54XX_EXP_EXP75 0x0f75 #define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c #define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 @@ -241,6 +246,12 @@ #define MII_BCM54XX_EXP_EXP97 0x0f97 #define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c +/* Top-MISC expansion registers */ +#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06) +#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0) +#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2) +#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3) + /* * BCM5482: Secondary SerDes registers */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 214fde93214b97..203eef993d763f 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -5,6 +5,7 @@ #define _LINUX_BTF_H 1 #include +#include #include #include @@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf, } #endif +struct kfunc_btf_id_set { + struct list_head list; + struct btf_id_set *set; + struct module *owner; +}; + +struct kfunc_btf_id_list; + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES +void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s); +void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s); +bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, + struct module *owner); +#else +static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ +} +static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ +} +static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, + u32 kfunc_id, struct module *owner) +{ + return false; +} +#endif + +#define DEFINE_KFUNC_BTF_ID_SET(set, name) \ + struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ + THIS_MODULE } + +extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; +extern struct kfunc_btf_id_list prog_test_kfunc_list; + #endif diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index 9de6e9053e34fa..20b50baf3a02d8 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -19,6 +19,9 @@ /* Megahertz */ #define CAN_MHZ 1000000UL +#define CAN_CTRLMODE_TDC_MASK \ + (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL) + /* * struct can_tdc - CAN FD Transmission Delay Compensation parameters * @@ -28,34 +31,54 @@ * * To solve this issue, ISO 11898-1 introduces in section 11.3.3 * "Transmitter delay compensation" a SSP (Secondary Sample Point) - * equal to the distance, in time quanta, from the start of the bit - * time on the TX pin to the actual measurement on the RX pin. + * equal to the distance from the start of the bit time on the TX pin + * to the actual measurement on the RX pin. * * This structure contains the parameters to calculate that SSP. * - * @tdcv: Transmitter Delay Compensation Value. Distance, in time - * quanta, from when the bit is sent on the TX pin to when it is - * received on the RX pin of the transmitter. 
Possible options: + * -+----------- one bit ----------+-- TX pin + * |<--- Sample Point --->| * - * 0: automatic mode. The controller dynamically measures @tdcv - * for each transmitted CAN FD frame. + * --+----------- one bit ----------+-- RX pin + * |<-------- TDCV -------->| + * |<------- TDCO ------->| + * |<----------- Secondary Sample Point ---------->| * - * Other values: manual mode. Use the fixed provided value. + * To increase precision, contrary to the other bittiming parameters + * which are measured in time quanta, the TDC parameters are measured + * in clock periods (also referred as "minimum time quantum" in ISO + * 11898-1). * - * @tdco: Transmitter Delay Compensation Offset. Offset value, in time - * quanta, defining the distance between the start of the bit - * reception on the RX pin of the transceiver and the SSP - * position such that SSP = @tdcv + @tdco. + * @tdcv: Transmitter Delay Compensation Value. The time needed for + * the signal to propagate, i.e. the distance, in clock periods, + * from the start of the bit on the TX pin to when it is received + * on the RX pin. @tdcv depends on the controller modes: + * + * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically + * measures @tdcv for each transmitted CAN FD frame and the + * value provided here should be ignored. + * + * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv + * value. * - * If @tdco is zero, then TDC is disabled and both @tdcv and - * @tdcf should be ignored. + * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are + * mutually exclusive. Only one can be set at a time. If both + * CAN_TDC_CTRLMODE_AUTO and CAN_TDC_CTRLMODE_MANUAL are unset, + * TDC is disabled and all the values of this structure should be + * ignored. + * + * @tdco: Transmitter Delay Compensation Offset. Offset value, in + * clock periods, defining the distance between the start of the + * bit reception on the RX pin of the transceiver and the SSP + * position such that SSP = @tdcv + @tdco. * * @tdcf: Transmitter Delay Compensation Filter window. Defines the - * minimum value for the SSP position in time quanta. If SSP is - * less than @tdcf, then no delay compensations occur and the - * normal sampling point is used instead. The feature is enabled - * if and only if @tdcv is set to zero (automatic mode) and @tdcf - * is configured to a value greater than @tdco. + * minimum value for the SSP position in clock periods. If the + * SSP position is less than @tdcf, then no delay compensations + * occur and the normal sampling point is used instead. The + * feature is enabled if and only if @tdcv is set to zero + * (automatic mode) and @tdcf is configured to a value greater + * than @tdco. */ struct can_tdc { u32 tdcv; @@ -67,19 +90,32 @@ struct can_tdc { * struct can_tdc_const - CAN hardware-dependent constant for * Transmission Delay Compensation * - * @tdcv_max: Transmitter Delay Compensation Value maximum value. - * Should be set to zero if the controller does not support - * manual mode for tdcv. + * @tdcv_min: Transmitter Delay Compensation Value minimum value. If + * the controller does not support manual mode for tdcv + * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is + * ignored. + * @tdcv_max: Transmitter Delay Compensation Value maximum value. If + * the controller does not support manual mode for tdcv + * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is + * ignored. + * + * @tdco_min: Transmitter Delay Compensation Offset minimum value. 
* @tdco_max: Transmitter Delay Compensation Offset maximum value. * Should not be zero. If the controller does not support TDC, * then the pointer to this structure should be NULL. + * + * @tdcf_min: Transmitter Delay Compensation Filter window minimum + * value. If @tdcf_max is zero, this value is ignored. * @tdcf_max: Transmitter Delay Compensation Filter window maximum * value. Should be set to zero if the controller does not * support this feature. */ struct can_tdc_const { + u32 tdcv_min; u32 tdcv_max; + u32 tdco_min; u32 tdco_max; + u32 tdcf_min; u32 tdcf_max; }; @@ -87,7 +123,9 @@ struct can_tdc_const { int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc); -void can_calc_tdco(struct net_device *dev); +void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, + const struct can_bittiming *dbt, + u32 *ctrlmode, u32 ctrlmode_supported); #else /* !CONFIG_CAN_CALC_BITTIMING */ static inline int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, @@ -97,7 +135,10 @@ can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, return -EINVAL; } -static inline void can_calc_tdco(struct net_device *dev) +static inline void +can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, + const struct can_bittiming *dbt, + u32 *ctrlmode, u32 ctrlmode_supported) { } #endif /* CONFIG_CAN_CALC_BITTIMING */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 2413253e54c704..45f19d9db5ca72 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -82,6 +82,7 @@ struct can_priv { enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, struct can_berr_counter *bec); + int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv); unsigned int echo_skb_max; struct sk_buff **echo_skb; @@ -96,6 +97,39 @@ struct can_priv { #endif }; +static inline bool can_tdc_is_enabled(const struct can_priv *priv) +{ + return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK); +} + +/* + * can_get_relative_tdco() - TDCO relative to the sample point + * + * struct can_tdc::tdco represents the absolute offset from TDCV. 
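
As an aside, here is a minimal sketch of how a driver might consume these TDC parameters when programming its secondary sample point. foo_write_tdco() and foo_write_tdcv() are hypothetical register helpers; only can_tdc_is_enabled(), priv->tdc and the CAN_CTRLMODE_TDC_* flags come from the hunks above:

    static void foo_set_tdc(struct net_device *ndev)
    {
        struct can_priv *priv = netdev_priv(ndev);
        const struct can_tdc *tdc = &priv->tdc;

        if (!can_tdc_is_enabled(priv))
            return;

        /* TDC parameters are in clock periods ("minimum time quanta") */
        foo_write_tdco(ndev, tdc->tdco);
        if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
            /* manual mode: SSP = TDCV + TDCO, e.g. 31 + 6 = 37 */
            foo_write_tdcv(ndev, tdc->tdcv);
        /* in CAN_CTRLMODE_TDC_AUTO mode the controller measures TDCV itself */
    }
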
Some + * controllers use instead an offset relative to the Sample Point (SP) + * such that: + * + * SSP = TDCV + absolute TDCO + * = TDCV + SP + relative TDCO + * + * -+----------- one bit ----------+-- TX pin + * |<--- Sample Point --->| + * + * --+----------- one bit ----------+-- RX pin + * |<-------- TDCV -------->| + * |<------------------------>| absolute TDCO + * |<--- Sample Point --->| + * | |<->| relative TDCO + * |<------------- Secondary Sample Point ------------>| + */ +static inline s32 can_get_relative_tdco(const struct can_priv *priv) +{ + const struct can_bittiming *dbt = &priv->data_bittiming; + s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg + + dbt->phase_seg1) * dbt->brp; + + return (s32)priv->tdc.tdco - sample_point_in_tc; +} /* helper to define static CAN controller features at device creation time */ static inline void can_set_static_ctrlmode(struct net_device *dev, diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index c7fa4a3498fe80..254b165f2b4419 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -9,6 +9,7 @@ #include struct dsa_switch; +struct dsa_port; struct sk_buff; struct net_device; @@ -45,9 +46,9 @@ void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num); -u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); +u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp); -u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); +u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp); int dsa_8021q_rx_switch_id(u16 vid); diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 8ae999f587c48e..d42010cf54682d 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -242,9 +242,9 @@ static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type) packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0); } -static inline void ocelot_ifh_set_vid(void *injection, u64 vid) +static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci) { - packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0); + packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0); } /* Determine the PTP REW_OP to use for injecting the given skb */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index 9e07079528a53f..e6c78be40bde71 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -69,7 +69,6 @@ struct sja1105_port { struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; - struct dsa_port *dp; bool hwts_tx_en; }; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index c58d5045148547..2ad71cc90b37d8 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -26,9 +26,16 @@ #ifdef __KERNEL__ struct device; +struct fwnode_handle; + int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); +int platform_get_ethdev_address(struct device *dev, struct net_device *netdev); unsigned char *arch_get_platform_mac_address(void); int nvmem_get_mac_address(struct device *dev, void *addrbuf); +int device_get_mac_address(struct device *dev, char *addr); +int device_get_ethdev_address(struct device *dev, struct net_device *netdev); +int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr); + u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops 
eth_header_ops; @@ -227,8 +234,6 @@ static inline void eth_random_addr(u8 *addr) addr[0] |= 0x02; /* set local assignment bit (IEEE802) */ } -#define random_ether_addr(addr) eth_random_addr(addr) - /** * eth_broadcast_addr - Assign broadcast address * @addr: Pointer to a six-byte array containing the Ethernet address @@ -262,8 +267,11 @@ static inline void eth_zero_addr(u8 *addr) */ static inline void eth_hw_addr_random(struct net_device *dev) { + u8 addr[ETH_ALEN]; + + eth_random_addr(addr); + __dev_addr_set(dev, addr, ETH_ALEN); dev->addr_assign_type = NET_ADDR_RANDOM; - eth_random_addr(dev->dev_addr); } /** @@ -323,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst, struct net_device *src) { dst->addr_assign_type = src->addr_assign_type; - ether_addr_copy(dst->dev_addr, src->dev_addr); + eth_hw_addr_set(dst, src->dev_addr); } /** @@ -543,6 +551,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) #endif } +/** + * eth_hw_addr_gen - Generate and assign Ethernet address to a port + * @dev: pointer to port's net_device structure + * @base_addr: base Ethernet address + * @id: offset to add to the base address + * + * Generate a MAC address using a base address and an offset and assign it + * to a net_device. Commonly used by switch drivers which need to compute + * addresses for all their ports. addr_assign_type is not changed. + */ +static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr, + unsigned int id) +{ + u64 u = ether_addr_to_u64(base_addr); + u8 addr[ETH_ALEN]; + + u += id; + u64_to_ether_addr(u, addr); + eth_hw_addr_set(dev, addr); +} + /** * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame * @skb: Buffer to pad diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 849524b55d89a6..845a0ffc16ee85 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -94,6 +94,7 @@ struct ethtool_link_ext_state_info { enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; enum ethtool_link_ext_substate_cable_issue cable_issue; + enum ethtool_link_ext_substate_module module; u8 __link_ext_substate; }; }; @@ -415,6 +416,17 @@ struct ethtool_module_eeprom { u8 *data; }; +/** + * struct ethtool_module_power_mode_params - module power mode parameters + * @policy: The power mode policy enforced by the host for the plug-in module. + * @mode: The operational power mode of the plug-in module. Should be filled by + * device drivers on get operations. + */ +struct ethtool_module_power_mode_params { + enum ethtool_module_power_mode_policy policy; + enum ethtool_module_power_mode mode; +}; + /** * struct ethtool_ops - optional netdev operations * @cap_link_lanes_supported: indicates if the driver supports lanes @@ -580,6 +592,11 @@ struct ethtool_module_eeprom { * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics. * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics. * Set %ranges to a pointer to zero-terminated array of byte ranges. + * @get_module_power_mode: Get the power mode policy for the plug-in module + * used by the network device and its operational power mode, if + * plugged-in. + * @set_module_power_mode: Set the power mode policy for the plug-in module + * used by the network device. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. 
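
The eth_hw_addr_gen() helper documented just above is aimed at switch drivers that derive each port MAC from a single base address. A rough usage sketch, assuming a made-up struct foo_switch; only eth_hw_addr_gen() itself comes from this patch:

    #include <linux/etherdevice.h>

    #define FOO_MAX_PORTS 8

    struct foo_switch {                      /* hypothetical driver state */
        u8 base_mac[ETH_ALEN];
        unsigned int num_ports;
        struct net_device *port_netdev[FOO_MAX_PORTS];
    };

    static void foo_assign_port_macs(struct foo_switch *foo)
    {
        unsigned int i;

        /* port i gets base MAC + i; addr_assign_type is left untouched */
        for (i = 0; i < foo->num_ports; i++)
            eth_hw_addr_gen(foo->port_netdev[i], foo->base_mac, i);
    }
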
Callers must @@ -705,6 +722,12 @@ struct ethtool_ops { void (*get_rmon_stats)(struct net_device *dev, struct ethtool_rmon_stats *rmon_stats, const struct ethtool_rmon_hist_range **ranges); + int (*get_module_power_mode)(struct net_device *dev, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack); + int (*set_module_power_mode)(struct net_device *dev, + const struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack); }; int ethtool_check_ops(const struct ethtool_ops *ops); diff --git a/include/linux/filter.h b/include/linux/filter.h index b956eeb0f9a8be..24b7ed2677afd2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -360,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = TGT }) -/* Function call */ +/* Convert function address to BPF immediate */ -#define BPF_CAST_CALL(x) \ - ((u64 (*)(u64, u64, u64, u64, u64))(x)) +#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ @@ -371,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ - .imm = ((FUNC) - __bpf_call_base) }) + .imm = BPF_CALL_IMM(FUNC) }) /* Raw code statement block */ @@ -554,9 +553,9 @@ struct bpf_binary_header { }; struct bpf_prog_stats { - u64 cnt; - u64 nsecs; - u64 misses; + u64_stats_t cnt; + u64_stats_t nsecs; + u64_stats_t misses; struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); @@ -615,13 +614,14 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, if (static_branch_unlikely(&bpf_stats_enabled_key)) { struct bpf_prog_stats *stats; u64 start = sched_clock(); + unsigned long flags; ret = dfunc(ctx, prog->insnsi, prog->bpf_func); stats = this_cpu_ptr(prog->stats); - u64_stats_update_begin(&stats->syncp); - stats->cnt++; - stats->nsecs += sched_clock() - start; - u64_stats_update_end(&stats->syncp); + flags = u64_stats_update_begin_irqsave(&stats->syncp); + u64_stats_inc(&stats->cnt); + u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_update_end_irqrestore(&stats->syncp, flags); } else { ret = dfunc(ctx, prog->insnsi, prog->bpf_func); } diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index ada3dd79cd083b..11d7af260f201f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1988,6 +1988,44 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, int mcs, bool ext_nss_bw_capable, unsigned int max_vht_nss); +/** + * enum ieee80211_ap_reg_power - regulatory power for an Access Point + * + * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode + * @IEEE80211_REG_LPI_AP: Indoor Access Point + * @IEEE80211_REG_SP_AP: Standard power Access Point + * @IEEE80211_REG_VLP_AP: Very low power Access Point + * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal + * @IEEE80211_REG_AP_POWER_MAX: maximum value + */ +enum ieee80211_ap_reg_power { + IEEE80211_REG_UNSET_AP, + IEEE80211_REG_LPI_AP, + IEEE80211_REG_SP_AP, + IEEE80211_REG_VLP_AP, + IEEE80211_REG_AP_POWER_AFTER_LAST, + IEEE80211_REG_AP_POWER_MAX = + IEEE80211_REG_AP_POWER_AFTER_LAST - 1, +}; + +/** + * enum ieee80211_client_reg_power - regulatory power for a client + * + * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode + * @IEEE80211_REG_DEFAULT_CLIENT: Default Client + * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client + * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal + * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value + */
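
One reader-side note on the bpf_prog_stats conversion in the filter.h hunk above: with u64_stats_t fields, consumers have to snapshot the counters with u64_stats_read() inside the fetch/retry sequence. A sketch of such a reader; the function name is illustrative, not the exact in-kernel helper:

    static void foo_read_prog_stats(struct bpf_prog_stats *stats,
                                    u64 *cnt, u64 *nsecs)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin_irq(&stats->syncp);
            *cnt = u64_stats_read(&stats->cnt);
            *nsecs = u64_stats_read(&stats->nsecs);
        } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
    }
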
+enum ieee80211_client_reg_power { + IEEE80211_REG_UNSET_CLIENT, + IEEE80211_REG_DEFAULT_CLIENT, + IEEE80211_REG_SUBORDINATE_CLIENT, + IEEE80211_REG_CLIENT_POWER_AFTER_LAST, + IEEE80211_REG_CLIENT_POWER_MAX = + IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1, +}; + /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 #define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 @@ -2084,6 +2122,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20 #define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16 +#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13 /* 802.11ax HE PHY capabilities */ #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a038feb63f23ed..518b484a7f07eb 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -133,6 +133,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) +#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \ + ARP_EVICT_NOCARRIER) struct in_ifaddr { struct hlist_node hash; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ef4a69865737ce..20c1f968da7c12 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -79,6 +79,7 @@ struct ipv6_devconf { __u32 ioam6_id; __u32 ioam6_id_wide; __u8 ioam6_enabled; + __u8 ndisc_evict_nocarrier; struct ctl_table_header *sysctl_header; }; @@ -282,7 +283,6 @@ struct ipv6_pinfo { __be32 rcv_flowinfo; __u32 dst_cookie; - __u32 rx_dst_cookie; struct ipv6_mc_socklist __rcu *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 5e6dc38f418e48..9f3587a61e145c 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -349,6 +349,32 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set); +int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, + u16 mask, u16 set); + +static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum) +{ + return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum); +} + +static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum, + u16 val) +{ + return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val); +} + +static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum, + u16 mask, u16 set) +{ + return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set); +} + +static inline int mdiodev_modify_changed(struct mdio_device *mdiodev, + u32 regnum, u16 mask, u16 set) +{ + return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum, + mask, set); +} static inline u32 mdiobus_c45_addr(int devad, u16 regnum) { diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h index 92d763230bdf66..a18c1539a152a0 100644 --- a/include/linux/mfd/idt8a340_reg.h +++ b/include/linux/mfd/idt8a340_reg.h @@ -506,6 +506,10 @@ #define STATE_MODE_SHIFT (0) #define STATE_MODE_MASK (0x7) +/* Bit definitions for the DPLL_MANU_REF_CFG register */ +#define MANUAL_REFERENCE_SHIFT (0) +#define MANUAL_REFERENCE_MASK (0x1f) + /* Bit definitions for the GPIO_CFG_GBL register */ #define SUPPLY_MODE_SHIFT 
(0) #define SUPPLY_MODE_MASK (0x3) @@ -654,7 +658,7 @@ /* Values of DPLL_N.DPLL_MODE.PLL_MODE */ enum pll_mode { PLL_MODE_MIN = 0, - PLL_MODE_NORMAL = PLL_MODE_MIN, + PLL_MODE_PLL = PLL_MODE_MIN, PLL_MODE_WRITE_PHASE = 1, PLL_MODE_WRITE_FREQUENCY = 2, PLL_MODE_GPIO_INC_DEC = 3, @@ -664,6 +668,31 @@ enum pll_mode { PLL_MODE_MAX = PLL_MODE_DISABLED, }; +/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */ +enum manual_reference { + MANU_REF_MIN = 0, + MANU_REF_CLK0 = MANU_REF_MIN, + MANU_REF_CLK1, + MANU_REF_CLK2, + MANU_REF_CLK3, + MANU_REF_CLK4, + MANU_REF_CLK5, + MANU_REF_CLK6, + MANU_REF_CLK7, + MANU_REF_CLK8, + MANU_REF_CLK9, + MANU_REF_CLK10, + MANU_REF_CLK11, + MANU_REF_CLK12, + MANU_REF_CLK13, + MANU_REF_CLK14, + MANU_REF_CLK15, + MANU_REF_WRITE_PHASE, + MANU_REF_WRITE_FREQUENCY, + MANU_REF_XO_DPLL, + MANU_REF_MAX = MANU_REF_XO_DPLL, +}; + enum hw_tod_write_trig_sel { HW_TOD_WR_TRIG_SEL_MIN = 0, HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN, diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 3d43c60b49fa8d..1f7c33b2f5a3f4 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -28,6 +28,7 @@ #define PHY_ID_KSZ9031 0x00221620 #define PHY_ID_KSZ9131 0x00221640 #define PHY_ID_LAN8814 0x00221660 +#define PHY_ID_LAN8804 0x00221670 #define PHY_ID_KSZ886X 0x00221430 #define PHY_ID_KSZ8863 0x00221435 diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 30bb59fe970cbb..6646634a0b9d44 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1436,7 +1436,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); -int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr, int port, int qpn, u16 prio, u64 *reg_id); void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index a858bcb6220b5d..1834c8fad12ea1 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -92,26 +92,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); -static inline u64 mlx4_mac_to_u64(u8 *addr) -{ - u64 mac = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) { - mac <<= 8; - mac |= addr[i]; - } - return mac; -} - -static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) -{ - int i; - - for (i = ETH_ALEN; i > 0; i--) { - addr[i - 1] = mac & 0xFF; - mac >>= 8; - } -} - #endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 66eaf0aa7f698b..9c25edfd59a676 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -290,6 +290,7 @@ enum { MLX5_UMR_INLINE = (1 << 7), }; +#define MLX5_UMR_KLM_ALIGNMENT 4 #define MLX5_UMR_MTT_ALIGNMENT 0x40 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT @@ -541,19 +542,21 @@ struct mlx5_cmd_layout { u8 status_own; }; -enum mlx5_fatal_assert_bit_offsets { - MLX5_RFR_OFFSET = 31, +enum mlx5_rfr_severity_bit_offsets { + MLX5_RFR_BIT_OFFSET = 0x7, }; struct health_buffer { - __be32 assert_var[5]; - __be32 rsvd0[3]; + __be32 assert_var[6]; + __be32 rsvd0[2]; __be32 assert_exit_ptr; __be32 assert_callra; - __be32 rsvd1[2]; + __be32 rsvd1[1]; + __be32 time; __be32 fw_ver; 
__be32 hw_id; - __be32 rfr; + u8 rfr_severity; + u8 rsvd2[3]; u8 irisc_index; u8 synd; __be16 ext_synd; @@ -577,7 +580,9 @@ struct mlx5_init_seg { __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; - __be32 rsvd2[880]; + __be32 rsvd2[878]; + __be32 cmd_exec_to; + __be32 cmd_q_init_to; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; @@ -795,10 +800,23 @@ struct mlx5_cqe64 { u8 tls_outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; - u8 lro_tcppsh_abort_dupack; - u8 lro_min_ttl; - __be16 lro_tcp_win; - __be32 lro_ack_seq_num; + union { + struct { + u8 tcppsh_abort_dupack; + u8 min_ttl; + __be16 tcp_win; + __be32 ack_seq_num; + } lro; + struct { + u8 reserved0:1; + u8 match:1; + u8 flush:1; + u8 reserved3:5; + u8 header_size; + __be16 header_entry_index; + __be32 data_offset; + } shampo; + }; __be32 rss_hash_result; u8 rss_hash_type; u8 ml_path; @@ -868,7 +886,7 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { - return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; + return (cqe->lro.tcppsh_abort_dupack >> 6) & 1; } static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) @@ -1182,7 +1200,9 @@ enum mlx5_cap_type { MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, + MLX5_CAP_DEV_SHAMPO = 0x1d, MLX5_CAP_GENERAL_2 = 0x20, + MLX5_CAP_PORT_SELECTION = 0x25, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1340,6 +1360,20 @@ enum mlx5_qcam_feature_groups { MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) +#define MLX5_CAP_PORT_SELECTION(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap) + +#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap) + +#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ + MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) + +#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) @@ -1412,6 +1446,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_IPSEC(mdev, cap)\ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) +#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\ + MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@ -1456,6 +1493,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } +#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2 +#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f17d2101af7a0e..a623ec635947a1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -59,15 +59,13 @@ #define MLX5_ADEV_NAME "mlx5_core" +#define MLX5_IRQ_EQ_CTRL (U8_MAX) + enum { MLX5_BOARD_ID_LEN = 64, }; enum { - /* one minute for the sake of bringup. 
Generally, commands must always - * complete and we may need to increase this timeout value - */ - MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -136,6 +134,7 @@ enum { MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MRTC = 0x902d, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, MLX5_REG_MTRC_STDB = 0x9042, @@ -154,6 +153,7 @@ enum { MLX5_REG_MIRC = 0x9162, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_DTOR = 0xC00E, }; enum mlx5_qpts_trust_state { @@ -357,22 +357,6 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; -enum { - MLX5_MKEY_MR = 1, - MLX5_MKEY_MW, - MLX5_MKEY_INDIRECT_DEVX, -}; - -struct mlx5_core_mkey { - u64 iova; - u64 size; - u32 key; - u32 pd; - u32 type; - struct wait_queue_head wait; - refcount_t usecount; -}; - #define MLX5_24BIT_MASK ((1 << 24) - 1) enum mlx5_res_type { @@ -441,6 +425,7 @@ struct mlx5_core_health { struct work_struct report_work; struct devlink_health_reporter *fw_reporter; struct devlink_health_reporter *fw_fatal_reporter; + struct delayed_work update_fw_log_ts_work; }; struct mlx5_qp_table { @@ -653,7 +638,7 @@ struct mlx5e_resources { struct mlx5e_hw_objs { u32 pdn; struct mlx5_td td; - struct mlx5_core_mkey mkey; + u32 mkey; struct mlx5_sq_bfreg bfreg; } hw_objs; struct devlink_port dl_port; @@ -752,6 +737,7 @@ struct mlx5_core_dev { u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; u8 embedded_cpu; } caps; + struct mlx5_timeouts *timeouts; u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; @@ -1005,8 +991,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_flush(struct mlx5_core_dev *dev); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); @@ -1024,13 +1008,11 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *out, int outlen); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); @@ -1242,6 +1224,16 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) return MLX5_CAP_GEN(dev, native_port_num); } +static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) +{ + int idx = MLX5_CAP_GEN(dev, native_port_num); + + if (idx >= 1 && idx <= MLX5_MAX_PORTS) + return idx - 1; + else + return PCI_FUNC(dev->pdev->devfn); +} + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; @@ -1250,11 +1242,12 @@ static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) { struct devlink *devlink = priv_to_devlink(dev); 
union devlink_param_value val; + int err; - devlink_param_driverinit_value_get(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, - &val); - return val.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + &val); + return err ? MLX5_CAP_GEN(dev, roce) : val.vbool; } #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h index cea6ecb4b73ee7..ea3ff5a8ced3e4 100644 --- a/include/linux/mlx5/eq.h +++ b/include/linux/mlx5/eq.h @@ -4,7 +4,6 @@ #ifndef MLX5_CORE_EQ_H #define MLX5_CORE_EQ_H -#define MLX5_IRQ_VEC_COMP_BASE 1 #define MLX5_NUM_CMD_EQE (32) #define MLX5_NUM_ASYNC_EQE (0x1000) #define MLX5_NUM_SPARE_EQE (0x80) diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 4ab5c1fc1270df..97afcea39a7bf4 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, #define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET) #define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET) #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */ +#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT /* 0x7FF is a reserved mapping */ #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK +/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */ +#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1) +#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \ + ESW_TUN_OPTS_BITS) | \ + ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN) +#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \ + GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \ + ESW_TUN_OPTS_OFFSET + 1) u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 0106c67e8ccbda..cd2d4c572367ec 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -83,6 +83,9 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, MLX5_FLOW_NAMESPACE_RDMA_TX, + MLX5_FLOW_NAMESPACE_PORT_SEL, + MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS, + MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, }; enum { @@ -97,6 +100,7 @@ enum { struct mlx5_pkt_reformat; struct mlx5_modify_hdr; +struct mlx5_flow_definer; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; @@ -241,6 +245,10 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_flow_destination *old_dest); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); + +/* As mlx5_fc_create() but doesn't queue stats refresh thread. 
*/ +struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging); + void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, @@ -257,6 +265,13 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); +struct mlx5_flow_definer * +mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask); +void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer); +int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer); struct mlx5_pkt_reformat_params { int type; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 993204a6c1a137..3636df90899a24 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -94,6 +94,7 @@ enum { enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, + MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, @@ -342,7 +343,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; - u8 reserved_at_1e[0x1]; + u8 source_vhca_port[0x1]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; @@ -393,6 +394,14 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 metadata_reg_c_0[0x1]; }; +struct mlx5_ifc_flow_table_fields_supported_2_bits { + u8 reserved_at_0[0xe]; + u8 bth_opcode[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x60]; +}; + struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 reserved_at_1[0x1]; @@ -539,7 +548,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { union mlx5_ifc_gre_key_bits gre_key; u8 vxlan_vni[0x18]; - u8 reserved_at_b8[0x8]; + u8 bth_opcode[0x8]; u8 geneve_vni[0x18]; u8 reserved_at_d8[0x7]; @@ -756,7 +765,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x1200]; + u8 reserved_at_e00[0x700]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma; + + u8 reserved_at_1580[0x280]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma; + + u8 reserved_at_1880[0x780]; u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; @@ -767,6 +784,18 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 reserved_at_20c0[0x5f40]; }; +struct mlx5_ifc_port_selection_cap_bits { + u8 reserved_at_0[0x10]; + u8 port_select_flow_table[0x1]; + u8 reserved_at_11[0xf]; + + u8 reserved_at_20[0x1e0]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection; + + u8 reserved_at_400[0x7c00]; +}; + enum { MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, @@ -1306,7 +1335,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_resource_manager[0x1]; u8 hca_cap_2[0x1]; - u8 reserved_at_21[0x2]; + u8 reserved_at_21[0x1]; + u8 dtor[0x1]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; u8 event_on_vhca_state_active[0x1]; @@ -1336,7 +1366,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_b0[0x1]; u8 uplink_follow[0x1]; u8 ts_cqe_to_dest_cqn[0x1]; - u8 reserved_at_b3[0xd]; + u8 reserved_at_b3[0x7]; + u8 shampo[0x1]; + 
u8 reserved_at_bb[0x5]; u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; @@ -1514,7 +1546,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; - u8 reserved_at_248[0x2]; + u8 port_selection_cap[0x1]; + u8 reserved_at_248[0x1]; u8 umem_uid_0[0x1]; u8 reserved_at_250[0x5]; u8 log_pg_sz[0x8]; @@ -1587,7 +1620,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_tis_per_sq[0x5]; u8 ext_stride_num_range[0x1]; - u8 reserved_at_3a1[0x2]; + u8 roce_rw_supported[0x1]; + u8 reserved_at_3a2[0x1]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@ -1716,7 +1750,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; - u8 reserved_at_6e0[0x10]; + u8 max_num_match_definer[0x10]; u8 sf_base_id[0x10]; u8 flex_parser_id_gtpu_dw_2[0x4]; @@ -1731,7 +1765,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_760[0x20]; u8 vhca_tunnel_commands[0x40]; - u8 reserved_at_7c0[0x40]; + u8 match_definer_format_supported[0x40]; }; struct mlx5_ifc_cmd_hca_cap_2_bits { @@ -1750,6 +1784,7 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, + MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8, MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, @@ -1876,7 +1911,21 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; - u8 reserved_at_140[0x4c0]; + u8 reserved_at_140[0x80]; + + u8 headers_mkey[0x20]; + + u8 shampo_enable[0x1]; + u8 reserved_at_1e1[0x4]; + u8 log_reservation_size[0x3]; + u8 reserved_at_1e8[0x5]; + u8 log_max_num_of_packets_per_reservation[0x3]; + u8 reserved_at_1f0[0x6]; + u8 log_headers_entry_size[0x2]; + u8 reserved_at_1f8[0x4]; + u8 log_headers_buffer_entry_num[0x4]; + + u8 reserved_at_200[0x400]; struct mlx5_ifc_cmd_pas_bits pas[]; }; @@ -2807,6 +2856,40 @@ struct mlx5_ifc_dropped_packet_logged_bits { u8 reserved_at_0[0xe0]; }; +struct mlx5_ifc_default_timeout_bits { + u8 to_multiplier[0x3]; + u8 reserved_at_3[0x9]; + u8 to_value[0x14]; +}; + +struct mlx5_ifc_dtor_reg_bits { + u8 reserved_at_0[0x20]; + + struct mlx5_ifc_default_timeout_bits pcie_toggle_to; + + u8 reserved_at_40[0x60]; + + struct mlx5_ifc_default_timeout_bits health_poll_to; + + struct mlx5_ifc_default_timeout_bits full_crdump_to; + + struct mlx5_ifc_default_timeout_bits fw_reset_to; + + struct mlx5_ifc_default_timeout_bits flush_on_err_to; + + struct mlx5_ifc_default_timeout_bits pci_sync_update_to; + + struct mlx5_ifc_default_timeout_bits tear_down_to; + + struct mlx5_ifc_default_timeout_bits fsm_reactivate_to; + + struct mlx5_ifc_default_timeout_bits reclaim_pages_to; + + struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to; + + u8 reserved_at_1c0[0x40]; +}; + enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, @@ -3118,6 +3201,20 @@ struct mlx5_ifc_roce_addr_layout_bits { u8 reserved_at_e0[0x20]; }; +struct mlx5_ifc_shampo_cap_bits { + u8 reserved_at_0[0x3]; + u8 shampo_log_max_reservation_size[0x5]; + u8 reserved_at_8[0x3]; + u8 shampo_log_min_reservation_size[0x5]; + u8 shampo_min_mss_size[0x10]; + + u8 reserved_at_20[0x3]; + u8 shampo_max_log_headers_entry_size[0x5]; + u8 reserved_at_28[0x18]; + + u8 reserved_at_40[0x7c0]; +}; + union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct 
mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; @@ -3128,6 +3225,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; + struct mlx5_ifc_port_selection_cap_bits port_selection_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; struct mlx5_ifc_debug_cap_bits debug_cap; @@ -3135,6 +3233,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; + struct mlx5_ifc_shampo_cap_bits shampo_cap; u8 reserved_at_0[0x8000]; }; @@ -3309,8 +3408,9 @@ enum { }; enum { - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, + MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0), + MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1), + MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2), }; enum { @@ -3335,7 +3435,7 @@ struct mlx5_ifc_tirc_bits { u8 reserved_at_80[0x4]; u8 lro_timeout_period_usecs[0x10]; - u8 lro_enable_mask[0x4]; + u8 packet_merge_mask[0x4]; u8 lro_max_ip_payload_size[0x8]; u8 reserved_at_a0[0x40]; @@ -3517,6 +3617,18 @@ enum { MLX5_RQC_STATE_ERR = 0x3, }; +enum { + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0, + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1, + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2, +}; + +enum { + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0, + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1, + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2, +}; + struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; @@ -3549,7 +3661,13 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_c0[0x10]; u8 hairpin_peer_vhca[0x10]; - u8 reserved_at_e0[0xa0]; + u8 reserved_at_e0[0x46]; + u8 shampo_no_match_alignment_granularity[0x2]; + u8 reserved_at_128[0x6]; + u8 shampo_match_criteria_type[0x2]; + u8 reservation_timeout[0x10]; + + u8 reserved_at_140[0x40]; struct mlx5_ifc_wq_bits wq; }; @@ -4097,13 +4215,19 @@ struct mlx5_ifc_health_buffer_bits { u8 assert_callra[0x20]; - u8 reserved_at_140[0x40]; + u8 reserved_at_140[0x20]; + + u8 time[0x20]; u8 fw_version[0x20]; u8 hw_id[0x20]; - u8 reserved_at_1c0[0x20]; + u8 rfr[0x1]; + u8 reserved_at_1c1[0x3]; + u8 valid[0x1]; + u8 severity[0x3]; + u8 reserved_at_1c8[0x18]; u8 irisc_index[0x8]; u8 synd[0x8]; @@ -5616,6 +5740,236 @@ struct mlx5_ifc_query_fte_in_bits { u8 reserved_at_120[0xe0]; }; +struct mlx5_ifc_match_definer_format_0_bits { + u8 reserved_at_0[0x100]; + + u8 metadata_reg_c_0[0x20]; + + u8 metadata_reg_c_1[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_dmac_15_0[0x10]; + u8 outer_ethertype[0x10]; + + u8 reserved_at_180[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 outer_l4_type_ext[0x4]; + u8 reserved_at_1a4[0x2]; + u8 outer_ipsec_layer[0x2]; + u8 outer_l2_type[0x2]; + u8 force_lb[0x1]; + u8 outer_l2_ok[0x1]; + u8 outer_l3_ok[0x1]; + u8 outer_l4_ok[0x1]; + u8 outer_second_vlan_type[0x2]; + u8 outer_second_vlan_prio[0x3]; + u8 outer_second_vlan_cfi[0x1]; + u8 outer_second_vlan_vid[0xc]; + + u8 outer_smac_47_16[0x20]; + + u8 
outer_smac_15_0[0x10]; + u8 inner_ipv4_checksum_ok[0x1]; + u8 inner_l4_checksum_ok[0x1]; + u8 outer_ipv4_checksum_ok[0x1]; + u8 outer_l4_checksum_ok[0x1]; + u8 inner_l3_ok[0x1]; + u8 inner_l4_ok[0x1]; + u8 outer_l3_ok_duplicate[0x1]; + u8 outer_l4_ok_duplicate[0x1]; + u8 outer_tcp_cwr[0x1]; + u8 outer_tcp_ece[0x1]; + u8 outer_tcp_urg[0x1]; + u8 outer_tcp_ack[0x1]; + u8 outer_tcp_psh[0x1]; + u8 outer_tcp_rst[0x1]; + u8 outer_tcp_syn[0x1]; + u8 outer_tcp_fin[0x1]; +}; + +struct mlx5_ifc_match_definer_format_22_bits { + u8 reserved_at_0[0x100]; + + u8 outer_ip_src_addr[0x20]; + + u8 outer_ip_dest_addr[0x20]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 metadata_reg_c_0[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; +}; + +struct mlx5_ifc_match_definer_format_23_bits { + u8 reserved_at_0[0x100]; + + u8 inner_ip_src_addr[0x20]; + + u8 inner_ip_dest_addr[0x20]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 inner_ip_frag[0x1]; + u8 inner_qp_type[0x2]; + u8 inner_encap_type[0x2]; + u8 port_number[0x2]; + u8 inner_l3_type[0x2]; + u8 inner_l4_type[0x2]; + u8 inner_first_vlan_type[0x2]; + u8 inner_first_vlan_prio[0x3]; + u8 inner_first_vlan_cfi[0x1]; + u8 inner_first_vlan_vid[0xc]; + + u8 tunnel_header_0[0x20]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; +}; + +struct mlx5_ifc_match_definer_format_29_bits { + u8 reserved_at_0[0xc0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_1e0[0x20]; +}; + +struct mlx5_ifc_match_definer_format_30_bits { + u8 reserved_at_0[0xa0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; +}; + +struct mlx5_ifc_match_definer_format_31_bits { + u8 reserved_at_0[0xc0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + u8 reserved_at_1e0[0x20]; +}; + +struct mlx5_ifc_match_definer_format_32_bits { + u8 reserved_at_0[0xa0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; +}; + +struct mlx5_ifc_match_definer_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x10]; + u8 format_id[0x10]; + + u8 reserved_at_a0[0x160]; + + u8 match_mask[16][0x20]; +}; + +struct mlx5_ifc_general_obj_in_cmd_hdr_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 vhca_tunnel_id[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_general_obj_out_cmd_hdr_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_match_definer_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits 
general_obj_in_cmd_hdr; + + struct mlx5_ifc_match_definer_bits obj_context; +}; + +struct mlx5_ifc_create_match_definer_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@ -6369,7 +6723,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits { u8 reserved_at_3c[0x1]; u8 hash[0x1]; u8 reserved_at_3e[0x1]; - u8 lro[0x1]; + u8 packet_merge[0x1]; }; struct mlx5_ifc_modify_tir_out_bits { @@ -7569,7 +7923,7 @@ struct mlx5_ifc_dealloc_uar_out_bits { struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8089,6 +8443,11 @@ struct mlx5_ifc_create_flow_group_out_bits { u8 reserved_at_60[0x20]; }; +enum { + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1, +}; + enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@ -8110,7 +8469,9 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 reserved_at_60[0x20]; u8 table_type[0x8]; - u8 reserved_at_88[0x18]; + u8 reserved_at_88[0x4]; + u8 group_type[0x4]; + u8 reserved_at_90[0x10]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; @@ -8125,7 +8486,10 @@ struct mlx5_ifc_create_flow_group_in_bits { u8 end_flow_index[0x20]; - u8 reserved_at_140[0xa0]; + u8 reserved_at_140[0x10]; + u8 match_definer_id[0x10]; + + u8 reserved_at_160[0x80]; u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; @@ -8416,7 +8780,7 @@ struct mlx5_ifc_alloc_uar_out_bits { struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -10060,6 +10424,17 @@ struct mlx5_ifc_pddr_reg_bits { union mlx5_ifc_pddr_reg_page_data_auto_bits page_data; }; +struct mlx5_ifc_mrtc_reg_bits { + u8 time_synced[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0x20]; + + u8 time_h[0x20]; + + u8 time_l[0x20]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -10121,6 +10496,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mirc_reg_bits mirc_reg; struct mlx5_ifc_mfrl_reg_bits mfrl_reg; struct mlx5_ifc_mtutc_reg_bits mtutc_reg; + struct mlx5_ifc_mrtc_reg_bits mrtc_reg; u8 reserved_at_0[0x60e0]; }; @@ -10398,9 +10774,16 @@ struct mlx5_ifc_dcbx_param_bits { u8 reserved_at_a0[0x160]; }; +enum { + MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT, +}; + struct mlx5_ifc_lagc_bits { u8 fdb_selection_mode[0x1]; - u8 reserved_at_1[0x1c]; + u8 reserved_at_1[0x14]; + u8 port_select_mode[0x3]; + u8 reserved_at_18[0x5]; u8 lag_state[0x3]; u8 reserved_at_20[0x14]; @@ -10614,29 +10997,6 @@ struct mlx5_ifc_dealloc_memic_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_general_obj_in_cmd_hdr_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 vhca_tunnel_id[0x10]; - u8 obj_type[0x10]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_general_obj_out_cmd_hdr_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; -}; - struct mlx5_ifc_umem_bits { u8 reserved_at_0[0x80]; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h 
index f92686cf2cdb6d..8f3131477ec695 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -105,18 +105,7 @@ struct page { struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; - union { - /** - * dma_addr_upper: might require a 64-bit - * value on 32-bit architectures. - */ - unsigned long dma_addr_upper; - /** - * For frag page support, not supported in - * 32-bit architectures with 64-bit DMA. - */ - atomic_long_t pp_frag_count; - }; + atomic_long_t pp_frag_count; }; struct { /* slab, slob and slub */ union { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d79163208dfdbf..3ec42495a43a56 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1861,6 +1861,7 @@ enum netdev_ml_priv_type { * @xps_maps: XXX: need comments on this one * @miniq_egress: clsact qdisc specific data for * egress processing + * @nf_hooks_egress: netfilter hooks executed for egress packets * @qdisc_hash: qdisc hash table * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) @@ -1916,7 +1917,6 @@ enum netdev_ml_priv_type { * @sfp_bus: attached &struct sfp_bus structure. * * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -2161,6 +2161,9 @@ struct net_device { #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_egress; #endif +#ifdef CONFIG_NETFILTER_EGRESS + struct nf_hook_entries __rcu *nf_hooks_egress; +#endif #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); @@ -2250,7 +2253,6 @@ struct net_device { struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; - struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; unsigned threaded:1; @@ -2360,13 +2362,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, #define netdev_lockdep_set_classes(dev) \ { \ static struct lock_class_key qdisc_tx_busylock_key; \ - static struct lock_class_key qdisc_running_key; \ static struct lock_class_key qdisc_xmit_lock_key; \ static struct lock_class_key dev_addr_list_lock_key; \ unsigned int i; \ \ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ - (dev)->qdisc_running_key = &qdisc_running_key; \ lockdep_set_class(&(dev)->addr_list_lock, \ &dev_addr_list_lock_key); \ for (i = 0; i < (dev)->num_tx_queues; i++) \ @@ -2955,6 +2955,7 @@ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); +bool netdev_name_in_use(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); void dev_close(struct net_device *dev); @@ -4642,7 +4643,7 @@ void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ static inline void -__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len) +__dev_addr_set(struct net_device *dev, const void *addr, size_t len) { memcpy(dev->dev_addr, addr, len); } @@ -4654,7 +4655,7 @@ static inline void dev_addr_set(struct net_device *dev, const u8 *addr) static inline void 
dev_addr_mod(struct net_device *dev, unsigned int offset, - const u8 *addr, size_t len) + const void *addr, size_t len) { memcpy(&dev->dev_addr[offset], addr, len); } @@ -4800,8 +4801,6 @@ struct netdev_nested_priv { bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); -struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, - struct list_head **iter); #ifdef CONFIG_LOCKDEP static LIST_HEAD(net_unlink_list); @@ -5236,7 +5235,7 @@ static inline void netif_keep_dst(struct net_device *dev) static inline bool netif_reduces_vlan_mtu(struct net_device *dev) { /* TODO: reserve and use an additional IFF bit, if we get more users */ - return dev->priv_flags & IFF_MACSEC; + return netif_is_macsec(dev); } extern struct pernet_operations __net_initdata loopback_net_ops; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 4f9a4b3c589261..a40aaf645fa479 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -54,9 +54,8 @@ int arpt_register_table(struct net *net, const struct xt_table *table, const struct nf_hook_ops *ops); void arpt_unregister_table(struct net *net, const char *name); void arpt_unregister_table_pre_exit(struct net *net, const char *name); -extern unsigned int arpt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table); +extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 10a01978bc0d38..a13296d6c7ceb2 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -112,9 +112,8 @@ extern int ebt_register_table(struct net *net, const struct nf_hook_ops *ops); extern void ebt_unregister_table(struct net *net, const char *tablename); void ebt_unregister_table_pre_exit(struct net *net, const char *tablename); -extern unsigned int ebt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct ebt_table *table); +extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h deleted file mode 100644 index a13774be2eb5b3..00000000000000 --- a/include/linux/netfilter_ingress.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _NETFILTER_INGRESS_H_ -#define _NETFILTER_INGRESS_H_ - -#include -#include - -#ifdef CONFIG_NETFILTER_INGRESS -static inline bool nf_hook_ingress_active(const struct sk_buff *skb) -{ -#ifdef CONFIG_JUMP_LABEL - if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) - return false; -#endif - return rcu_access_pointer(skb->dev->nf_hooks_ingress); -} - -/* caller must hold rcu_read_lock */ -static inline int nf_hook_ingress(struct sk_buff *skb) -{ - struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); - struct nf_hook_state state; - int ret; - - /* Must recheck the ingress hook head, in the event it became NULL - * after the check in nf_hook_ingress_active evaluated to true. 
- */ - if (unlikely(!e)) - return 0; - - nf_hook_state_init(&state, NF_NETDEV_INGRESS, - NFPROTO_NETDEV, skb->dev, NULL, NULL, - dev_net(skb->dev), NULL); - ret = nf_hook_slow(skb, &state, e, 0); - if (ret == 0) - return -1; - - return ret; -} - -static inline void nf_hook_ingress_init(struct net_device *dev) -{ - RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL); -} -#else /* CONFIG_NETFILTER_INGRESS */ -static inline int nf_hook_ingress_active(struct sk_buff *skb) -{ - return 0; -} - -static inline int nf_hook_ingress(struct sk_buff *skb) -{ - return 0; -} - -static inline void nf_hook_ingress_init(struct net_device *dev) {} -#endif /* CONFIG_NETFILTER_INGRESS */ -#endif /* _NETFILTER_INGRESS_H_ */ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 8d09bfe850dc37..132b0e4a6d4df6 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -63,9 +63,9 @@ struct ipt_error { } extern void *ipt_alloc_initial_table(const struct xt_table *); -extern unsigned int ipt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table); +extern unsigned int ipt_do_table(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 79e73fd7d965c2..8b8885a73c7649 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -29,9 +29,8 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, const struct nf_hook_ops *ops); void ip6t_unregister_table_pre_exit(struct net *net, const char *name); void ip6t_unregister_table_exit(struct net *net, const char *name); -extern unsigned int ip6t_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table); +extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h new file mode 100644 index 00000000000000..b71b57a83bb4f0 --- /dev/null +++ b/include/linux/netfilter_netdev.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NETFILTER_NETDEV_H_ +#define _NETFILTER_NETDEV_H_ + +#include +#include + +#ifdef CONFIG_NETFILTER_INGRESS +static inline bool nf_hook_ingress_active(const struct sk_buff *skb) +{ +#ifdef CONFIG_JUMP_LABEL + if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) + return false; +#endif + return rcu_access_pointer(skb->dev->nf_hooks_ingress); +} + +/* caller must hold rcu_read_lock */ +static inline int nf_hook_ingress(struct sk_buff *skb) +{ + struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); + struct nf_hook_state state; + int ret; + + /* Must recheck the ingress hook head, in the event it became NULL + * after the check in nf_hook_ingress_active evaluated to true. 
+ */ + if (unlikely(!e)) + return 0; + + nf_hook_state_init(&state, NF_NETDEV_INGRESS, + NFPROTO_NETDEV, skb->dev, NULL, NULL, + dev_net(skb->dev), NULL); + ret = nf_hook_slow(skb, &state, e, 0); + if (ret == 0) + return -1; + + return ret; +} + +#else /* CONFIG_NETFILTER_INGRESS */ +static inline int nf_hook_ingress_active(struct sk_buff *skb) +{ + return 0; +} + +static inline int nf_hook_ingress(struct sk_buff *skb) +{ + return 0; +} +#endif /* CONFIG_NETFILTER_INGRESS */ + +#ifdef CONFIG_NETFILTER_EGRESS +static inline bool nf_hook_egress_active(void) +{ +#ifdef CONFIG_JUMP_LABEL + if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS])) + return false; +#endif + return true; +} + +/** + * nf_hook_egress - classify packets before transmission + * @skb: packet to be classified + * @rc: result code which shall be returned by __dev_queue_xmit() on failure + * @dev: netdev whose egress hooks shall be applied to @skb + * + * Returns @skb on success or %NULL if the packet was consumed or filtered. + * Caller must hold rcu_read_lock. + * + * On ingress, packets are classified first by tc, then by netfilter. + * On egress, the order is reversed for symmetry. Conceptually, tc and + * netfilter can be thought of as layers, with netfilter layered above tc: + * When tc redirects a packet to another interface, netfilter is not applied + * because the packet is on the tc layer. + * + * The nf_skip_egress flag controls whether netfilter is applied on egress. + * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the + * packet passes through tc and netfilter. Because __dev_queue_xmit() may be + * called recursively by tunnel drivers such as vxlan, the flag is reverted to + * false after sch_handle_egress(). This ensures that netfilter is applied + * both on the overlay and underlying network. 
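As an illustrative aside (not part of the patch itself), a transmit path could consult the new egress hook roughly as follows; the function name is hypothetical, and only nf_hook_egress_active()/nf_hook_egress() and the NET_XMIT_* codes come from the kernel headers shown here. A minimal sketch, assuming <linux/netdevice.h> and <linux/netfilter_netdev.h> are included:

	static int example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int rc = NET_XMIT_SUCCESS;

		rcu_read_lock();	/* nf_hook_egress() requires rcu_read_lock */
		if (nf_hook_egress_active()) {
			skb = nf_hook_egress(skb, &rc, dev);
			if (!skb) {
				/* consumed or dropped by netfilter; rc already set */
				rcu_read_unlock();
				return rc;
			}
		}
		/* ... hand skb to the qdisc / driver here ... */
		rcu_read_unlock();
		return rc;
	}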
+ */ +static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, + struct net_device *dev) +{ + struct nf_hook_entries *e; + struct nf_hook_state state; + int ret; + +#ifdef CONFIG_NETFILTER_SKIP_EGRESS + if (skb->nf_skip_egress) + return skb; +#endif + + e = rcu_dereference(dev->nf_hooks_egress); + if (!e) + return skb; + + nf_hook_state_init(&state, NF_NETDEV_EGRESS, + NFPROTO_NETDEV, dev, NULL, NULL, + dev_net(dev), NULL); + ret = nf_hook_slow(skb, &state, e, 0); + + if (ret == 1) { + return skb; + } else if (ret < 0) { + *rc = NET_XMIT_DROP; + return NULL; + } else { /* ret == 0 */ + *rc = NET_XMIT_SUCCESS; + return NULL; + } +} +#else /* CONFIG_NETFILTER_EGRESS */ +static inline bool nf_hook_egress_active(void) +{ + return false; +} + +static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, + struct net_device *dev) +{ + return skb; +} +#endif /* CONFIG_NETFILTER_EGRESS */ + +static inline void nf_skip_egress(struct sk_buff *skb, bool skip) +{ +#ifdef CONFIG_NETFILTER_SKIP_EGRESS + skb->nf_skip_egress = skip; +#endif +} + +static inline void nf_hook_netdev_init(struct net_device *dev) +{ +#ifdef CONFIG_NETFILTER_INGRESS + RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL); +#endif +#ifdef CONFIG_NETFILTER_EGRESS + RCU_INIT_POINTER(dev->nf_hooks_egress, NULL); +#endif +} + +#endif /* _NETFILTER_NETDEV_H_ */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 61b1c7fcc401ee..1ec631838af9b5 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -156,10 +156,6 @@ bool netlink_strict_get_check(struct sk_buff *skb); int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); -int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, - __u32 portid, __u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), - void *filter_data); int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); int netlink_register_notifier(struct notifier_block *nb); int netlink_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/of_net.h b/include/linux/of_net.h index daef3b0d9270de..0484b613ca6477 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -8,12 +8,13 @@ #include -#ifdef CONFIG_OF_NET +#if defined(CONFIG_OF) && defined(CONFIG_NET) #include struct net_device; extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface); extern int of_get_mac_address(struct device_node *np, u8 *mac); +int of_get_ethdev_address(struct device_node *np, struct net_device *dev); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np, @@ -27,6 +28,11 @@ static inline int of_get_mac_address(struct device_node *np, u8 *mac) return -ENODEV; } +static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev) +{ + return -ENODEV; +} + static inline struct net_device *of_find_net_device_by_node(struct device_node *np) { return NULL; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 22212f2a821995..0dcfd265beed5b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -57,6 +57,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include struct perf_callchain_entry { @@ -1615,4 +1616,26 @@ extern void __weak arch_perf_update_userpage(struct 
perf_event *event, extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); #endif +/* + * Snapshot branch stack on software events. + * + * Branch stack can be very useful in understanding software events. For + * example, when a long function, e.g. sys_perf_event_open, returns an + * errno, it is not obvious why the function failed. Branch stack could + * provide very helpful information in this type of scenarios. + * + * On software event, it is necessary to stop the hardware branch recorder + * fast. Otherwise, the hardware register/buffer will be flushed with + * entries of the triggering event. Therefore, static call is used to + * stop the hardware recorder. + */ + +/* + * cnt is the number of entries allocated for entries. + * Return number of entries copied to . + */ +typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, + unsigned int cnt); +DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); + #endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 736e1d1a47c408..96e43fbb2dd89f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -155,6 +155,40 @@ typedef enum { PHY_INTERFACE_MODE_MAX, } phy_interface_t; +/* PHY interface mode bitmap handling */ +#define DECLARE_PHY_INTERFACE_MASK(name) \ + DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX) + +static inline void phy_interface_zero(unsigned long *intf) +{ + bitmap_zero(intf, PHY_INTERFACE_MODE_MAX); +} + +static inline bool phy_interface_empty(const unsigned long *intf) +{ + return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX); +} + +static inline void phy_interface_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX); +} + +static inline void phy_interface_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX); +} + +static inline void phy_interface_set_rgmii(unsigned long *intf) +{ + __set_bit(PHY_INTERFACE_MODE_RGMII, intf); + __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf); + __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf); + __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf); +} + /* * phy_supported_speeds - return all speeds currently supported by a PHY device */ @@ -1584,6 +1618,7 @@ int genphy_c45_config_aneg(struct phy_device *phydev); int genphy_c45_loopback(struct phy_device *phydev, bool enable); int genphy_c45_pma_resume(struct phy_device *phydev); int genphy_c45_pma_suspend(struct phy_device *phydev); +int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable); /* Generic C45 PHY driver */ extern struct phy_driver genphy_c45_driver; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 237291196ce28f..f037470b6fb33b 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -67,6 +67,8 @@ enum phylink_op_type { * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. + * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx + * are supported by the MAC/PCS. 
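As a brief, hypothetical illustration of the new PHY interface bitmap helpers together with the supported_interfaces member documented above (the driver name and the particular interface modes are assumptions, not taken from the patch):

	/* assumes <linux/phy.h> and <linux/phylink.h> */
	static void foo_mac_init_interfaces(struct phylink_config *config)
	{
		/* Start from an empty mask, then mark what this MAC can drive. */
		phy_interface_zero(config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces);
		phy_interface_set_rgmii(config->supported_interfaces);
	}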
*/ struct phylink_config { struct device *dev; @@ -76,6 +78,7 @@ struct phylink_config { bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); + DECLARE_PHY_INTERFACE_MASK(supported_interfaces); }; /** @@ -133,8 +136,14 @@ struct phylink_mac_ops { * based on @state->advertising and/or @state->speed and update * @state->interface accordingly. See phylink_helper_basex_speed(). * - * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the - * MAC driver to return all supported link modes. + * When @config->supported_interfaces has been set, phylink will iterate + * over the supported interfaces to determine the full capability of the + * MAC. The validation function must not print errors if @state->interface + * is set to an unexpected value. + * + * When @config->supported_interfaces is empty, phylink will call this + * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and + * expects the MAC driver to return all supported link modes. * * If the @state->interface mode is not supported, then the @supported * mask must be cleared. @@ -484,6 +493,7 @@ int phylink_speed_up(struct phylink *pl); #define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) void phylink_set_port_modes(unsigned long *bits); +void phylink_set_10g_modes(unsigned long *mask); void phylink_helper_basex_speed(struct phylink_link_state *state); void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h index 1d30bf2782318a..2b5676ff35bec7 100644 --- a/include/linux/platform_data/brcmfmac.h +++ b/include/linux/platform_data/brcmfmac.h @@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry { */ struct brcmfmac_pd_cc { int table_size; - struct brcmfmac_pd_cc_entry table[0]; + struct brcmfmac_pd_cc_entry table[]; }; /** diff --git a/include/linux/property.h b/include/linux/property.h index 357513a977e5d3..88fa726a76df7d 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -15,6 +15,7 @@ #include struct device; +struct net_device; enum dev_prop_type { DEV_PROP_U8, @@ -389,11 +390,7 @@ const void *device_get_match_data(struct device *dev); int device_get_phy_mode(struct device *dev); -void *device_get_mac_address(struct device *dev, char *addr, int alen); - int fwnode_get_phy_mode(struct fwnode_handle *fwnode); -void *fwnode_get_mac_address(struct fwnode_handle *fwnode, - char *addr, int alen); struct fwnode_handle *fwnode_graph_get_next_endpoint( const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 0a3807e927c58d..827624840ee294 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
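One small hunk above replaces the old zero-length array table[0] in struct brcmfmac_pd_cc with a C99 flexible array member table[]. As a hedged illustration of why that form is preferred (the allocation helper below is hypothetical, not taken from the brcmfmac driver), the element count can then be folded into the allocation size with struct_size():

	/* assumes <linux/overflow.h> and <linux/slab.h> */
	static struct brcmfmac_pd_cc *example_alloc_cc_table(int n_entries)
	{
		struct brcmfmac_pd_cc *cc;

		cc = kzalloc(struct_size(cc, table, n_entries), GFP_KERNEL);
		if (cc)
			cc->table_size = n_entries;
		return cc;
	}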
*/ #ifndef _COMMON_HSI_H @@ -47,10 +47,10 @@ #define ISCSI_CDU_TASK_SEG_TYPE 0 #define FCOE_CDU_TASK_SEG_TYPE 0 #define RDMA_CDU_TASK_SEG_TYPE 1 +#define ETH_CDU_TASK_SEG_TYPE 2 #define FW_ASSERT_GENERAL_ATTN_IDX 32 - /* Queue Zone sizes in bytes */ #define TSTORM_QZONE_SIZE 8 #define MSTORM_QZONE_SIZE 16 @@ -60,9 +60,12 @@ #define PSTORM_QZONE_SIZE 0 #define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 -#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 -#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 -#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 +#define ETH_MAX_RXQ_VF_DEFAULT 16 +#define ETH_MAX_RXQ_VF_DOUBLE 48 +#define ETH_MAX_RXQ_VF_QUAD 112 + +#define ETH_RGSRC_CTX_SIZE 6 +#define ETH_TGSRC_CTX_SIZE 6 /********************************/ /* CORE (LIGHT L2) FW CONSTANTS */ @@ -89,8 +92,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 42 -#define FW_REVISION_VERSION 2 +#define FW_MINOR_VERSION 59 +#define FW_REVISION_VERSION 1 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -112,6 +115,7 @@ #define MAX_NUM_VFS (MAX_NUM_VFS_K2) #define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2) #define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) #define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2) @@ -133,7 +137,7 @@ #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) /* CIDs */ -#define NUM_OF_CONNECTION_TYPES_E4 (8) +#define NUM_OF_CONNECTION_TYPES (8) #define NUM_OF_LCIDS (320) #define NUM_OF_LTIDS (320) @@ -144,7 +148,7 @@ #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ -#define TOOLS_VERSION 10 +#define TOOLS_VERSION 11 /*****************/ /* CDU CONSTANTS */ @@ -162,6 +166,7 @@ #define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) #define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) #define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) +#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d) /*****************/ /* DQ CONSTANTS */ @@ -302,6 +307,9 @@ /* PWM address mapping */ #define DQ_PWM_OFFSET_DPM_BASE 0x0 #define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28 +#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30 +#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 @@ -325,6 +333,13 @@ #define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \ (DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4) +#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \ + (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \ + (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4) +#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \ + (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1) + #define DQ_REGION_SHIFT (12) /* DPM */ @@ -360,6 +375,7 @@ /* Number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 +#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 @@ -379,7 +395,7 @@ #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ -#define PIS_PER_SB_E4 12 +#define PIS_PER_SB 12 #define MAX_PIS_PER_SB PIS_PER_SB #define CAU_HC_STOPPED_STATE 3 @@ -700,6 +716,13 @@ enum mf_mode { MAX_MF_MODE }; +/* Per protocol packet duplication enable bit vector. If set, duplicate + * offloaded traffic to LL2 debug queueu. 
+ */ +struct offload_pkt_dup_enable { + __le16 enable_vector; +}; + /* Per-protocol connection types */ enum protocol_type { PROTOCOLID_TCP_ULP, @@ -717,6 +740,12 @@ enum protocol_type { MAX_PROTOCOL_TYPE }; +/* Pstorm packet duplication config */ +struct pstorm_pkt_dup_cfg { + struct offload_pkt_dup_enable enable; + __le16 reserved[3]; +}; + struct regpair { __le32 lo; __le32 hi; @@ -728,10 +757,24 @@ struct rdma_eqe_destroy_qp { u8 reserved[4]; }; +/* RoCE Suspend Event Data */ +struct rdma_eqe_suspend_qp { + __le32 cid; + u8 reserved[4]; +}; + /* RDMA Event Data Union */ union rdma_eqe_data { struct regpair async_handle; struct rdma_eqe_destroy_qp rdma_destroy_qp_data; + struct rdma_eqe_suspend_qp rdma_suspend_qp_data; +}; + +/* Tstorm packet duplication config */ +struct tstorm_pkt_dup_cfg { + struct offload_pkt_dup_enable enable; + __le16 reserved; + __le32 cid; }; struct tstorm_queue_zone { @@ -891,6 +934,15 @@ struct db_legacy_addr { #define DB_LEGACY_ADDR_ICID_SHIFT 5 }; +/* Structure for doorbell address, in legacy mode, without DEMS */ +struct db_legacy_wo_dems_addr { + __le32 addr; +#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF +#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2 +}; + /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; @@ -906,6 +958,31 @@ struct db_pwm_addr { #define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; +/* Parameters to RDMA firmware, passed in EDPM doorbell */ +struct db_rdma_24b_icid_dpm_params { + __le32 params; +#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF +#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16 +#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7 +#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24 +#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27 +#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 +}; + /* Parameters to RDMA firmware, passed in EDPM doorbell */ struct db_rdma_dpm_params { __le32 params; @@ -1220,21 +1297,41 @@ struct rdif_task_context { __le32 reserved2; }; +/* Searcher Table struct */ +struct src_entry_header { + __le32 flags; +#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1 +#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0 +#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1 +#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1 +#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF +#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2 + __le32 magic_number; + struct regpair next_ptr; +}; + +/* Enumeration for address type */ +enum src_header_next_ptr_type_enum { + e_physical_addr, + e_logical_addr, + MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM +}; + /* Status block structure */ -struct status_block_e4 { - __le16 
pi_array[PIS_PER_SB_E4]; +struct status_block { + __le16 pi_array[PIS_PER_SB]; __le32 sb_num; -#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF -#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 -#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F -#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 -#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF -#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 __le32 prod_index; -#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF -#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 -#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF -#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; /* Tdif context */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cd1207ad4ada34..c84e08bc680255 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -67,6 +67,7 @@ /* Ethernet vport update constants */ #define ETH_FILTER_RULES_COUNT 10 #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32) #define ETH_RSS_KEY_SIZE_REGS 10 #define ETH_RSS_ENGINE_NUM_K2 207 #define ETH_RSS_ENGINE_NUM_BB 127 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 68eda1c21cdede..7ba0abc867f10a 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -150,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx { u8 reserved2[8]; }; -struct e4_ystorm_fcoe_task_ag_ctx { +struct ystorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define 
YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -206,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx { __le32 reg2; }; -struct e4_tstorm_fcoe_task_ag_ctx { +struct tstorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 -#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; -#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 -#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 -#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 -#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; -#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 -#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 -#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 -#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 -#define 
E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 cleanup_state; __le16 last_sent_tid; __le32 rec_rr_tov_exp_timeout; @@ -352,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; }; -struct e4_mstorm_fcoe_task_ag_ctx { +struct mstorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 icid; u8 flags0; -#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 -#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define 
MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; @@ -440,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx { struct scsi_cached_sges data_desc; }; -struct e4_ustorm_fcoe_task_ag_ctx { +struct ustorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 -#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define 
E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; @@ -499,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx { }; /* FCoE task context */ -struct e4_fcoe_task_context { +struct fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; - struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; - struct 
e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct tstorm_fcoe_task_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; struct mstorm_fcoe_task_st_ctx mstorm_st_context; - struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; struct rdif_task_context rdif_context; }; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 157019f716f104..1a60285a01e322 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -714,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx { union iscsi_task_hdr pdu_hdr; }; -struct e4_ystorm_iscsi_task_ag_ctx { +struct ystorm_iscsi_task_ag_ctx { u8 reserved; u8 byte1; __le16 word0; u8 flags0; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ -#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ +#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 u8 flags1; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define 
E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 TTT; u8 byte3; @@ -764,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx { __le16 word1; }; -struct e4_mstorm_iscsi_task_ag_ctx { +struct mstorm_iscsi_task_ag_ctx { u8 cdu_validation; u8 byte1; __le16 task_cid; u8 flags0; -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 u8 flags1; -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define 
MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -814,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx { __le16 word1; }; -struct e4_ustorm_iscsi_task_ag_ctx { +struct ustorm_iscsi_task_ag_ctx { u8 reserved; u8 state; __le16 icid; u8 flags0; -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 u8 flags1; -#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 rcv_cont_len; @@ -952,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx { }; /* iscsi task context */ -struct e4_iscsi_task_context { +struct iscsi_task_context { struct ystorm_iscsi_task_st_ctx 
ystorm_st_context; - struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; struct regpair ystorm_ag_padding[2]; struct tdif_task_context tdif_context; - struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; struct regpair mstorm_ag_padding[2]; - struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; struct mstorm_iscsi_task_st_ctx mstorm_st_context; struct ustorm_iscsi_task_st_ctx ustorm_st_context; struct rdif_task_context rdif_context; @@ -1431,73 +1431,73 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_tcp_pkt_cnt; }; -struct e4_tstorm_iscsi_task_ag_ctx { +struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define 
TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h index 5a2ab06063085b..cc7c7481a0e02a 100644 --- a/include/linux/qed/nvmetcp_common.h +++ b/include/linux/qed/nvmetcp_common.h @@ -410,7 +410,7 @@ struct e5_ystorm_nvmetcp_task_ag_ctx { u8 byte2; u8 byte3; u8 byte4; - u8 e4_reserved7; + u8 reserved7; }; struct e5_mstorm_nvmetcp_task_ag_ctx { @@ -445,7 +445,7 @@ struct 
e5_mstorm_nvmetcp_task_ag_ctx { u8 byte2; u8 byte3; u8 byte4; - u8 e4_reserved7; + u8 reserved7; }; struct e5_ustorm_nvmetcp_task_ag_ctx { @@ -489,17 +489,17 @@ struct e5_ustorm_nvmetcp_task_ag_ctx { #define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 u8 flags3; u8 flags4; -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3 -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0 -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2 -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1 -#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3 #define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF #define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 u8 byte2; u8 byte3; - u8 e4_reserved8; + u8 reserved8; __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 rcv_cont_len; diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f34dbd0db7952c..a84063492c71ae 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) } /** - * @brief qed_chain_advance_page - + * qed_chain_advance_page(): Advance the next element across pages for a + * linked chain. * - * Advance the next element across pages for a linked chain + * @p_chain: P_chain. + * @p_next_elem: P_next_elem. + * @idx_to_inc: Idx_to_inc. + * @page_to_inc: page_to_inc. * - * @param p_chain - * @param p_next_elem - * @param idx_to_inc - * @param page_to_inc + * Return: Void. */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, @@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain, } while (0) /** - * @brief qed_chain_return_produced - + * qed_chain_return_produced(): A chain in which the driver "Produces" + * elements should use this API + * to indicate previous produced elements + * are now consumed. * - * A chain in which the driver "Produces" elements should use this API - * to indicate previous produced elements are now consumed. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { @@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain) } /** - * @brief qed_chain_produce - + * qed_chain_produce(): A chain in which the driver "Produces" + * elements should use this to get a pointer to + * the next element which can be "Produced". It's driver + * responsibility to validate that the chain has room for + * new element. * - * A chain in which the driver "Produces" elements should use this to get - * a pointer to the next element which can be "Produced". It's driver - * responsibility to validate that the chain has room for new element. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to next element + * Return: void*, a pointer to next element. 
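The comment rewrites in this header, and in the qed_if.h and qed_ll2_if.h hunks further down, convert the old Doxygen-style @brief/@param/@return blocks into kernel-doc, which is the format scripts/kernel-doc and the documentation build understand. The shape used throughout the patch is (function and parameter names here are placeholders):

  /**
   * my_func(): One-line summary of what the function does.
   *
   * @cdev: Meaning of the first parameter.
   * @val: Meaning of the second parameter.
   *
   * Return: What the function returns ("Void." for void functions).
   */
  int my_func(struct qed_dev *cdev, u32 val);
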
*/ static inline void *qed_chain_produce(struct qed_chain *p_chain) { @@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_capacity - - * - * Get the maximum number of BDs in chain + * qed_chain_get_capacity(): Get the maximum number of BDs in chain * - * @param p_chain - * @param num + * @p_chain: Chain. * - * @return number of unusable BDs + * Return: number of unusable BDs. */ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { @@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) } /** - * @brief qed_chain_recycle_consumed - + * qed_chain_recycle_consumed(): Returns an element which was + * previously consumed; + * Increments producers so they could + * be written to FW. * - * Returns an element which was previously consumed; - * Increments producers so they could be written to FW. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { @@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) } /** - * @brief qed_chain_consume - + * qed_chain_consume(): A Chain in which the driver utilizes data written + * by a different source (i.e., FW) should use this to + * access passed buffers. * - * A Chain in which the driver utilizes data written by a different source - * (i.e., FW) should use this to access passed buffers. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to the next buffer written + * Return: void*, a pointer to the next buffer written. */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { @@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) } /** - * @brief qed_chain_reset - Resets the chain to its start state + * qed_chain_reset(): Resets the chain to its start state. + * + * @p_chain: pointer to a previously allocated chain. * - * @param p_chain pointer to a previously allocated chain + * Return Void. */ static inline void qed_chain_reset(struct qed_chain *p_chain) { @@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_last_elem - + * qed_chain_get_last_elem(): Returns a pointer to the last element of the + * chain. * - * Returns a pointer to the last element of the chain + * @p_chain: Chain. * - * @param p_chain - * - * @return void* + * Return: void*. */ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { @@ -563,10 +565,13 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) } /** - * @brief qed_chain_set_prod - sets the prod to the given value + * qed_chain_set_prod(): sets the prod to the given value. + * + * @p_chain: Chain. + * @prod_idx: Prod Idx. + * @p_prod_elem: Prod elem. * - * @param prod_idx - * @param p_prod_elem + * Return Void. */ static inline void qed_chain_set_prod(struct qed_chain *p_chain, u32 prod_idx, void *p_prod_elem) @@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain, } /** - * @brief qed_chain_pbl_zero_mem - set chain memory to 0 + * qed_chain_pbl_zero_mem(): set chain memory to 0. + * + * @p_chain: Chain. * - * @param p_chain + * Return: Void. 
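The produce/consume pair documented above is the core of the chain API: qed_chain_produce() hands back the next element the driver may fill for hardware, while qed_chain_consume() walks elements already written by the other side (e.g. firmware). A hypothetical sketch of the produce path (the BD layout and the surrounding ring bookkeeping are illustrative, not taken from the qed/qede drivers):

  #include <linux/types.h>
  #include <linux/qed/qed_chain.h>

  /* Hypothetical buffer-descriptor layout; real drivers use HSI-defined BDs. */
  struct demo_tx_bd {
          __le64 addr;
          __le16 nbytes;
          __le16 flags;
  };

  static void demo_post_tx_bd(struct qed_chain *txq, dma_addr_t map, u16 len)
  {
          struct demo_tx_bd *bd;

          /* Per the kernel-doc above, the caller must already have checked
           * that the chain has room for one more element. */
          bd = qed_chain_produce(txq);
          bd->addr = cpu_to_le64(map);
          bd->nbytes = cpu_to_le16(len);
          bd->flags = cpu_to_le16(0);
  }
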
*/ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 812a4d75116338..e1bf3219b4e6aa 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -145,12 +145,6 @@ struct qed_filter_mcast_params { unsigned char mac[64][ETH_ALEN]; }; -union qed_filter_type_params { - enum qed_filter_rx_mode_type accept_flags; - struct qed_filter_ucast_params ucast; - struct qed_filter_mcast_params mcast; -}; - enum qed_filter_type { QED_FILTER_TYPE_UCAST, QED_FILTER_TYPE_MCAST, @@ -158,11 +152,6 @@ enum qed_filter_type { QED_MAX_FILTER_TYPES, }; -struct qed_filter_params { - enum qed_filter_type type; - union qed_filter_type_params filter; -}; - struct qed_tunn_params { u16 vxlan_port; u8 update_vxlan_port; @@ -314,8 +303,14 @@ struct qed_eth_ops { int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); - int (*filter_config)(struct qed_dev *cdev, - struct qed_filter_params *params); + int (*filter_config_rx_mode)(struct qed_dev *cdev, + enum qed_filter_rx_mode_type type); + + int (*filter_config_ucast)(struct qed_dev *cdev, + struct qed_filter_ucast_params *params); + + int (*filter_config_mcast)(struct qed_dev *cdev, + struct qed_filter_mcast_params *params); int (*fastpath_stop)(struct qed_dev *cdev); @@ -336,7 +331,7 @@ struct qed_eth_ops { int (*configure_arfs_searcher)(struct qed_dev *cdev, enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); - int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); + int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac); }; const struct qed_eth_ops *qed_get_eth_ops(void); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 850b989916703b..0dae7fcc5ef21e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -24,6 +24,9 @@ #include #include +#define QED_TX_SWS_TIMER_DFLT 500 +#define QED_TWO_MSL_TIMER_DFLT 4000 + enum dcbx_protocol_type { DCBX_PROTOCOL_ISCSI, DCBX_PROTOCOL_FCOE, @@ -588,7 +591,7 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; u32 sb_ack; /* Last given ack */ u16 igu_sb_id; @@ -613,7 +616,6 @@ enum qed_hw_err_type { enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, - QED_DEV_TYPE_E5, }; struct qed_dev_info { @@ -819,47 +821,47 @@ struct qed_common_cb_ops { struct qed_selftest_ops { /** - * @brief selftest_interrupt - Perform interrupt test + * selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_interrupt)(struct qed_dev *cdev); /** - * @brief selftest_memory - Perform memory test + * selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_memory)(struct qed_dev *cdev); /** - * @brief selftest_register - Perform register test + * selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_register)(struct qed_dev *cdev); /** - * @brief selftest_clock - Perform clock test + * selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. 
+ * Return: 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); /** - * @brief selftest_nvram - Perform nvram test + * selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_nvram) (struct qed_dev *cdev); }; @@ -927,47 +929,53 @@ struct qed_common_ops { enum qed_hw_err_type err_type); /** - * @brief can_link_change - can the instance change the link or not + * can_link_change(): can the instance change the link or not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return true if link-change is allowed, false otherwise. + * Return: true if link-change is allowed, false otherwise. */ bool (*can_link_change)(struct qed_dev *cdev); /** - * @brief set_link - set links according to params + * set_link(): set links according to params. * - * @param cdev - * @param params - values used to override the default link configuration + * @cdev: Qed dev pointer. + * @params: values used to override the default link configuration. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_link)(struct qed_dev *cdev, struct qed_link_params *params); /** - * @brief get_link - returns the current link state. + * get_link(): returns the current link state. + * + * @cdev: Qed dev pointer. + * @if_link: structure to be filled with current link configuration. * - * @param cdev - * @param if_link - structure to be filled with current link configuration. + * Return: Void. */ void (*get_link)(struct qed_dev *cdev, struct qed_link_output *if_link); /** - * @brief - drains chip in case Tx completions fail to arrive due to pause. + * drain(): drains chip in case Tx completions fail to arrive due to pause. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Int. */ int (*drain)(struct qed_dev *cdev); /** - * @brief update_msglvl - update module debug level + * update_msglvl(): update module debug level. + * + * @cdev: Qed dev pointer. + * @dp_module: Debug module. + * @dp_level: Debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * Return: Void. */ void (*update_msglvl)(struct qed_dev *cdev, u32 dp_module, @@ -981,70 +989,73 @@ struct qed_common_ops { struct qed_chain *p_chain); /** - * @brief nvm_flash - Flash nvm data. + * nvm_flash(): Flash nvm data. * - * @param cdev - * @param name - file containing the data + * @cdev: Qed dev pointer. + * @name: file containing the data. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*nvm_flash)(struct qed_dev *cdev, const char *name); /** - * @brief nvm_get_image - reads an entire image from nvram + * nvm_get_image(): reads an entire image from nvram. * - * @param cdev - * @param type - type of the request nvram image - * @param buf - preallocated buffer to fill with the image - * @param len - length of the allocated buffer + * @cdev: Qed dev pointer. + * @type: type of the request nvram image. + * @buf: preallocated buffer to fill with the image. + * @len: length of the allocated buffer. * - * @return 0 on success, error otherwise + * Return: 0 on success, error otherwise. */ int (*nvm_get_image)(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len); /** - * @brief set_coalesce - Configure Rx coalesce value in usec + * set_coalesce(): Configure Rx coalesce value in usec. 
* - * @param cdev - * @param rx_coal - Rx coalesce value in usec - * @param tx_coal - Tx coalesce value in usec - * @param qid - Queue index - * @param sb_id - Status Block Id + * @cdev: Qed dev pointer. + * @rx_coal: Rx coalesce value in usec. + * @tx_coal: Tx coalesce value in usec. + * @handle: Handle. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle); /** - * @brief set_led - Configure LED mode + * set_led() - Configure LED mode. * - * @param cdev - * @param mode - LED mode + * @cdev: Qed dev pointer. + * @mode: LED mode. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); /** - * @brief attn_clr_enable - Prevent attentions from being reasserted + * attn_clr_enable(): Prevent attentions from being reasserted. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable. + * + * Return: Void. */ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Dddress of where db_data is stored. + * @db_width: Doorbell is 32b or 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_is_32b - doorbell is 32b pr 64b - * @param db_is_user - doorbell recovery addresses are user or kernel space + * Return: Int. */ int (*db_recovery_add)(struct qed_dev *cdev, void __iomem *db_addr, @@ -1053,114 +1064,130 @@ struct qed_common_ops { enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * db_recovery_del(): remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the - * entry to delete. + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address where db_data is stored. Serves as key for the + * entry to delete. + * + * Return: Int. */ int (*db_recovery_del)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); /** - * @brief recovery_process - Trigger a recovery process + * recovery_process(): Trigger a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_process)(struct qed_dev *cdev); /** - * @brief recovery_prolog - Execute the prolog operations of a recovery process + * recovery_prolog(): Execute the prolog operations of a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_prolog)(struct qed_dev *cdev); /** - * @brief update_drv_state - API to inform the change in the driver state. + * update_drv_state(): API to inform the change in the driver state. * - * @param cdev - * @param active + * @cdev: Qed dev pointer. + * @active: Active * + * Return: Int. 
*/ int (*update_drv_state)(struct qed_dev *cdev, bool active); /** - * @brief update_mac - API to inform the change in the mac address + * update_mac(): API to inform the change in the mac address. * - * @param cdev - * @param mac + * @cdev: Qed dev pointer. + * @mac: MAC. * + * Return: Int. */ - int (*update_mac)(struct qed_dev *cdev, u8 *mac); + int (*update_mac)(struct qed_dev *cdev, const u8 *mac); /** - * @brief update_mtu - API to inform the change in the mtu + * update_mtu(): API to inform the change in the mtu. * - * @param cdev - * @param mtu + * @cdev: Qed dev pointer. + * @mtu: MTU. * + * Return: Int. */ int (*update_mtu)(struct qed_dev *cdev, u16 mtu); /** - * @brief update_wol - update of changes in the WoL configuration + * update_wol(): Update of changes in the WoL configuration. * - * @param cdev - * @param enabled - true iff WoL should be enabled. + * @cdev: Qed dev pointer. + * @enabled: true iff WoL should be enabled. + * + * Return: Int. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); /** - * @brief read_module_eeprom + * read_module_eeprom(): Read EEPROM. + * + * @cdev: Qed dev pointer. + * @buf: buffer. + * @dev_addr: PHY device memory region. + * @offset: offset into eeprom contents to be read. + * @len: buffer length, i.e., max bytes to be read. * - * @param cdev - * @param buf - buffer - * @param dev_addr - PHY device memory region - * @param offset - offset into eeprom contents to be read - * @param len - buffer length, i.e., max bytes to be read + * Return: Int. */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); /** - * @brief get_affin_hwfn_idx + * get_affin_hwfn_idx(): Get affine HW function. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: u8. */ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); /** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param buf - buffer - * @param cmd - NVM CFG command id - * @param entity_id - Entity id + * read_nvm_cfg(): Read NVM config attribute value. + * + * @cdev: Qed dev pointer. + * @buf: Buffer. + * @cmd: NVM CFG command id. + * @entity_id: Entity id. * + * Return: Int. */ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, u32 entity_id); /** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param cmd - NVM CFG command id + * read_nvm_cfg_len(): Read NVM config attribute value. * - * @return config id length, 0 on error. + * @cdev: Qed dev pointer. + * @cmd: NVM CFG command id. + * + * Return: config id length, 0 on error. */ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); /** - * @brief set_grc_config - Configure value for grc config id. - * @param cdev - * @param cfg_id - grc config id - * @param val - grc config value + * set_grc_config(): Configure value for grc config id. + * + * @cdev: Qed dev pointer. + * @cfg_id: grc config id + * @val: grc config value * + * Return: Int. */ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); @@ -1386,7 +1413,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & - STATUS_BLOCK_E4_PROD_INDEX_MASK; + STATUS_BLOCK_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; @@ -1397,18 +1424,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /** + * qed_sb_ack(): This function creates an update command for interrupts + * that is written to the IGU. 
* - * @brief This function creates an update command for interrupts that is - * written to the IGU. - * - * @param sb_info - This is the structure allocated and - * initialized per status block. Assumption is - * that it was initialized using qed_sb_init - * @param int_cmd - Enable/Disable/Nop - * @param upd_flg - whether igu consumer should be - * updated. + * @sb_info: This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using qed_sb_init + * @int_cmd: Enable/Disable/Nop + * @upd_flg: Whether igu consumer should be updated. * - * @return inline void + * Return: inline void. */ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 04180d9af560e4..494cdc3cd840b2 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struck that would be filled * we stats * @return 0 on success, error otherwise. - * @change_mac Change MAC of interface + * @change_mac: Change MAC of interface * @param cdev * @param handle - the connection handle. * @param mac - new MAC to configure. diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ff808d2488835e..5b67cd03276eb4 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags { struct qed_ll2_ops { /** - * @brief start - initializes ll2 + * start(): Initializes ll2. * - * @param cdev - * @param params - protocol driver configuration for the ll2. + * @cdev: Qed dev pointer. + * @params: Protocol driver configuration for the ll2. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); /** - * @brief stop - stops the ll2 + * stop(): Stops the ll2 * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*stop)(struct qed_dev *cdev); /** - * @brief start_xmit - transmits an skb over the ll2 interface + * start_xmit(): Transmits an skb over the ll2 interface * - * @param cdev - * @param skb - * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. + * @cdev: Qed dev pointer. + * @skb: SKB. + * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, unsigned long xmit_flags); /** - * @brief register_cb_ops - protocol driver register the callback for Rx/Tx + * register_cb_ops(): Protocol driver register the callback for Rx/Tx * packets. Should be called before `start'. * - * @param cdev - * @param cookie - to be passed to the callback functions. - * @param ops - the callback functions to register for Rx / Tx. + * @cdev: Qed dev pointer. + * @cookie: to be passed to the callback functions. + * @ops: the callback functions to register for Rx / Tx. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ void (*register_cb_ops)(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie); /** - * @brief get LL2 related statistics + * get_stats(): Get LL2 related statistics. 
* - * @param cdev - * @param stats - pointer to struct that would be filled with stats + * @cdev: Qed dev pointer. + * @stats: Pointer to struct that would be filled with stats. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats); }; diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h index 14671bc19ed11f..1d51df347560d9 100644 --- a/include/linux/qed/qed_nvmetcp_if.h +++ b/include/linux/qed/qed_nvmetcp_if.h @@ -171,6 +171,23 @@ struct nvmetcp_task_params { * @param dest_port * @clear_all_filters: Clear all filters. * @param cdev + * @init_read_io: Init read IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_write_io: Init write IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_icreq_exchange: Exchange ICReq. + * @task_params + * @init_conn_req_pdu_hdr + * @tx_sgl_task_params + * @rx_sgl_task_params + * @init_task_cleanup: Init task cleanup. + * @task_params */ struct qed_nvmetcp_ops { const struct qed_common_ops *common; diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index aeb242cefebfa8..3b76c07fbcf8e1 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -662,7 +662,8 @@ struct qed_rdma_ops { u8 connection_handle, struct qed_ll2_stats *p_stats); int (*ll2_set_mac_filter)(struct qed_dev *cdev, - u8 *old_mac_address, u8 *new_mac_address); + u8 *old_mac_address, + const u8 *new_mac_address); int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset); diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index bab078b25834ca..6dfed163ab6c99 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -27,6 +27,7 @@ #define RDMA_MAX_PDS (64 * 1024) #define RDMA_MAX_XRC_SRQS (1024) #define RDMA_MAX_SRQS (32 * 1024) +#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128) #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 841e2f0f5240ba..0bd6520329f6fd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -652,6 +652,7 @@ typedef unsigned char *sk_buff_data_t; * @tc_at_ingress: used within tc_classify to distinguish in/egress * @redirected: packet was redirected by packet classifier * @from_ingress: packet was redirected from the ingress path + * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -868,6 +869,9 @@ struct sk_buff { #ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; #endif +#ifdef CONFIG_NETFILTER_SKIP_EGRESS + __u8 nf_skip_egress:1; +#endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; #endif @@ -4239,6 +4243,9 @@ enum skb_ext_id { #endif #if IS_ENABLED(CONFIG_MPTCP) SKB_EXT_MPTCP, +#endif +#if IS_ENABLED(CONFIG_MCTP_FLOWS) + SKB_EXT_MCTP, #endif SKB_EXT_NUM, /* must be last */ }; diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 1ce9a9eb223b66..b4256847c70791 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -509,8 +509,22 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) #if IS_ENABLED(CONFIG_NET_SOCK_MSG) -/* We only have one bit so far. 
*/ -#define BPF_F_PTR_MASK ~(BPF_F_INGRESS) +#define BPF_F_STRPARSER (1UL << 1) + +/* We only have two bits so far. */ +#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER) + +static inline bool skb_bpf_strparser(const struct sk_buff *skb) +{ + unsigned long sk_redir = skb->_sk_redir; + + return sk_redir & BPF_F_STRPARSER; +} + +static inline void skb_bpf_set_strparser(struct sk_buff *skb) +{ + skb->_sk_redir |= BPF_F_STRPARSER; +} static inline bool skb_bpf_ingress(const struct sk_buff *skb) { diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h index fa1d6af0164ee9..d683251a0b409d 100644 --- a/include/linux/soc/marvell/octeontx2/asm.h +++ b/include/linux/soc/marvell/octeontx2/asm.h @@ -5,6 +5,7 @@ #ifndef __SOC_OTX2_ASM_H #define __SOC_OTX2_ASM_H +#include #if defined(CONFIG_ARM64) /* * otx2_lmt_flush is used for LMT store operation. @@ -34,9 +35,23 @@ : [rf] "+r"(val) \ : [rs] "r"(addr)); \ }) + +static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr) +{ + u64 result; + + asm volatile (".cpu generic+lse\n" + "ldadda %x[i], %x[r], [%[b]]" + : [r] "=r" (result), "+m" (*ptr) + : [i] "r" (incr), [b] "r" (ptr) + : "memory"); + return result; +} + #else #define otx2_lmt_flush(ioaddr) ({ 0; }) #define cn10k_lmt_flush(val, addr) ({ addr = val; }) +#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; }) #endif #endif /* __SOC_OTX2_ASM_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 041d6032a34899..8ef26d89ef495f 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -364,6 +364,8 @@ struct ucred { #define SOL_KCM 281 #define SOL_TLS 282 #define SOL_XDP 283 +#define SOL_MPTCP 284 +#define SOL_MCTP 285 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index e81856c0ba134f..e8ec116c916bf7 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -83,6 +83,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p) return local64_read(&p->v); } +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + local64_set(&p->v, val); +} + static inline void u64_stats_add(u64_stats_t *p, unsigned long val) { local64_add(val, &p->v); @@ -104,6 +109,11 @@ static inline u64 u64_stats_read(const u64_stats_t *p) return p->v; } +static inline void u64_stats_set(u64_stats_t *p, u64 val) +{ + p->v = val; +} + static inline void u64_stats_add(u64_stats_t *p, unsigned long val) { p->v += val; diff --git a/include/net/act_api.h b/include/net/act_api.h index f19f7f4a463cd9..b5b624c7e48883 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -30,13 +30,13 @@ struct tc_action { atomic_t tcfa_bindcnt; int tcfa_action; struct tcf_t tcfa_tm; - struct gnet_stats_basic_packed tcfa_bstats; - struct gnet_stats_basic_packed tcfa_bstats_hw; + struct gnet_stats_basic_sync tcfa_bstats; + struct gnet_stats_basic_sync tcfa_bstats_hw; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator __rcu *tcfa_rate_est; spinlock_t tcfa_lock; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; - struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; + struct gnet_stats_basic_sync __percpu *cpu_bstats; + struct gnet_stats_basic_sync __percpu *cpu_bstats_hw; struct gnet_stats_queue __percpu *cpu_qstats; struct tc_cookie __rcu *act_cookie; struct tcf_chain __rcu *goto_chain; @@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a, struct sk_buff *skb) { if (likely(a->cpu_bstats)) { - 
bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb); + bstats_update(this_cpu_ptr(a->cpu_bstats), skb); return; } spin_lock(&a->tcfa_lock); diff --git a/include/net/amt.h b/include/net/amt.h new file mode 100644 index 00000000000000..7a4db8b903eed5 --- /dev/null +++ b/include/net/amt.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2021 Taehee Yoo + */ +#ifndef _NET_AMT_H_ +#define _NET_AMT_H_ + +#include +#include + +enum amt_msg_type { + AMT_MSG_DISCOVERY = 1, + AMT_MSG_ADVERTISEMENT, + AMT_MSG_REQUEST, + AMT_MSG_MEMBERSHIP_QUERY, + AMT_MSG_MEMBERSHIP_UPDATE, + AMT_MSG_MULTICAST_DATA, + AMT_MSG_TEARDOWM, + __AMT_MSG_MAX, +}; + +#define AMT_MSG_MAX (__AMT_MSG_MAX - 1) + +enum amt_ops { + /* A*B */ + AMT_OPS_INT, + /* A+B */ + AMT_OPS_UNI, + /* A-B */ + AMT_OPS_SUB, + /* B-A */ + AMT_OPS_SUB_REV, + __AMT_OPS_MAX, +}; + +#define AMT_OPS_MAX (__AMT_OPS_MAX - 1) + +enum amt_filter { + AMT_FILTER_FWD, + AMT_FILTER_D_FWD, + AMT_FILTER_FWD_NEW, + AMT_FILTER_D_FWD_NEW, + AMT_FILTER_ALL, + AMT_FILTER_NONE_NEW, + AMT_FILTER_BOTH, + AMT_FILTER_BOTH_NEW, + __AMT_FILTER_MAX, +}; + +#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1) + +enum amt_act { + AMT_ACT_GMI, + AMT_ACT_GMI_ZERO, + AMT_ACT_GT, + AMT_ACT_STATUS_FWD_NEW, + AMT_ACT_STATUS_D_FWD_NEW, + AMT_ACT_STATUS_NONE_NEW, + __AMT_ACT_MAX, +}; + +#define AMT_ACT_MAX (__AMT_ACT_MAX - 1) + +enum amt_status { + AMT_STATUS_INIT, + AMT_STATUS_SENT_DISCOVERY, + AMT_STATUS_RECEIVED_DISCOVERY, + AMT_STATUS_SENT_ADVERTISEMENT, + AMT_STATUS_RECEIVED_ADVERTISEMENT, + AMT_STATUS_SENT_REQUEST, + AMT_STATUS_RECEIVED_REQUEST, + AMT_STATUS_SENT_QUERY, + AMT_STATUS_RECEIVED_QUERY, + AMT_STATUS_SENT_UPDATE, + AMT_STATUS_RECEIVED_UPDATE, + __AMT_STATUS_MAX, +}; + +#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1) + +struct amt_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 type:4, + version:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 version:4, + type:4; +#else +#error "Please fix " +#endif +} __packed; + +struct amt_header_discovery { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 type:4, + version:4, + reserved:24; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 version:4, + type:4, + reserved:24; +#else +#error "Please fix " +#endif + __be32 nonce; +} __packed; + +struct amt_header_advertisement { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 type:4, + version:4, + reserved:24; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 version:4, + type:4, + reserved:24; +#else +#error "Please fix " +#endif + __be32 nonce; + __be32 ip4; +} __packed; + +struct amt_header_request { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 type:4, + version:4, + reserved1:7, + p:1, + reserved2:16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 version:4, + type:4, + p:1, + reserved1:7, + reserved2:16; +#else +#error "Please fix " +#endif + __be32 nonce; +} __packed; + +struct amt_header_membership_query { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u64 type:4, + version:4, + reserved:6, + l:1, + g:1, + response_mac:48; +#elif defined(__BIG_ENDIAN_BITFIELD) + u64 version:4, + type:4, + g:1, + l:1, + reserved:6, + response_mac:48; +#else +#error "Please fix " +#endif + __be32 nonce; +} __packed; + +struct amt_header_membership_update { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u64 type:4, + version:4, + reserved:8, + response_mac:48; +#elif defined(__BIG_ENDIAN_BITFIELD) + u64 version:4, + type:4, + reserved:8, + response_mac:48; +#else +#error "Please fix " +#endif + __be32 nonce; +} __packed; + +struct amt_header_mcast_data { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 
type:4, + version:4, + reserved:8; +#elif defined(__BIG_ENDIAN_BITFIELD) + u16 version:4, + type:4, + reserved:8; +#else +#error "Please fix " +#endif +} __packed; + +struct amt_headers { + union { + struct amt_header_discovery discovery; + struct amt_header_advertisement advertisement; + struct amt_header_request request; + struct amt_header_membership_query query; + struct amt_header_membership_update update; + struct amt_header_mcast_data data; + }; +} __packed; + +struct amt_gw_headers { + union { + struct amt_header_discovery discovery; + struct amt_header_request request; + struct amt_header_membership_update update; + }; +} __packed; + +struct amt_relay_headers { + union { + struct amt_header_advertisement advertisement; + struct amt_header_membership_query query; + struct amt_header_mcast_data data; + }; +} __packed; + +struct amt_skb_cb { + struct amt_tunnel_list *tunnel; +}; + +struct amt_tunnel_list { + struct list_head list; + /* Protect All resources under an amt_tunne_list */ + spinlock_t lock; + struct amt_dev *amt; + u32 nr_groups; + u32 nr_sources; + enum amt_status status; + struct delayed_work gc_wq; + __be16 source_port; + __be32 ip4; + __be32 nonce; + siphash_key_t key; + u64 mac:48, + reserved:16; + struct rcu_head rcu; + struct hlist_head groups[]; +}; + +union amt_addr { + __be32 ip4; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr ip6; +#endif +}; + +/* RFC 3810 + * + * When the router is in EXCLUDE mode, the router state is represented + * by the notation EXCLUDE (X,Y), where X is called the "Requested List" + * and Y is called the "Exclude List". All sources, except those from + * the Exclude List, will be forwarded by the router + */ +enum amt_source_status { + AMT_SOURCE_STATUS_NONE, + /* Node of Requested List */ + AMT_SOURCE_STATUS_FWD, + /* Node of Exclude List */ + AMT_SOURCE_STATUS_D_FWD, +}; + +/* protected by gnode->lock */ +struct amt_source_node { + struct hlist_node node; + struct amt_group_node *gnode; + struct delayed_work source_timer; + union amt_addr source_addr; + enum amt_source_status status; +#define AMT_SOURCE_OLD 0 +#define AMT_SOURCE_NEW 1 + u8 flags; + struct rcu_head rcu; +}; + +/* Protected by amt_tunnel_list->lock */ +struct amt_group_node { + struct amt_dev *amt; + union amt_addr group_addr; + union amt_addr host_addr; + bool v6; + u8 filter_mode; + u32 nr_sources; + struct amt_tunnel_list *tunnel_list; + struct hlist_node node; + struct delayed_work group_timer; + struct rcu_head rcu; + struct hlist_head sources[]; +}; + +struct amt_dev { + struct net_device *dev; + struct net_device *stream_dev; + struct net *net; + /* Global lock for amt device */ + spinlock_t lock; + /* Used only in relay mode */ + struct list_head tunnel_list; + struct gro_cells gro_cells; + + /* Protected by RTNL */ + struct delayed_work discovery_wq; + /* Protected by RTNL */ + struct delayed_work req_wq; + /* Protected by RTNL */ + struct delayed_work secret_wq; + /* AMT status */ + enum amt_status status; + /* Generated key */ + siphash_key_t key; + struct socket __rcu *sock; + u32 max_groups; + u32 max_sources; + u32 hash_buckets; + u32 hash_seed; + /* Default 128 */ + u32 max_tunnels; + /* Default 128 */ + u32 nr_tunnels; + /* Gateway or Relay mode */ + u32 mode; + /* Default 2268 */ + __be16 relay_port; + /* Default 2268 */ + __be16 gw_port; + /* Outer local ip */ + __be32 local_ip; + /* Outer remote ip */ + __be32 remote_ip; + /* Outer discovery ip */ + __be32 discovery_ip; + /* Only used in gateway mode */ + __be32 nonce; + /* Gateway sent request and 
received query */ + bool ready4; + bool ready6; + u8 req_cnt; + u8 qi; + u64 qrv; + u64 qri; + /* Used only in gateway mode */ + u64 mac:48, + reserved:16; +}; + +#define AMT_TOS 0xc0 +#define AMT_IPHDR_OPTS 4 +#define AMT_IP6HDR_OPTS 8 +#define AMT_GC_INTERVAL (30 * 1000) +#define AMT_MAX_GROUP 32 +#define AMT_MAX_SOURCE 128 +#define AMT_HSIZE_SHIFT 8 +#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT) + +#define AMT_DISCOVERY_TIMEOUT 5000 +#define AMT_INIT_REQ_TIMEOUT 1 +#define AMT_INIT_QUERY_INTERVAL 125 +#define AMT_MAX_REQ_TIMEOUT 120 +#define AMT_MAX_REQ_COUNT 3 +#define AMT_SECRET_TIMEOUT 60000 +#define IANA_AMT_UDP_PORT 2268 +#define AMT_MAX_TUNNELS 128 +#define AMT_MAX_REQS 128 +#define AMT_GW_HLEN (sizeof(struct iphdr) + \ + sizeof(struct udphdr) + \ + sizeof(struct amt_gw_headers)) +#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \ + sizeof(struct udphdr) + \ + sizeof(struct amt_relay_headers)) + +static inline bool netif_is_amt(const struct net_device *dev) +{ + return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt"); +} + +static inline u64 amt_gmi(const struct amt_dev *amt) +{ + return ((amt->qrv * amt->qi) + amt->qri) * 1000; +} + +#endif /* _NET_AMT_H_ */ diff --git a/include/net/ax25.h b/include/net/ax25.h index 8b7eb46ad72d88..03d409de61ad0e 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -304,7 +304,7 @@ extern spinlock_t ax25_list_lock; void ax25_cb_add(ax25_cb *); struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int); struct sock *ax25_get_socket(ax25_address *, ax25_address *, int); -ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, +ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *, struct net_device *); void ax25_send_to_raw(ax25_address *, struct sk_buff *, int); void ax25_destroy_socket(ax25_cb *); @@ -384,10 +384,11 @@ struct ax25_linkfail { void ax25_linkfail_register(struct ax25_linkfail *lf); void ax25_linkfail_release(struct ax25_linkfail *lf); -int __must_check ax25_listen_register(ax25_address *, struct net_device *); -void ax25_listen_release(ax25_address *, struct net_device *); +int __must_check ax25_listen_register(const ax25_address *, + struct net_device *); +void ax25_listen_release(const ax25_address *, struct net_device *); int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); -int ax25_listen_mine(ax25_address *, struct net_device *); +int ax25_listen_mine(const ax25_address *, struct net_device *); void ax25_link_failed(ax25_cb *, int); int ax25_protocol_is_registered(unsigned int); @@ -401,8 +402,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ -ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, - ax25_digi *, struct net_device *); +ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *, + ax25_address *, ax25_digi *, struct net_device *); void ax25_output(ax25_cb *, int, struct sk_buff *); void ax25_kick(ax25_cb *); void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 9125effbf4483d..3271870fd85e37 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -153,6 +153,30 @@ struct bt_voice { #define BT_SCM_PKT_STATUS 0x03 +#define BT_CODEC 19 + +struct bt_codec_caps { + __u8 len; + __u8 data[]; +} __packed; + +struct bt_codec { + __u8 id; + __u16 cid; + __u16 vid; + __u8 data_path; + __u8 num_caps; +} __packed; + 
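One detail worth calling out in the new include/net/amt.h above: amt_gmi() derives the Group Membership Interval, in milliseconds, as GMI = (QRV * QI + QRI) * 1000, which reads naturally if qrv is the robustness variable and qi/qri are kept in whole seconds (the * 1000 conversion suggests exactly that). Assuming the usual IGMPv3/MLDv2 defaults of QRV = 2, QI = 125 s and QRI = 10 s (defaults assumed here, not spelled out in this patch), the helper yields (2 * 125 + 10) * 1000 = 260000 ms, i.e. a 260 second membership timeout.
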
+struct bt_codecs { + __u8 num_codecs; + struct bt_codec codecs[]; +} __packed; + +#define BT_CODEC_CVSD 0x02 +#define BT_CODEC_TRANSPARENT 0x03 +#define BT_CODEC_MSBC 0x05 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) @@ -420,6 +444,72 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, return NULL; } +/* Shall not be called with lock_sock held */ +static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk, + struct msghdr *msg, + size_t len, size_t mtu, + size_t headroom, size_t tailroom) +{ + struct sk_buff *skb; + size_t size = min_t(size_t, len, mtu); + int err; + + skb = bt_skb_send_alloc(sk, size + headroom + tailroom, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) + return ERR_PTR(err); + + skb_reserve(skb, headroom); + skb_tailroom_reserve(skb, mtu, tailroom); + + if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) { + kfree_skb(skb); + return ERR_PTR(-EFAULT); + } + + skb->priority = sk->sk_priority; + + return skb; +} + +/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments + * accourding to the MTU. + */ +static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, + struct msghdr *msg, + size_t len, size_t mtu, + size_t headroom, size_t tailroom) +{ + struct sk_buff *skb, **frag; + + skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); + if (IS_ERR_OR_NULL(skb)) + return skb; + + len -= skb->len; + if (!len) + return skb; + + /* Add remaining data over MTU as continuation fragments */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); + if (IS_ERR(tmp)) { + kfree_skb(skb); + return tmp; + } + + len -= tmp->len; + + *frag = tmp; + frag = &(*frag)->next; + } + + return skb; +} + int bt_to_errno(u16 code); void hci_sock_set_flag(struct sock *sk, int nr); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index b80415011dcd5b..63065bc01b766c 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -330,6 +330,8 @@ enum { HCI_ENABLE_LL_PRIVACY, HCI_CMD_PENDING, HCI_FORCE_NO_MITM, + HCI_QUALITY_REPORT, + HCI_OFFLOAD_CODECS_ENABLED, __HCI_NUM_FLAGS, }; @@ -871,6 +873,40 @@ struct hci_cp_logical_link_cancel { __u8 flow_spec_id; } __packed; +#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d +struct hci_coding_format { + __u8 id; + __le16 cid; + __le16 vid; +} __packed; + +struct hci_cp_enhanced_setup_sync_conn { + __le16 handle; + __le32 tx_bandwidth; + __le32 rx_bandwidth; + struct hci_coding_format tx_coding_format; + struct hci_coding_format rx_coding_format; + __le16 tx_codec_frame_size; + __le16 rx_codec_frame_size; + __le32 in_bandwidth; + __le32 out_bandwidth; + struct hci_coding_format in_coding_format; + struct hci_coding_format out_coding_format; + __le16 in_coded_data_size; + __le16 out_coded_data_size; + __u8 in_pcm_data_format; + __u8 out_pcm_data_format; + __u8 in_pcm_sample_payload_msb_pos; + __u8 out_pcm_sample_payload_msb_pos; + __u8 in_data_path; + __u8 out_data_path; + __u8 in_transport_unit_size; + __u8 out_transport_unit_size; + __le16 max_latency; + __le16 pkt_type; + __u8 retrans_effort; +} __packed; + struct hci_rp_logical_link_cancel { __u8 status; __u8 phy_handle; @@ -1250,6 +1286,14 @@ struct hci_rp_read_local_oob_ext_data { __u8 rand256[16]; } __packed; +#define HCI_CONFIGURE_DATA_PATH 0x0c83 +struct hci_op_configure_data_path { + __u8 direction; + __u8 data_path_id; + __u8 vnd_len; + __u8 vnd_data[]; +} __packed; + #define 
HCI_OP_READ_LOCAL_VERSION 0x1001 struct hci_rp_read_local_version { __u8 status; @@ -1307,6 +1351,28 @@ struct hci_rp_read_data_block_size { } __packed; #define HCI_OP_READ_LOCAL_CODECS 0x100b +struct hci_std_codecs { + __u8 num; + __u8 codec[]; +} __packed; + +struct hci_vnd_codec { + /* company id */ + __le16 cid; + /* vendor codec id */ + __le16 vid; +} __packed; + +struct hci_vnd_codecs { + __u8 num; + struct hci_vnd_codec codec[]; +} __packed; + +struct hci_rp_read_local_supported_codecs { + __u8 status; + struct hci_std_codecs std_codecs; + struct hci_vnd_codecs vnd_codecs; +} __packed; #define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c struct hci_rp_read_local_pairing_opts { @@ -1315,6 +1381,54 @@ struct hci_rp_read_local_pairing_opts { __u8 max_key_size; } __packed; +#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d +struct hci_std_codec_v2 { + __u8 id; + __u8 transport; +} __packed; + +struct hci_std_codecs_v2 { + __u8 num; + struct hci_std_codec_v2 codec[]; +} __packed; + +struct hci_vnd_codec_v2 { + __u8 id; + __le16 cid; + __le16 vid; + __u8 transport; +} __packed; + +struct hci_vnd_codecs_v2 { + __u8 num; + struct hci_vnd_codec_v2 codec[]; +} __packed; + +struct hci_rp_read_local_supported_codecs_v2 { + __u8 status; + struct hci_std_codecs_v2 std_codecs; + struct hci_vnd_codecs_v2 vendor_codecs; +} __packed; + +#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e +struct hci_op_read_local_codec_caps { + __u8 id; + __le16 cid; + __le16 vid; + __u8 transport; + __u8 direction; +} __packed; + +struct hci_codec_caps { + __u8 len; + __u8 data[]; +} __packed; + +struct hci_rp_read_local_codec_caps { + __u8 status; + __u8 num_caps; +} __packed; + #define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b struct hci_rp_read_page_scan_activity { __u8 status; @@ -2551,6 +2665,9 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) #define hci_iso_data_len(h) ((h) & 0x3fff) #define hci_iso_data_flags(h) ((h) >> 14) +/* codec transport types */ +#define HCI_TRANSPORT_SCO_ESCO 0x01 + /* le24 support */ static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3]) { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index a7360c8c72f82a..dd8840e70e250e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -131,6 +131,17 @@ struct bdaddr_list { u8 bdaddr_type; }; +struct codec_list { + struct list_head list; + u8 id; + __u16 cid; + __u16 vid; + u8 transport; + u8 num_caps; + u32 len; + struct hci_codec_caps caps[]; +}; + struct bdaddr_list_with_irk { struct list_head list; bdaddr_t bdaddr; @@ -536,6 +547,7 @@ struct hci_dev { struct list_head pend_le_conns; struct list_head pend_le_reports; struct list_head blocked_keys; + struct list_head local_codecs; struct hci_dev_stats stat; @@ -605,7 +617,12 @@ struct hci_dev { int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); void (*cmd_timeout)(struct hci_dev *hdev); - bool (*prevent_wake)(struct hci_dev *hdev); + bool (*wakeup)(struct hci_dev *hdev); + int (*set_quality_report)(struct hci_dev *hdev, bool enable); + int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); + int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, + struct bt_codec *codec, __u8 *vnd_len, + __u8 **vnd_data); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) @@ -699,6 +716,7 @@ struct hci_conn { struct amp_mgr *amp_mgr; struct hci_conn *link; + struct bt_codec codec; void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); void 
(*security_cfm_cb) (struct hci_conn *conn, u8 status); @@ -760,6 +778,7 @@ extern struct mutex hci_cb_list_lock; hci_dev_clear_flag(hdev, HCI_LE_ADV); \ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ } while (0) /* ----- HCI interface to upper protocols ----- */ @@ -1099,13 +1118,14 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u16 conn_timeout, enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, - u8 dst_type, u8 sec_level, u16 conn_timeout, - u8 role, bdaddr_t *direct_rpa); + u8 dst_type, bool dst_resolved, u8 sec_level, + u16 conn_timeout, u8 role, + bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, enum conn_reasons conn_reason); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, - __u16 setting); + __u16 setting, struct bt_codec *codec); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, @@ -1360,6 +1380,8 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 scan_rsp_len, u8 *scan_rsp_data); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); +u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); +bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); @@ -1442,6 +1464,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); /* Use LL Privacy based address resolution if supported */ #define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) +/* Use enhanced synchronous connection if command is supported */ +#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08) + /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40)) @@ -1609,43 +1634,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, mutex_unlock(&hci_cb_list_lock); } -static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, - size_t *data_len) -{ - size_t parsed = 0; - - if (eir_len < 2) - return NULL; - - while (parsed < eir_len - 1) { - u8 field_len = eir[0]; - - if (field_len == 0) - break; - - parsed += field_len + 1; - - if (parsed > eir_len) - break; - - if (eir[1] != type) { - eir += field_len + 1; - continue; - } - - /* Zero length data */ - if (field_len == 1) - return NULL; - - if (data_len) - *data_len = field_len - 1; - - return &eir[2]; - } - - return NULL; -} - static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) { if (addr_type != ADDR_LE_DEV_RANDOM) @@ -1867,4 +1855,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, #define SCO_AIRMODE_CVSD 0x0000 #define SCO_AIRMODE_TRANSP 0x0003 +#define LOCAL_CODEC_ACL_MASK BIT(0) +#define LOCAL_CODEC_SCO_MASK BIT(1) + +#define TRANSPORT_TYPE_MAX 0x04 + #endif /* __HCI_CORE_H */ diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 40296ed976a977..4202c609bb0b09 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -130,7 +130,8 @@ static inline void 
skb_mark_napi_id(struct sk_buff *skb, static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL - WRITE_ONCE(sk->sk_napi_id, skb->napi_id); + if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id)) + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif sk_rx_queue_set(sk, skb); } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 27336fc7046749..423f97b982ffd8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -739,6 +739,22 @@ struct cfg80211_tid_config { struct cfg80211_tid_cfg tid_conf[]; }; +/** + * struct cfg80211_fils_aad - FILS AAD data + * @macaddr: STA MAC address + * @kek: FILS KEK + * @kek_len: FILS KEK length + * @snonce: STA Nonce + * @anonce: AP Nonce + */ +struct cfg80211_fils_aad { + const u8 *macaddr; + const u8 *kek; + u8 kek_len; + const u8 *snonce; + const u8 *anonce; +}; + /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition @@ -1040,6 +1056,36 @@ struct cfg80211_crypto_settings { enum nl80211_sae_pwe_mechanism sae_pwe; }; +/** + * struct cfg80211_mbssid_config - AP settings for multi bssid + * + * @tx_wdev: pointer to the transmitted interface in the MBSSID set + * @index: index of this AP in the multi bssid group. + * @ema: set to true if the beacons should be sent out in EMA mode. + */ +struct cfg80211_mbssid_config { + struct wireless_dev *tx_wdev; + u8 index; + bool ema; +}; + +/** + * struct cfg80211_mbssid_elems - Multiple BSSID elements + * + * @cnt: Number of elements in array %elems. + * + * @elem: Array of multiple BSSID element(s) to be added into Beacon frames. + * @elem.data: Data for multiple BSSID elements. + * @elem.len: Length of data. + */ +struct cfg80211_mbssid_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[]; +}; + /** * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) @@ -1058,6 +1104,7 @@ struct cfg80211_crypto_settings { * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) + * @mbssid_ies: multiple BSSID elements * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token @@ -1075,6 +1122,7 @@ struct cfg80211_beacon_data { const u8 *probe_resp; const u8 *lci; const u8 *civicloc; + struct cfg80211_mbssid_elems *mbssid_ies; s8 ftm_responder; size_t head_len, tail_len; @@ -1189,6 +1237,7 @@ enum cfg80211_ap_settings_flags { * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters + * @mbssid_config: AP settings for multiple bssid */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -1221,6 +1270,7 @@ struct cfg80211_ap_settings { struct cfg80211_he_bss_color he_bss_color; struct cfg80211_fils_discovery fils_discovery; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; + struct cfg80211_mbssid_config mbssid_config; }; /** @@ -4018,6 +4068,10 @@ struct mgmt_frame_regs { * @set_sar_specs: Update the SAR (TX power) settings. * * @color_change: Initiate a color change. 
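struct cfg80211_mbssid_elems above is a flexible-array structure: @cnt {data, len} entries follow the count, and the data pointers presumably have to stay valid for as long as the structure is in use. A hypothetical allocation sketch for a single element (the buffer handling is illustrative only):

  #include <linux/slab.h>
  #include <linux/overflow.h>
  #include <net/cfg80211.h>

  static struct cfg80211_mbssid_elems *demo_build_mbssid(const u8 *ie, size_t len)
  {
          struct cfg80211_mbssid_elems *elems;

          /* One trailing {data, len} slot per element we intend to carry. */
          elems = kzalloc(struct_size(elems, elem, 1), GFP_KERNEL);
          if (!elems)
                  return NULL;

          elems->cnt = 1;
          elems->elem[0].data = ie;   /* caller keeps the IE buffer alive */
          elems->elem[0].len = len;
          return elems;
  }
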
+ * + * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use + * those to decrypt (Re)Association Request and encrypt (Re)Association + * Response frame. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4348,6 +4402,8 @@ struct cfg80211_ops { int (*color_change)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_color_change_settings *params); + int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_fils_aad *fils_aad); }; /* @@ -4981,6 +5037,13 @@ struct wiphy_iftype_akm_suites { * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities * @rfkill: a pointer to the rfkill structure + * + * @mbssid_max_interfaces: maximum number of interfaces supported by the driver + * in a multiple BSSID set. This field must be set to a non-zero value + * by the driver to advertise MBSSID support. + * @ema_max_profile_periodicity: maximum profile periodicity supported by + * the driver. Setting this field to a non-zero value indicates that the + * driver supports enhanced multi-BSSID advertisements (EMA AP). */ struct wiphy { struct mutex mtx; @@ -5125,6 +5188,9 @@ struct wiphy { struct rfkill *rfkill; + u8 mbssid_max_interfaces; + u8 ema_max_profile_periodicity; + char priv[] __aligned(NETDEV_ALIGN); }; @@ -5490,7 +5556,7 @@ struct wireless_dev { unsigned long unprot_beacon_reported; }; -static inline u8 *wdev_address(struct wireless_dev *wdev) +static inline const u8 *wdev_address(struct wireless_dev *wdev) { if (wdev->netdev) return wdev->netdev->dev_addr; @@ -6308,6 +6374,17 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid, u64_to_ether_addr(new_bssid_u64, new_bssid); } +/** + * cfg80211_get_ies_channel_number - returns the channel number from ies + * @ie: IEs + * @ielen: length of IEs + * @band: enum nl80211_band of the channel + * + * Returns the channel number, or -1 if none could be determined. + */ +int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, + enum nl80211_band band); + /** * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check diff --git a/include/net/codel.h b/include/net/codel.h index a6e428f8013508..a6c9e34e62b87c 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -102,6 +102,9 @@ static inline u32 codel_time_to_us(codel_time_t val) * @interval: width of moving time window * @mtu: device mtu, or minimal queue backlog in bytes. 
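Per the wiphy field documentation above, drivers opt in to MBSSID and EMA support by setting the two new fields to non-zero values before registering the wiphy. A sketch with illustrative values (the helper name is hypothetical):

	static void example_advertise_mbssid(struct wiphy *wiphy)
	{
		/* non-zero => driver supports multiple BSSID interfaces */
		wiphy->mbssid_max_interfaces = 8;
		/* non-zero => enhanced multi-BSSID advertisements (EMA AP) */
		wiphy->ema_max_profile_periodicity = 3;
	}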
* @ecn: is Explicit Congestion Notification enabled + * @ce_threshold_selector: apply ce_threshold to packets matching this value + * in the diffserv/ECN byte of the IP header + * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison */ struct codel_params { codel_time_t target; @@ -109,6 +112,8 @@ struct codel_params { codel_time_t interval; u32 mtu; bool ecn; + u8 ce_threshold_selector; + u8 ce_threshold_mask; }; /** diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h index d289b91dcd65ec..137d40d8cbeb75 100644 --- a/include/net/codel_impl.h +++ b/include/net/codel_impl.h @@ -54,6 +54,8 @@ static void codel_params_init(struct codel_params *params) params->interval = MS2TIME(100); params->target = MS2TIME(5); params->ce_threshold = CODEL_DISABLED_THRESHOLD; + params->ce_threshold_mask = 0; + params->ce_threshold_selector = 0; params->ecn = false; } @@ -246,9 +248,19 @@ static struct sk_buff *codel_dequeue(void *ctx, vars->rec_inv_sqrt); } end: - if (skb && codel_time_after(vars->ldelay, params->ce_threshold) && - INET_ECN_set_ce(skb)) - stats->ce_mark++; + if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) { + bool set_ce = true; + + if (params->ce_threshold_mask) { + int dsfield = skb_get_dsfield(skb); + + set_ce = (dsfield >= 0 && + (((u8)dsfield & params->ce_threshold_mask) == + params->ce_threshold_selector)); + } + if (set_ce && INET_ECN_set_ce(skb)) + stats->ce_mark++; + } return skb; } diff --git a/include/net/datalink.h b/include/net/datalink.h index a9663229b91394..d9b7faaa539f22 100644 --- a/include/net/datalink.h +++ b/include/net/datalink.h @@ -12,7 +12,7 @@ struct datalink_proto { int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); int (*request)(struct datalink_proto *, struct sk_buff *, - unsigned char *); + const unsigned char *); struct list_head node; }; diff --git a/include/net/devlink.h b/include/net/devlink.h index 154cf0dbca3727..aab3d007c57725 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -21,45 +21,7 @@ #include #include -#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \ - (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX) - -struct devlink_dev_stats { - u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; - u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; -}; - -struct devlink_ops; - -struct devlink { - u32 index; - struct list_head port_list; - struct list_head rate_list; - struct list_head sb_list; - struct list_head dpipe_table_list; - struct list_head resource_list; - struct list_head param_list; - struct list_head region_list; - struct list_head reporter_list; - struct mutex reporters_lock; /* protects reporter_list */ - struct devlink_dpipe_headers *dpipe_headers; - struct list_head trap_list; - struct list_head trap_group_list; - struct list_head trap_policer_list; - const struct devlink_ops *ops; - struct xarray snapshot_ids; - struct devlink_dev_stats stats; - struct device *dev; - possible_net_t _net; - struct mutex lock; /* Serializes access to devlink instance specific objects such as - * port, sb, dpipe, resource, params, region, traps and more. - */ - u8 reload_failed:1, - reload_enabled:1; - refcount_t refcount; - struct completion comp; - char priv[0] __aligned(NETDEV_ALIGN); -}; +struct devlink; struct devlink_port_phys_attrs { u32 port_number; /* Same value as "split group". 
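The selector/mask pair added to struct codel_params restricts the existing ce_threshold to packets whose DS/ECN byte satisfies (dsfield & ce_threshold_mask) == ce_threshold_selector, as implemented in the codel_dequeue() hunk above. For example, to CE-mark only ECT(1) traffic (the L4S-style use case), a caller could set the fields as below; the helper name and the 1 ms threshold are illustrative:

	#include <net/codel.h>
	#include <net/inet_ecn.h>

	static void example_ce_threshold_ect1(struct codel_params *params)
	{
		params->ce_threshold = MS2TIME(1);		/* 1 ms sojourn threshold */
		params->ce_threshold_mask = INET_ECN_MASK;	/* compare ECN bits only */
		params->ce_threshold_selector = INET_ECN_ECT_1;	/* match ECT(1) packets */
	}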
@@ -506,7 +468,6 @@ struct devlink_param_item { const struct devlink_param *param; union devlink_param_value driverinit_value; bool driverinit_value_valid; - bool published; }; enum devlink_param_generic_id { @@ -1224,6 +1185,11 @@ enum devlink_trap_group_generic_id { .min_burst = _min_burst, \ } +enum { + /* device supports reload operations */ + DEVLINK_F_RELOAD = 1UL << 0, +}; + struct devlink_ops { /** * @supported_flash_update_params: @@ -1520,34 +1486,9 @@ struct devlink_ops { struct netlink_ext_ack *extack); }; -static inline void *devlink_priv(struct devlink *devlink) -{ - BUG_ON(!devlink); - return &devlink->priv; -} - -static inline struct devlink *priv_to_devlink(void *priv) -{ - BUG_ON(!priv); - return container_of(priv, struct devlink, priv); -} - -static inline struct devlink_port * -netdev_to_devlink_port(struct net_device *dev) -{ - if (dev->netdev_ops->ndo_get_devlink_port) - return dev->netdev_ops->ndo_get_devlink_port(dev); - return NULL; -} - -static inline struct devlink *netdev_to_devlink(struct net_device *dev) -{ - struct devlink_port *devlink_port = netdev_to_devlink_port(dev); - - if (devlink_port) - return devlink_port->devlink; - return NULL; -} +void *devlink_priv(struct devlink *devlink); +struct devlink *priv_to_devlink(void *priv); +struct device *devlink_to_dev(const struct devlink *devlink); struct ib_device; @@ -1566,10 +1507,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, { return devlink_alloc_ns(ops, priv_size, &init_net, dev); } -int devlink_register(struct devlink *devlink); +void devlink_set_features(struct devlink *devlink, u64 features); +void devlink_register(struct devlink *devlink); void devlink_unregister(struct devlink *devlink); -void devlink_reload_enable(struct devlink *devlink); -void devlink_reload_disable(struct devlink *devlink); void devlink_free(struct devlink *devlink); int devlink_port_register(struct devlink *devlink, struct devlink_port *devlink_port, @@ -1651,34 +1591,11 @@ int devlink_param_register(struct devlink *devlink, const struct devlink_param *param); void devlink_param_unregister(struct devlink *devlink, const struct devlink_param *param); -void devlink_params_publish(struct devlink *devlink); -void devlink_params_unpublish(struct devlink *devlink); -void devlink_param_publish(struct devlink *devlink, - const struct devlink_param *param); -void devlink_param_unpublish(struct devlink *devlink, - const struct devlink_param *param); -int devlink_port_params_register(struct devlink_port *devlink_port, - const struct devlink_param *params, - size_t params_count); -void devlink_port_params_unregister(struct devlink_port *devlink_port, - const struct devlink_param *params, - size_t params_count); int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, union devlink_param_value *init_val); int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val); -int -devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port, - u32 param_id, - union devlink_param_value *init_val); -int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port, - u32 param_id, - union devlink_param_value init_val); void devlink_param_value_changed(struct devlink *devlink, u32 param_id); -void devlink_port_param_value_changed(struct devlink_port *devlink_port, - u32 param_id); -void devlink_param_value_str_fill(union devlink_param_value *dst_val, - const char *src); struct devlink_region * devlink_region_create(struct 
devlink *devlink, const struct devlink_region_ops *ops, @@ -1723,10 +1640,7 @@ int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); -int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value); -int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value); int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); -int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value); int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, u16 value_len); @@ -1812,9 +1726,12 @@ devlink_trap_policers_unregister(struct devlink *devlink, #if IS_ENABLED(CONFIG_NET_DEVLINK) -void devlink_compat_running_version(struct net_device *dev, +struct devlink *__must_check devlink_try_get(struct devlink *devlink); +void devlink_put(struct devlink *devlink); + +void devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len); -int devlink_compat_flash_update(struct net_device *dev, const char *file_name); +int devlink_compat_flash_update(struct devlink *devlink, const char *file_name); int devlink_compat_phys_port_name_get(struct net_device *dev, char *name, size_t len); int devlink_compat_switch_id_get(struct net_device *dev, @@ -1822,13 +1739,22 @@ int devlink_compat_switch_id_get(struct net_device *dev, #else +static inline struct devlink *devlink_try_get(struct devlink *devlink) +{ + return NULL; +} + +static inline void devlink_put(struct devlink *devlink) +{ +} + static inline void -devlink_compat_running_version(struct net_device *dev, char *buf, size_t len) +devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { } static inline int -devlink_compat_flash_update(struct net_device *dev, const char *file_name) +devlink_compat_flash_update(struct devlink *devlink, const char *file_name) { return -EOPNOTSUPP; } diff --git a/include/net/dn.h b/include/net/dn.h index 56ab0726c641a0..ba9655b0098ae0 100644 --- a/include/net/dn.h +++ b/include/net/dn.h @@ -166,7 +166,7 @@ struct dn_skb_cb { int iif; }; -static inline __le16 dn_eth2dn(unsigned char *ethaddr) +static inline __le16 dn_eth2dn(const unsigned char *ethaddr) { return get_unaligned((__le16 *)(ethaddr + 4)); } diff --git a/include/net/dsa.h b/include/net/dsa.h index d784e76113b8d4..eff5c44ba3774a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -51,6 +51,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_SEVILLE_VALUE 21 #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 #define DSA_TAG_PROTO_SJA1110_VALUE 23 +#define DSA_TAG_PROTO_RTL8_4_VALUE 24 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -77,6 +78,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE, DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, + DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE, }; struct dsa_switch; @@ -285,6 +287,7 @@ struct dsa_port { /* List of MAC addresses that must be forwarded on this port. * These are only valid on CPU ports and DSA links. 
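The devlink API changes above (opaque struct devlink, devlink_register() returning void, the DEVLINK_F_RELOAD feature bit, removal of devlink_reload_enable/disable and the params publish helpers) imply a simpler probe sequence: set everything up first, advertise reload support via devlink_set_features(), and register last. A hedged sketch; struct example_priv and the surrounding driver are hypothetical:

	struct example_priv {
		int dummy;
	};

	static int example_probe(struct device *dev, const struct devlink_ops *ops)
	{
		struct devlink *devlink;

		devlink = devlink_alloc(ops, sizeof(struct example_priv), dev);
		if (!devlink)
			return -ENOMEM;

		/* ... set up ports, params, regions, health reporters ... */

		devlink_set_features(devlink, DEVLINK_F_RELOAD);
		devlink_register(devlink);	/* now returns void; must be last */

		return 0;
	}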
*/ + struct mutex addr_lists_lock; struct list_head fdbs; struct list_head mdbs; @@ -472,14 +475,41 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p) return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER; } +#define dsa_tree_for_each_user_port(_dp, _dst) \ + list_for_each_entry((_dp), &(_dst)->ports, list) \ + if (dsa_port_is_user((_dp))) + +#define dsa_switch_for_each_port(_dp, _ds) \ + list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ + if ((_dp)->ds == (_ds)) + +#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \ + list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \ + if ((_dp)->ds == (_ds)) + +#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \ + list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \ + if ((_dp)->ds == (_ds)) + +#define dsa_switch_for_each_available_port(_dp, _ds) \ + dsa_switch_for_each_port((_dp), (_ds)) \ + if (!dsa_port_is_unused((_dp))) + +#define dsa_switch_for_each_user_port(_dp, _ds) \ + dsa_switch_for_each_port((_dp), (_ds)) \ + if (dsa_port_is_user((_dp))) + +#define dsa_switch_for_each_cpu_port(_dp, _ds) \ + dsa_switch_for_each_port((_dp), (_ds)) \ + if (dsa_port_is_cpu((_dp))) + static inline u32 dsa_user_ports(struct dsa_switch *ds) { + struct dsa_port *dp; u32 mask = 0; - int p; - for (p = 0; p < ds->num_ports; p++) - if (dsa_is_user_port(ds, p)) - mask |= BIT(p); + dsa_switch_for_each_user_port(dp, ds) + mask |= BIT(dp->index); return mask; } @@ -615,6 +645,8 @@ struct dsa_switch_ops { /* * PHYLINK integration */ + void (*phylink_get_interfaces)(struct dsa_switch *ds, int port, + unsigned long *supported_interfaces); void (*phylink_validate)(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state); @@ -645,6 +677,12 @@ struct dsa_switch_ops { int (*get_sset_count)(struct dsa_switch *ds, int port, int sset); void (*get_ethtool_phy_stats)(struct dsa_switch *ds, int port, uint64_t *data); + void (*get_eth_phy_stats)(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats); + void (*get_eth_mac_stats)(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats); + void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); void (*self_test)(struct dsa_switch *ds, int port, diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index ffd386ea0dbb3e..aa33e1092e2c4d 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -287,6 +287,7 @@ enum flow_dissector_key_id { #define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0) #define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1) #define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2) +#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3) struct flow_dissector_key { enum flow_dissector_key_id key_id; diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 1424e02cef90c0..7aa2b8e1fb298c 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -7,14 +7,17 @@ #include #include -/* Note: this used to be in include/uapi/linux/gen_stats.h */ -struct gnet_stats_basic_packed { - __u64 bytes; - __u64 packets; -}; - -struct gnet_stats_basic_cpu { - struct gnet_stats_basic_packed bstats; +/* Throughput stats. + * Must be initialized beforehand with gnet_stats_basic_sync_init(). + * + * If no reads can ever occur parallel to writes (e.g. 
stack-allocated + * bstats), then the internal stat values can be written to and read + * from directly. Otherwise, use _bstats_set/update() for writes and + * gnet_stats_add_basic() for reads. + */ +struct gnet_stats_basic_sync { + u64_stats_t bytes; + u64_stats_t packets; struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); @@ -34,6 +37,7 @@ struct gnet_dump { struct tc_stats tc_stats; }; +void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b); int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d, int padattr); @@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d, int padattr); -int gnet_stats_copy_basic(const seqcount_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b); -void __gnet_stats_copy_basic(const seqcount_t *running, - struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b); -int gnet_stats_copy_basic_hw(const seqcount_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b); +int gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, bool running); +void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, bool running); +int gnet_stats_copy_basic_hw(struct gnet_dump *d, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, bool running); int gnet_stats_copy_rate_est(struct gnet_dump *d, struct net_rate_estimator __rcu **ptr); int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue __percpu *cpu_q, struct gnet_stats_queue *q, __u32 qlen); -void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, - const struct gnet_stats_queue __percpu *cpu_q, - const struct gnet_stats_queue *q, __u32 qlen); +void gnet_stats_add_queue(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *cpu_q, + const struct gnet_stats_queue *q); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_finish_copy(struct gnet_dump *d); -int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, +int gen_new_estimator(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + bool running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); -int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, +int gen_replace_estimator(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + bool running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, struct gnet_stats_rate_est64 *sample); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index b06c2d02ec84e9..fa6a87246a7b85 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -289,7 +289,7 @@ static inline void 
inet_csk_prepare_for_destroy_sock(struct sock *sk) { /* The below has to be done to allow calling inet_csk_destroy_sock */ sock_set_flag(sk, SOCK_DEAD); - percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(*sk->sk_prot->orphan_count); } void inet_csk_destroy_sock(struct sock *sk); diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index ba77f47ef61ed3..ea32393464a291 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -188,6 +188,23 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) return 0; } +static inline int skb_get_dsfield(struct sk_buff *skb) +{ + switch (skb_protocol(skb, true)) { + case cpu_to_be16(ETH_P_IP): + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + break; + return ipv4_get_dsfield(ip_hdr(skb)); + + case cpu_to_be16(ETH_P_IPV6): + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + break; + return ipv6_get_dsfield(ipv6_hdr(skb)); + } + + return -1; +} + static inline int INET_ECN_set_ect1(struct sk_buff *skb) { switch (skb_protocol(skb, true)) { diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 89163ef8cf4be2..9e1111f5915bd0 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -207,11 +207,10 @@ struct inet_sock { __be32 inet_saddr; __s16 uc_ttl; __u16 cmsg_flags; + struct ip_options_rcu __rcu *inet_opt; __be16 inet_sport; __u16 inet_id; - struct ip_options_rcu __rcu *inet_opt; - int rx_dst_ifindex; __u8 tos; __u8 min_ttl; __u8 mc_ttl; diff --git a/include/net/ioam6.h b/include/net/ioam6.h index 3c2993bc48c81e..3f45ba37a2c69e 100644 --- a/include/net/ioam6.h +++ b/include/net/ioam6.h @@ -56,7 +56,8 @@ static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net) struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id); void ioam6_fill_trace_data(struct sk_buff *skb, struct ioam6_namespace *ns, - struct ioam6_trace_hdr *trace); + struct ioam6_trace_hdr *trace, + bool is_input); int ioam6_init(void); void ioam6_exit(void); diff --git a/include/net/ip.h b/include/net/ip.h index 9192444f2964eb..b71e88507c4a09 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -291,7 +292,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) -u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); +static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt) +{ + return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); +} + unsigned long snmp_fold_field(void __percpu *mib, int offt); #if BITS_PER_LONG==32 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct, @@ -746,6 +751,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); +DECLARE_STATIC_KEY_FALSE(ip4_min_ttl); int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 7cb5a1aace40db..ff1804a0c4692e 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -931,6 +931,7 @@ struct netns_ipvs { int sysctl_conn_reuse_mode; int sysctl_schedule_icmp; int sysctl_ignore_tunneled; + 
int sysctl_run_estimation; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1071,6 +1072,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) return ipvs->sysctl_cache_bypass; } +static inline int sysctl_run_estimation(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_run_estimation; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -1163,6 +1169,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) return 0; } +static inline int sysctl_run_estimation(struct netns_ipvs *ipvs) +{ + return 1; +} + #endif /* IPVS core functions diff --git a/include/net/ipv6.h b/include/net/ipv6.h index f2d0ecc257bb28..c19bf51ded1d02 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1092,6 +1092,7 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, /* * socket options (ipv6_sockglue.c) */ +DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount); int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); diff --git a/include/net/llc.h b/include/net/llc.h index df282d9b401704..fd1f9a3fd8dda4 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -133,7 +133,7 @@ static inline void llc_sap_put(struct llc_sap *sap) struct llc_sap *llc_sap_find(unsigned char sap_value); int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, - unsigned char *dmac, unsigned char dsap); + const unsigned char *dmac, unsigned char dsap); void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); diff --git a/include/net/llc_if.h b/include/net/llc_if.h index 8d5c543cd62003..c72570a21a4ff0 100644 --- a/include/net/llc_if.h +++ b/include/net/llc_if.h @@ -62,7 +62,8 @@ #define LLC_STATUS_CONFLICT 7 /* disconnect conn */ #define LLC_STATUS_RESET_DONE 8 /* */ -int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap); +int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac, + u8 dsap); int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb); int llc_send_disc(struct sock *sk); #endif /* LLC_IF_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 618d1f427cb276..dd757f0987b0f9 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -632,6 +632,10 @@ struct ieee80211_fils_discovery { * @s1g: BSS is S1G BSS (affects Association Request format). * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed * to driver when rate control is offloaded to firmware. + * @power_type: power type of BSS for 6 GHz + * @tx_pwr_env: transmit power envelope array of BSS. + * @tx_pwr_env_num: number of @tx_pwr_env. + * @pwr_reduction: power constraint of BSS. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -702,6 +706,10 @@ struct ieee80211_bss_conf { u32 unsol_bcast_probe_resp_interval; bool s1g; struct cfg80211_bitrate_mask beacon_tx_rate; + enum ieee80211_ap_reg_power power_type; + struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; + u8 tx_pwr_env_num; + u8 pwr_reduction; }; /** @@ -1715,6 +1723,7 @@ enum ieee80211_offload_flags { * write-protected by sdata_lock and local->mtx so holding either is fine * for read access. * @color_change_color: the bss color that will be used after the change. + * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled. 
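The ip4_min_ttl and ip6_min_hopcount static keys declared earlier (in the ip.h and ipv6.h hunks) let receive paths skip the minimum-TTL/hop-count comparison entirely until some socket actually sets IP_MINTTL or IPV6_MINHOPCOUNT, at which point setsockopt presumably enables the key. A sketch of the intended fast-path shape, illustrative only and not a copy of the kernel's TCP input code:

	#include <net/ip.h>
	#include <net/inet_sock.h>

	static bool example_min_ttl_drop(const struct sock *sk, const struct iphdr *iph)
	{
		if (!static_branch_unlikely(&ip4_min_ttl))
			return false;	/* no socket uses IP_MINTTL: branch patched out */

		return unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl));
	}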
*/ struct ieee80211_vif { enum nl80211_iftype type; @@ -1746,6 +1755,8 @@ struct ieee80211_vif { bool color_change_active; u8 color_change_color; + struct ieee80211_vif *mbssid_tx_vif; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; diff --git a/include/net/mctp.h b/include/net/mctp.h index ffd2c23bd76d56..7e35ec79b909c8 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -58,39 +59,53 @@ struct mctp_sock { mctp_eid_t bind_addr; __u8 bind_type; + /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */ + bool addr_ext; + /* list of mctp_sk_key, for incoming tag lookup. updates protected * by sk->net->keys_lock */ struct hlist_head keys; + + /* mechanism for expiring allocated keys; will release an allocated + * tag, and any netdev state for a request/response pairing + */ + struct timer_list key_expiry; }; /* Key for matching incoming packets to sockets or reassembly contexts. * Packets are matched on (src,dest,tag). * - * Lifetime requirements: + * Lifetime / locking requirements: + * + * - individual key data (ie, the struct itself) is protected by key->lock; + * changes must be made with that lock held. + * + * - the lookup fields: peer_addr, local_addr and tag are set before the + * key is added to lookup lists, and never updated. * - * - keys are free()ed via RCU + * - A ref to the key must be held (throuh key->refs) if a pointer to the + * key is to be accessed after key->lock is released. * * - a mctp_sk_key contains a reference to a struct sock; this is valid * for the life of the key. On sock destruction (through unhash), the key is - * removed from lists (see below), and will not be observable after a RCU - * grace period. - * - * any RX occurring within that grace period may still queue to the socket, - * but will hit the SOCK_DEAD case before the socket is freed. + * removed from lists (see below), and marked invalid. * * - these mctp_sk_keys appear on two lists: * 1) the struct mctp_sock->keys list * 2) the struct netns_mctp->keys list * - * updates to either list are performed under the netns_mctp->keys - * lock. + * presences on these lists requires a (single) refcount to be held; both + * lists are updated as a single operation. + * + * Updates and lookups in either list are performed under the + * netns_mctp->keys lock. Lookup functions will need to lock the key and + * take a reference before unlocking the keys_lock. Consequently, the list's + * keys_lock *cannot* be acquired with the individual key->lock held. * * - a key may have a sk_buff attached as part of an in-progress message - * reassembly (->reasm_head). The reassembly context is protected by - * reasm_lock, which may be acquired with the keys lock (above) held, if - * necessary. Consequently, keys lock *cannot* be acquired with the - * reasm_lock held. + * reassembly (->reasm_head). The reasm data is protected by the individual + * key->lock. * * - there are two destruction paths for a mctp_sk_key: * @@ -101,6 +116,8 @@ struct mctp_sock { * the (complete) reply, or during reassembly errors. Here, we clean up * the reassembly context (marking reasm_dead, to prevent another from * starting), and remove the socket from the netns & socket lists. + * + * - through an expiry timeout, on a per-socket timer */ struct mctp_sk_key { mctp_eid_t peer_addr; @@ -116,20 +133,40 @@ struct mctp_sk_key { /* per-socket list */ struct hlist_node sklist; + /* lock protects against concurrent updates to the reassembly and + * expiry data below. 
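The reworked lifetime rules above replace RCU with per-key locking and refcounts. A sketch of the lookup discipline they describe: hold the netns keys_lock to walk the list, then take key->lock and a reference before dropping keys_lock, never the other way round. The function name and match logic are illustrative; the netns fields (keys, keys_lock) are the ones the comment refers to, and the caller is expected to drop the reference with mctp_key_unref():

	static struct mctp_sk_key *example_lookup_key(struct netns_mctp *mns,
						      mctp_eid_t peer, u8 tag)
	{
		struct mctp_sk_key *key, *match = NULL;
		unsigned long flags;

		spin_lock_irqsave(&mns->keys_lock, flags);

		hlist_for_each_entry(key, &mns->keys, hlist) {
			if (key->peer_addr != peer || key->tag != tag)
				continue;

			spin_lock(&key->lock);
			if (key->valid) {
				refcount_inc(&key->refs);
				match = key;
			}
			spin_unlock(&key->lock);
			break;
		}

		spin_unlock_irqrestore(&mns->keys_lock, flags);

		return match;	/* caller drops the ref with mctp_key_unref() */
	}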
+ */ + spinlock_t lock; + + /* Keys are referenced during the output path, which may sleep */ + refcount_t refs; + /* incoming fragment reassembly context */ - spinlock_t reasm_lock; struct sk_buff *reasm_head; struct sk_buff **reasm_tailp; bool reasm_dead; u8 last_seq; - struct rcu_head rcu; + /* key validity */ + bool valid; + + /* expiry timeout; valid (above) cleared on expiry */ + unsigned long expiry; + + /* free to use for device flow state tracking. Initialised to + * zero on initial key creation + */ + unsigned long dev_flow_state; + struct mctp_dev *dev; }; struct mctp_skb_cb { unsigned int magic; unsigned int net; + int ifindex; /* extended/direct addressing if set */ mctp_eid_t src; + unsigned char halen; + unsigned char haddr[MAX_ADDR_LEN]; }; /* skb control-block accessors with a little extra debugging for initial @@ -153,10 +190,18 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb) { struct mctp_skb_cb *cb = (void *)skb->cb; + BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb)); WARN_ON(cb->magic != 0x4d435450); return (void *)(skb->cb); } +/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension, + * indicating the flow to the device driver. + */ +struct mctp_flow { + struct mctp_sk_key *key; +}; + /* Route definition. * * These are held in the pernet->mctp.routes list, with RCU protection for @@ -165,8 +210,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb) * * Updates to the route table are performed under rtnl; all reads under RCU, * so routes cannot be referenced over a RCU grace period. Specifically: A - * caller cannot block between mctp_route_lookup and passing the route to - * mctp_do_route. + * caller cannot block between mctp_route_lookup and mctp_route_release() */ struct mctp_route { mctp_eid_t min, max; @@ -186,11 +230,11 @@ struct mctp_route { struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet, mctp_eid_t daddr); -int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb); - int mctp_local_output(struct sock *sk, struct mctp_route *rt, struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag); +void mctp_key_unref(struct mctp_sk_key *key); + /* routing <--> device interface */ unsigned int mctp_default_net(struct net *net); int mctp_default_net_set(struct net *net, unsigned int index); diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h index 71a11012fac7c4..5c0d04b5c12cbe 100644 --- a/include/net/mctpdevice.h +++ b/include/net/mctpdevice.h @@ -14,11 +14,17 @@ #include #include +struct mctp_sk_key; + struct mctp_dev { struct net_device *dev; + refcount_t refs; + unsigned int net; + const struct mctp_netdev_ops *ops; + /* Only modified under RTNL. 
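Following the route comment above (a caller must not block between mctp_route_lookup() and releasing the route, and mctp_do_route() is gone in favour of mctp_local_output()), a minimal transmit sketch. The function name is illustrative, error handling is trimmed, and MCTP_TAG_OWNER is assumed here to request a kernel-allocated, owned tag:

	static int example_mctp_send(struct sock *sk, struct sk_buff *skb,
				     unsigned int netid, mctp_eid_t daddr)
	{
		struct mctp_route *rt;

		rt = mctp_route_lookup(sock_net(sk), netid, daddr);
		if (!rt) {
			kfree_skb(skb);
			return -EHOSTUNREACH;
		}

		/* request a kernel-allocated, owned tag for this flow */
		return mctp_local_output(sk, rt, skb, daddr, MCTP_TAG_OWNER);
	}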
Reads have addrs_lock held */ u8 *addrs; size_t num_addrs; @@ -27,9 +33,24 @@ struct mctp_dev { struct rcu_head rcu; }; +struct mctp_netdev_ops { + void (*release_flow)(struct mctp_dev *dev, + struct mctp_sk_key *key); +}; + #define MCTP_INITIAL_DEFAULT_NET 1 struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev); struct mctp_dev *__mctp_dev_get(const struct net_device *dev); +int mctp_register_netdev(struct net_device *dev, + const struct mctp_netdev_ops *ops); +void mctp_unregister_netdev(struct net_device *dev); + +void mctp_dev_hold(struct mctp_dev *mdev); +void mctp_dev_put(struct mctp_dev *mdev); + +void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key); +void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key); + #endif /* __NET_MCTPDEVICE_H */ diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 3214848402ec93..a925349b4b8991 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -12,6 +12,8 @@ #include #include +struct mptcp_info; +struct mptcp_sock; struct seq_file; /* MPTCP sk_buff extension data */ @@ -125,6 +127,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts); +void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info); + /* move the skb extension owership, with the assumption that 'to' is * newly allocated */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 38e4094960cee8..04341d86585de9 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -137,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev, u8 *opt, int opt_len, struct ndisc_options *ndopts); -void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data, +void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data, int data_len, int pad); #define NDISC_OPS_REDIRECT_DATA_SPACE 2 diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 22ced1381ede51..38a0c1d2457087 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -144,17 +144,18 @@ struct neighbour { struct timer_list timer; unsigned long used; atomic_t probes; - __u8 flags; - __u8 nud_state; - __u8 type; - __u8 dead; + u8 nud_state; + u8 type; + u8 dead; u8 protocol; + u32 flags; seqlock_t ha_lock; unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8); struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct list_head gc_list; + struct list_head managed_list; struct rcu_head rcu; struct net_device *dev; u8 primary_key[0]; @@ -172,7 +173,7 @@ struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; - u8 flags; + u32 flags; u8 protocol; u8 key[]; }; @@ -216,11 +217,13 @@ struct neigh_table { int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; + struct delayed_work managed_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; atomic_t gc_entries; struct list_head gc_list; + struct list_head managed_list; rwlock_t lock; unsigned long last_rand; struct neigh_statistics __percpu *stats; @@ -250,12 +253,21 @@ static inline void *neighbour_priv(const struct neighbour *n) } /* flags for neigh_update() */ -#define NEIGH_UPDATE_F_OVERRIDE 0x00000001 -#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002 -#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004 -#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000 -#define 
NEIGH_UPDATE_F_ISROUTER 0x40000000 -#define NEIGH_UPDATE_F_ADMIN 0x80000000 +#define NEIGH_UPDATE_F_OVERRIDE BIT(0) +#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1) +#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2) +#define NEIGH_UPDATE_F_USE BIT(3) +#define NEIGH_UPDATE_F_MANAGED BIT(4) +#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5) +#define NEIGH_UPDATE_F_ISROUTER BIT(6) +#define NEIGH_UPDATE_F_ADMIN BIT(7) + +/* In-kernel representation for NDA_FLAGS_EXT flags: */ +#define NTF_OLD_MASK 0xff +#define NTF_EXT_SHIFT 8 +#define NTF_EXT_MASK (NTF_EXT_MANAGED) + +#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT) extern const struct nla_policy nda_policy[]; @@ -504,10 +516,15 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb, { const struct hh_cache *hh = &n->hh; - if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache) + /* n->nud_state and hh->hh_len could be changed under us. + * neigh_hh_output() is taking care of the race later. + */ + if (!skip_cache && + (READ_ONCE(n->nud_state) & NUD_CONNECTED) && + READ_ONCE(hh->hh_len)) return neigh_hh_output(hh, skb); - else - return n->output(n, skb); + + return n->output(n, skb); } static inline struct neighbour * diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index a16171c5fd9ebc..a0d9e0b47ab8f4 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -21,13 +21,19 @@ struct module; #define NFT_JUMP_STACK_SIZE 16 +enum { + NFT_PKTINFO_L4PROTO = (1 << 0), + NFT_PKTINFO_INNER = (1 << 1), +}; + struct nft_pktinfo { struct sk_buff *skb; const struct nf_hook_state *state; - bool tprot_set; + u8 flags; u8 tprot; u16 fragoff; unsigned int thoff; + unsigned int inneroff; }; static inline struct sock *nft_sk(const struct nft_pktinfo *pkt) @@ -75,7 +81,7 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt) { - pkt->tprot_set = false; + pkt->flags = 0; pkt->tprot = 0; pkt->thoff = 0; pkt->fragoff = 0; diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index eb4c094cd54d23..c4a6147b0ef8ce 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -10,7 +10,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt) struct iphdr *ip; ip = ip_hdr(pkt->skb); - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = ip->protocol; pkt->thoff = ip_hdrlen(pkt->skb); pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET; @@ -36,7 +36,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) else if (len < thoff) return -1; - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = iph->protocol; pkt->thoff = thoff; pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; @@ -71,7 +71,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt) goto inhdr_error; } - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = iph->protocol; pkt->thoff = thoff; pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; @@ -82,4 +82,5 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt) __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS); return -1; } + #endif diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index 7595e02b00ba08..ec7eaeaf4f04c2 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -18,7 +18,7 @@ static inline void 
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt) return; } - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = protohdr; pkt->thoff = thoff; pkt->fragoff = frag_off; @@ -50,7 +50,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) if (protohdr < 0) return -1; - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = protohdr; pkt->thoff = thoff; pkt->fragoff = frag_off; @@ -96,7 +96,7 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt) if (protohdr < 0) goto inhdr_error; - pkt->tprot_set = true; + pkt->flags = NFT_PKTINFO_L4PROTO; pkt->tprot = protohdr; pkt->thoff = thoff; pkt->fragoff = frag_off; diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h index 832ab69efda57c..4c3809e141f4ff 100644 --- a/include/net/netfilter/xt_rateest.h +++ b/include/net/netfilter/xt_rateest.h @@ -6,7 +6,7 @@ struct xt_rateest { /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */ - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; spinlock_t lock; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index a4082406a00396..3855f069627f45 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -216,24 +216,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, page_pool_put_full_page(pool, page, true); } -#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ - (sizeof(dma_addr_t) > sizeof(unsigned long)) - static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { - dma_addr_t ret = page->dma_addr; - - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; - - return ret; + return page->dma_addr; } static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) { page->dma_addr = addr; - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - page->dma_addr_upper = upper_32_bits(addr); } static inline void page_pool_set_frag_count(struct page *page, long nr) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 83a6d07921806e..193f88ebf629bd 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -765,7 +765,7 @@ struct tc_cookie { }; struct tc_qopt_offload_stats { - struct gnet_stats_basic_packed *bstats; + struct gnet_stats_basic_sync *bstats; struct gnet_stats_queue *qstats; }; @@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params { }; struct tc_gred_qopt_offload_stats { - struct gnet_stats_basic_packed bstats[MAX_DPs]; + struct gnet_stats_basic_sync bstats[MAX_DPs]; struct gnet_stats_queue qstats[MAX_DPs]; struct red_stats *xstats[MAX_DPs]; }; @@ -977,6 +977,7 @@ enum tc_tbf_command { TC_TBF_REPLACE, TC_TBF_DESTROY, TC_TBF_STATS, + TC_TBF_GRAFT, }; struct tc_tbf_qopt_offload_replace_params { @@ -992,6 +993,7 @@ struct tc_tbf_qopt_offload { union { struct tc_tbf_qopt_offload_replace_params replace_params; struct tc_qopt_offload_stats stats; + u32 child_handle; }; }; diff --git a/include/net/rose.h b/include/net/rose.h index cf517d306a28b2..0f0a4ce0fee7cc 100644 --- a/include/net/rose.h +++ b/include/net/rose.h @@ -162,8 +162,8 @@ extern int sysctl_rose_link_fail_timeout; extern int sysctl_rose_maximum_vcs; extern int sysctl_rose_window_size; -int rosecmp(rose_address *, rose_address *); -int rosecmpm(rose_address *, rose_address *, unsigned short); +int rosecmp(const rose_address *, const rose_address *); +int rosecmpm(const rose_address *, const rose_address *, unsigned short); char *rose2asc(char *buf, const rose_address *); struct sock 
*rose_find_socket(unsigned int, struct rose_neigh *); void rose_kill_by_neigh(struct rose_neigh *); @@ -205,8 +205,8 @@ extern const struct seq_operations rose_node_seqops; extern struct seq_operations rose_route_seqops; void rose_add_loopback_neigh(void); -int __must_check rose_add_loopback_node(rose_address *); -void rose_del_loopback_node(rose_address *); +int __must_check rose_add_loopback_node(const rose_address *); +void rose_del_loopback_node(const rose_address *); void rose_rt_device_down(struct net_device *); void rose_link_device_down(struct net_device *); struct net_device *rose_dev_first(void); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c0069ac00e62d2..22179b2fda72a0 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -40,6 +40,13 @@ enum qdisc_state_t { __QDISC_STATE_DRAINING, }; +enum qdisc_state2_t { + /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly. + * Use qdisc_run_begin/end() or qdisc_is_running() instead. + */ + __QDISC_STATE2_RUNNING, +}; + #define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED) #define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING) @@ -97,7 +104,7 @@ struct Qdisc { struct netdev_queue *dev_queue; struct net_rate_estimator __rcu *rate_est; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_basic_sync __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; int pad; refcount_t refcnt; @@ -107,10 +114,10 @@ struct Qdisc { */ struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; - struct gnet_stats_basic_packed bstats; - seqcount_t running; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; unsigned long state; + unsigned long state2; /* must be written under qdisc spinlock */ struct Qdisc *next_sched; struct sk_buff_head skb_bad_txq; @@ -143,11 +150,15 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc) return NULL; } +/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc + * root_lock section, or provide their own memory barriers -- ordering + * against qdisc_run_begin/end() atomic bit operations. + */ static inline bool qdisc_is_running(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) return spin_is_locked(&qdisc->seqlock); - return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; + return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); } static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc) @@ -167,6 +178,9 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc) return !READ_ONCE(qdisc->q.qlen); } +/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with + * the qdisc root lock acquired. + */ static inline bool qdisc_run_begin(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) { @@ -206,15 +220,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) * after it releases the lock at the end of qdisc_run_end(). */ return spin_trylock(&qdisc->seqlock); - } else if (qdisc_is_running(qdisc)) { - return false; } - /* Variant of write_seqcount_begin() telling lockdep a trylock - * was attempted. 
- */ - raw_write_seqcount_begin(&qdisc->running); - seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); - return true; + return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); } static inline void qdisc_run_end(struct Qdisc *qdisc) @@ -226,7 +233,7 @@ static inline void qdisc_run_end(struct Qdisc *qdisc) &qdisc->state))) __netif_schedule(qdisc); } else { - write_seqcount_end(&qdisc->running); + __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2); } } @@ -308,6 +315,8 @@ struct Qdisc_ops { struct netlink_ext_ack *extack); void (*attach)(struct Qdisc *sch); int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + void (*change_real_num_tx)(struct Qdisc *sch, + unsigned int new_real_tx); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); @@ -590,14 +599,6 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) return qdisc_lock(root); } -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) -{ - struct Qdisc *root = qdisc_root_sleeping(qdisc); - - ASSERT_RTNL(); - return &root->running; -} - static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc) { return qdisc->dev_queue->dev; @@ -684,6 +685,8 @@ void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); void qdisc_class_hash_destroy(struct Qdisc_class_hash *); int dev_qdisc_change_tx_queue_len(struct net_device *dev); +void dev_qdisc_change_real_num_tx(struct net_device *dev, + unsigned int new_real_tx); void dev_init_scheduler(struct net_device *dev); void dev_shutdown(struct net_device *dev); void dev_activate(struct net_device *dev); @@ -845,14 +848,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, return sch->enqueue(skb, sch, to_free); } -static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, +static inline void _bstats_update(struct gnet_stats_basic_sync *bstats, __u64 bytes, __u32 packets) { - bstats->bytes += bytes; - bstats->packets += packets; + u64_stats_update_begin(&bstats->syncp); + u64_stats_add(&bstats->bytes, bytes); + u64_stats_add(&bstats->packets, packets); + u64_stats_update_end(&bstats->syncp); } -static inline void bstats_update(struct gnet_stats_basic_packed *bstats, +static inline void bstats_update(struct gnet_stats_basic_sync *bstats, const struct sk_buff *skb) { _bstats_update(bstats, @@ -860,26 +865,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats, skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1); } -static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, - __u64 bytes, __u32 packets) -{ - u64_stats_update_begin(&bstats->syncp); - _bstats_update(&bstats->bstats, bytes, packets); - u64_stats_update_end(&bstats->syncp); -} - -static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, - const struct sk_buff *skb) -{ - u64_stats_update_begin(&bstats->syncp); - bstats_update(&bstats->bstats, skb); - u64_stats_update_end(&bstats->syncp); -} - static inline void qdisc_bstats_cpu_update(struct Qdisc *sch, const struct sk_buff *skb) { - bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb); + bstats_update(this_cpu_ptr(sch->cpu_bstats), skb); } static inline void qdisc_bstats_update(struct Qdisc *sch, @@ -968,10 +957,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, __u32 *backlog) { struct gnet_stats_queue qstats = { 0 }; - __u32 len = qdisc_qlen_sum(sch); - __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len); - *qlen = qstats.qlen; + gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats); + *qlen = qstats.qlen + qdisc_qlen(sch); *backlog = qstats.backlog; } @@ -1312,15 +1300,15 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64); struct mini_Qdisc { struct tcf_proto *filter_list; struct tcf_block *block; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_basic_sync __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; - struct rcu_head rcu; + unsigned long rcu_state; }; static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq, const struct sk_buff *skb) { - bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb); + bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb); } static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq) @@ -1341,6 +1329,8 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, struct tcf_block *block); +void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx); + int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); #endif diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 69bab88ad66b18..189fdb9db16220 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -626,7 +626,8 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) static inline int sctp_transport_pl_hlen(struct sctp_transport *t) { - return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0); + return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) - + sizeof(struct sctphdr); } static inline void sctp_transport_pl_reset(struct sctp_transport *t) @@ -653,12 +654,10 @@ static inline void sctp_transport_pl_update(struct sctp_transport *t) if (t->pl.state == SCTP_PL_DISABLED) return; - if (del_timer(&t->probe_timer)) - sctp_transport_put(t); - t->pl.state = SCTP_PL_BASE; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pl.probe_size = SCTP_BASE_PLPMTU; + sctp_transport_reset_probe_timer(t); } static inline bool sctp_transport_pl_enabled(struct sctp_transport *t) diff --git a/include/net/sock.h b/include/net/sock.h index 463f390d90b3e0..b32906e1ab5552 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -259,10 +259,11 @@ struct bpf_local_storage; * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux + * @sk_rx_dst_ifindex: ifindex for 
@sk_rx_dst + * @sk_rx_dst_cookie: cookie for @sk_rx_dst * @sk_dst_cache: destination cache * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy - * @sk_rx_skb_cache: cache copy of recently accessed RX skb * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_tsq_flags: TCP Small Queues flags @@ -270,6 +271,7 @@ struct bpf_local_storage; * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_reserved_mem: space reserved and non-reclaimable for the socket * @sk_napi_id: id of the last napi context to receive data for sk * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode @@ -329,7 +331,6 @@ struct bpf_local_storage; * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] - * @sk_tx_skb_cache: cache copy of recently accessed TX skb * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_cgrp_data: cgroup data for this cgroup @@ -394,7 +395,6 @@ struct sock { atomic_t sk_drops; int sk_rcvlowat; struct sk_buff_head sk_error_queue; - struct sk_buff *sk_rx_skb_cache; struct sk_buff_head sk_receive_queue; /* * The backlog queue is special, it is always used with @@ -413,6 +413,7 @@ struct sock { #define sk_rmem_alloc sk_backlog.rmem_alloc int sk_forward_alloc; + u32 sk_reserved_mem; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_ll_usec; /* ===== mostly read cache line ===== */ @@ -431,6 +432,9 @@ struct sock { struct xfrm_policy __rcu *sk_policy[2]; #endif struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -443,7 +447,6 @@ struct sock { struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; - struct sk_buff *sk_tx_skb_cache; struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; @@ -1207,6 +1210,8 @@ struct proto { unsigned int inuse_idx; #endif + int (*forward_alloc_get)(const struct sock *sk); + bool (*stream_memory_free)(const struct sock *sk, int wake); bool (*sock_is_readable)(struct sock *sk); /* Memory pressure */ @@ -1214,6 +1219,7 @@ struct proto { void (*leave_memory_pressure)(struct sock *sk); atomic_long_t *memory_allocated; /* Current allocated memory. */ struct percpu_counter *sockets_allocated; /* Current number of sockets. */ + /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. @@ -1237,7 +1243,7 @@ struct proto { unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */ - struct percpu_counter *orphan_count; + unsigned int __percpu *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; @@ -1291,20 +1297,22 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake)); +static inline int sk_forward_alloc_get(const struct sock *sk) +{ + if (!sk->sk_prot->forward_alloc_get) + return sk->sk_forward_alloc; + + return sk->sk_prot->forward_alloc_get(sk); +} + static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) return false; -#ifdef CONFIG_INET return sk->sk_prot->stream_memory_free ? 
- INDIRECT_CALL_1(sk->sk_prot->stream_memory_free, - tcp_stream_memory_free, - sk, wake) : true; -#else - return sk->sk_prot->stream_memory_free ? - sk->sk_prot->stream_memory_free(sk, wake) : true; -#endif + INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free, + tcp_stream_memory_free, sk, wake) : true; } static inline bool sk_stream_memory_free(const struct sock *sk) @@ -1518,20 +1526,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) skb_pfmemalloc(skb); } +static inline int sk_unused_reserved_mem(const struct sock *sk) +{ + int unused_mem; + + if (likely(!sk->sk_reserved_mem)) + return 0; + + unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - + atomic_read(&sk->sk_rmem_alloc); + + return unused_mem > 0 ? unused_mem : 0; +} + static inline void sk_mem_reclaim(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable >= SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable); +} + +static inline void sk_mem_reclaim_final(struct sock *sk) +{ + sk->sk_reserved_mem = 0; + sk_mem_reclaim(sk); } static inline void sk_mem_reclaim_partial(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc > SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable > SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable - 1); } static inline void sk_mem_charge(struct sock *sk, int size) @@ -1541,11 +1578,19 @@ static inline void sk_mem_charge(struct sock *sk, int size) sk->sk_forward_alloc -= size; } +/* the following macros control memory reclaiming in sk_mem_uncharge() + */ +#define SK_RECLAIM_THRESHOLD (1 << 21) +#define SK_RECLAIM_CHUNK (1 << 20) + static inline void sk_mem_uncharge(struct sock *sk, int size) { + int reclaimable; + if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); /* Avoid a possible overflow. * TCP send queues can make this happen, if sk_mem_reclaim() @@ -1554,23 +1599,8 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) * If we reach 2 MBytes, reclaim 1 MBytes right now, there is * no need to hold that much forward allocation anyway. 
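With the new sk_reserved_mem accounting, the reclaim helpers above subtract the still-unused part of the reservation before deciding what to give back, so reserved-but-unused memory is never returned to the global counters. A worked sketch of the arithmetic, assuming an empty receive queue; illustrative only, since in practice the reservation is set through the associated socket option rather than by poking the field directly:

	static void example_reserved_mem(struct sock *sk)
	{
		sk->sk_reserved_mem = 1 << 20;		/* 1 MiB reserved for this socket */
		sk_wmem_queued_add(sk, 256 << 10);	/* 256 KiB sitting in the send queue */

		/*
		 * sk_unused_reserved_mem() == 1 MiB - 256 KiB = 768 KiB, so
		 * sk_mem_reclaim() only releases sk_forward_alloc beyond that
		 * amount, and only if the excess is at least SK_MEM_QUANTUM.
		 */
		sk_mem_reclaim(sk);
	}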
*/ - if (unlikely(sk->sk_forward_alloc >= 1 << 21)) - __sk_mem_reclaim(sk, 1 << 20); -} - -DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); -static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) -{ - sk_wmem_queued_add(sk, -skb->truesize); - sk_mem_uncharge(sk, skb->truesize); - if (static_branch_unlikely(&tcp_tx_skb_cache_key) && - !sk->sk_tx_skb_cache && !skb_cloned(skb)) { - skb_ext_reset(skb); - skb_zcopy_clear(skb, true); - sk->sk_tx_skb_cache = skb; - return; - } - __kfree_skb(skb); + if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD)) + __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK); } static inline void sock_release_ownership(struct sock *sk) @@ -1889,10 +1919,8 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); - if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING)) - return; - - sk->sk_rx_queue_mapping = rx_queue; + if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } @@ -1900,15 +1928,19 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; + WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); #endif } static inline int sk_rx_queue_get(const struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) - return sk->sk_rx_queue_mapping; + if (sk) { + int res = READ_ONCE(sk->sk_rx_queue_mapping); + + if (res != NO_QUEUE_MAPPING) + return res; + } #endif return -1; @@ -2388,13 +2420,11 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) return; val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); + val = max_t(u32, val, sk_unused_reserved_mem(sk)); WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); } -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule); - /** * sk_page_frag - return an appropriate page_frag * @sk: socket @@ -2608,7 +2638,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) &skb_shinfo(skb)->tskey); } -DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from @@ -2620,12 +2649,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); - if (static_branch_unlikely(&tcp_rx_skb_cache_key) && - !sk->sk_rx_skb_cache) { - sk->sk_rx_skb_cache = skb; - skb_orphan(skb); - return; - } __kfree_skb(skb); } @@ -2820,6 +2843,10 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs); int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len); +int sock_get_timeout(long timeo, void *optval, bool old_timeval); +int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, + sockptr_t optval, int optlen, bool old_timeval); + static inline bool sk_is_readable(struct sock *sk) { if (sk->sk_prot->sock_is_readable) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 60d806b6a5ae1f..d353793dfeb5fe 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -299,28 +299,16 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, struct net_device *group_dev, bool joining); -int switchdev_handle_fdb_add_to_device(struct net_device *dev, +int 
switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), - int (*add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)); - -int switchdev_handle_fdb_del_to_device(struct net_device *dev, - const struct switchdev_notifier_fdb_info *fdb_info, - bool (*check_cb)(const struct net_device *dev), - bool (*foreign_dev_check_cb)(const struct net_device *dev, - const struct net_device *foreign_dev), - int (*del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)); int switchdev_handle_port_obj_add(struct net_device *dev, @@ -426,32 +414,16 @@ call_switchdev_blocking_notifiers(unsigned long val, } static inline int -switchdev_handle_fdb_add_to_device(struct net_device *dev, - const struct switchdev_notifier_fdb_info *fdb_info, - bool (*check_cb)(const struct net_device *dev), - bool (*foreign_dev_check_cb)(const struct net_device *dev, - const struct net_device *foreign_dev), - int (*add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)) -{ - return 0; -} - -static inline int -switchdev_handle_fdb_del_to_device(struct net_device *dev, +switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), - int (*del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)) { return 0; diff --git a/include/net/tcp.h b/include/net/tcp.h index 60c384569e9cd6..70972f3ac8fa39 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -48,7 +48,9 @@ extern struct inet_hashinfo tcp_hashinfo; -extern struct percpu_counter tcp_orphan_count; +DECLARE_PER_CPU(unsigned int, tcp_orphan_count); +int tcp_orphan_count_sum(void); + void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) @@ -288,21 +290,15 @@ static inline bool tcp_out_of_memory(struct sock *sk) return false; } 
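With the change above, tcp_orphan_count becomes a bare per-CPU unsigned int: increments stay CPU-local and tcp_orphan_count_sum() only walks all CPUs when the orphan limit actually has to be checked, accepting a slightly stale total in exchange for a cheaper fast path. A standalone userspace analogy of that trade-off, with an array standing in for per-CPU storage (names are illustrative, not kernel API):

#include <stdio.h>

#define NR_CPUS 8

/* one slot per "CPU"; each writer only ever touches its own slot */
static unsigned int orphan_count[NR_CPUS];

static void orphan_inc(int cpu)
{
	orphan_count[cpu]++;		/* no shared cacheline, no atomics */
}

static unsigned int orphan_sum(void)
{
	unsigned int total = 0;
	int cpu;

	/* the reader pays: walk every slot and tolerate staleness */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += orphan_count[cpu];
	return total;
}

int main(void)
{
	orphan_inc(0);
	orphan_inc(3);
	printf("orphans: %u\n", orphan_sum());
	return 0;
}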
-void sk_forced_mem_schedule(struct sock *sk, int size); - -static inline bool tcp_too_many_orphans(struct sock *sk, int shift) +static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { - struct percpu_counter *ocp = sk->sk_prot->orphan_count; - int orphans = percpu_counter_read_positive(ocp); - - if (orphans << shift > sysctl_tcp_max_orphans) { - orphans = percpu_counter_sum_positive(ocp); - if (orphans << shift > sysctl_tcp_max_orphans) - return true; - } - return false; + sk_wmem_queued_add(sk, -skb->truesize); + sk_mem_uncharge(sk, skb->truesize); + __kfree_skb(skb); } +void sk_forced_mem_schedule(struct sock *sk, int size); + bool tcp_check_oom(struct sock *sk, int shift); @@ -322,7 +318,7 @@ void tcp_shutdown(struct sock *sk, int how); int tcp_v4_early_demux(struct sk_buff *skb); int tcp_v4_rcv(struct sk_buff *skb); -void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb); +void tcp_remove_empty_skb(struct sock *sk); int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); @@ -330,8 +326,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); -struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, - struct page *page, int offset, size_t *size); ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tcp_send_mss(struct sock *sk, int *size_goal, int flags); @@ -350,6 +344,8 @@ void tcp_twsk_destructor(struct sock *sk); ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); +struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule); void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, @@ -581,6 +577,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss); #endif /* tcp_output.c */ +void tcp_skb_entail(struct sock *sk, struct sk_buff *skb); +void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb); void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle); int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); @@ -874,10 +872,11 @@ struct tcp_skb_cb { __u32 ack_seq; /* Sequence number ACK'd */ union { struct { +#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1) /* There is space for up to 24 bytes */ - __u32 in_flight:30,/* Bytes in flight at transmit */ - is_app_limited:1, /* cwnd not fully used? */ - unused:1; + __u32 is_app_limited:1, /* cwnd not fully used? 
*/ + delivered_ce:20, + unused:11; /* pkts S/ACKed so far upon tx of skb, incl retrans: */ __u32 delivered; /* start of send pipeline phase */ @@ -1029,7 +1028,9 @@ struct ack_sample { struct rate_sample { u64 prior_mstamp; /* starting timestamp for interval */ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ + u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ + s32 delivered_ce; /* number of packets delivered w/ CE marks*/ long interval_us; /* time for tp->delivered to incr "delivered" */ u32 snd_interval_us; /* snd interval for delivered packets */ u32 rcv_interval_us; /* rcv interval for delivered packets */ @@ -1418,6 +1419,17 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); } +static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) +{ + int unused_mem = sk_unused_reserved_mem(sk); + struct tcp_sock *tp = tcp_sk(sk); + + tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + if (unused_mem) + tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh, + tcp_win_from_space(sk, unused_mem)); +} + void tcp_cleanup_rbuf(struct sock *sk, int copied); /* We provision sk_rcvbuf around 200% of sk_rcvlowat. @@ -1870,7 +1882,7 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc { list_del(&skb->tcp_tsorted_anchor); tcp_rtx_queue_unlink(skb, sk); - sk_wmem_free_skb(sk, skb); + tcp_wmem_free_skb(sk, skb); } static inline void tcp_push_pending_frames(struct sock *sk) diff --git a/include/net/tls.h b/include/net/tls.h index 1fffb206f09f51..526cb2c3b724e7 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -66,7 +66,7 @@ #define MAX_IV_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 -/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. +/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] * @@ -74,6 +74,7 @@ * Hence b0 contains (3 - 1) = 2. */ #define TLS_AES_CCM_IV_B0_BYTE 2 +#define TLS_SM4_CCM_IV_B0_BYTE 2 #define __TLS_INC_STATS(net, field) \ __SNMP_INC_STATS((net)->mib.tls_statistics, field) @@ -220,6 +221,8 @@ union tls_crypto_context { struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; }; }; diff --git a/include/net/xdp.h b/include/net/xdp.h index ad5b02dcb6f4c4..447f9b1578f387 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -15,13 +15,13 @@ * level RX-ring queues. It is information that is specific to how * the driver have configured a given RX-ring queue. * - * Each xdp_buff frame received in the driver carry a (pointer) + * Each xdp_buff frame received in the driver carries a (pointer) * reference to this xdp_rxq_info structure. This provides the XDP * data-path read-access to RX-info for both kernel and bpf-side * (limited subset). * * For now, direct access is only safe while running in NAPI/softirq - * context. Contents is read-mostly and must not be updated during + * context. Contents are read-mostly and must not be updated during * driver NAPI/softirq poll. * * The driver usage API is a register and unregister API. @@ -30,8 +30,8 @@ * can be attached as long as it doesn't change the underlying * RX-ring. 
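The delivered_ce fields added to tcp_skb_cb and struct rate_sample above let a congestion control module see how many of the packets delivered in a sample interval carried ECN CE marks (the per-skb count is capped at 20 bits by TCPCB_DELIVERED_CE_MASK). A self-contained sketch of the arithmetic a CC module might do with them, using a local struct that mirrors only the two fields of interest rather than the kernel's full struct rate_sample:

#include <stdio.h>

/* mirrors just the relevant fields of struct rate_sample */
struct sample {
	long delivered;		/* packets delivered over the interval */
	long delivered_ce;	/* of those, packets that carried a CE mark */
};

/* CE fraction scaled to [0, 1024]; the fixed-point scale is illustrative */
static unsigned int ce_fraction_x1024(const struct sample *rs)
{
	if (rs->delivered <= 0)
		return 0;
	return (unsigned int)((rs->delivered_ce << 10) / rs->delivered);
}

int main(void)
{
	struct sample rs = { .delivered = 100, .delivered_ce = 25 };

	/* 25 of 100 packets were CE-marked -> 256/1024 */
	printf("CE fraction: %u/1024\n", ce_fraction_x1024(&rs));
	return 0;
}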
If the RX-ring does change significantly, the NIC driver * naturally need to stop the RX-ring before purging and reallocating - * memory. In that process the driver MUST call unregistor (which - * also apply for driver shutdown and unload). The register API is + * memory. In that process the driver MUST call unregister (which + * also applies for driver shutdown and unload). The register API is * also mandatory during RX-ring setup. */ diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 4e295541e39671..443d4595156489 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -77,6 +77,12 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return xp_alloc(pool); } +/* Returns as many entries as possible up to max. 0 <= N <= max. */ +static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +{ + return xp_alloc_batch(pool, xdp, max); +} + static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count) { return xp_can_alloc(pool, count); @@ -89,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) xp_free(xskb); } +static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) +{ + xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; + xdp->data_meta = xdp->data; + xdp->data_end = xdp->data + size; +} + static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, u64 addr) { @@ -212,6 +225,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return NULL; } +static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +{ + return 0; +} + static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count) { return false; @@ -221,6 +239,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } +static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) +{ +} + static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, u64 addr) { diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 7a9a23e7a604a9..ddeefc4a10405c 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -7,6 +7,7 @@ #include #include #include +#include #include struct xsk_buff_pool; @@ -23,7 +24,6 @@ struct xdp_buff_xsk { dma_addr_t dma; dma_addr_t frame_dma; struct xsk_buff_pool *pool; - bool unaligned; u64 orig_addr; struct list_head free_list_node; }; @@ -67,6 +67,7 @@ struct xsk_buff_pool { u32 free_heads_cnt; u32 headroom; u32 chunk_size; + u32 chunk_shift; u32 frame_len; u8 cached_need_wakeup; bool uses_need_wakeup; @@ -81,6 +82,13 @@ struct xsk_buff_pool { struct xdp_buff_xsk *free_heads[]; }; +/* Masks for xdp_umem_page flags. + * The low 12-bits of the addr will be 0 since this is the page address, so we + * can use them for flags. + */ +#define XSK_NEXT_PG_CONTIG_SHIFT 0 +#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT) + /* AF_XDP core. 
*/ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem); @@ -89,7 +97,6 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, struct net_device *dev, u16 queue_id); void xp_destroy(struct xsk_buff_pool *pool); -void xp_release(struct xdp_buff_xsk *xskb); void xp_get_pool(struct xsk_buff_pool *pool); bool xp_put_pool(struct xsk_buff_pool *pool); void xp_clear_dev(struct xsk_buff_pool *pool); @@ -99,12 +106,28 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs); /* AF_XDP, and XDP core. */ void xp_free(struct xdp_buff_xsk *xskb); +static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, + u64 addr) +{ + xskb->orig_addr = addr; + xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; +} + +static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, + dma_addr_t *dma_pages, u64 addr) +{ + xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) + + (addr & ~PAGE_MASK); + xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM; +} + /* AF_XDP ZC drivers, via xdp_sock_buff.h */ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq); int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, unsigned long attrs, struct page **pages, u32 nr_pages); void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs); struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool); +u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max); bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count); void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr); dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr); @@ -180,4 +203,25 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr) xp_unaligned_extract_offset(addr); } +static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr) +{ + return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift; +} + +static inline void xp_release(struct xdp_buff_xsk *xskb) +{ + if (xskb->pool->unaligned) + xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; +} + +static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb) +{ + u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start; + + offset += xskb->pool->headroom; + if (!xskb->pool->unaligned) + return xskb->orig_addr + offset; + return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); +} + #endif /* XSK_BUFF_POOL_H_ */ diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index c9d849924f890a..4bf62de2e00ef0 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -44,6 +44,7 @@ struct device; * @regs_cinh: The cache inhibited regs * @dpio_id: The dpio index * @qman_version: The qman version + * @qman_clk: The qman clock frequency in Hz * * Describes the attributes and features of the DPIO object. 
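xp_get_handle() above rebuilds the descriptor address handed back to userspace: the offset of the payload within its chunk (headroom included) is added directly in aligned mode, or shifted into the upper bits of the 64-bit address in unaligned mode. A standalone illustration of that encoding, reusing the XSK_UNALIGNED_BUF_OFFSET_SHIFT/ADDR_MASK definitions from the existing if_xdp.h uapi; the concrete addresses are made up:

#include <stdio.h>
#include <stdint.h>

/* values from include/uapi/linux/if_xdp.h */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT	48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int main(void)
{
	uint64_t orig_addr = 0x12000;	/* chunk start inside the umem */
	uint64_t offset = 256 + 64;	/* headroom plus where the data begins */

	/* unaligned mode: the offset travels in the top 16 bits of the handle */
	uint64_t handle = orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

	/* consumer side: split the handle back into base address and offset */
	uint64_t base = handle & XSK_UNALIGNED_BUF_ADDR_MASK;
	uint64_t off  = handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;

	printf("base=0x%llx offset=%llu\n",
	       (unsigned long long)base, (unsigned long long)off);
	return 0;
}

In aligned mode the same helper simply returns orig_addr + offset, which is why xp_aligned_extract_idx() can recover the chunk index with a plain shift by chunk_shift.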
*/ @@ -55,6 +56,7 @@ struct dpaa2_io_desc { void __iomem *regs_cinh; int dpio_id; u32 qman_version; + u32 qman_clk; }; struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, @@ -129,4 +131,11 @@ int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid, u32 *fcnt, u32 *bcnt); int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num); + +int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff); +void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff); +void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d, + int use_adaptive_rx_coalesce); +int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d); +void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes); #endif /* __FSL_DPAA2_IO_H */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index d7055b41982dfa..fef3a36b021007 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -563,9 +563,22 @@ struct ocelot_vcap_block { int pol_lpr; }; -struct ocelot_vlan { - bool valid; +struct ocelot_bridge_vlan { u16 vid; + unsigned long portmask; + unsigned long untagged; + struct list_head list; +}; + +enum ocelot_port_tag_config { + /* all VLANs are egress-untagged */ + OCELOT_PORT_TAG_DISABLED = 0, + /* all VLANs except the native VLAN and VID 0 are egress-tagged */ + OCELOT_PORT_TAG_NATIVE = 1, + /* all VLANs except VID 0 are egress-tagged */ + OCELOT_PORT_TAG_TRUNK_NO_VID0 = 2, + /* all VLANs are egress-tagged */ + OCELOT_PORT_TAG_TRUNK = 3, }; enum ocelot_sb { @@ -590,9 +603,7 @@ struct ocelot_port { bool vlan_aware; /* VLAN that untagged frames are classified to, on ingress */ - struct ocelot_vlan pvid_vlan; - /* The VLAN ID that will be transmitted as untagged, on egress */ - struct ocelot_vlan native_vlan; + const struct ocelot_bridge_vlan *pvid_vlan; unsigned int ptp_skbs_in_flight; u8 ptp_cmd; @@ -635,8 +646,7 @@ struct ocelot { u8 base_mac[ETH_ALEN]; - /* Keep track of the vlan port masks */ - u32 vlan_mask[VLAN_N_VID]; + struct list_head vlans; /* Switches like VSC9959 have flooding per traffic class */ int num_flooding_pgids; @@ -665,6 +675,9 @@ struct ocelot { struct delayed_work stats_work; struct workqueue_struct *stats_queue; + /* Lock for serializing access to the MAC table */ + struct mutex mact_lock; + struct workqueue_struct *owq; u8 ptp:1; diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index 4869ebbd438d9a..eeb1142aa1b1d5 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -576,6 +576,16 @@ enum ocelot_mask_mode { OCELOT_MASK_MODE_REDIRECT, }; +enum ocelot_es0_vid_sel { + OCELOT_ES0_VID_PLUS_CLASSIFIED_VID = 0, + OCELOT_ES0_VID = 1, +}; + +enum ocelot_es0_pcp_sel { + OCELOT_CLASSIFIED_PCP = 0, + OCELOT_ES0_PCP = 1, +}; + enum ocelot_es0_tag { OCELOT_NO_ES0_TAG, OCELOT_ES0_TAG, diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index a23be89119aa51..a8e97f84b65273 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -93,8 +93,7 @@ __section("__bpf_raw_tp_map") = { \ #define FIRST(x, ...) 
x -#undef DEFINE_EVENT_WRITABLE -#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \ +#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \ static inline void bpf_test_buffer_##call(void) \ { \ /* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \ @@ -103,8 +102,12 @@ static inline void bpf_test_buffer_##call(void) \ */ \ FIRST(proto); \ (void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \ -} \ -__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) +} + +#undef DEFINE_EVENT_WRITABLE +#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \ + __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \ + __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ @@ -119,9 +122,17 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0) +#undef DECLARE_TRACE_WRITABLE +#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \ + __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ + __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef DECLARE_TRACE_WRITABLE #undef DEFINE_EVENT_WRITABLE +#undef __CHECK_WRITABLE_BUF_SIZE #undef __DEFINE_EVENT #undef FIRST diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h index 44d8e298106582..2814f188d98c3c 100644 --- a/include/trace/events/devlink.h +++ b/include/trace/events/devlink.h @@ -21,9 +21,9 @@ TRACE_EVENT(devlink_hwmsg, TP_ARGS(devlink, incoming, type, buf, len), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __field(bool, incoming) __field(unsigned long, type) __dynamic_array(u8, buf, len) @@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg, ), TP_fast_assign( - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __entry->incoming = incoming; __entry->type = type; memcpy(__get_dynamic_array(buf), buf, len); @@ -55,17 +55,17 @@ TRACE_EVENT(devlink_hwerr, TP_ARGS(devlink, err, msg), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __field(int, err) __string(msg, msg) ), TP_fast_assign( - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __entry->err = err; __assign_str(msg, 
msg); ), @@ -85,17 +85,17 @@ TRACE_EVENT(devlink_health_report, TP_ARGS(devlink, reporter_name, msg), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(reporter_name, msg) __string(msg, msg) ), TP_fast_assign( - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(reporter_name, reporter_name); __assign_str(msg, msg); ), @@ -116,18 +116,18 @@ TRACE_EVENT(devlink_health_recover_aborted, TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(reporter_name, reporter_name) __field(bool, health_state) __field(u64, time_since_last_recover) ), TP_fast_assign( - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(reporter_name, reporter_name); __entry->health_state = health_state; __entry->time_since_last_recover = time_since_last_recover; @@ -150,17 +150,17 @@ TRACE_EVENT(devlink_health_reporter_state_update, TP_ARGS(devlink, reporter_name, new_state), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(reporter_name, reporter_name) __field(u8, new_state) ), TP_fast_assign( - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(reporter_name, reporter_name); __entry->new_state = new_state; ), @@ -181,9 +181,9 @@ TRACE_EVENT(devlink_trap_report, TP_ARGS(devlink, skb, metadata), TP_STRUCT__entry( - __string(bus_name, devlink->dev->bus->name) - __string(dev_name, dev_name(devlink->dev)) - __string(driver_name, devlink->dev->driver->name) + __string(bus_name, devlink_to_dev(devlink)->bus->name) + __string(dev_name, dev_name(devlink_to_dev(devlink))) + __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(trap_name, metadata->trap_name) __string(trap_group_name, metadata->trap_group_name) __dynamic_array(char, input_dev_name, IFNAMSIZ) @@ -192,9 
+192,9 @@ TRACE_EVENT(devlink_trap_report, TP_fast_assign( struct net_device *input_dev = metadata->input_dev; - __assign_str(bus_name, devlink->dev->bus->name); - __assign_str(dev_name, dev_name(devlink->dev)); - __assign_str(driver_name, devlink->dev->driver->name); + __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); + __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); + __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(trap_name, metadata->trap_name); __assign_str(trap_group_name, metadata->trap_group_name); __assign_str(input_dev_name, diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h new file mode 100644 index 00000000000000..175b057c507fd3 --- /dev/null +++ b/include/trace/events/mctp.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mctp + +#if !defined(_TRACE_MCTP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MCTP_H + +#include + +#ifndef __TRACE_MCTP_ENUMS +#define __TRACE_MCTP_ENUMS +enum { + MCTP_TRACE_KEY_TIMEOUT, + MCTP_TRACE_KEY_REPLIED, + MCTP_TRACE_KEY_INVALIDATED, + MCTP_TRACE_KEY_CLOSED, +}; +#endif /* __TRACE_MCTP_ENUMS */ + +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT); +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED); +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED); +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED); + +TRACE_EVENT(mctp_key_acquire, + TP_PROTO(const struct mctp_sk_key *key), + TP_ARGS(key), + TP_STRUCT__entry( + __field(__u8, paddr) + __field(__u8, laddr) + __field(__u8, tag) + ), + TP_fast_assign( + __entry->paddr = key->peer_addr; + __entry->laddr = key->local_addr; + __entry->tag = key->tag; + ), + TP_printk("local %d, peer %d, tag %1x", + __entry->laddr, + __entry->paddr, + __entry->tag + ) +); + +TRACE_EVENT(mctp_key_release, + TP_PROTO(const struct mctp_sk_key *key, int reason), + TP_ARGS(key, reason), + TP_STRUCT__entry( + __field(__u8, paddr) + __field(__u8, laddr) + __field(__u8, tag) + __field(int, reason) + ), + TP_fast_assign( + __entry->paddr = key->peer_addr; + __entry->laddr = key->local_addr; + __entry->tag = key->tag; + __entry->reason = reason; + ), + TP_printk("local %d, peer %d, tag %1x %s", + __entry->laddr, + __entry->paddr, + __entry->tag, + __print_symbolic(__entry->reason, + { MCTP_TRACE_KEY_TIMEOUT, "timeout" }, + { MCTP_TRACE_KEY_REPLIED, "replied" }, + { MCTP_TRACE_KEY_INVALIDATED, "invalidated" }, + { MCTP_TRACE_KEY_CLOSED, "closed" }) + ) +); + +#endif + +#include diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 1f0a2b4864e445..c77a1313b3b0be 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -126,6 +126,8 @@ #define SO_BUF_LOCK 72 +#define SO_RESERVE_MEM 73 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/linux/amt.h b/include/uapi/linux/amt.h new file mode 100644 index 00000000000000..2dccff41719568 --- /dev/null +++ b/include/uapi/linux/amt.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021 Taehee Yoo + */ +#ifndef _UAPI_AMT_H_ +#define _UAPI_AMT_H_ + +enum ifla_amt_mode { + /* AMT interface works as Gateway mode. + * The Gateway mode encapsulates IGMP/MLD traffic and decapsulates + * multicast traffic. + */ + AMT_MODE_GATEWAY = 0, + /* AMT interface works as Relay mode. + * The Relay mode encapsulates multicast traffic and decapsulates + * IGMP/MLD traffic. 
+ */ + AMT_MODE_RELAY, + __AMT_MODE_MAX, +}; + +#define AMT_MODE_MAX (__AMT_MODE_MAX - 1) + +enum { + IFLA_AMT_UNSPEC, + /* This attribute specify mode etier Gateway or Relay. */ + IFLA_AMT_MODE, + /* This attribute specify Relay port. + * AMT interface is created as Gateway mode, this attribute is used + * to specify relay(remote) port. + * AMT interface is created as Relay mode, this attribute is used + * as local port. + */ + IFLA_AMT_RELAY_PORT, + /* This attribute specify Gateway port. + * AMT interface is created as Gateway mode, this attribute is used + * as local port. + * AMT interface is created as Relay mode, this attribute is not used. + */ + IFLA_AMT_GATEWAY_PORT, + /* This attribute specify physical device */ + IFLA_AMT_LINK, + /* This attribute specify local ip address */ + IFLA_AMT_LOCAL_IP, + /* This attribute specify Relay ip address. + * So, this is not used by Relay. + */ + IFLA_AMT_REMOTE_IP, + /* This attribute specify Discovery ip address. + * When Gateway get started, it send discovery message to find the + * Relay's ip address. + * So, this is not used by Relay. + */ + IFLA_AMT_DISCOVERY_IP, + /* This attribute specify number of maximum tunnel. */ + IFLA_AMT_MAX_TUNNELS, + __IFLA_AMT_MAX, +}; + +#define IFLA_AMT_MAX (__IFLA_AMT_MAX - 1) + +#endif /* _UAPI_AMT_H_ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 791f31dd0abee7..ba5af15e25f5c1 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -906,6 +906,7 @@ enum bpf_map_type { BPF_MAP_TYPE_RINGBUF, BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, + BPF_MAP_TYPE_BLOOM_FILTER, }; /* Note that tracing related programs such as @@ -1274,6 +1275,13 @@ union bpf_attr { * struct stored as the * map value */ + /* Any per-map-type extra fields + * + * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the + * number of hash functions (if 0, the bloom filter will default + * to using 5 hash functions). + */ + __u64 map_extra; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -1629,7 +1637,7 @@ union bpf_attr { * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that - * all programs run with preemption disabled, which means that the + * all programs run with migration disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return @@ -4046,7 +4054,7 @@ union bpf_attr { * arguments. The *data* are a **u64** array and corresponding format string * values are stored in the array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* array. - * The *data_len* is the size of *data* in bytes. + * The *data_len* is the size of *data* in bytes - must be a multiple of 8. * * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. * Reading kernel memory may fail due to either invalid address or @@ -4751,7 +4759,8 @@ union bpf_attr { * Each format specifier in **fmt** corresponds to one u64 element * in the **data** array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* - * array. The *data_len* is the size of *data* in bytes. + * array. The *data_len* is the size of *data* in bytes - must be + * a multiple of 8. * * Formats **%s** and **%p{i,I}{4,6}** require to read kernel * memory. Reading kernel memory may fail due to either invalid @@ -4877,6 +4886,58 @@ union bpf_attr { * Get the struct pt_regs associated with **task**. 
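The map_extra documentation above is the userspace contract for bloom filter maps: the low 4 bits select the number of hash functions, with 5 used when left at zero. A minimal sketch of creating and exercising such a map from userspace, assuming a libbpf recent enough to expose map_extra through bpf_map_create() opts; error handling is trimmed:

#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_extra = 3);	/* low 4 bits: use 3 hash functions */
	__u64 value = 0xdeadbeef;
	int fd;

	/* bloom filter maps have no keys, only values */
	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, "demo_bloom",
			    0 /* key_size */, sizeof(value), 100, &opts);
	if (fd < 0) {
		fprintf(stderr, "map create failed: %d\n", fd);
		return 1;
	}

	/* push a value, then peek: 0 means "possibly present",
	 * -ENOENT means "definitely not present"
	 */
	bpf_map_update_elem(fd, NULL, &value, BPF_ANY);
	if (!bpf_map_lookup_elem(fd, NULL, &value))
		printf("value may be in the filter\n");

	return 0;
}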
* Return * A pointer to struct pt_regs. + * + * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) + * Description + * Get branch trace from hardware engines like Intel LBR. The + * hardware engine is stopped shortly after the helper is + * called. Therefore, the user need to filter branch entries + * based on the actual use case. To capture branch trace + * before the trigger point of the BPF program, the helper + * should be called at the beginning of the BPF program. + * + * The data is stored as struct perf_branch_entry into output + * buffer *entries*. *size* is the size of *entries* in bytes. + * *flags* is reserved for now and must be zero. + * + * Return + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-ENOENT** if architecture does not support branch records. + * + * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * Description + * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 + * to format and can handle more format args as a result. + * + * Arguments are to be used as in **bpf_seq_printf**\ () helper. + * Return + * The number of bytes written to the buffer, or a negative error + * in case of failure. + * + * struct unix_sock *bpf_skc_to_unix_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *unix_sock* pointer. + * Return + * *sk* if casting is valid, or **NULL** otherwise. + * + * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res) + * Description + * Get the address of a kernel symbol, returned in *res*. *res* is + * set to 0 if the symbol is not found. + * Return + * On success, zero. On error, a negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-EINVAL** if string *name* is not the same size as *name_sz*. + * + * **-ENOENT** if symbol is not found. + * + * **-EPERM** if caller does not have permission to obtain kernel address. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5055,6 +5116,10 @@ union bpf_attr { FN(get_func_ip), \ FN(get_attach_cookie), \ FN(task_pt_regs), \ + FN(get_branch_snapshot), \ + FN(trace_vprintk), \ + FN(skc_to_unix_sock), \ + FN(kallsyms_lookup_name), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5284,6 +5349,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; + __u32 :32; /* Padding, future use. */ + __u64 hwtstamp; }; struct bpf_tunnel_key { @@ -5577,6 +5644,7 @@ struct bpf_prog_info { __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; + __u32 verified_insns; } __attribute__((aligned(8))); struct bpf_map_info { @@ -5594,6 +5662,8 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; + __u32 :32; /* alignment pad */ + __u64 map_extra; } __attribute__((aligned(8))); struct bpf_btf_info { diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index d27b1708efe919..deb12f755f0fa4 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO and VAR. + * FUNC, FUNC_PROTO, VAR and DECL_TAG. * "type" is a type_id referring to another type. 
*/ union { @@ -56,25 +56,29 @@ struct btf_type { #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) -#define BTF_KIND_UNKN 0 /* Unknown */ -#define BTF_KIND_INT 1 /* Integer */ -#define BTF_KIND_PTR 2 /* Pointer */ -#define BTF_KIND_ARRAY 3 /* Array */ -#define BTF_KIND_STRUCT 4 /* Struct */ -#define BTF_KIND_UNION 5 /* Union */ -#define BTF_KIND_ENUM 6 /* Enumeration */ -#define BTF_KIND_FWD 7 /* Forward */ -#define BTF_KIND_TYPEDEF 8 /* Typedef */ -#define BTF_KIND_VOLATILE 9 /* Volatile */ -#define BTF_KIND_CONST 10 /* Const */ -#define BTF_KIND_RESTRICT 11 /* Restrict */ -#define BTF_KIND_FUNC 12 /* Function */ -#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ -#define BTF_KIND_VAR 14 /* Variable */ -#define BTF_KIND_DATASEC 15 /* Section */ -#define BTF_KIND_FLOAT 16 /* Floating point */ -#define BTF_KIND_MAX BTF_KIND_FLOAT -#define NR_BTF_KINDS (BTF_KIND_MAX + 1) +enum { + BTF_KIND_UNKN = 0, /* Unknown */ + BTF_KIND_INT = 1, /* Integer */ + BTF_KIND_PTR = 2, /* Pointer */ + BTF_KIND_ARRAY = 3, /* Array */ + BTF_KIND_STRUCT = 4, /* Struct */ + BTF_KIND_UNION = 5, /* Union */ + BTF_KIND_ENUM = 6, /* Enumeration */ + BTF_KIND_FWD = 7, /* Forward */ + BTF_KIND_TYPEDEF = 8, /* Typedef */ + BTF_KIND_VOLATILE = 9, /* Volatile */ + BTF_KIND_CONST = 10, /* Const */ + BTF_KIND_RESTRICT = 11, /* Restrict */ + BTF_KIND_FUNC = 12, /* Function */ + BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ + BTF_KIND_VAR = 14, /* Variable */ + BTF_KIND_DATASEC = 15, /* Section */ + BTF_KIND_FLOAT = 16, /* Floating point */ + BTF_KIND_DECL_TAG = 17, /* Decl Tag */ + + NR_BTF_KINDS, + BTF_KIND_MAX = NR_BTF_KINDS - 1, +}; /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. @@ -170,4 +174,15 @@ struct btf_var_secinfo { __u32 size; }; +/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe + * additional information related to the tag applied location. + * If component_idx == -1, the tag is applied to a struct, union, + * variable or function. Otherwise, it is applied to a struct/union + * member or a func argument, and component_idx indicates which member + * or argument (0 ... vlen-1). + */ +struct btf_decl_tag { + __s32 component_idx; +}; + #endif /* _UAPI__LINUX_BTF_H__ */ diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index f730d443b91846..75b85c60efb21f 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -101,6 +101,8 @@ struct can_ctrlmode { #define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */ #define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */ #define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */ +#define CAN_CTRLMODE_TDC_AUTO 0x200 /* CAN transiver automatically calculates TDCV */ +#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */ /* * CAN device statistics @@ -134,10 +136,35 @@ enum { IFLA_CAN_BITRATE_CONST, IFLA_CAN_DATA_BITRATE_CONST, IFLA_CAN_BITRATE_MAX, - __IFLA_CAN_MAX + IFLA_CAN_TDC, + + /* add new constants above here */ + __IFLA_CAN_MAX, + IFLA_CAN_MAX = __IFLA_CAN_MAX - 1 }; -#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1) +/* + * CAN FD Transmitter Delay Compensation (TDC) + * + * Please refer to struct can_tdc_const and can_tdc in + * include/linux/can/bittiming.h for further details. 
+ */ +enum { + IFLA_CAN_TDC_UNSPEC, + IFLA_CAN_TDC_TDCV_MIN, /* u32 */ + IFLA_CAN_TDC_TDCV_MAX, /* u32 */ + IFLA_CAN_TDC_TDCO_MIN, /* u32 */ + IFLA_CAN_TDC_TDCO_MAX, /* u32 */ + IFLA_CAN_TDC_TDCF_MIN, /* u32 */ + IFLA_CAN_TDC_TDCF_MAX, /* u32 */ + IFLA_CAN_TDC_TDCV, /* u32 */ + IFLA_CAN_TDC_TDCO, /* u32 */ + IFLA_CAN_TDC_TDCF, /* u32 */ + + /* add new constants above here */ + __IFLA_CAN_TDC, + IFLA_CAN_TDC_MAX = __IFLA_CAN_TDC - 1 +}; /* u16 termination range: 1..65535 Ohms */ #define CAN_TERMINATION_DISABLED 0 diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 32f53a0069d6cd..b897b80770f6be 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -551,6 +551,8 @@ enum devlink_attr { DEVLINK_ATTR_RATE_NODE_NAME, /* string */ DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */ + DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, /* u32 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index b6db6590baf0af..a2223b68545193 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -603,6 +603,7 @@ enum ethtool_link_ext_state { ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, ETHTOOL_LINK_EXT_STATE_OVERHEAT, + ETHTOOL_LINK_EXT_STATE_MODULE, }; /* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */ @@ -649,6 +650,11 @@ enum ethtool_link_ext_substate_cable_issue { ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE, }; +/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */ +enum ethtool_link_ext_substate_module { + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, +}; + #define ETH_GSTRING_LEN 32 /** @@ -706,6 +712,29 @@ enum ethtool_stringset { ETH_SS_COUNT }; +/** + * enum ethtool_module_power_mode_policy - plug-in module power mode policy + * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode. + * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host + * to high power mode when the first port using it is put administratively + * up and to low power mode when the last port using it is put + * administratively down. + */ +enum ethtool_module_power_mode_policy { + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO, +}; + +/** + * enum ethtool_module_power_mode - plug-in module power mode + * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode. + * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode. 
+ */ +enum ethtool_module_power_mode { + ETHTOOL_MODULE_POWER_MODE_LOW = 1, + ETHTOOL_MODULE_POWER_MODE_HIGH, +}; + /** * struct ethtool_gstrings - string set for data tagging * @cmd: Command number = %ETHTOOL_GSTRINGS diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 5545f1ca9237c0..ca5fbb59fa4212 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -47,6 +47,8 @@ enum { ETHTOOL_MSG_MODULE_EEPROM_GET, ETHTOOL_MSG_STATS_GET, ETHTOOL_MSG_PHC_VCLOCKS_GET, + ETHTOOL_MSG_MODULE_GET, + ETHTOOL_MSG_MODULE_SET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -90,6 +92,8 @@ enum { ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, ETHTOOL_MSG_STATS_GET_REPLY, ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, + ETHTOOL_MSG_MODULE_GET_REPLY, + ETHTOOL_MSG_MODULE_NTF, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -833,6 +837,19 @@ enum { ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1) }; +/* MODULE */ + +enum { + ETHTOOL_A_MODULE_UNSPEC, + ETHTOOL_A_MODULE_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_MODULE_POWER_MODE_POLICY, /* u8 */ + ETHTOOL_A_MODULE_POWER_MODE, /* u8 */ + + /* add new constants above here */ + __ETHTOOL_A_MODULE_CNT, + ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 5f589c7a83824b..5da4ee234e0b7e 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -86,6 +86,7 @@ * over Ethernet */ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h index bae14636a8c86f..829ffdfcacca86 100644 --- a/include/uapi/linux/ioam6_iptunnel.h +++ b/include/uapi/linux/ioam6_iptunnel.h @@ -9,9 +9,38 @@ #ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H #define _UAPI_LINUX_IOAM6_IPTUNNEL_H +/* Encap modes: + * - inline: direct insertion + * - encap: ip6ip6 encapsulation + * - auto: inline for local packets, encap for in-transit packets + */ +enum { + __IOAM6_IPTUNNEL_MODE_MIN, + + IOAM6_IPTUNNEL_MODE_INLINE, + IOAM6_IPTUNNEL_MODE_ENCAP, + IOAM6_IPTUNNEL_MODE_AUTO, + + __IOAM6_IPTUNNEL_MODE_MAX, +}; + +#define IOAM6_IPTUNNEL_MODE_MIN (__IOAM6_IPTUNNEL_MODE_MIN + 1) +#define IOAM6_IPTUNNEL_MODE_MAX (__IOAM6_IPTUNNEL_MODE_MAX - 1) + enum { IOAM6_IPTUNNEL_UNSPEC, + + /* Encap mode */ + IOAM6_IPTUNNEL_MODE, /* u8 */ + + /* Tunnel dst address. + * For encap,auto modes. 
+ */ + IOAM6_IPTUNNEL_DST, /* struct in6_addr */ + + /* IOAM Trace Header */ IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */ + __IOAM6_IPTUNNEL_MAX, }; diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index e42d13b55cf3ac..e00bbb9c47bb49 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -169,6 +169,7 @@ enum IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, IPV4_DEVCONF_DROP_GRATUITOUS_ARP, IPV4_DEVCONF_BC_FORWARDING, + IPV4_DEVCONF_ARP_EVICT_NOCARRIER, __IPV4_DEVCONF_MAX }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index b243a53fa985b3..d4178dace0bfbb 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -193,6 +193,7 @@ enum { DEVCONF_IOAM6_ENABLED, DEVCONF_IOAM6_ID, DEVCONF_IOAM6_ID_WIDE, + DEVCONF_NDISC_EVICT_NOCARRIER, DEVCONF_MAX }; diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h index 6acd4ccafbf71b..07b0318716fcc5 100644 --- a/include/uapi/linux/mctp.h +++ b/include/uapi/linux/mctp.h @@ -11,6 +11,7 @@ #include #include +#include typedef __u8 mctp_eid_t; @@ -28,6 +29,14 @@ struct sockaddr_mctp { __u8 __smctp_pad1; }; +struct sockaddr_mctp_ext { + struct sockaddr_mctp smctp_base; + int smctp_ifindex; + __u8 smctp_halen; + __u8 __smctp_pad0[3]; + __u8 smctp_haddr[MAX_ADDR_LEN]; +}; + #define MCTP_NET_ANY 0x0 #define MCTP_ADDR_NULL 0x00 @@ -36,4 +45,6 @@ struct sockaddr_mctp { #define MCTP_TAG_MASK 0x07 #define MCTP_TAG_OWNER 0x08 +#define MCTP_OPT_ADDR_EXT 1 + #endif /* __UAPI_MCTP_H */ diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index bdf77dffa5a4f7..c54e6eae5366f6 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -53,12 +53,14 @@ #define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ #define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */ #define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */ +#define MDIO_AN_CTRL2 64 /* AN THP bypass request control */ /* Media-dependent registers. */ #define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ #define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ #define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. * Lanes B-D are numbered 134-136. */ +#define MDIO_PMA_10GBR_FSRT_CSR 147 /* 10GBASE-R fast retrain status and control */ #define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ #define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ #define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ @@ -239,6 +241,9 @@ #define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ #define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */ +/* PMA 10GBASE-R Fast Retrain status and control register. */ +#define MDIO_PMA_10GBR_FSRT_ENABLE 0x0001 /* Fast retrain enable */ + /* PCS 10GBASE-R/-T status register 1. */ #define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */ @@ -247,6 +252,7 @@ #define MDIO_PCS_10GBRT_STAT2_BER 0x3f00 /* AN 10GBASE-T control register. */ +#define MDIO_AN_10GBT_CTRL_ADVFSRT2_5G 0x0020 /* Advertise 2.5GBASE-T fast retrain */ #define MDIO_AN_10GBT_CTRL_ADV2_5G 0x0080 /* Advertise 2.5GBASE-T */ #define MDIO_AN_10GBT_CTRL_ADV5G 0x0100 /* Advertise 5GBASE-T */ #define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */ @@ -289,6 +295,9 @@ #define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */ #define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */ +/* AN MultiGBASE-T AN control 2 */ +#define MDIO_AN_THP_BP2_5GT 0x0008 /* 2.5GT THP bypass request */ + /* 2.5G/5G Extended abilities register. 
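The sockaddr_mctp_ext and MCTP_OPT_ADDR_EXT additions above give MCTP sockets access to the physical addressing of a message (ifindex plus link-layer address) alongside the base EID addressing. A hedged userspace sketch, assuming uapi headers that already carry these definitions and with fallback defines for older libcs; a real program would bind() to a message type before receiving:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/mctp.h>		/* needs headers from a kernel with this series */

#ifndef AF_MCTP
#define AF_MCTP 45
#endif
#ifndef SOL_MCTP
#define SOL_MCTP 285
#endif

int main(void)
{
	struct sockaddr_mctp_ext ext;
	socklen_t addrlen = sizeof(ext);
	unsigned char buf[64];
	int one = 1, sd;
	ssize_t len;

	sd = socket(AF_MCTP, SOCK_DGRAM, 0);
	if (sd < 0)
		return 1;

	/* opt in: recvfrom() may now fill a sockaddr_mctp_ext (ifindex and
	 * link-layer address) instead of just the base sockaddr_mctp
	 */
	setsockopt(sd, SOL_MCTP, MCTP_OPT_ADDR_EXT, &one, sizeof(one));

	len = recvfrom(sd, buf, sizeof(buf), 0,
		       (struct sockaddr *)&ext, &addrlen);
	if (len >= 0 && addrlen >= sizeof(ext))
		printf("from ifindex %d, lladdr length %d\n",
		       ext.smctp_ifindex, (int)ext.smctp_halen);
	return 0;
}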
*/ #define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */ #define MDIO_PMA_NG_EXTABLE_5GBT 0x0002 /* 5GBASET ability */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index f66038b9551fa0..c8cc46f80a1619 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -4,6 +4,13 @@ #include #include +#include /* for sockaddr_in */ +#include /* for sockaddr_in6 */ +#include /* for sockaddr_storage and sa_family */ + +#ifndef __KERNEL__ +#include /* for struct sockaddr */ +#endif #define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0) #define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1) @@ -193,4 +200,32 @@ enum mptcp_event_attr { #define MPTCP_RST_EBADPERF 5 #define MPTCP_RST_EMIDDLEBOX 6 +struct mptcp_subflow_data { + __u32 size_subflow_data; /* size of this structure in userspace */ + __u32 num_subflows; /* must be 0, set by kernel */ + __u32 size_kernel; /* must be 0, set by kernel */ + __u32 size_user; /* size of one element in data[] */ +} __attribute__((aligned(8))); + +struct mptcp_subflow_addrs { + union { + __kernel_sa_family_t sa_family; + struct sockaddr sa_local; + struct sockaddr_in sin_local; + struct sockaddr_in6 sin6_local; + struct __kernel_sockaddr_storage ss_local; + }; + union { + struct sockaddr sa_remote; + struct sockaddr_in sin_remote; + struct sockaddr_in6 sin6_remote; + struct __kernel_sockaddr_storage ss_remote; + }; +}; + +/* MPTCP socket options */ +#define MPTCP_INFO 1 +#define MPTCP_TCPINFO 2 +#define MPTCP_SUBFLOW_ADDRS 3 + #endif /* _UAPI_MPTCP_H */ diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 00a60695fa5388..db05fb55055e94 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -31,6 +31,7 @@ enum { NDA_PROTOCOL, /* Originator of entry */ NDA_NH_ID, NDA_FDB_EXT_ATTRS, + NDA_FLAGS_EXT, __NDA_MAX }; @@ -40,14 +41,16 @@ enum { * Neighbor Cache Entry Flags */ -#define NTF_USE 0x01 -#define NTF_SELF 0x02 -#define NTF_MASTER 0x04 -#define NTF_PROXY 0x08 /* == ATF_PUBL */ -#define NTF_EXT_LEARNED 0x10 -#define NTF_OFFLOADED 0x20 -#define NTF_STICKY 0x40 -#define NTF_ROUTER 0x80 +#define NTF_USE (1 << 0) +#define NTF_SELF (1 << 1) +#define NTF_MASTER (1 << 2) +#define NTF_PROXY (1 << 3) /* == ATF_PUBL */ +#define NTF_EXT_LEARNED (1 << 4) +#define NTF_OFFLOADED (1 << 5) +#define NTF_STICKY (1 << 6) +#define NTF_ROUTER (1 << 7) +/* Extended flags under NDA_FLAGS_EXT: */ +#define NTF_EXT_MANAGED (1 << 0) /* * Neighbor Cache Entry States. @@ -65,12 +68,22 @@ enum { #define NUD_PERMANENT 0x80 #define NUD_NONE 0x00 -/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change - * and make no address resolution or NUD. - * NUD_PERMANENT also cannot be deleted by garbage collectors. +/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change and make no + * address resolution or NUD. + * + * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true + * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier + * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED + * flagged entries explicitly are (which is also consistent with the routing + * subsystem). + * * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry * states don't make sense and thus are ignored. Such entries don't age and * can roam. 
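The MPTCP_SUBFLOW_ADDRS option above is read with getsockopt() on an MPTCP socket: the caller leads the buffer with a struct mptcp_subflow_data describing the element size it was compiled against, and the kernel fills num_subflows plus one mptcp_subflow_addrs entry per subflow. A minimal sketch, assuming uapi headers that already carry these structures and with fallback defines for older libcs:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/mptcp.h>	/* needs headers from a kernel with this series */

#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static int dump_subflow_addrs(int fd)
{
	struct {
		struct mptcp_subflow_data d;
		struct mptcp_subflow_addrs addr[8];	/* room for up to 8 subflows */
	} buf;
	socklen_t olen = sizeof(buf);
	unsigned int i;

	memset(&buf, 0, sizeof(buf));
	buf.d.size_subflow_data = sizeof(buf.d);	/* header layout we were built with */
	buf.d.size_user = sizeof(struct mptcp_subflow_addrs);	/* per-element size we expect */

	if (getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &buf, &olen))
		return -1;

	for (i = 0; i < buf.d.num_subflows && i < 8; i++)
		printf("subflow %u: local family %d\n",
		       i, buf.addr[i].sa_local.sa_family);
	return 0;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0)
		return 1;
	/* on a connected socket this lists one entry per active subflow;
	 * here it only demonstrates the calling convention
	 */
	dump_subflow_addrs(fd);
	return 0;
}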
+ * + * NTF_EXT_MANAGED flagged neigbor entries are managed by the kernel on behalf + * of a user space control plane, and automatically refreshed so that (if + * possible) they remain in NUD_REACHABLE state. */ struct nda_cacheinfo { diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h index ef9a44286e23d5..53411ccc69db08 100644 --- a/include/uapi/linux/netfilter.h +++ b/include/uapi/linux/netfilter.h @@ -51,6 +51,7 @@ enum nf_inet_hooks { enum nf_dev_hooks { NF_NETDEV_INGRESS, + NF_NETDEV_EGRESS, NF_NETDEV_NUMHOOKS }; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e94d1fa554cb22..466fd3f4447c2f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -753,11 +753,13 @@ enum nft_dynset_attributes { * @NFT_PAYLOAD_LL_HEADER: link layer header * @NFT_PAYLOAD_NETWORK_HEADER: network header * @NFT_PAYLOAD_TRANSPORT_HEADER: transport header + * @NFT_PAYLOAD_INNER_HEADER: inner header / payload */ enum nft_payload_bases { NFT_PAYLOAD_LL_HEADER, NFT_PAYLOAD_NETWORK_HEADER, NFT_PAYLOAD_TRANSPORT_HEADER, + NFT_PAYLOAD_INNER_HEADER, }; /** @@ -896,7 +898,8 @@ enum nft_meta_keys { NFT_META_OIF, NFT_META_IIFNAME, NFT_META_OIFNAME, - NFT_META_IIFTYPE, + NFT_META_IFTYPE, +#define NFT_META_IIFTYPE NFT_META_IFTYPE NFT_META_OIFTYPE, NFT_META_SKUID, NFT_META_SKGID, @@ -923,6 +926,7 @@ enum nft_meta_keys { NFT_META_TIME_HOUR, NFT_META_SDIF, NFT_META_SDIFNAME, + __NFT_META_IIFTYPE, }; /** diff --git a/include/uapi/linux/nl80211-vnd-intel.h b/include/uapi/linux/nl80211-vnd-intel.h index 0bf177b84fd926..4ed7d0b2451221 100644 --- a/include/uapi/linux/nl80211-vnd-intel.h +++ b/include/uapi/linux/nl80211-vnd-intel.h @@ -13,6 +13,35 @@ * enum iwl_mvm_vendor_cmd - supported vendor commands * @IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO: reports CSME connection info. * @IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP: asks for ownership on the device. + * This is useful when the CSME firmware owns the device and the kernel + * wants to use it. In case the CSME firmware has no connection active the + * kernel will manage on its own to get ownership of the device. + * When the CSME firmware has an active connection, the user space + * involvement is required. The kernel will assert the RFKILL signal with + * the "device not owned" reason so that nobody can touch the device. 
Then + * the user space can run the following flow to be able to get connected + * to the very same AP the CSME firmware is currently connected to: + * + * 1) The user space (NetworkManager) boots and sees that the device is + * in RFKILL because the host doesn't own the device + * 2) The user space asks the kernel what AP the CSME firmware is + * connected to (with %IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO) + * 3) The user space checks if it has a profile that matches the reply + * from the CSME firmware + * 4) The user space installs a network to the wpa_supplicant with a + * specific BSSID and a specific frequency + * 5) The user space prevents any type of full scan + * 6) The user space asks iwlmei to request ownership on the device (with + * this command) + * 7) iwlmei requests ownership from the CSME firmware + * 8) The CSME firmware grants ownership + * 9) iwlmei tells iwlwifi to lift the RFKILL + * 10) RFKILL OFF is reported to user space + * 11) The host boots the device, loads the firwmare, and connects to a + * specific BSSID without scanning including IP as fast as it can + * 12) The host reports to the CSME firmware that there is a connection + * 13) The TCP connection is preserved and the host has connectivity + * * @IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT: notifies if roaming is allowed. * It contains a &IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN and a * &IWL_MVM_VENDOR_ATTR_VIF_ADDR attributes. diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c2efea98e060b3..61cab81e920de9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -300,6 +300,29 @@ * the interface goes down. */ +/** + * DOC: FILS shared key crypto offload + * + * This feature is applicable to drivers running in AP mode. + * + * FILS shared key crypto offload can be advertised by drivers by setting + * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD flag. The drivers that support + * FILS shared key crypto offload should be able to encrypt and decrypt + * association frames for FILS shared key authentication as per IEEE 802.11ai. + * With this capability, for FILS key derivation, drivers depend on userspace. + * + * After FILS key derivation, userspace shares the FILS AAD details with the + * driver and the driver stores the same to use in decryption of association + * request and in encryption of association response. The below parameters + * should be given to the driver in %NL80211_CMD_SET_FILS_AAD. + * %NL80211_ATTR_MAC - STA MAC address, used for storing FILS AAD per STA + * %NL80211_ATTR_FILS_KEK - Used for encryption or decryption + * %NL80211_ATTR_FILS_NONCES - Used for encryption or decryption + * (STA Nonce 16 bytes followed by AP Nonce 16 bytes) + * + * Once the association is done, the driver cleans the FILS AAD data. + */ + /** * enum nl80211_commands - supported nl80211 commands * @@ -337,7 +360,10 @@ * @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from * userspace to request deletion of a virtual interface, then requires - * attribute %NL80211_ATTR_IFINDEX. + * attribute %NL80211_ATTR_IFINDEX. If multiple BSSID advertisements are + * enabled using %NL80211_ATTR_MBSSID_CONFIG, %NL80211_ATTR_MBSSID_ELEMS, + * and if this command is used for the transmitting interface, then all + * the non-transmitting interfaces are deleted as well. * * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. 
@@ -1200,6 +1226,12 @@ * @NL80211_CMD_COLOR_CHANGE_COMPLETED: Notify userland that the color change * has completed * + * @NL80211_CMD_SET_FILS_AAD: Set FILS AAD data to the driver using - + * &NL80211_ATTR_MAC - for STA MAC address + * &NL80211_ATTR_FILS_KEK - for KEK + * &NL80211_ATTR_FILS_NONCES - for FILS Nonces + * (STA Nonce 16 bytes followed by AP Nonce 16 bytes) + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1440,6 +1472,8 @@ enum nl80211_commands { NL80211_CMD_COLOR_CHANGE_ABORTED, NL80211_CMD_COLOR_CHANGE_COMPLETED, + NL80211_CMD_SET_FILS_AAD, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2593,6 +2627,18 @@ enum nl80211_commands { * @NL80211_ATTR_COLOR_CHANGE_ELEMS: Nested set of attributes containing the IE * information for the time while performing a color switch. * + * @NL80211_ATTR_MBSSID_CONFIG: Nested attribute for multiple BSSID + * advertisements (MBSSID) parameters in AP mode. + * Kernel uses this attribute to indicate the driver's support for MBSSID + * and enhanced multi-BSSID advertisements (EMA AP) to the userspace. + * Userspace should use this attribute to configure per interface MBSSID + * parameters. + * See &enum nl80211_mbssid_config_attributes for details. + * + * @NL80211_ATTR_MBSSID_ELEMS: Nested parameter to pass multiple BSSID elements. + * Mandatory parameter for the transmitting interface to enable MBSSID. + * Optional for the non-transmitting interfaces. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3096,6 +3142,9 @@ enum nl80211_attrs { NL80211_ATTR_COLOR_CHANGE_COLOR, NL80211_ATTR_COLOR_CHANGE_ELEMS, + NL80211_ATTR_MBSSID_CONFIG, + NL80211_ATTR_MBSSID_ELEMS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4929,6 +4978,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) * @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs + * @NL80211_BAND_LC: light communication band (placeholder) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4938,6 +4988,7 @@ enum nl80211_band { NL80211_BAND_60GHZ, NL80211_BAND_6GHZ, NL80211_BAND_S1GHZ, + NL80211_BAND_LC, NUM_NL80211_BANDS, }; @@ -5995,6 +6046,11 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_BSS_COLOR: The driver supports BSS color collision * detection and change announcemnts. * + * @NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD: Driver running in AP mode supports + * FILS encryption and decryption for (Re)Association Request and Response + * frames. Userspace has to share FILS AAD details to the driver by using + * @NL80211_CMD_SET_FILS_AAD. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
*/ @@ -6060,6 +6116,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SECURE_RTT, NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE, NL80211_EXT_FEATURE_BSS_COLOR, + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -7349,4 +7406,60 @@ enum nl80211_sar_specs_attrs { NL80211_SAR_ATTR_SPECS_MAX = __NL80211_SAR_ATTR_SPECS_LAST - 1, }; +/** + * enum nl80211_mbssid_config_attributes - multiple BSSID (MBSSID) and enhanced + * multi-BSSID advertisements (EMA) in AP mode. + * Kernel uses some of these attributes to advertise driver's support for + * MBSSID and EMA. + * Remaining attributes should be used by the userspace to configure the + * features. + * + * @__NL80211_MBSSID_CONFIG_ATTR_INVALID: Invalid + * + * @NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES: Used by the kernel to advertise + * the maximum number of MBSSID interfaces supported by the driver. + * Driver should indicate MBSSID support by setting + * wiphy->mbssid_max_interfaces to a value more than or equal to 2. + * + * @NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY: Used by the kernel + * to advertise the maximum profile periodicity supported by the driver + * if EMA is enabled. Driver should indicate EMA support to the userspace + * by setting wiphy->ema_max_profile_periodicity to + * a non-zero value. + * + * @NL80211_MBSSID_CONFIG_ATTR_INDEX: Mandatory parameter to pass the index of + * this BSS (u8) in the multiple BSSID set. + * Value must be set to 0 for the transmitting interface and non-zero for + * all non-transmitting interfaces. The userspace will be responsible + * for using unique indices for the interfaces. + * Range: 0 to wiphy->mbssid_max_interfaces-1. + * + * @NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX: Mandatory parameter for + * a non-transmitted profile which provides the interface index (u32) of + * the transmitted profile. The value must match one of the interface + * indices advertised by the kernel. Optional if the interface being set up + * is the transmitting one, however, if provided then the value must match + * the interface index of the same. + * + * @NL80211_MBSSID_CONFIG_ATTR_EMA: Flag used to enable EMA AP feature. + * Setting this flag is permitted only if the driver advertises EMA support + * by setting wiphy->ema_max_profile_periodicity to non-zero. 
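As a concrete, hedged illustration of the MBSSID attributes described above, the nested NL80211_ATTR_MBSSID_CONFIG could be assembled with libnl-3 roughly as follows; that it is attached to the AP start request userspace already builds is an assumption rather than something this diff spells out.

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* bss_index, tx_ifindex and use_ema come from the caller's configuration */
static int put_mbssid_config(struct nl_msg *msg, __u8 bss_index,
			     __u32 tx_ifindex, int use_ema)
{
	struct nlattr *cfg = nla_nest_start(msg, NL80211_ATTR_MBSSID_CONFIG);

	if (!cfg)
		return -ENOMEM;
	nla_put_u8(msg, NL80211_MBSSID_CONFIG_ATTR_INDEX, bss_index);
	if (bss_index)	/* non-transmitting profiles point at the TX interface */
		nla_put_u32(msg, NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX, tx_ifindex);
	if (use_ema)
		nla_put_flag(msg, NL80211_MBSSID_CONFIG_ATTR_EMA);
	nla_nest_end(msg, cfg);
	return 0;
}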
+ * + * @__NL80211_MBSSID_CONFIG_ATTR_LAST: Internal + * @NL80211_MBSSID_CONFIG_ATTR_MAX: highest attribute + */ +enum nl80211_mbssid_config_attributes { + __NL80211_MBSSID_CONFIG_ATTR_INVALID, + + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES, + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY, + NL80211_MBSSID_CONFIG_ATTR_INDEX, + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX, + NL80211_MBSSID_CONFIG_ATTR_EMA, + + /* keep last */ + __NL80211_MBSSID_CONFIG_ATTR_LAST, + NL80211_MBSSID_CONFIG_ATTR_MAX = __NL80211_MBSSID_CONFIG_ATTR_LAST - 1, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index ec88590b319844..f292b467b27fcc 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -840,6 +840,8 @@ enum { TCA_FQ_CODEL_CE_THRESHOLD, TCA_FQ_CODEL_DROP_BATCH_SIZE, TCA_FQ_CODEL_MEMORY_LIMIT, + TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, + TCA_FQ_CODEL_CE_THRESHOLD_MASK, __TCA_FQ_CODEL_MAX }; diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 0f7f87c70baf59..20f33b27787f5f 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -38,6 +38,9 @@ enum { /* SMC PNET Table commands */ #define SMC_GENL_FAMILY_VERSION 1 #define SMC_PCI_ID_STR_LEN 16 /* Max length of pci id string */ +#define SMC_MAX_HOSTNAME_LEN 32 /* Max length of the hostname */ +#define SMC_MAX_UEID 4 /* Max number of user EIDs */ +#define SMC_MAX_EID_LEN 32 /* Max length of an EID */ /* SMC_GENL_FAMILY commands */ enum { @@ -49,6 +52,13 @@ enum { SMC_NETLINK_GET_DEV_SMCR, SMC_NETLINK_GET_STATS, SMC_NETLINK_GET_FBACK_STATS, + SMC_NETLINK_DUMP_UEID, + SMC_NETLINK_ADD_UEID, + SMC_NETLINK_REMOVE_UEID, + SMC_NETLINK_FLUSH_UEID, + SMC_NETLINK_DUMP_SEID, + SMC_NETLINK_ENABLE_SEID, + SMC_NETLINK_DISABLE_SEID, }; /* SMC_GENL_FAMILY top level attributes */ @@ -74,17 +84,28 @@ enum { SMC_NLA_SYS_IS_ISM_V2, /* u8 */ SMC_NLA_SYS_LOCAL_HOST, /* string */ SMC_NLA_SYS_SEID, /* string */ + SMC_NLA_SYS_IS_SMCR_V2, /* u8 */ __SMC_NLA_SYS_MAX, SMC_NLA_SYS_MAX = __SMC_NLA_SYS_MAX - 1 }; -/* SMC_NLA_LGR_V2 nested attributes */ +/* SMC_NLA_LGR_D_V2_COMMON and SMC_NLA_LGR_R_V2_COMMON nested attributes */ enum { SMC_NLA_LGR_V2_VER, /* u8 */ SMC_NLA_LGR_V2_REL, /* u8 */ SMC_NLA_LGR_V2_OS, /* u8 */ SMC_NLA_LGR_V2_NEG_EID, /* string */ SMC_NLA_LGR_V2_PEER_HOST, /* string */ + __SMC_NLA_LGR_V2_MAX, + SMC_NLA_LGR_V2_MAX = __SMC_NLA_LGR_V2_MAX - 1 +}; + +/* SMC_NLA_LGR_R_V2 nested attributes */ +enum { + SMC_NLA_LGR_R_V2_UNSPEC, + SMC_NLA_LGR_R_V2_DIRECT, /* u8 */ + __SMC_NLA_LGR_R_V2_MAX, + SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1 }; /* SMC_GEN_LGR_SMCR attributes */ @@ -96,6 +117,8 @@ enum { SMC_NLA_LGR_R_PNETID, /* string */ SMC_NLA_LGR_R_VLAN_ID, /* u8 */ SMC_NLA_LGR_R_CONNS_NUM, /* u32 */ + SMC_NLA_LGR_R_V2_COMMON, /* nest */ + SMC_NLA_LGR_R_V2, /* nest */ __SMC_NLA_LGR_R_MAX, SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1 }; @@ -128,7 +151,7 @@ enum { SMC_NLA_LGR_D_PNETID, /* string */ SMC_NLA_LGR_D_CHID, /* u16 */ SMC_NLA_LGR_D_PAD, /* flag */ - SMC_NLA_LGR_V2, /* nest */ + SMC_NLA_LGR_D_V2_COMMON, /* nest */ __SMC_NLA_LGR_D_MAX, SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1 }; @@ -242,4 +265,21 @@ enum { __SMC_NLA_FBACK_STATS_MAX, SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1 }; + +/* SMC_NETLINK_UEID attributes */ +enum { + SMC_NLA_EID_TABLE_UNSPEC, + SMC_NLA_EID_TABLE_ENTRY, /* string */ + __SMC_NLA_EID_TABLE_MAX, + SMC_NLA_EID_TABLE_MAX = __SMC_NLA_EID_TABLE_MAX - 1 +}; + +/* SMC_NETLINK_SEID attributes */ +enum { + 
SMC_NLA_SEID_UNSPEC, + SMC_NLA_SEID_ENTRY, /* string */ + SMC_NLA_SEID_ENABLED, /* u8 */ + __SMC_NLA_SEID_TABLE_MAX, + SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1 +}; #endif /* _UAPI_LINUX_SMC_H */ diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 1e05d3caa712ff..6a3b194c50fe74 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -482,6 +482,7 @@ enum NET_IPV4_CONF_PROMOTE_SECONDARIES=20, NET_IPV4_CONF_ARP_ACCEPT=21, NET_IPV4_CONF_ARP_NOTIFY=22, + NET_IPV4_CONF_ARP_EVICT_NOCARRIER=23, }; /* /proc/sys/net/ipv4/netfilter */ diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index 0d54baea1d8dbe..5f38be0ec0f31a 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -84,6 +84,20 @@ #define TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE 16 #define TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE 8 +#define TLS_CIPHER_SM4_GCM 55 +#define TLS_CIPHER_SM4_GCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8 + +#define TLS_CIPHER_SM4_CCM 56 +#define TLS_CIPHER_SM4_CCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8 + #define TLS_SET_RECORD_TYPE 1 #define TLS_GET_RECORD_TYPE 2 @@ -124,6 +138,22 @@ struct tls12_crypto_info_chacha20_poly1305 { unsigned char rec_seq[TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE]; }; +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE]; +}; + enum { TLS_INFO_UNSPEC, TLS_INFO_VERSION, diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h index 46918a1852d7b7..c60ca33eac594b 100644 --- a/include/uapi/linux/vm_sockets.h +++ b/include/uapi/linux/vm_sockets.h @@ -64,7 +64,7 @@ * timeout for a STREAM socket. */ -#define SO_VM_SOCKETS_CONNECT_TIMEOUT 6 +#define SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD 6 /* Option name for using non-blocking send/receive. Use as the option name * for setsockopt(3) or getsockopt(3) to set or get the non-blocking @@ -81,6 +81,17 @@ #define SO_VM_SOCKETS_NONBLOCK_TXRX 7 +#define SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW 8 + +#if !defined(__KERNEL__) +#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) +#define SO_VM_SOCKETS_CONNECT_TIMEOUT SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD +#else +#define SO_VM_SOCKETS_CONNECT_TIMEOUT \ + (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD : SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW) +#endif +#endif + /* The vSocket equivalent of INADDR_ANY. This works for the svm_cid field of * sockaddr_vm and indicates the context ID of the current endpoint. 
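One practical note on the vsock change above: source code keeps using SO_VM_SOCKETS_CONNECT_TIMEOUT, and the header resolves it to the _OLD or _NEW option number based on the size of the userspace time_t. A small hedged sketch, assuming the option level is AF_VSOCK as for the other vm_sockets options:

#include <sys/socket.h>
#include <sys/time.h>
#include <linux/vm_sockets.h>

static int set_vsock_connect_timeout(int vsock_fd, time_t secs)
{
	struct timeval tv = { .tv_sec = secs, .tv_usec = 0 };

	/* SO_VM_SOCKETS_CONNECT_TIMEOUT expands to the _OLD or _NEW name
	 * depending on whether time_t matches __kernel_long_t, so existing
	 * callers need no change. */
	return setsockopt(vsock_fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
			  &tv, sizeof(tv));
}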
*/ diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig index a82d6de865227c..d24d518ddd6344 100644 --- a/kernel/bpf/Kconfig +++ b/kernel/bpf/Kconfig @@ -64,6 +64,7 @@ config BPF_JIT_DEFAULT_ON config BPF_UNPRIV_DEFAULT_OFF bool "Disable unprivileged BPF by default" + default y depends on BPF_SYSCALL help Disables unprivileged BPF by default by setting the corresponding @@ -72,6 +73,12 @@ config BPF_UNPRIV_DEFAULT_OFF disable it by setting it to 1 (from which no other transition to 0 is possible anymore). + Unprivileged BPF could be used to exploit certain potential + speculative execution side-channel vulnerabilities on unmitigated + affected hardware. + + If you are unsure how to answer this question, answer Y. + source "kernel/bpf/preload/Kconfig" config BPF_LSM diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 7f33098ca63fb5..cf6ca339f3cd8d 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -7,7 +7,7 @@ endif CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy) obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o -obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o +obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 447def54054442..c7a5be3bf8bea3 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -645,7 +645,7 @@ static const struct bpf_iter_seq_info iter_seq_info = { .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), }; -static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, +static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { u32 i, key, num_elems = 0; @@ -668,9 +668,8 @@ static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, val = array->value + array->elem_size * i; num_elems++; key = i; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)&key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)&key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) break; diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c new file mode 100644 index 00000000000000..277a05e9c98493 --- /dev/null +++ b/kernel/bpf/bloom_filter.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include +#include +#include +#include + +#define BLOOM_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK) + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + /* If the size of the values in the bloom filter is u32 aligned, + * then it is more performant to use jhash2 as the underlying hash + * function, else we use jhash. This tracks the number of u32s + * in an u32-aligned value size. If the value size is not u32 aligned, + * this will be 0. 
+ */ + u32 aligned_u32_count; + u32 nr_hash_funcs; + unsigned long bitset[]; +}; + +static u32 hash(struct bpf_bloom_filter *bloom, void *value, + u32 value_size, u32 index) +{ + u32 h; + + if (bloom->aligned_u32_count) + h = jhash2(value, bloom->aligned_u32_count, + bloom->hash_seed + index); + else + h = jhash(value, value_size, bloom->hash_seed + index); + + return h & bloom->bitset_mask; +} + +static int bloom_map_peek_elem(struct bpf_map *map, void *value) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + u32 i, h; + + for (i = 0; i < bloom->nr_hash_funcs; i++) { + h = hash(bloom, value, map->value_size, i); + if (!test_bit(h, bloom->bitset)) + return -ENOENT; + } + + return 0; +} + +static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + u32 i, h; + + if (flags != BPF_ANY) + return -EINVAL; + + for (i = 0; i < bloom->nr_hash_funcs; i++) { + h = hash(bloom, value, map->value_size, i); + set_bit(h, bloom->bitset); + } + + return 0; +} + +static int bloom_map_pop_elem(struct bpf_map *map, void *value) +{ + return -EOPNOTSUPP; +} + +static int bloom_map_delete_elem(struct bpf_map *map, void *value) +{ + return -EOPNOTSUPP; +} + +static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) +{ + u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits; + int numa_node = bpf_map_attr_numa_node(attr); + struct bpf_bloom_filter *bloom; + + if (!bpf_capable()) + return ERR_PTR(-EPERM); + + if (attr->key_size != 0 || attr->value_size == 0 || + attr->max_entries == 0 || + attr->map_flags & ~BLOOM_CREATE_FLAG_MASK || + !bpf_map_flags_access_ok(attr->map_flags) || + /* The lower 4 bits of map_extra (0xF) specify the number + * of hash functions + */ + (attr->map_extra & ~0xF)) + return ERR_PTR(-EINVAL); + + nr_hash_funcs = attr->map_extra; + if (nr_hash_funcs == 0) + /* Default to using 5 hash functions if unspecified */ + nr_hash_funcs = 5; + + /* For the bloom filter, the optimal bit array size that minimizes the + * false positive probability is n * k / ln(2) where n is the number of + * expected entries in the bloom filter and k is the number of hash + * functions. We use 7 / 5 to approximate 1 / ln(2). + * + * We round this up to the nearest power of two to enable more efficient + * hashing using bitmasks. The bitmask will be the bit array size - 1. + * + * If this overflows a u32, the bit array size will have 2^32 (4 + * GB) bits. 
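To make the sizing above concrete: with max_entries = 1000 and 3 hash functions, nr_bits is 1000 * 3 * 7 / 5 = 4200, which rounds up to 8192 bits (1 KiB) of bit array. From userspace the new map type can be driven through the plain bpf(2) syscall, since key_size is 0 and elements are pushed and peeked by value. The sketch below is illustrative only and assumes a uapi linux/bpf.h that already carries BPF_MAP_TYPE_BLOOM_FILTER and map_extra.

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bloom_example(void)
{
	union bpf_attr attr;
	__u32 val = 42;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_BLOOM_FILTER;
	attr.key_size = 0;		/* bloom filters are keyless */
	attr.value_size = sizeof(val);
	attr.max_entries = 1000;	/* expected number of elements */
	attr.map_extra = 3;		/* lower 4 bits: number of hash functions */

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		return -errno;

	/* add an element: the update path pushes, no key is passed */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.value = (__u64)(unsigned long)&val;
	attr.flags = BPF_ANY;
	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		return -errno;

	/* query: lookup copies the candidate value in and peeks;
	 * -ENOENT means "definitely absent", 0 means "possibly present" */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.value = (__u64)(unsigned long)&val;
	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

Inside BPF programs the same map is exercised with bpf_map_push_elem() and bpf_map_peek_elem(), which is what the verifier changes further down enforce.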
+ */ + if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) || + check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) || + nr_bits > (1UL << 31)) { + /* The bit array size is 2^32 bits but to avoid overflowing the + * u32, we use U32_MAX, which will round up to the equivalent + * number of bytes + */ + bitset_bytes = BITS_TO_BYTES(U32_MAX); + bitset_mask = U32_MAX; + } else { + if (nr_bits <= BITS_PER_LONG) + nr_bits = BITS_PER_LONG; + else + nr_bits = roundup_pow_of_two(nr_bits); + bitset_bytes = BITS_TO_BYTES(nr_bits); + bitset_mask = nr_bits - 1; + } + + bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long)); + bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node); + + if (!bloom) + return ERR_PTR(-ENOMEM); + + bpf_map_init_from_attr(&bloom->map, attr); + + bloom->nr_hash_funcs = nr_hash_funcs; + bloom->bitset_mask = bitset_mask; + + /* Check whether the value size is u32-aligned */ + if ((attr->value_size & (sizeof(u32) - 1)) == 0) + bloom->aligned_u32_count = + attr->value_size / sizeof(u32); + + if (!(attr->map_flags & BPF_F_ZERO_SEED)) + bloom->hash_seed = get_random_int(); + + return &bloom->map; +} + +static void bloom_map_free(struct bpf_map *map) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + + bpf_map_area_free(bloom); +} + +static void *bloom_map_lookup_elem(struct bpf_map *map, void *key) +{ + /* The eBPF program should use map_peek_elem instead */ + return ERR_PTR(-EINVAL); +} + +static int bloom_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + /* The eBPF program should use map_push_elem instead */ + return -EINVAL; +} + +static int bloom_map_check_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type) +{ + /* Bloom filter maps are keyless */ + return btf_type_is_void(key_type) ? 0 : -EINVAL; +} + +static int bpf_bloom_map_btf_id; +const struct bpf_map_ops bloom_filter_map_ops = { + .map_meta_equal = bpf_map_meta_equal, + .map_alloc = bloom_map_alloc, + .map_free = bloom_map_free, + .map_push_elem = bloom_map_push_elem, + .map_peek_elem = bloom_map_peek_elem, + .map_pop_elem = bloom_map_pop_elem, + .map_lookup_elem = bloom_map_lookup_elem, + .map_update_elem = bloom_map_update_elem, + .map_delete_elem = bloom_map_delete_elem, + .map_check_btf = bloom_map_check_btf, + .map_btf_name = "bpf_bloom_filter", + .map_btf_id = &bpf_bloom_map_btf_id, +}; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 9abcc33f02cf01..8ecfe475276977 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -93,6 +93,9 @@ const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = { }; const struct bpf_prog_ops bpf_struct_ops_prog_ops = { +#ifdef CONFIG_NET + .test_run = bpf_struct_ops_test_run, +#endif }; static const struct btf_type *module_type; @@ -312,6 +315,20 @@ static int check_zero_holes(const struct btf_type *t, void *data) return 0; } +int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, + struct bpf_prog *prog, + const struct btf_func_model *model, + void *image, void *image_end) +{ + u32 flags; + + tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; + tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; + flags = model->ret_size > 0 ? 
BPF_TRAMP_F_RET_FENTRY_RET : 0; + return arch_prepare_bpf_trampoline(NULL, image, image_end, + model, flags, tprogs, NULL); +} + static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { @@ -323,7 +340,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, struct bpf_tramp_progs *tprogs = NULL; void *udata, *kdata; int prog_fd, err = 0; - void *image; + void *image, *image_end; u32 i; if (flags) @@ -363,12 +380,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, udata = &uvalue->data; kdata = &kvalue->data; image = st_map->image; + image_end = st_map->image + PAGE_SIZE; for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; u32 moff; - u32 flags; moff = btf_member_bit_offset(t, member) / 8; ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); @@ -430,14 +447,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, goto reset_unlock; } - tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; - tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; - flags = st_ops->func_models[i].ret_size > 0 ? - BPF_TRAMP_F_RET_FENTRY_RET : 0; - err = arch_prepare_bpf_trampoline(NULL, image, - st_map->image + PAGE_SIZE, - &st_ops->func_models[i], - flags, tprogs, NULL); + err = bpf_struct_ops_prepare_trampoline(tprogs, prog, + &st_ops->func_models[i], + image, image_end); if (err < 0) goto reset_unlock; diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h index 066d83ea1c99c5..5678a9ddf8178c 100644 --- a/kernel/bpf/bpf_struct_ops_types.h +++ b/kernel/bpf/bpf_struct_ops_types.h @@ -2,6 +2,9 @@ /* internal file - do not include directly */ #ifdef CONFIG_BPF_JIT +#ifdef CONFIG_NET +BPF_STRUCT_OPS_TYPE(bpf_dummy_ops) +#endif #ifdef CONFIG_INET #include BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dfe61df4f974d1..dbc3ad07e21b66 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -281,6 +281,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; const char *btf_type_str(const struct btf_type *t) @@ -459,6 +460,17 @@ static bool btf_type_is_datasec(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; } +static bool btf_type_is_decl_tag(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG; +} + +static bool btf_type_is_decl_tag_target(const struct btf_type *t) +{ + return btf_type_is_func(t) || btf_type_is_struct(t) || + btf_type_is_var(t) || btf_type_is_typedef(t); +} + u32 btf_nr_types(const struct btf *btf) { u32 total = 0; @@ -537,6 +549,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, static bool btf_type_is_resolve_source_only(const struct btf_type *t) { return btf_type_is_var(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -563,6 +576,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t) btf_type_is_struct(t) || btf_type_is_array(t) || btf_type_is_var(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -616,6 +630,11 @@ static const struct btf_var *btf_type_var(const struct btf_type *t) return (const struct btf_var *)(t + 1); } +static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t) +{ + return (const struct btf_decl_tag *)(t + 1); +} + static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) { return 
kind_ops[BTF_INFO_KIND(t->info)]; @@ -3801,6 +3820,110 @@ static const struct btf_kind_operations float_ops = { .show = btf_df_show, }; +static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, + const struct btf_type *t, + u32 meta_left) +{ + const struct btf_decl_tag *tag; + u32 meta_needed = sizeof(*tag); + s32 component_idx; + const char *value; + + if (meta_left < meta_needed) { + btf_verifier_log_basic(env, t, + "meta_left:%u meta_needed:%u", + meta_left, meta_needed); + return -EINVAL; + } + + value = btf_name_by_offset(env->btf, t->name_off); + if (!value || !value[0]) { + btf_verifier_log_type(env, t, "Invalid value"); + return -EINVAL; + } + + if (btf_type_vlen(t)) { + btf_verifier_log_type(env, t, "vlen != 0"); + return -EINVAL; + } + + if (btf_type_kflag(t)) { + btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); + return -EINVAL; + } + + component_idx = btf_type_decl_tag(t)->component_idx; + if (component_idx < -1) { + btf_verifier_log_type(env, t, "Invalid component_idx"); + return -EINVAL; + } + + btf_verifier_log_type(env, t, NULL); + + return meta_needed; +} + +static int btf_decl_tag_resolve(struct btf_verifier_env *env, + const struct resolve_vertex *v) +{ + const struct btf_type *next_type; + const struct btf_type *t = v->t; + u32 next_type_id = t->type; + struct btf *btf = env->btf; + s32 component_idx; + u32 vlen; + + next_type = btf_type_by_id(btf, next_type_id); + if (!next_type || !btf_type_is_decl_tag_target(next_type)) { + btf_verifier_log_type(env, v->t, "Invalid type_id"); + return -EINVAL; + } + + if (!env_type_is_resolve_sink(env, next_type) && + !env_type_is_resolved(env, next_type_id)) + return env_stack_push(env, next_type, next_type_id); + + component_idx = btf_type_decl_tag(t)->component_idx; + if (component_idx != -1) { + if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { + btf_verifier_log_type(env, v->t, "Invalid component_idx"); + return -EINVAL; + } + + if (btf_type_is_struct(next_type)) { + vlen = btf_type_vlen(next_type); + } else { + /* next_type should be a function */ + next_type = btf_type_by_id(btf, next_type->type); + vlen = btf_type_vlen(next_type); + } + + if ((u32)component_idx >= vlen) { + btf_verifier_log_type(env, v->t, "Invalid component_idx"); + return -EINVAL; + } + } + + env_stack_pop_resolved(env, next_type_id, 0); + + return 0; +} + +static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) +{ + btf_verifier_log(env, "type=%u component_idx=%d", t->type, + btf_type_decl_tag(t)->component_idx); +} + +static const struct btf_kind_operations decl_tag_ops = { + .check_meta = btf_decl_tag_check_meta, + .resolve = btf_decl_tag_resolve, + .check_member = btf_df_check_member, + .check_kflag_member = btf_df_check_kflag_member, + .log_details = btf_decl_tag_log, + .show = btf_df_show, +}; + static int btf_func_proto_check(struct btf_verifier_env *env, const struct btf_type *t) { @@ -3935,6 +4058,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { [BTF_KIND_VAR] = &var_ops, [BTF_KIND_DATASEC] = &datasec_ops, [BTF_KIND_FLOAT] = &float_ops, + [BTF_KIND_DECL_TAG] = &decl_tag_ops, }; static s32 btf_check_meta(struct btf_verifier_env *env, @@ -4019,6 +4143,10 @@ static bool btf_resolve_valid(struct btf_verifier_env *env, return !btf_resolved_type_id(btf, type_id) && !btf_resolved_type_size(btf, type_id); + if (btf_type_is_decl_tag(t)) + return btf_resolved_type_id(btf, type_id) && + !btf_resolved_type_size(btf, type_id); + if (btf_type_is_modifier(t) || 
btf_type_is_ptr(t) || btf_type_is_var(t)) { t = btf_type_id_resolve(btf, &type_id); @@ -6215,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { }; BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct) + +/* BTF ID set registration API for modules */ + +struct kfunc_btf_id_list { + struct list_head list; + struct mutex mutex; +}; + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + +void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ + mutex_lock(&l->mutex); + list_add(&s->list, &l->list); + mutex_unlock(&l->mutex); +} +EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set); + +void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ + mutex_lock(&l->mutex); + list_del_init(&s->list); + mutex_unlock(&l->mutex); +} +EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set); + +bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, + struct module *owner) +{ + struct kfunc_btf_id_set *s; + + if (!owner) + return false; + mutex_lock(&klist->mutex); + list_for_each_entry(s, &klist->list, list) { + if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) { + mutex_unlock(&klist->mutex); + return true; + } + } + mutex_unlock(&klist->mutex); + return false; +} + +#endif + +#define DEFINE_KFUNC_BTF_ID_LIST(name) \ + struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \ + __MUTEX_INITIALIZER(name.mutex) }; \ + EXPORT_SYMBOL_GPL(name) + +DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); +DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 6e3ae90ad107a2..327e3996eadbd9 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -2263,6 +2264,9 @@ static void bpf_prog_free_deferred(struct work_struct *work) int i; aux = container_of(work, struct bpf_prog_aux, work); +#ifdef CONFIG_BPF_SYSCALL + bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); +#endif bpf_free_used_maps(aux); bpf_free_used_btfs(aux); if (bpf_prog_is_dev_bound(aux)) @@ -2365,6 +2369,11 @@ const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) return NULL; } +const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) +{ + return NULL; +} + u64 __weak bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 32471ba027086f..d29af9988f37ab 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, offsetof(struct htab_elem, key) + @@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map, BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, offsetof(struct htab_elem, lru_node) + @@ -2049,7 +2049,7 @@ static const struct bpf_iter_seq_info iter_seq_info = { .seq_priv_size = sizeof(struct 
bpf_iter_seq_hash_map_info), }; -static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, +static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); @@ -2089,9 +2089,8 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, val = elem->key + roundup_key_size; } num_elems++; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) { rcu_read_unlock(); @@ -2397,7 +2396,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map, BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2); *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, offsetof(struct htab_elem, key) + diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9aabf84afd4b25..1ffd469c217fad 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -979,15 +979,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, return err; } -#define MAX_SNPRINTF_VARARGS 12 - BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, const void *, data, u32, data_len) { int err, num_args; u32 *bin_args; - if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 || + if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || (data_len && !data)) return -EINVAL; num_args = data_len / 8; @@ -1058,7 +1056,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); struct bpf_map *map = t->map; void *value = t->value; - void *callback_fn; + bpf_callback_t callback_fn; void *key; u32 idx; @@ -1083,8 +1081,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) key = value - round_up(map->key_size, 8); } - BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key, - (u64)(long)value, 0, 0); + callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); /* The verifier checked that return value is zero. 
*/ this_cpu_write(hrtimer_running, NULL); @@ -1437,6 +1434,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_snprintf_proto; case BPF_FUNC_task_pt_regs: return &bpf_task_pt_regs_proto; + case BPF_FUNC_trace_vprintk: + return bpf_get_trace_vprintk_proto(); default: return NULL; } diff --git a/kernel/bpf/preload/.gitignore b/kernel/bpf/preload/.gitignore index 856a4c5ad0dd5a..9452322902a54c 100644 --- a/kernel/bpf/preload/.gitignore +++ b/kernel/bpf/preload/.gitignore @@ -1,4 +1,2 @@ -/FEATURE-DUMP.libbpf -/bpf_helper_defs.h -/feature +/libbpf /bpf_preload_umd diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile index 1951332dd15f5c..1400ac58178e85 100644 --- a/kernel/bpf/preload/Makefile +++ b/kernel/bpf/preload/Makefile @@ -1,21 +1,35 @@ # SPDX-License-Identifier: GPL-2.0 LIBBPF_SRCS = $(srctree)/tools/lib/bpf/ -LIBBPF_A = $(obj)/libbpf.a -LIBBPF_OUT = $(abspath $(obj)) +LIBBPF_OUT = $(abspath $(obj))/libbpf +LIBBPF_A = $(LIBBPF_OUT)/libbpf.a +LIBBPF_DESTDIR = $(LIBBPF_OUT) +LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include # Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test # in tools/scripts/Makefile.include always succeeds when building the kernel # with $(O) pointing to a relative path, as in "make O=build bindeb-pkg". -$(LIBBPF_A): - $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a +$(LIBBPF_A): | $(LIBBPF_OUT) + $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= \ + $(LIBBPF_OUT)/libbpf.a install_headers + +libbpf_hdrs: $(LIBBPF_A) + +.PHONY: libbpf_hdrs + +$(LIBBPF_OUT): + $(call msg,MKDIR,$@) + $(Q)mkdir -p $@ userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \ - -I $(srctree)/tools/lib/ -Wno-unused-result + -I $(LIBBPF_INCLUDE) -Wno-unused-result userprogs := bpf_preload_umd -clean-files := $(userprogs) bpf_helper_defs.h FEATURE-DUMP.libbpf staticobjs/ feature/ +clean-files := libbpf/ + +$(obj)/iterators/iterators.o: | libbpf_hdrs bpf_preload_umd-objs := iterators/iterators.o bpf_preload_umd-userldlibs := $(LIBBPF_A) -lelf -lz diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile index 28fa8c1440f46b..b8bd605112275e 100644 --- a/kernel/bpf/preload/iterators/Makefile +++ b/kernel/bpf/preload/iterators/Makefile @@ -1,18 +1,26 @@ # SPDX-License-Identifier: GPL-2.0 OUTPUT := .output +abs_out := $(abspath $(OUTPUT)) + CLANG ?= clang LLC ?= llc LLVM_STRIP ?= llvm-strip + +TOOLS_PATH := $(abspath ../../../../tools) +BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool +BPFTOOL_OUTPUT := $(abs_out)/bpftool DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool BPFTOOL ?= $(DEFAULT_BPFTOOL) -LIBBPF_SRC := $(abspath ../../../../tools/lib/bpf) -BPFOBJ := $(OUTPUT)/libbpf.a -BPF_INCLUDE := $(OUTPUT) -INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../../../tools/lib) \ - -I$(abspath ../../../../tools/include/uapi) + +LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf +LIBBPF_OUTPUT := $(abs_out)/libbpf +LIBBPF_DESTDIR := $(LIBBPF_OUTPUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include +BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a + +INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi CFLAGS := -g -Wall -abs_out := $(abspath $(OUTPUT)) ifeq ($(V),1) Q = msg = @@ -44,14 +52,18 @@ $(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT) -c $(filter %.c,$^) -o $@ && \ $(LLVM_STRIP) -g $@ -$(OUTPUT): +$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT): $(call msg,MKDIR,$@) - $(Q)mkdir -p $(OUTPUT) + $(Q)mkdir 
-p $@ -$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ - OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + OUTPUT=$(abspath $(dir $@))/ prefix= \ + DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers -$(DEFAULT_BPFTOOL): - $(Q)$(MAKE) $(submake_extras) -C ../../../../tools/bpf/bpftool \ - prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install +$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \ + OUTPUT=$(BPFTOOL_OUTPUT)/ \ + LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \ + LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/ \ + prefix= DESTDIR=$(abs_out)/ install-bin diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1cad6979a0d0f4..50f96ea4452a2e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -199,7 +199,8 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, err = bpf_fd_reuseport_array_update_elem(map, key, value, flags); } else if (map->map_type == BPF_MAP_TYPE_QUEUE || - map->map_type == BPF_MAP_TYPE_STACK) { + map->map_type == BPF_MAP_TYPE_STACK || + map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { err = map->ops->map_push_elem(map, value, flags); } else { rcu_read_lock(); @@ -238,7 +239,8 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { err = bpf_fd_reuseport_array_lookup_elem(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_QUEUE || - map->map_type == BPF_MAP_TYPE_STACK) { + map->map_type == BPF_MAP_TYPE_STACK || + map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { err = map->ops->map_peek_elem(map, value); } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { /* struct_ops map requires directly updating "value" */ @@ -348,6 +350,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) map->max_entries = attr->max_entries; map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); map->numa_node = bpf_map_attr_numa_node(attr); + map->map_extra = attr->map_extra; } static int bpf_map_alloc_id(struct bpf_map *map) @@ -555,6 +558,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) "value_size:\t%u\n" "max_entries:\t%u\n" "map_flags:\t%#x\n" + "map_extra:\t%#llx\n" "memlock:\t%lu\n" "map_id:\t%u\n" "frozen:\t%u\n", @@ -563,6 +567,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) map->value_size, map->max_entries, map->map_flags, + (unsigned long long)map->map_extra, bpf_map_memory_footprint(map), map->id, READ_ONCE(map->frozen)); @@ -812,7 +817,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, return ret; } -#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id +#define BPF_MAP_CREATE_LAST_FIELD map_extra /* called via syscall */ static int map_create(union bpf_attr *attr) { @@ -833,6 +838,10 @@ static int map_create(union bpf_attr *attr) return -EINVAL; } + if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && + attr->map_extra != 0) + return -EINVAL; + f_flags = bpf_get_file_flag(attr->map_flags); if (f_flags < 0) return f_flags; @@ -1082,6 +1091,14 @@ static int map_lookup_elem(union bpf_attr *attr) if (!value) goto free_key; + if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { + if (copy_from_user(value, uvalue, value_size)) + err = -EFAULT; + else + err = bpf_map_copy_value(map, key, value, attr->flags); + goto free_value; + } + err = 
bpf_map_copy_value(map, key, value, attr->flags); if (err) goto free_value; @@ -1807,8 +1824,14 @@ static int bpf_prog_release(struct inode *inode, struct file *filp) return 0; } +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + static void bpf_prog_get_stats(const struct bpf_prog *prog, - struct bpf_prog_stats *stats) + struct bpf_prog_kstats *stats) { u64 nsecs = 0, cnt = 0, misses = 0; int cpu; @@ -1821,9 +1844,9 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog, st = per_cpu_ptr(prog->stats, cpu); do { start = u64_stats_fetch_begin_irq(&st->syncp); - tnsecs = st->nsecs; - tcnt = st->cnt; - tmisses = st->misses; + tnsecs = u64_stats_read(&st->nsecs); + tcnt = u64_stats_read(&st->cnt); + tmisses = u64_stats_read(&st->misses); } while (u64_stats_fetch_retry_irq(&st->syncp, start)); nsecs += tnsecs; cnt += tcnt; @@ -1839,7 +1862,7 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_prog *prog = filp->private_data; char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; - struct bpf_prog_stats stats; + struct bpf_prog_kstats stats; bpf_prog_get_stats(prog, &stats); bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); @@ -1851,7 +1874,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) "prog_id:\t%u\n" "run_time_ns:\t%llu\n" "run_cnt:\t%llu\n" - "recursion_misses:\t%llu\n", + "recursion_misses:\t%llu\n" + "verified_insns:\t%u\n", prog->type, prog->jited, prog_tag, @@ -1859,7 +1883,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) prog->aux->id, stats.nsecs, stats.cnt, - stats.misses); + stats.misses, + prog->aux->verified_insns); } #endif @@ -3578,7 +3603,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); struct bpf_prog_info info; u32 info_len = attr->info.info_len; - struct bpf_prog_stats stats; + struct bpf_prog_kstats stats; char __user *uinsns; u32 ulen; int err; @@ -3628,6 +3653,8 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.run_cnt = stats.cnt; info.recursion_misses = stats.misses; + info.verified_insns = prog->aux->verified_insns; + if (!bpf_capable()) { info.jited_prog_len = 0; info.xlated_prog_len = 0; @@ -3874,6 +3901,7 @@ static int bpf_map_get_info_by_fd(struct file *file, info.value_size = map->value_size; info.max_entries = map->max_entries; info.map_flags = map->map_flags; + info.map_extra = map->map_extra; memcpy(info.name, map->name, sizeof(map->name)); if (map->btf) { @@ -4756,6 +4784,31 @@ static const struct bpf_func_proto bpf_sys_close_proto = { .arg1_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) +{ + if (flags) + return -EINVAL; + + if (name_sz <= 1 || name[name_sz - 1]) + return -EINVAL; + + if (!bpf_dump_raw_ok(current_cred())) + return -EPERM; + + *res = kallsyms_lookup_name(name); + return *res ? 
0 : -ENOENT; +} + +const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { + .func = bpf_kallsyms_lookup_name, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_LONG, +}; + static const struct bpf_func_proto * syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -4766,6 +4819,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_btf_find_by_name_kind_proto; case BPF_FUNC_sys_close: return &bpf_sys_close_proto; + case BPF_FUNC_kallsyms_lookup_name: + return &bpf_kallsyms_lookup_name_proto; default: return tracing_prog_func_proto(func_id, prog); } diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index fe1e857324e668..e98de5e73ba59f 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -10,6 +10,7 @@ #include #include #include +#include /* dummy _ops. The verifier will operate on target program's ops. */ const struct bpf_verifier_ops bpf_extension_verifier_ops = { @@ -526,7 +527,7 @@ void bpf_trampoline_put(struct bpf_trampoline *tr) } #define NO_START_TIME 1 -static u64 notrace bpf_prog_start_time(void) +static __always_inline u64 notrace bpf_prog_start_time(void) { u64 start = NO_START_TIME; @@ -544,7 +545,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog) stats = this_cpu_ptr(prog->stats); u64_stats_update_begin(&stats->syncp); - stats->misses++; + u64_stats_inc(&stats->misses); u64_stats_update_end(&stats->syncp); } @@ -585,11 +586,13 @@ static void notrace update_prog_stats(struct bpf_prog *prog, * Hence check that 'start' is valid. */ start > NO_START_TIME) { + unsigned long flags; + stats = this_cpu_ptr(prog->stats); - u64_stats_update_begin(&stats->syncp); - stats->cnt++; - stats->nsecs += sched_clock() - start; - u64_stats_update_end(&stats->syncp); + flags = u64_stats_update_begin_irqsave(&stats->syncp); + u64_stats_inc(&stats->cnt); + u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_update_end_irqrestore(&stats->syncp, flags); } } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index de006552be8adc..f0dca726ebfde5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -612,6 +612,20 @@ static const char *kernel_type_name(const struct btf* btf, u32 id) return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); } +/* The reg state of a pointer or a bounded scalar was saved when + * it was spilled to the stack. 
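Stepping away from the verifier hunks for a moment: the bpf_kallsyms_lookup_name() helper wired up in syscall.c above is only exposed to BPF_PROG_TYPE_SYSCALL programs. A hedged sketch of such a program, assuming current libbpf headers that declare the helper and a loader that runs it via BPF_PROG_RUN:

#include "vmlinux.h"		/* placeholder: BTF-generated header */
#include <bpf/bpf_helpers.h>

SEC("syscall")
int find_symbol(void *ctx)
{
	const char name[] = "bpf_prog_put";
	__u64 addr = 0;
	long err;

	/* name_sz must include the terminating NUL */
	err = bpf_kallsyms_lookup_name(name, sizeof(name), 0, &addr);
	if (err)
		return 0;	/* e.g. -EPERM when bpf_dump_raw_ok() fails */

	bpf_printk("bpf_prog_put at %llx", addr);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";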
+ */ +static bool is_spilled_reg(const struct bpf_stack_state *stack) +{ + return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; +} + +static void scrub_spilled_slot(u8 *stype) +{ + if (*stype != STACK_INVALID) + *stype = STACK_MISC; +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { @@ -717,7 +731,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); - if (state->stack[i].slot_type[0] == STACK_SPILL) { + if (is_spilled_reg(&state->stack[i])) { reg = &state->stack[i].spilled_ptr; t = reg->type; verbose(env, "=%s", reg_type_str[t]); @@ -1406,12 +1420,12 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg) static bool __reg64_bound_s32(s64 a) { - return a > S32_MIN && a < S32_MAX; + return a >= S32_MIN && a <= S32_MAX; } static bool __reg64_bound_u32(u64 a) { - return a > U32_MIN && a < U32_MAX; + return a >= U32_MIN && a <= U32_MAX; } static void __reg_combine_64_into_32(struct bpf_reg_state *reg) @@ -1626,52 +1640,168 @@ static int add_subprog(struct bpf_verifier_env *env, int off) return env->subprog_cnt - 1; } +#define MAX_KFUNC_DESCS 256 +#define MAX_KFUNC_BTFS 256 + struct bpf_kfunc_desc { struct btf_func_model func_model; u32 func_id; s32 imm; + u16 offset; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; }; -#define MAX_KFUNC_DESCS 256 struct bpf_kfunc_desc_tab { struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; u32 nr_descs; }; -static int kfunc_desc_cmp_by_id(const void *a, const void *b) +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; + u32 nr_descs; +}; + +static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) { const struct bpf_kfunc_desc *d0 = a; const struct bpf_kfunc_desc *d1 = b; /* func_id is not greater than BTF_MAX_TYPE */ - return d0->func_id - d1->func_id; + return d0->func_id - d1->func_id ?: d0->offset - d1->offset; +} + +static int kfunc_btf_cmp_by_off(const void *a, const void *b) +{ + const struct bpf_kfunc_btf *d0 = a; + const struct bpf_kfunc_btf *d1 = b; + + return d0->offset - d1->offset; } static const struct bpf_kfunc_desc * -find_kfunc_desc(const struct bpf_prog *prog, u32 func_id) +find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) { struct bpf_kfunc_desc desc = { .func_id = func_id, + .offset = offset, }; struct bpf_kfunc_desc_tab *tab; tab = prog->aux->kfunc_tab; return bsearch(&desc, tab->descs, tab->nr_descs, - sizeof(tab->descs[0]), kfunc_desc_cmp_by_id); + sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); +} + +static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, + s16 offset, struct module **btf_modp) +{ + struct bpf_kfunc_btf kf_btf = { .offset = offset }; + struct bpf_kfunc_btf_tab *tab; + struct bpf_kfunc_btf *b; + struct module *mod; + struct btf *btf; + int btf_fd; + + tab = env->prog->aux->kfunc_btf_tab; + b = bsearch(&kf_btf, tab->descs, tab->nr_descs, + sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); + if (!b) { + if (tab->nr_descs == MAX_KFUNC_BTFS) { + verbose(env, "too many different module BTFs\n"); + return ERR_PTR(-E2BIG); + } + + if (bpfptr_is_null(env->fd_array)) { + verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); + return ERR_PTR(-EPROTO); + } + + if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, + offset * sizeof(btf_fd), + sizeof(btf_fd))) + return ERR_PTR(-EFAULT); + + btf = btf_get_by_fd(btf_fd); + if 
(IS_ERR(btf)) { + verbose(env, "invalid module BTF fd specified\n"); + return btf; + } + + if (!btf_is_module(btf)) { + verbose(env, "BTF fd for kfunc is not a module BTF\n"); + btf_put(btf); + return ERR_PTR(-EINVAL); + } + + mod = btf_try_get_module(btf); + if (!mod) { + btf_put(btf); + return ERR_PTR(-ENXIO); + } + + b = &tab->descs[tab->nr_descs++]; + b->btf = btf; + b->module = mod; + b->offset = offset; + + sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), + kfunc_btf_cmp_by_off, NULL); + } + if (btf_modp) + *btf_modp = b->module; + return b->btf; +} + +void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) +{ + if (!tab) + return; + + while (tab->nr_descs--) { + module_put(tab->descs[tab->nr_descs].module); + btf_put(tab->descs[tab->nr_descs].btf); + } + kfree(tab); +} + +static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, + u32 func_id, s16 offset, + struct module **btf_modp) +{ + if (offset) { + if (offset < 0) { + /* In the future, this can be allowed to increase limit + * of fd index into fd_array, interpreted as u16. + */ + verbose(env, "negative offset disallowed for kernel module function call\n"); + return ERR_PTR(-EINVAL); + } + + return __find_kfunc_desc_btf(env, offset, btf_modp); + } + return btf_vmlinux ?: ERR_PTR(-ENOENT); } -static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) +static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) { const struct btf_type *func, *func_proto; + struct bpf_kfunc_btf_tab *btf_tab; struct bpf_kfunc_desc_tab *tab; struct bpf_prog_aux *prog_aux; struct bpf_kfunc_desc *desc; const char *func_name; + struct btf *desc_btf; unsigned long addr; int err; prog_aux = env->prog->aux; tab = prog_aux->kfunc_tab; + btf_tab = prog_aux->kfunc_btf_tab; if (!tab) { if (!btf_vmlinux) { verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); @@ -1699,7 +1829,29 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) prog_aux->kfunc_tab = tab; } - if (find_kfunc_desc(env->prog, func_id)) + /* func_id == 0 is always invalid, but instead of returning an error, be + * conservative and wait until the code elimination pass before returning + * error, so that invalid calls that get pruned out can be in BPF programs + * loaded from userspace. It is also required that offset be untouched + * for such calls. 
+ */ + if (!func_id && !offset) + return 0; + + if (!btf_tab && offset) { + btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); + if (!btf_tab) + return -ENOMEM; + prog_aux->kfunc_btf_tab = btf_tab; + } + + desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL); + if (IS_ERR(desc_btf)) { + verbose(env, "failed to find BTF for kernel function\n"); + return PTR_ERR(desc_btf); + } + + if (find_kfunc_desc(env->prog, func_id, offset)) return 0; if (tab->nr_descs == MAX_KFUNC_DESCS) { @@ -1707,20 +1859,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) return -E2BIG; } - func = btf_type_by_id(btf_vmlinux, func_id); + func = btf_type_by_id(desc_btf, func_id); if (!func || !btf_type_is_func(func)) { verbose(env, "kernel btf_id %u is not a function\n", func_id); return -EINVAL; } - func_proto = btf_type_by_id(btf_vmlinux, func->type); + func_proto = btf_type_by_id(desc_btf, func->type); if (!func_proto || !btf_type_is_func_proto(func_proto)) { verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", func_id); return -EINVAL; } - func_name = btf_name_by_offset(btf_vmlinux, func->name_off); + func_name = btf_name_by_offset(desc_btf, func->name_off); addr = kallsyms_lookup_name(func_name); if (!addr) { verbose(env, "cannot find address for kernel function %s\n", @@ -1730,13 +1882,14 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) desc = &tab->descs[tab->nr_descs++]; desc->func_id = func_id; - desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base; - err = btf_distill_func_proto(&env->log, btf_vmlinux, + desc->imm = BPF_CALL_IMM(addr); + desc->offset = offset; + err = btf_distill_func_proto(&env->log, desc_btf, func_proto, func_name, &desc->func_model); if (!err) sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), - kfunc_desc_cmp_by_id, NULL); + kfunc_desc_cmp_by_id_off, NULL); return err; } @@ -1815,7 +1968,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env) } else if (bpf_pseudo_call(insn)) { ret = add_subprog(env, i + insn->imm + 1); } else { - ret = add_kfunc_call(env, insn->imm); + ret = add_kfunc_call(env, insn->imm, insn->off); } if (ret < 0) @@ -2152,12 +2305,17 @@ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) { const struct btf_type *func; + struct btf *desc_btf; if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) return NULL; - func = btf_type_by_id(btf_vmlinux, insn->imm); - return btf_name_by_offset(btf_vmlinux, func->name_off); + desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL); + if (IS_ERR(desc_btf)) + return ""; + + func = btf_type_by_id(desc_btf, insn->imm); + return btf_name_by_offset(desc_btf, func->name_off); } /* For given verifier state backtrack_insn() is called from the last insn to @@ -2373,7 +2531,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { - if (func->stack[j].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&func->stack[j])) continue; reg = &func->stack[j].spilled_ptr; if (reg->type != SCALAR_VALUE) @@ -2415,7 +2573,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, } while (spi >= 0) { - if (func->stack[spi].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[spi])) { stack_mask = 0; break; } @@ -2514,7 +2672,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, return 0; } - if (func->stack[i].slot_type[0] 
!= STACK_SPILL) { + if (!is_spilled_reg(&func->stack[i])) { stack_mask &= ~(1ull << i); continue; } @@ -2626,15 +2784,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks, } static void save_register_state(struct bpf_func_state *state, - int spi, struct bpf_reg_state *reg) + int spi, struct bpf_reg_state *reg, + int size) { int i; state->stack[spi].spilled_ptr = *reg; - state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + if (size == BPF_REG_SIZE) + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + + for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) + state->stack[spi].slot_type[i - 1] = STACK_SPILL; - for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_SPILL; + /* size < 8 bytes spill */ + for (; i; i--) + scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); } /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, @@ -2681,7 +2845,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, env->insn_aux_data[insn_idx].sanitize_stack_spill = true; } - if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && + if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit @@ -2694,7 +2858,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (err) return err; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -2706,16 +2870,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ - if (state->stack[spi].slot_type[0] == STACK_SPILL) + if (is_spilled_reg(&state->stack[spi])) for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[i]); /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -2918,23 +3082,50 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; - u8 *stype; + u8 *stype, type; stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; - if (stype[0] == STACK_SPILL) { + if (is_spilled_reg(®_state->stack[spi])) { if (size != BPF_REG_SIZE) { + u8 scalar_size = 0; + if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); verbose(env, "invalid size of register fill\n"); return -EACCES; } - if (dst_regno >= 0) { + + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + if (dst_regno < 0) + return 0; + + for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--) + scalar_size++; + + if (!(off % BPF_REG_SIZE) && size == scalar_size) { + /* The earlier check_reg_arg() has decided the + * subreg_def for this insn. Save it first. 
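(Editorial illustration, not part of the patch.) The hunks above let the verifier track register spills and fills smaller than 8 bytes: save_register_state() now marks only the written slot bytes as STACK_SPILL, and check_stack_read_fixed_off() restores the known scalar when a narrow fill matches the spilled size and offset. A minimal sketch of a sequence this enables, written with the instruction macros from include/linux/filter.h; the constant and register numbers are illustrative only:

        BPF_MOV64_IMM(BPF_REG_1, 0x12345678),           /* known scalar */
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -8),  /* 4-byte spill to fp-8 */
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -8),  /* 4-byte fill, value kept */
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),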
+ */ + s32 subreg_def = state->regs[dst_regno].subreg_def; + + state->regs[dst_regno] = *reg; + state->regs[dst_regno].subreg_def = subreg_def; + } else { + for (i = 0; i < size; i++) { + type = stype[(slot - i) % BPF_REG_SIZE]; + if (type == STACK_SPILL) + continue; + if (type == STACK_MISC) + continue; + verbose(env, "invalid read from stack off %d+%d size %d\n", + off, i, size); + return -EACCES; + } mark_reg_unknown(env, state->regs, dst_regno); - state->regs[dst_regno].live |= REG_LIVE_WRITTEN; } - mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + state->regs[dst_regno].live |= REG_LIVE_WRITTEN; return 0; } for (i = 1; i < BPF_REG_SIZE; i++) { @@ -2965,8 +3156,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { - u8 type; - for (i = 0; i < size; i++) { type = stype[(slot - i) % BPF_REG_SIZE]; if (type == STACK_MISC) @@ -4514,17 +4703,17 @@ static int check_stack_range_initialized( goto mark; } - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) goto mark; - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || env->allow_ptr_leaks)) { if (clobber) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) - state->stack[spi].slot_type[j] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[j]); } goto mark; } @@ -4813,7 +5002,10 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env, return -EINVAL; } break; - + case BPF_MAP_TYPE_BLOOM_FILTER: + if (meta->func_id == BPF_FUNC_map_peek_elem) + *arg_type = ARG_PTR_TO_MAP_VALUE; + break; default: break; } @@ -5388,6 +5580,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, func_id != BPF_FUNC_task_storage_delete) goto error; break; + case BPF_MAP_TYPE_BLOOM_FILTER: + if (func_id != BPF_FUNC_map_peek_elem && + func_id != BPF_FUNC_map_push_elem) + goto error; + break; default: break; } @@ -5455,13 +5652,18 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; - case BPF_FUNC_map_peek_elem: case BPF_FUNC_map_pop_elem: - case BPF_FUNC_map_push_elem: if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) goto error; break; + case BPF_FUNC_map_peek_elem: + case BPF_FUNC_map_push_elem: + if (map->map_type != BPF_MAP_TYPE_QUEUE && + map->map_type != BPF_MAP_TYPE_STACK && + map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) + goto error; + break; case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_delete: if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) @@ -6485,23 +6687,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; u32 i, nargs, func_id, ptr_type_id; + struct module *btf_mod = NULL; const struct btf_param *args; + struct btf *desc_btf; int err; + /* skip for now, but return error when we find this in fixup_kfunc_call */ + if (!insn->imm) + return 0; + + desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod); + if (IS_ERR(desc_btf)) + return PTR_ERR(desc_btf); + func_id = insn->imm; - func = btf_type_by_id(btf_vmlinux, func_id); - func_name = btf_name_by_offset(btf_vmlinux, func->name_off); - func_proto = btf_type_by_id(btf_vmlinux, func->type); + func = 
btf_type_by_id(desc_btf, func_id); + func_name = btf_name_by_offset(desc_btf, func->name_off); + func_proto = btf_type_by_id(desc_btf, func->type); if (!env->ops->check_kfunc_call || - !env->ops->check_kfunc_call(func_id)) { + !env->ops->check_kfunc_call(func_id, btf_mod)) { verbose(env, "calling kernel function %s is not allowed\n", func_name); return -EACCES; } /* Check the arguments */ - err = btf_check_kfunc_arg_match(env, btf_vmlinux, func_id, regs); + err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs); if (err) return err; @@ -6509,15 +6721,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) mark_reg_not_init(env, regs, caller_saved[i]); /* Check return type */ - t = btf_type_skip_modifiers(btf_vmlinux, func_proto->type, NULL); + t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL); if (btf_type_is_scalar(t)) { mark_reg_unknown(env, regs, BPF_REG_0); mark_btf_func_reg_size(env, BPF_REG_0, t->size); } else if (btf_type_is_ptr(t)) { - ptr_type = btf_type_skip_modifiers(btf_vmlinux, t->type, + ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); if (!btf_type_is_struct(ptr_type)) { - ptr_type_name = btf_name_by_offset(btf_vmlinux, + ptr_type_name = btf_name_by_offset(desc_btf, ptr_type->name_off); verbose(env, "kernel function %s returns pointer type %s %s is not supported\n", func_name, btf_type_str(ptr_type), @@ -6525,7 +6737,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].btf = btf_vmlinux; + regs[BPF_REG_0].btf = desc_btf; regs[BPF_REG_0].type = PTR_TO_BTF_ID; regs[BPF_REG_0].btf_id = ptr_type_id; mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); @@ -6536,7 +6748,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) for (i = 0; i < nargs; i++) { u32 regno = i + 1; - t = btf_type_skip_modifiers(btf_vmlinux, args[i].type, NULL); + t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); if (btf_type_is_ptr(t)) mark_btf_func_reg_size(env, regno, sizeof(void *)); else @@ -10356,9 +10568,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, * return false to continue verification of this path */ return false; - if (i % BPF_REG_SIZE) + if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) continue; - if (old->stack[spi].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&old->stack[spi])) continue; if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) @@ -10565,7 +10777,7 @@ static int propagate_precision(struct bpf_verifier_env *env, } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&state->stack[i])) continue; state_reg = &state->stack[i].spilled_ptr; if (state_reg->type != SCALAR_VALUE || @@ -11076,7 +11288,8 @@ static int do_check(struct bpf_verifier_env *env) env->jmps_processed++; if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || - insn->off != 0 || + (insn->src_reg != BPF_PSEUDO_KFUNC_CALL + && insn->off != 0) || (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL && insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || @@ -12432,6 +12645,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; + func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; func[i]->aux->linfo = 
prog->aux->linfo; func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; @@ -12469,8 +12683,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) if (!bpf_pseudo_call(insn)) continue; subprog = insn->off; - insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); } /* we use the aux data to keep a list of the start addresses @@ -12618,10 +12831,15 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, { const struct bpf_kfunc_desc *desc; + if (!insn->imm) { + verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); + return -EINVAL; + } + /* insn->imm has the btf func_id. Replace it with * an address (relative to __bpf_base_call). */ - desc = find_kfunc_desc(env->prog, insn->imm); + desc = find_kfunc_desc(env->prog, insn->imm, insn->off); if (!desc) { verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", insn->imm); @@ -12902,7 +13120,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->imm == BPF_FUNC_map_push_elem || insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem || - insn->imm == BPF_FUNC_redirect_map)) { + insn->imm == BPF_FUNC_redirect_map || + insn->imm == BPF_FUNC_for_each_map_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; @@ -12946,36 +13165,37 @@ static int do_misc_fixups(struct bpf_verifier_env *env) (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_redirect, (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, + (int (*)(struct bpf_map *map, + bpf_callback_t callback_fn, + void *callback_ctx, + u64 flags))NULL)); patch_map_ops_generic: switch (insn->imm) { case BPF_FUNC_map_lookup_elem: - insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); continue; case BPF_FUNC_map_update_elem: - insn->imm = BPF_CAST_CALL(ops->map_update_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_update_elem); continue; case BPF_FUNC_map_delete_elem: - insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_delete_elem); continue; case BPF_FUNC_map_push_elem: - insn->imm = BPF_CAST_CALL(ops->map_push_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_push_elem); continue; case BPF_FUNC_map_pop_elem: - insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_pop_elem); continue; case BPF_FUNC_map_peek_elem: - insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_peek_elem); continue; case BPF_FUNC_redirect_map: - insn->imm = BPF_CAST_CALL(ops->map_redirect) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_redirect); + continue; + case BPF_FUNC_for_each_map_elem: + insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); continue; } @@ -13826,6 +14046,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) env->verification_time = ktime_get_ns() - start_time; print_verification_stats(env); + env->prog->aux->verified_insns = env->insn_processed; if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; diff --git a/kernel/events/core.c b/kernel/events/core.c index a89b01b7e59a41..f2253ea729a273 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -13491,3 
+13491,5 @@ struct cgroup_subsys perf_event_cgrp_subsys = { .threaded = true, }; #endif /* CONFIG_CGROUP_PERF */ + +DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8e2eb950aa8297..7396488793ff7b 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -398,7 +398,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { .arg2_type = ARG_CONST_SIZE, }; -const struct bpf_func_proto *bpf_get_trace_printk_proto(void) +static void __set_printk_clr_event(void) { /* * This program might be calling bpf_trace_printk, @@ -410,11 +410,57 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) */ if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) pr_warn_ratelimited("could not enable bpf_trace_printk events"); +} +const struct bpf_func_proto *bpf_get_trace_printk_proto(void) +{ + __set_printk_clr_event(); return &bpf_trace_printk_proto; } -#define MAX_SEQ_PRINTF_VARARGS 12 +BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data, + u32, data_len) +{ + static char buf[BPF_TRACE_PRINTK_SIZE]; + unsigned long flags; + int ret, num_args; + u32 *bin_args; + + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || + (data_len && !data)) + return -EINVAL; + num_args = data_len / 8; + + ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); + if (ret < 0) + return ret; + + raw_spin_lock_irqsave(&trace_printk_lock, flags); + ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); + + trace_bpf_trace_printk(buf); + raw_spin_unlock_irqrestore(&trace_printk_lock, flags); + + bpf_bprintf_cleanup(); + + return ret; +} + +static const struct bpf_func_proto bpf_trace_vprintk_proto = { + .func = bpf_trace_vprintk, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_PTR_TO_MEM_OR_NULL, + .arg4_type = ARG_CONST_SIZE_OR_ZERO, +}; + +const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) +{ + __set_printk_clr_event(); + return &bpf_trace_vprintk_proto; +} BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, const void *, data, u32, data_len) @@ -422,7 +468,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, int err, num_args; u32 *bin_args; - if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 || + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || (data_len && !data)) return -EINVAL; num_args = data_len / 8; @@ -1017,6 +1063,34 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) +{ +#ifndef CONFIG_X86 + return -ENOENT; +#else + static const u32 br_entry_size = sizeof(struct perf_branch_entry); + u32 entry_cnt = size / br_entry_size; + + entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); + + if (unlikely(flags)) + return -EINVAL; + + if (!entry_cnt) + return -ENOENT; + + return entry_cnt * br_entry_size; +#endif +} + +static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { + .func = bpf_get_branch_snapshot, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, +}; + static const struct bpf_func_proto * bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1132,6 +1206,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct 
bpf_prog *prog) return &bpf_snprintf_proto; case BPF_FUNC_get_func_ip: return &bpf_get_func_ip_proto_tracing; + case BPF_FUNC_get_branch_snapshot: + return &bpf_get_branch_snapshot_proto; + case BPF_FUNC_trace_vprintk: + return bpf_get_trace_vprintk_proto(); default: return bpf_base_func_proto(func_id); } @@ -1530,6 +1608,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_tcp_request_sock_proto; case BPF_FUNC_skc_to_udp6_sock: return &bpf_skc_to_udp6_sock_proto; + case BPF_FUNC_skc_to_unix_sock: + return &bpf_skc_to_unix_sock_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_tracing_proto; case BPF_FUNC_sk_storage_delete: @@ -1566,13 +1646,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) - return false; - if (type != BPF_READ) - return false; - if (off % size != 0) - return false; - return true; + return bpf_tracing_ctx_access(off, size, type); } static bool tracing_prog_is_valid_access(int off, int size, @@ -1580,13 +1654,7 @@ static bool tracing_prog_is_valid_access(int off, int size, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) - return false; - if (type != BPF_READ) - return false; - if (off % size != 0) - return false; - return btf_ctx_access(off, size, type, prog, info); + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); } int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, diff --git a/lib/bitmap.c b/lib/bitmap.c index 663dd81967d4ea..9264088834566b 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -1398,6 +1398,19 @@ unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags) } EXPORT_SYMBOL(bitmap_zalloc); +unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node) +{ + return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long), + flags, node); +} +EXPORT_SYMBOL(bitmap_alloc_node); + +unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node) +{ + return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node); +} +EXPORT_SYMBOL(bitmap_zalloc_node); + void bitmap_free(const unsigned long *bitmap) { kfree(bitmap); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 830a18ecffc887..adae39567264f2 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -52,6 +52,7 @@ #define FLAG_NO_DATA BIT(0) #define FLAG_EXPECTED_FAIL BIT(1) #define FLAG_SKB_FRAG BIT(2) +#define FLAG_VERIFIER_ZEXT BIT(3) enum { CLASSIC = BIT(6), /* Old BPF instructions only. */ @@ -80,6 +81,7 @@ struct bpf_test { int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */ __u8 frag_data[MAX_DATA]; int stack_depth; /* for eBPF only, since tests don't call verifier */ + int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */ }; /* Large test cases need separate allocation and fill handler. */ @@ -461,1452 +463,7121 @@ static int bpf_fill_stxdw(struct bpf_test *self) return __bpf_fill_stxdw(self, BPF_DW); } -static int bpf_fill_long_jmp(struct bpf_test *self) +static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64) { - unsigned int len = BPF_MAXINSNS; - struct bpf_insn *insn; + struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)}; + + memcpy(insns, tmp, sizeof(tmp)); + return 2; +} + +/* + * Branch conversion tests. Complex operations can expand to a lot + * of instructions when JITed. 
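(Editorial illustration, not part of the patch.) Regarding the lib/bitmap.c hunk above: the new bitmap_alloc_node()/bitmap_zalloc_node() helpers mirror bitmap_alloc()/bitmap_zalloc() but allocate on a given NUMA node. A minimal usage sketch; nbits, cpu and the node choice are illustrative:

        unsigned long *mask;

        mask = bitmap_zalloc_node(nbits, GFP_KERNEL, cpu_to_node(cpu));
        if (!mask)
                return -ENOMEM;
        /* ... use mask with the usual bitmap helpers ... */
        bitmap_free(mask);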
This in turn may cause jump offsets + * to overflow the field size of the native instruction, triggering + * a branch conversion mechanism in some JITs. + */ +static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm) +{ + struct bpf_insn *insns; + int len = S16_MAX + 5; int i; + insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL); + if (!insns) + return -ENOMEM; + + i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL); + insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); + insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX); + insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2); + insns[i++] = BPF_EXIT_INSN(); + + while (i < len - 1) { + static const int ops[] = { + BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD, + BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD, + }; + int op = ops[(i >> 1) % ARRAY_SIZE(ops)]; + + if (i & 1) + insns[i++] = BPF_ALU32_REG(op, R0, R1); + else + insns[i++] = BPF_ALU64_REG(op, R0, R1); + } + + insns[i++] = BPF_EXIT_INSN(); + self->u.ptr.insns = insns; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +/* Branch taken by runtime decision */ +static int bpf_fill_max_jmp_taken(struct bpf_test *self) +{ + return __bpf_fill_max_jmp(self, BPF_JEQ, 1); +} + +/* Branch not taken by runtime decision */ +static int bpf_fill_max_jmp_not_taken(struct bpf_test *self) +{ + return __bpf_fill_max_jmp(self, BPF_JEQ, 0); +} + +/* Branch always taken, known at JIT time */ +static int bpf_fill_max_jmp_always_taken(struct bpf_test *self) +{ + return __bpf_fill_max_jmp(self, BPF_JGE, 0); +} + +/* Branch never taken, known at JIT time */ +static int bpf_fill_max_jmp_never_taken(struct bpf_test *self) +{ + return __bpf_fill_max_jmp(self, BPF_JLT, 0); +} + +/* ALU result computation used in tests */ +static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op) +{ + *res = 0; + switch (op) { + case BPF_MOV: + *res = v2; + break; + case BPF_AND: + *res = v1 & v2; + break; + case BPF_OR: + *res = v1 | v2; + break; + case BPF_XOR: + *res = v1 ^ v2; + break; + case BPF_LSH: + *res = v1 << v2; + break; + case BPF_RSH: + *res = v1 >> v2; + break; + case BPF_ARSH: + *res = v1 >> v2; + if (v2 > 0 && v1 > S64_MAX) + *res |= ~0ULL << (64 - v2); + break; + case BPF_ADD: + *res = v1 + v2; + break; + case BPF_SUB: + *res = v1 - v2; + break; + case BPF_MUL: + *res = v1 * v2; + break; + case BPF_DIV: + if (v2 == 0) + return false; + *res = div64_u64(v1, v2); + break; + case BPF_MOD: + if (v2 == 0) + return false; + div64_u64_rem(v1, v2, res); + break; + } + return true; +} + +/* Test an ALU shift operation for all valid shift values */ +static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op, + u8 mode, bool alu32) +{ + static const s64 regs[] = { + 0x0123456789abcdefLL, /* dword > 0, word < 0 */ + 0xfedcba9876543210LL, /* dowrd < 0, word > 0 */ + 0xfedcba0198765432LL, /* dowrd < 0, word < 0 */ + 0x0123458967abcdefLL, /* dword > 0, word > 0 */ + }; + int bits = alu32 ? 32 : 64; + int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3; + struct bpf_insn *insn; + int imm, k; + int i = 0; + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); if (!insn) return -ENOMEM; - insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1); - insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1); + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); - /* - * Fill with a complex 64-bit operation that expands to a lot of - * instructions on 32-bit JITs. The large jump offset can then - * overflow the conditional branch field size, triggering a branch - * conversion mechanism in some JITs. 
- * - * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch - * conversion on the 32-bit MIPS JIT. For other JITs, the instruction - * count and/or operation may need to be modified to trigger the - * branch conversion. - */ - for (i = 2; i < len - 1; i++) - insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i); + for (k = 0; k < ARRAY_SIZE(regs); k++) { + s64 reg = regs[k]; - insn[len - 1] = BPF_EXIT_INSN(); + i += __bpf_ld_imm64(&insn[i], R3, reg); + + for (imm = 0; imm < bits; imm++) { + u64 val; + + /* Perform operation */ + insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3); + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm); + if (alu32) { + if (mode == BPF_K) + insn[i++] = BPF_ALU32_IMM(op, R1, imm); + else + insn[i++] = BPF_ALU32_REG(op, R1, R2); + + if (op == BPF_ARSH) + reg = (s32)reg; + else + reg = (u32)reg; + __bpf_alu_result(&val, reg, imm, op); + val = (u32)val; + } else { + if (mode == BPF_K) + insn[i++] = BPF_ALU64_IMM(op, R1, imm); + else + insn[i++] = BPF_ALU64_REG(op, R1, R2); + __bpf_alu_result(&val, reg, imm, op); + } + + /* + * When debugging a JIT that fails this test, one + * can write the immediate value to R0 here to find + * out which operand values that fail. + */ + + /* Load reference and check the result */ + i += __bpf_ld_imm64(&insn[i], R4, val); + insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1); + insn[i++] = BPF_EXIT_INSN(); + } + } + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); + insn[i++] = BPF_EXIT_INSN(); self->u.ptr.insns = insn; self->u.ptr.len = len; + BUG_ON(i != len); return 0; } -static struct bpf_test tests[] = { - { - "TAX", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */ - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_LEN, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */ - BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 10, 20, 30, 40, 50 }, - { { 2, 10 }, { 3, 20 }, { 4, 30 } }, - }, - { - "TXA", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */ - }, - CLASSIC, - { 10, 20, 30, 40, 50 }, - { { 1, 2 }, { 3, 6 }, { 4, 8 } }, - }, - { - "ADD_SUB_MUL_K", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 1), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2), - BPF_STMT(BPF_LDX | BPF_IMM, 3), - BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff), - BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC | FLAG_NO_DATA, - { }, - { { 0, 0xfffffffd } } - }, - { - "DIV_MOD_KX", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 8), - BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), - BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), - BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), - BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), - BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC | FLAG_NO_DATA, - { }, - { { 0, 0x20000000 } } - }, - { - "AND_OR_LSH_K", - .u.insns = { - 
BPF_STMT(BPF_LD | BPF_IMM, 0xff), - BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0), - BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xf), - BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC | FLAG_NO_DATA, - { }, - { { 0, 0x800000ff }, { 1, 0x800000ff } }, - }, - { - "LD_IMM_0", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */ - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 0), - BPF_STMT(BPF_RET | BPF_K, 1), - }, - CLASSIC, - { }, - { { 1, 1 } }, - }, - { - "LD_IND", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K), - BPF_STMT(BPF_RET | BPF_K, 1) - }, - CLASSIC, - { }, - { { 1, 0 }, { 10, 0 }, { 60, 0 } }, - }, - { - "LD_ABS", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000), - BPF_STMT(BPF_RET | BPF_K, 1) - }, - CLASSIC, - { }, - { { 1, 0 }, { 10, 0 }, { 60, 0 } }, - }, - { - "LD_ABS_LL", - .u.insns = { - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 1, 2, 3 }, - { { 1, 0 }, { 2, 3 } }, - }, - { - "LD_IND_LL", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1), - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 1, 2, 3, 0xff }, - { { 1, 1 }, { 3, 3 }, { 4, 0xff } }, - }, - { - "LD_ABS_NET", - .u.insns = { - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, - { { 15, 0 }, { 16, 3 } }, - }, - { - "LD_IND_NET", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15), - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, - { { 14, 0 }, { 15, 1 }, { 17, 3 } }, - }, - { - "LD_PKTTYPE", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PKTTYPE), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PKTTYPE), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PKTTYPE), +static int bpf_fill_alu64_lsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false); +} + +static int bpf_fill_alu64_rsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false); +} + +static int bpf_fill_alu64_arsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false); +} + +static int bpf_fill_alu64_lsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false); +} + +static int bpf_fill_alu64_rsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false); +} + +static int bpf_fill_alu64_arsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_ARSH, 
BPF_X, false); +} + +static int bpf_fill_alu32_lsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true); +} + +static int bpf_fill_alu32_rsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true); +} + +static int bpf_fill_alu32_arsh_imm(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true); +} + +static int bpf_fill_alu32_lsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true); +} + +static int bpf_fill_alu32_rsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true); +} + +static int bpf_fill_alu32_arsh_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true); +} + +/* + * Test an ALU register shift operation for all valid shift values + * for the case when the source and destination are the same. + */ +static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op, + bool alu32) +{ + int bits = alu32 ? 32 : 64; + int len = 3 + 6 * bits; + struct bpf_insn *insn; + int i = 0; + u64 val; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); + + for (val = 0; val < bits; val++) { + u64 res; + + /* Perform operation */ + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val); + if (alu32) + insn[i++] = BPF_ALU32_REG(op, R1, R1); + else + insn[i++] = BPF_ALU64_REG(op, R1, R1); + + /* Compute the reference result */ + __bpf_alu_result(&res, val, val, op); + if (alu32) + res = (u32)res; + i += __bpf_ld_imm64(&insn[i], R2, res); + + /* Check the actual result */ + insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1); + insn[i++] = BPF_EXIT_INSN(); + } + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); + insn[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false); +} + +static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false); +} + +static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false); +} + +static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true); +} + +static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true); +} + +static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self) +{ + return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true); +} + +/* + * Common operand pattern generator for exhaustive power-of-two magnitudes + * tests. The block size parameters can be adjusted to increase/reduce the + * number of combinatons tested and thereby execution speed and memory + * footprint. 
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+    return sign * (1LL << msb) + delta;
+}
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+                              int dbits, int sbits, int block1, int block2,
+                              int (*emit)(struct bpf_test*, void*,
+                                          struct bpf_insn*, s64, s64))
+{
+    static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+    struct bpf_insn *insns;
+    int di, si, bt, db, sb;
+    int count, len, k;
+    int extra = 1 + 2;
+    int i = 0;
+
+    /* Total number of iterations for the two patterns */
+    count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+    count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+    /* Compute the maximum number of insns and allocate the buffer */
+    len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+    insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+    if (!insns)
+        return -ENOMEM;
+
+    /* Add head instruction(s) */
+    insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+    /*
+     * Pattern 1: all combinations of power-of-two magnitudes and sign,
+     * and with a block of contiguous values around each magnitude.
+     */
+    for (di = 0; di < dbits - 1; di++)                    /* Dst magnitudes */
+        for (si = 0; si < sbits - 1; si++)                /* Src magnitudes */
+            for (k = 0; k < ARRAY_SIZE(sgn); k++)         /* Sign combos */
+                for (db = -(block1 / 2);
+                     db < (block1 + 1) / 2; db++)
+                    for (sb = -(block1 / 2);
+                         sb < (block1 + 1) / 2; sb++) {
+                        s64 dst, src;
+
+                        dst = value(di, db, sgn[k][0]);
+                        src = value(si, sb, sgn[k][1]);
+                        i += (*emit)(self, arg,
+                                     &insns[i],
+                                     dst, src);
+                    }
+    /*
+     * Pattern 2: all combinations for a larger block of values
+     * for each power-of-two magnitude and sign, where the magnitude is
+     * the same for both operands.
+     */
+    for (bt = 0; bt < max(dbits, sbits) - 1; bt++)        /* Magnitude */
+        for (k = 0; k < ARRAY_SIZE(sgn); k++)             /* Sign combos */
+            for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+                for (sb = -(block2 / 2);
+                     sb < (block2 + 1) / 2; sb++) {
+                    s64 dst, src;
+
+                    dst = value(bt % dbits, db, sgn[k][0]);
+                    src = value(bt % sbits, sb, sgn[k][1]);
+                    i += (*emit)(self, arg, &insns[i],
+                                 dst, src);
+                }
+
+    /* Append tail instructions */
+    insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+    insns[i++] = BPF_EXIT_INSN();
+    BUG_ON(i > len);
+
+    self->u.ptr.insns = insns;
+    self->u.ptr.len = i;
+
+    return 0;
+}
+
+/*
+ * Block size parameters used in pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested, see following examples.
+ * block    values per operand MSB
+ * ----------------------------------------
+ *    0     none
+ *    1     (1 << MSB)
+ *    2     (1 << MSB) + [-1, 0]
+ *    3     (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
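(Editorial worked example, not part of the patch.) With the defaults above, pattern 1 (PATTERN_BLOCK1 == 1) emits a single operand value per magnitude/sign combination, while pattern 2 (PATTERN_BLOCK2 == 5) also covers deltas of -2..2 around each power of two; for example with msb == 3:

        value(3,  0,  1) ==   8         /* pattern 1, +2^3     */
        value(3, -2, -1) == -10         /* pattern 2, -2^3 - 2 */
        value(3,  2, -1) ==  -6         /* pattern 2, -2^3 + 2 */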
+ */ +static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 imm) +{ + int op = *(int *)arg; + int i = 0; + u64 res; + + if (!insns) + return 7; + + if (__bpf_alu_result(&res, dst, (s32)imm, op)) { + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R3, res); + insns[i++] = BPF_ALU64_IMM(op, R1, imm); + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + } + + return i; +} + +static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 imm) +{ + int op = *(int *)arg; + int i = 0; + u64 res; + + if (!insns) + return 7; + + if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) { + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R3, (u32)res); + insns[i++] = BPF_ALU32_IMM(op, R1, imm); + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + } + + return i; +} + +static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + int i = 0; + u64 res; + + if (!insns) + return 9; + + if (__bpf_alu_result(&res, dst, src, op)) { + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + i += __bpf_ld_imm64(&insns[i], R3, res); + insns[i++] = BPF_ALU64_REG(op, R1, R2); + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + } + + return i; +} + +static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + int i = 0; + u64 res; + + if (!insns) + return 9; + + if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) { + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + i += __bpf_ld_imm64(&insns[i], R3, (u32)res); + insns[i++] = BPF_ALU32_REG(op, R1, R2); + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + } + + return i; +} + +static int __bpf_fill_alu64_imm(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 32, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_alu64_imm); +} + +static int __bpf_fill_alu32_imm(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 32, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_alu32_imm); +} + +static int __bpf_fill_alu64_reg(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_alu64_reg); +} + +static int __bpf_fill_alu32_reg(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_alu32_reg); +} + +/* ALU64 immediate operations */ +static int bpf_fill_alu64_mov_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_MOV); +} + +static int bpf_fill_alu64_and_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_AND); +} + +static int bpf_fill_alu64_or_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_OR); +} + +static int bpf_fill_alu64_xor_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_XOR); +} + +static int bpf_fill_alu64_add_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_ADD); +} + +static int bpf_fill_alu64_sub_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_SUB); +} + +static int bpf_fill_alu64_mul_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_MUL); +} + +static int 
bpf_fill_alu64_div_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_DIV); +} + +static int bpf_fill_alu64_mod_imm(struct bpf_test *self) +{ + return __bpf_fill_alu64_imm(self, BPF_MOD); +} + +/* ALU32 immediate operations */ +static int bpf_fill_alu32_mov_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_MOV); +} + +static int bpf_fill_alu32_and_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_AND); +} + +static int bpf_fill_alu32_or_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_OR); +} + +static int bpf_fill_alu32_xor_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_XOR); +} + +static int bpf_fill_alu32_add_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_ADD); +} + +static int bpf_fill_alu32_sub_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_SUB); +} + +static int bpf_fill_alu32_mul_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_MUL); +} + +static int bpf_fill_alu32_div_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_DIV); +} + +static int bpf_fill_alu32_mod_imm(struct bpf_test *self) +{ + return __bpf_fill_alu32_imm(self, BPF_MOD); +} + +/* ALU64 register operations */ +static int bpf_fill_alu64_mov_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_MOV); +} + +static int bpf_fill_alu64_and_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_AND); +} + +static int bpf_fill_alu64_or_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_OR); +} + +static int bpf_fill_alu64_xor_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_XOR); +} + +static int bpf_fill_alu64_add_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_ADD); +} + +static int bpf_fill_alu64_sub_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_SUB); +} + +static int bpf_fill_alu64_mul_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_MUL); +} + +static int bpf_fill_alu64_div_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_DIV); +} + +static int bpf_fill_alu64_mod_reg(struct bpf_test *self) +{ + return __bpf_fill_alu64_reg(self, BPF_MOD); +} + +/* ALU32 register operations */ +static int bpf_fill_alu32_mov_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_MOV); +} + +static int bpf_fill_alu32_and_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_AND); +} + +static int bpf_fill_alu32_or_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_OR); +} + +static int bpf_fill_alu32_xor_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_XOR); +} + +static int bpf_fill_alu32_add_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_ADD); +} + +static int bpf_fill_alu32_sub_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_SUB); +} + +static int bpf_fill_alu32_mul_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_MUL); +} + +static int bpf_fill_alu32_div_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_DIV); +} + +static int bpf_fill_alu32_mod_reg(struct bpf_test *self) +{ + return __bpf_fill_alu32_reg(self, BPF_MOD); +} + +/* + * Test JITs that implement complex ALU operations as function + * calls, and must re-arrange operands for argument passing. 
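(Editorial note, not part of the patch.) The register-combination fills introduced below report a failure by returning __LINE__ from the generated program, so a miscompiled operand pairing can be traced back to the exact emitted check. The recurring pattern, quoted from the generators, is:

        insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);  /* low 32 bits correct? */
        insns[i++] = BPF_MOV64_IMM(R0, __LINE__);         /* no: fail with source line */
        insns[i++] = BPF_EXIT_INSN();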
+ */ +static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32) +{ + int len = 2 + 10 * 10; + struct bpf_insn *insns; + u64 dst, res; + int i = 0; + u32 imm; + int rd; + + insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL); + if (!insns) + return -ENOMEM; + + /* Operand and result values according to operation */ + if (alu32) + dst = 0x76543210U; + else + dst = 0x7edcba9876543210ULL; + imm = 0x01234567U; + + if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH) + imm &= 31; + + __bpf_alu_result(&res, dst, imm, op); + + if (alu32) + res = (u32)res; + + /* Check all operand registers */ + for (rd = R0; rd <= R9; rd++) { + i += __bpf_ld_imm64(&insns[i], rd, dst); + + if (alu32) + insns[i++] = BPF_ALU32_IMM(op, rd, imm); + else + insns[i++] = BPF_ALU64_IMM(op, rd, imm); + + insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32); + insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + } + + insns[i++] = BPF_MOV64_IMM(R0, 1); + insns[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insns; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +/* ALU64 K registers */ +static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MOV, false); +} + +static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_AND, false); +} + +static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_OR, false); +} + +static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_XOR, false); +} + +static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_LSH, false); +} + +static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_RSH, false); +} + +static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false); +} + +static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_ADD, false); +} + +static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_SUB, false); +} + +static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MUL, false); +} + +static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_DIV, false); +} + +static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MOD, false); +} + +/* ALU32 K registers */ +static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MOV, true); +} + +static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_AND, true); +} + +static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_OR, true); +} + +static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_XOR, true); +} + +static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_LSH, true); +} + +static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self) +{ + 
return __bpf_fill_alu_imm_regs(self, BPF_RSH, true); +} + +static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true); +} + +static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_ADD, true); +} + +static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_SUB, true); +} + +static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MUL, true); +} + +static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_DIV, true); +} + +static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self) +{ + return __bpf_fill_alu_imm_regs(self, BPF_MOD, true); +} + +/* + * Test JITs that implement complex ALU operations as function + * calls, and must re-arrange operands for argument passing. + */ +static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32) +{ + int len = 2 + 10 * 10 * 12; + u64 dst, src, res, same; + struct bpf_insn *insns; + int rd, rs; + int i = 0; + + insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL); + if (!insns) + return -ENOMEM; + + /* Operand and result values according to operation */ + if (alu32) { + dst = 0x76543210U; + src = 0x01234567U; + } else { + dst = 0x7edcba9876543210ULL; + src = 0x0123456789abcdefULL; + } + + if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH) + src &= 31; + + __bpf_alu_result(&res, dst, src, op); + __bpf_alu_result(&same, src, src, op); + + if (alu32) { + res = (u32)res; + same = (u32)same; + } + + /* Check all combinations of operand registers */ + for (rd = R0; rd <= R9; rd++) { + for (rs = R0; rs <= R9; rs++) { + u64 val = rd == rs ? 
same : res; + + i += __bpf_ld_imm64(&insns[i], rd, dst); + i += __bpf_ld_imm64(&insns[i], rs, src); + + if (alu32) + insns[i++] = BPF_ALU32_REG(op, rd, rs); + else + insns[i++] = BPF_ALU64_REG(op, rd, rs); + + insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32); + insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + } + } + + insns[i++] = BPF_MOV64_IMM(R0, 1); + insns[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insns; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +/* ALU64 X register combinations */ +static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false); +} + +static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_AND, false); +} + +static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_OR, false); +} + +static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false); +} + +static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false); +} + +static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false); +} + +static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false); +} + +static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false); +} + +static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false); +} + +static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false); +} + +static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false); +} + +static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false); +} + +/* ALU32 X register combinations */ +static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true); +} + +static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_AND, true); +} + +static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_OR, true); +} + +static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true); +} + +static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true); +} + +static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true); +} + +static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true); +} + +static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true); +} + +static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true); +} + +static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self) +{ + return 
__bpf_fill_alu_reg_pairs(self, BPF_MUL, true); +} + +static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true); +} + +static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true); +} + +/* + * Exhaustive tests of atomic operations for all power-of-two operand + * magnitudes, both for positive and negative values. + */ + +static int __bpf_emit_atomic64(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + u64 keep, fetch, res; + int i = 0; + + if (!insns) + return 21; + + switch (op) { + case BPF_XCHG: + res = src; + break; + default: + __bpf_alu_result(&res, dst, src, BPF_OP(op)); + } + + keep = 0x0123456789abcdefULL; + if (op & BPF_FETCH) + fetch = dst; + else + fetch = src; + + i += __bpf_ld_imm64(&insns[i], R0, keep); + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + i += __bpf_ld_imm64(&insns[i], R3, res); + i += __bpf_ld_imm64(&insns[i], R4, fetch); + i += __bpf_ld_imm64(&insns[i], R5, keep); + + insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8); + insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8); + insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; +} + +static int __bpf_emit_atomic32(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + u64 keep, fetch, res; + int i = 0; + + if (!insns) + return 21; + + switch (op) { + case BPF_XCHG: + res = src; + break; + default: + __bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op)); + } + + keep = 0x0123456789abcdefULL; + if (op & BPF_FETCH) + fetch = (u32)dst; + else + fetch = src; + + i += __bpf_ld_imm64(&insns[i], R0, keep); + i += __bpf_ld_imm64(&insns[i], R1, (u32)dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + i += __bpf_ld_imm64(&insns[i], R3, (u32)res); + i += __bpf_ld_imm64(&insns[i], R4, fetch); + i += __bpf_ld_imm64(&insns[i], R5, keep); + + insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4); + insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4); + insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; +} + +static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int i = 0; + + if (!insns) + return 23; + + i += __bpf_ld_imm64(&insns[i], R0, ~dst); + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + + /* Result unsuccessful */ + insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8); + insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8); + insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + /* Result successful */ + insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8); + insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8); + + insns[i++] 
= BPF_JMP_REG(BPF_JEQ, R2, R3, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2); + insns[i++] = BPF_MOV64_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + return i; +} + +static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int i = 0; + + if (!insns) + return 27; + + i += __bpf_ld_imm64(&insns[i], R0, ~dst); + i += __bpf_ld_imm64(&insns[i], R1, (u32)dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + + /* Result unsuccessful */ + insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4); + insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4); + insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */ + insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4); + + insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2); + insns[i++] = BPF_MOV32_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2); + insns[i++] = BPF_MOV32_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + /* Result successful */ + i += __bpf_ld_imm64(&insns[i], R0, dst); + insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4); + insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */ + insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4); + + insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2); + insns[i++] = BPF_MOV32_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2); + insns[i++] = BPF_MOV32_IMM(R0, __LINE__); + insns[i++] = BPF_EXIT_INSN(); + + return i; +} + +static int __bpf_fill_atomic64(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + 0, PATTERN_BLOCK2, + &__bpf_emit_atomic64); +} + +static int __bpf_fill_atomic32(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + 0, PATTERN_BLOCK2, + &__bpf_emit_atomic32); +} + +/* 64-bit atomic operations */ +static int bpf_fill_atomic64_add(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_ADD); +} + +static int bpf_fill_atomic64_and(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_AND); +} + +static int bpf_fill_atomic64_or(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_OR); +} + +static int bpf_fill_atomic64_xor(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_XOR); +} + +static int bpf_fill_atomic64_add_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH); +} + +static int bpf_fill_atomic64_and_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH); +} + +static int bpf_fill_atomic64_or_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH); +} + +static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH); +} + +static int bpf_fill_atomic64_xchg(struct bpf_test *self) +{ + return __bpf_fill_atomic64(self, BPF_XCHG); +} + +static int bpf_fill_cmpxchg64(struct bpf_test *self) +{ + return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2, + &__bpf_emit_cmpxchg64); +} + +/* 32-bit atomic operations */ +static int bpf_fill_atomic32_add(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_ADD); +} + +static int bpf_fill_atomic32_and(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_AND); +} + +static int bpf_fill_atomic32_or(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_OR); +} + +static int 
bpf_fill_atomic32_xor(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_XOR); +} + +static int bpf_fill_atomic32_add_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH); +} + +static int bpf_fill_atomic32_and_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH); +} + +static int bpf_fill_atomic32_or_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH); +} + +static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH); +} + +static int bpf_fill_atomic32_xchg(struct bpf_test *self) +{ + return __bpf_fill_atomic32(self, BPF_XCHG); +} + +static int bpf_fill_cmpxchg32(struct bpf_test *self) +{ + return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2, + &__bpf_emit_cmpxchg32); +} + +/* + * Test JITs that implement ATOMIC operations as function calls or + * other primitives, and must re-arrange operands for argument passing. + */ +static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op) +{ + struct bpf_insn *insn; + int len = 2 + 34 * 10 * 10; + u64 mem, upd, res; + int rd, rs, i = 0; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + /* Operand and memory values */ + if (width == BPF_DW) { + mem = 0x0123456789abcdefULL; + upd = 0xfedcba9876543210ULL; + } else { /* BPF_W */ + mem = 0x01234567U; + upd = 0x76543210U; + } + + /* Memory updated according to operation */ + switch (op) { + case BPF_XCHG: + res = upd; + break; + case BPF_CMPXCHG: + res = mem; + break; + default: + __bpf_alu_result(&res, mem, upd, BPF_OP(op)); + } + + /* Test all operand registers */ + for (rd = R0; rd <= R9; rd++) { + for (rs = R0; rs <= R9; rs++) { + u64 cmp, src; + + /* Initialize value in memory */ + i += __bpf_ld_imm64(&insn[i], R0, mem); + insn[i++] = BPF_STX_MEM(width, R10, R0, -8); + + /* Initialize registers in order */ + i += __bpf_ld_imm64(&insn[i], R0, ~mem); + i += __bpf_ld_imm64(&insn[i], rs, upd); + insn[i++] = BPF_MOV64_REG(rd, R10); + + /* Perform atomic operation */ + insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8); + if (op == BPF_CMPXCHG && width == BPF_W) + insn[i++] = BPF_ZEXT_REG(R0); + + /* Check R0 register value */ + if (op == BPF_CMPXCHG) + cmp = mem; /* Expect value from memory */ + else if (R0 == rd || R0 == rs) + cmp = 0; /* Aliased, checked below */ + else + cmp = ~mem; /* Expect value to be preserved */ + if (cmp) { + insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0, + (u32)cmp, 2); + insn[i++] = BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32); + insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0, + cmp >> 32, 2); + insn[i++] = BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + } + + /* Check source register value */ + if (rs == R0 && op == BPF_CMPXCHG) + src = 0; /* Aliased with R0, checked above */ + else if (rs == rd && (op == BPF_CMPXCHG || + !(op & BPF_FETCH))) + src = 0; /* Aliased with rd, checked below */ + else if (op == BPF_CMPXCHG) + src = upd; /* Expect value to be preserved */ + else if (op & BPF_FETCH) + src = mem; /* Expect fetched value from mem */ + else /* no fetch */ + src = upd; /* Expect value to be preserved */ + if (src) { + insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs, + (u32)src, 2); + insn[i++] = BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32); + insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs, + src >> 32, 2); + insn[i++] = 
BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + } + + /* Check destination register value */ + if (!(rd == R0 && op == BPF_CMPXCHG) && + !(rd == rs && (op & BPF_FETCH))) { + insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2); + insn[i++] = BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + } + + /* Check value in memory */ + if (rs != rd) { /* No aliasing */ + i += __bpf_ld_imm64(&insn[i], R1, res); + } else if (op == BPF_XCHG) { /* Aliased, XCHG */ + insn[i++] = BPF_MOV64_REG(R1, R10); + } else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */ + i += __bpf_ld_imm64(&insn[i], R1, mem); + } else { /* Aliased, ALU oper */ + i += __bpf_ld_imm64(&insn[i], R1, mem); + insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10); + } + + insn[i++] = BPF_LDX_MEM(width, R0, R10, -8); + if (width == BPF_DW) + insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2); + else /* width == BPF_W */ + insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2); + insn[i++] = BPF_MOV32_IMM(R0, __LINE__); + insn[i++] = BPF_EXIT_INSN(); + } + } + + insn[i++] = BPF_MOV64_IMM(R0, 1); + insn[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = i; + BUG_ON(i > len); + + return 0; +} + +/* 64-bit atomic register tests */ +static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD); +} + +static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND); +} + +static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR); +} + +static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR); +} + +static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH); +} + +static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH); +} + +static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH); +} + +static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH); +} + +static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG); +} + +static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG); +} + +/* 32-bit atomic register tests */ +static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD); +} + +static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND); +} + +static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR); +} + +static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR); +} + +static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH); +} + +static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH); +} + +static int 
bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH); +} + +static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH); +} + +static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG); +} + +static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self) +{ + return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG); +} + +/* + * Test the two-instruction 64-bit immediate load operation for all + * power-of-two magnitudes of the immediate operand. For each MSB, a block + * of immediate values centered around the power-of-two MSB are tested, + * both for positive and negative values. The test is designed to verify + * the operation for JITs that emit different code depending on the magnitude + * of the immediate value. This is often the case if the native instruction + * immediate field width is narrower than 32 bits. + */ +static int bpf_fill_ld_imm64_magn(struct bpf_test *self) +{ + int block = 64; /* Increase for more tests per MSB position */ + int len = 3 + 8 * 63 * block * 2; + struct bpf_insn *insn; + int bit, adj, sign; + int i = 0; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); + + for (bit = 0; bit <= 62; bit++) { + for (adj = -block / 2; adj < block / 2; adj++) { + for (sign = -1; sign <= 1; sign += 2) { + s64 imm = sign * ((1LL << bit) + adj); + + /* Perform operation */ + i += __bpf_ld_imm64(&insn[i], R1, imm); + + /* Load reference */ + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm); + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, + (u32)(imm >> 32)); + insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32); + insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3); + + /* Check result */ + insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1); + insn[i++] = BPF_EXIT_INSN(); + } + } + } + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); + insn[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +/* + * Test the two-instruction 64-bit immediate load operation for different + * combinations of bytes. Each byte in the 64-bit word is constructed as + * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG. + * All patterns (base1, mask1) and (base2, mask2) bytes are tested. 
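+ * For example, for each of the 256 patterns, byte index i takes the + * (base1, mask1) form when bit i of the pattern is set and the + * (base2, mask2) form otherwise; the LCG state advances once per + * 64-bit word, so the unmasked bits differ between words.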
+ */ +static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self, + u8 base1, u8 mask1, + u8 base2, u8 mask2) +{ + struct bpf_insn *insn; + int len = 3 + 8 * BIT(8); + int pattern, index; + u32 rand = 1; + int i = 0; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0); + + for (pattern = 0; pattern < BIT(8); pattern++) { + u64 imm = 0; + + for (index = 0; index < 8; index++) { + int byte; + + if (pattern & BIT(index)) + byte = (base1 & mask1) | (rand & ~mask1); + else + byte = (base2 & mask2) | (rand & ~mask2); + imm = (imm << 8) | byte; + } + + /* Update our LCG */ + rand = rand * 1664525 + 1013904223; + + /* Perform operation */ + i += __bpf_ld_imm64(&insn[i], R1, imm); + + /* Load reference */ + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm); + insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32)); + insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32); + insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3); + + /* Check result */ + insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1); + insn[i++] = BPF_EXIT_INSN(); + } + + insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1); + insn[i++] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + BUG_ON(i != len); + + return 0; +} + +static int bpf_fill_ld_imm64_checker(struct bpf_test *self) +{ + return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff); +} + +static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self) +{ + return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80); +} + +static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self) +{ + return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff); +} + +static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self) +{ + return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff); +} + +/* + * Exhaustive tests of JMP operations for all combinations of power-of-two + * magnitudes of the operands, both for positive and negative values. The + * test is designed to verify e.g. the JMP and JMP32 operations for JITs that + * emit different code depending on the magnitude of the immediate value. 
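+ * Note that the JMP32 reference model casts the operands to s32 before + * evaluating the condition, since the JMP32 opcodes compare only the + * low 32 bits of the registers.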
+ */ + +static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op) +{ + switch (op) { + case BPF_JSET: + return !!(v1 & v2); + case BPF_JEQ: + return v1 == v2; + case BPF_JNE: + return v1 != v2; + case BPF_JGT: + return (u64)v1 > (u64)v2; + case BPF_JGE: + return (u64)v1 >= (u64)v2; + case BPF_JLT: + return (u64)v1 < (u64)v2; + case BPF_JLE: + return (u64)v1 <= (u64)v2; + case BPF_JSGT: + return v1 > v2; + case BPF_JSGE: + return v1 >= v2; + case BPF_JSLT: + return v1 < v2; + case BPF_JSLE: + return v1 <= v2; + } + return false; +} + +static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 imm) +{ + int op = *(int *)arg; + + if (insns) { + bool match = __bpf_match_jmp_cond(dst, (s32)imm, op); + int i = 0; + + insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match); + + i += __bpf_ld_imm64(&insns[i], R1, dst); + insns[i++] = BPF_JMP_IMM(op, R1, imm, 1); + if (!match) + insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; + } + + return 5 + 1; +} + +static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 imm) +{ + int op = *(int *)arg; + + if (insns) { + bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op); + int i = 0; + + i += __bpf_ld_imm64(&insns[i], R1, dst); + insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1); + if (!match) + insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; + } + + return 5; +} + +static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + + if (insns) { + bool match = __bpf_match_jmp_cond(dst, src, op); + int i = 0; + + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + insns[i++] = BPF_JMP_REG(op, R1, R2, 1); + if (!match) + insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; + } + + return 7; +} + +static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg, + struct bpf_insn *insns, s64 dst, s64 src) +{ + int op = *(int *)arg; + + if (insns) { + bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op); + int i = 0; + + i += __bpf_ld_imm64(&insns[i], R1, dst); + i += __bpf_ld_imm64(&insns[i], R2, src); + insns[i++] = BPF_JMP32_REG(op, R1, R2, 1); + if (!match) + insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + insns[i++] = BPF_EXIT_INSN(); + + return i; + } + + return 7; +} + +static int __bpf_fill_jmp_imm(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 32, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_jmp_imm); +} + +static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 32, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_jmp32_imm); +} + +static int __bpf_fill_jmp_reg(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_jmp_reg); +} + +static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op) +{ + return __bpf_fill_pattern(self, &op, 64, 64, + PATTERN_BLOCK1, PATTERN_BLOCK2, + &__bpf_emit_jmp32_reg); +} + +/* JMP immediate tests */ +static int bpf_fill_jmp_jset_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JSET); +} + +static int bpf_fill_jmp_jeq_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JEQ); +} + +static int bpf_fill_jmp_jne_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JNE); +} + +static int bpf_fill_jmp_jgt_imm(struct 
bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JGT); +} + +static int bpf_fill_jmp_jge_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JGE); +} + +static int bpf_fill_jmp_jlt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JLT); +} + +static int bpf_fill_jmp_jle_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JLE); +} + +static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JSGT); +} + +static int bpf_fill_jmp_jsge_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JSGE); +} + +static int bpf_fill_jmp_jslt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JSLT); +} + +static int bpf_fill_jmp_jsle_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp_imm(self, BPF_JSLE); +} + +/* JMP32 immediate tests */ +static int bpf_fill_jmp32_jset_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JSET); +} + +static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JEQ); +} + +static int bpf_fill_jmp32_jne_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JNE); +} + +static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JGT); +} + +static int bpf_fill_jmp32_jge_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JGE); +} + +static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JLT); +} + +static int bpf_fill_jmp32_jle_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JLE); +} + +static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JSGT); +} + +static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JSGE); +} + +static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JSLT); +} + +static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self) +{ + return __bpf_fill_jmp32_imm(self, BPF_JSLE); +} + +/* JMP register tests */ +static int bpf_fill_jmp_jset_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JSET); +} + +static int bpf_fill_jmp_jeq_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JEQ); +} + +static int bpf_fill_jmp_jne_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JNE); +} + +static int bpf_fill_jmp_jgt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JGT); +} + +static int bpf_fill_jmp_jge_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JGE); +} + +static int bpf_fill_jmp_jlt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JLT); +} + +static int bpf_fill_jmp_jle_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JLE); +} + +static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JSGT); +} + +static int bpf_fill_jmp_jsge_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JSGE); +} + +static int bpf_fill_jmp_jslt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JSLT); +} + +static int bpf_fill_jmp_jsle_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp_reg(self, BPF_JSLE); +} + +/* JMP32 register tests */ +static int bpf_fill_jmp32_jset_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JSET); +} + +static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self) +{ + return 
__bpf_fill_jmp32_reg(self, BPF_JEQ); +} + +static int bpf_fill_jmp32_jne_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JNE); +} + +static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JGT); +} + +static int bpf_fill_jmp32_jge_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JGE); +} + +static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JLT); +} + +static int bpf_fill_jmp32_jle_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JLE); +} + +static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JSGT); +} + +static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JSGE); +} + +static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JSLT); +} + +static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self) +{ + return __bpf_fill_jmp32_reg(self, BPF_JSLE); +} + +/* + * Set up a sequence of staggered jumps, forwards and backwards with + * increasing offset. This tests the conversion of relative jumps to + * JITed native jumps. On some architectures, for example MIPS, a large + * PC-relative jump offset may overflow the immediate field of the native + * conditional branch instruction, triggering a conversion to use an + * absolute jump instead. Since this changes the jump offsets, another + * offset computation pass is necessary, and that may in turn trigger + * another branch conversion. This jump sequence is particularly nasty + * in that regard. + * + * The sequence generation is parameterized by size and jump type. + * The size must be even, and the expected result is always size + 1. + * Below is an example with size=8 and result=9. + * + * ________________________Start + * R0 = 0 + * R1 = r1 + * R2 = r2 + * ,------- JMP +4 * 3______________Preamble: 4 insns + * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------. + * | | R0 = 8 | + * | | JMP +7 * 3 ------------------------. + * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | | + * | | | R0 = 6 | | | + * | | | JMP +5 * 3 ------------------. | | + * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | | + * | | | | R0 = 4 | | | | | + * | | | | JMP +3 * 3 ------------. | | | | + * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | | + * | | | | | R0 = 2 | | | | | | | + * | | | | | JMP +1 * 3 ------. 
| | | | | | + * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc + * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off + * | | | | | JMP -2 * 3 ---' | | | | | | | + * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | | + * | | | | | | R0 = 3 | | | | | | + * | | | | | | JMP -4 * 3 ---------' | | | | | + * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | | + * | | | | | | | R0 = 5 | | | | + * | | | | | | | JMP -6 * 3 ---------------' | | | + * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | | + * | | | | | | | | R0 = 7 | | + * | | Error | | | JMP -8 * 3 ---------------------' | + * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------' + * | | | | | | | | | R0 = 9__________________Sequence: 3 * size - 1 insns + * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn + * + */ + +/* The maximum size parameter */ +#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1) + +/* We use a reduced number of iterations to get a reasonable execution time */ +#define NR_STAGGERED_JMP_RUNS 10 + +static int __bpf_fill_staggered_jumps(struct bpf_test *self, + const struct bpf_insn *jmp, + u64 r1, u64 r2) +{ + int size = self->test[0].result - 1; + int len = 4 + 3 * (size + 1); + struct bpf_insn *insns; + int off, ind; + + insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL); + if (!insns) + return -ENOMEM; + + /* Preamble */ + insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0); + insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1); + insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2); + insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2); + + /* Sequence */ + for (ind = 0, off = size; ind <= size; ind++, off -= 2) { + struct bpf_insn *ins = &insns[4 + 3 * ind]; + int loc; + + if (off == 0) + off--; + + loc = abs(off); + ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1, + 3 * (size - ind) + 1); + ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc); + ins[2] = *jmp; + ins[2].off = 3 * (off - 1); + } + + /* Return */ + insns[len - 1] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insns; + self->u.ptr.len = len; + + return 0; +} + +/* 64-bit unconditional jump */ +static int bpf_fill_staggered_ja(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0, 0); +} + +/* 64-bit immediate jumps */ +static int bpf_fill_staggered_jeq_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jne_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0); +} + +static int bpf_fill_staggered_jset_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0); +} + +static int bpf_fill_staggered_jgt_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0); +} + +static int bpf_fill_staggered_jge_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jlt_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jle_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = 
BPF_JMP_IMM(BPF_JLE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, 0); +} + +static int bpf_fill_staggered_jsge_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, 0); +} + +static int bpf_fill_staggered_jslt_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, 0); +} + +static int bpf_fill_staggered_jsle_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, 0); +} + +/* 64-bit register jumps */ +static int bpf_fill_staggered_jeq_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jne_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234); +} + +static int bpf_fill_staggered_jset_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82); +} + +static int bpf_fill_staggered_jgt_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234); +} + +static int bpf_fill_staggered_jge_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jlt_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000); +} + +static int bpf_fill_staggered_jle_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, -2); +} + +static int bpf_fill_staggered_jsge_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, -2); +} + +static int bpf_fill_staggered_jslt_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, -1); +} + +static int bpf_fill_staggered_jsle_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, -1); +} + +/* 32-bit immediate jumps */ +static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jne32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0); +} + +static int bpf_fill_staggered_jset32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0); + + return 
__bpf_fill_staggered_jumps(self, &jmp, 0x86, 0); +} + +static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0); +} + +static int bpf_fill_staggered_jge32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jle32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0); +} + +static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, 0); +} + +static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, 0); +} + +static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, 0); +} + +static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, 0); +} + +/* 32-bit register jumps */ +static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jne32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234); +} + +static int bpf_fill_staggered_jset32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82); +} + +static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234); +} + +static int bpf_fill_staggered_jge32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000); +} + +static int bpf_fill_staggered_jle32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234); +} + +static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, -2); +} + +static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -2, -2); +} + +static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0); + + return 
__bpf_fill_staggered_jumps(self, &jmp, -2, -1); +} + +static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self) +{ + struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0); + + return __bpf_fill_staggered_jumps(self, &jmp, -1, -1); +} + + +static struct bpf_test tests[] = { + { + "TAX", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */ + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_LEN, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */ + BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 10, 20, 30, 40, 50 }, + { { 2, 10 }, { 3, 20 }, { 4, 30 } }, + }, + { + "TXA", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */ + }, + CLASSIC, + { 10, 20, 30, 40, 50 }, + { { 1, 2 }, { 3, 6 }, { 4, 8 } }, + }, + { + "ADD_SUB_MUL_K", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 1), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2), + BPF_STMT(BPF_LDX | BPF_IMM, 3), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xfffffffd } } + }, + { + "DIV_MOD_KX", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 8), + BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x20000000 } } + }, + { + "AND_OR_LSH_K", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xff), + BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0), + BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xf), + BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x800000ff }, { 1, 0x800000ff } }, + }, + { + "LD_IMM_0", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */ + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 0), + BPF_STMT(BPF_RET | BPF_K, 1), + }, + CLASSIC, + { }, + { { 1, 1 } }, + }, + { + "LD_IND", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K), + BPF_STMT(BPF_RET | BPF_K, 1) + }, + CLASSIC, + { }, + { { 1, 0 }, { 10, 0 }, { 60, 0 } }, + }, + { + "LD_ABS", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000), + BPF_STMT(BPF_RET | BPF_K, 1) + }, + CLASSIC, + { }, + { { 1, 0 }, { 10, 0 }, { 60, 0 } }, + }, + { + "LD_ABS_LL", + .u.insns = { + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 1, 2, 3 }, + { { 1, 
0 }, { 2, 3 } }, + }, + { + "LD_IND_LL", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1), + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 1, 2, 3, 0xff }, + { { 1, 1 }, { 3, 3 }, { 4, 0xff } }, + }, + { + "LD_ABS_NET", + .u.insns = { + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, + { { 15, 0 }, { 16, 3 } }, + }, + { + "LD_IND_NET", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15), + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 }, + { { 14, 0 }, { 15, 1 }, { 17, 3 } }, + }, + { + "LD_PKTTYPE", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PKTTYPE), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PKTTYPE), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PKTTYPE), BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0), BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_A, 0) + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, 3 }, { 10, 3 } }, + }, + { + "LD_MARK", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_MARK), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, SKB_MARK}, { 10, SKB_MARK} }, + }, + { + "LD_RXHASH", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_RXHASH), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, SKB_HASH}, { 10, SKB_HASH} }, + }, + { + "LD_QUEUE", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_QUEUE), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } }, + }, + { + "LD_PROTOCOL", + .u.insns = { + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 0), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PROTOCOL), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 0), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { 10, 20, 30 }, + { { 10, ETH_P_IP }, { 100, ETH_P_IP } }, + }, + { + "LD_VLAN_TAG", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_VLAN_TAG), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { + { 1, SKB_VLAN_TCI }, + { 10, SKB_VLAN_TCI } + }, + }, + { + "LD_VLAN_TAG_PRESENT", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } + }, + }, + { + "LD_IFINDEX", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_IFINDEX), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } }, + }, + { + 
"LD_HATYPE", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_HATYPE), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } }, + }, + { + "LD_CPU", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_CPU), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_CPU), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, 0 }, { 10, 0 } }, + }, + { + "LD_NLATTR", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 2), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_LDX | BPF_IMM, 3), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, +#ifdef __BIG_ENDIAN + { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 }, +#else + { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 }, +#endif + { { 4, 0 }, { 20, 6 } }, + }, + { + "LD_NLATTR_NEST", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LDX | BPF_IMM, 3), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_NLATTR_NEST), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, +#ifdef __BIG_ENDIAN + { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 }, +#else + { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 }, +#endif + { { 4, 0 }, { 20, 10 } }, + }, + { + "LD_PAYLOAD_OFF", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PAY_OFFSET), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PAY_OFFSET), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PAY_OFFSET), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PAY_OFFSET), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_PAY_OFFSET), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800), + * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request, + * id 9737, seq 1, length 64 + */ + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40, + 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 }, + { { 30, 0 }, { 100, 42 } }, + }, + { + "LD_ANC_XOR", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 10), + BPF_STMT(BPF_LDX | BPF_IMM, 300), + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_ALU_XOR_X), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } }, + }, + { + "SPILL_FILL", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_IMM, 2), + BPF_STMT(BPF_ALU | BPF_RSH, 1), + BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), + BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */ + BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000), + BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */ + BPF_STMT(BPF_STX, 15), /* M3 = len 
*/ + BPF_STMT(BPF_LDX | BPF_MEM, 1), + BPF_STMT(BPF_LD | BPF_MEM, 2), + BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 15), + BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } } + }, + { + "JEQ", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 3, 3, 3, 3, 3 }, + { { 1, 0 }, { 3, 1 }, { 4, MAX_K } }, + }, + { + "JGT", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 4, 4, 4, 3, 3 }, + { { 2, 0 }, { 3, 1 }, { 4, MAX_K } }, + }, + { + "JGE (jt 0), test 1", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 4, 4, 4, 3, 3 }, + { { 2, 0 }, { 3, 1 }, { 4, 1 } }, + }, + { + "JGE (jt 0), test 2", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 4, 4, 5, 3, 3 }, + { { 4, 1 }, { 5, 1 }, { 6, MAX_K } }, + }, + { + "JGE", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 10), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 20), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 40), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 1, 2, 3, 4, 5 }, + { { 1, 20 }, { 3, 40 }, { 5, MAX_K } }, + }, + { + "JSET", + .u.insns = { + BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), + BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1), + BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), + BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 10), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 20), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 30), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 0, 0xAA, 0x55, 1 }, + { { 4, 10 }, { 5, 20 }, { 6, MAX_K } }, + }, + { + "tcpdump port 22", + .u.insns = { + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */ + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17), + 
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */ + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), + BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 0xffff), + BPF_STMT(BPF_RET | BPF_K, 0), + }, + CLASSIC, + /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800) + * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.], + * seq 1305692979:1305693027, ack 3650467037, win 65535, + * options [nop,nop,TS val 2502645400 ecr 3971138], length 48 + */ + { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, + 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76, + 0x08, 0x00, + 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5, + 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */ + 0x0a, 0x01, 0x01, 0x95, /* ip src */ + 0x0a, 0x01, 0x02, 0x0a, /* ip dst */ + 0xc2, 0x24, + 0x00, 0x16 /* dst port */ }, + { { 10, 0 }, { 30, 0 }, { 100, 65535 } }, + }, + { + "tcpdump complex", + .u.insns = { + /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] - + * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and + * (len > 115 or len < 30000000000)' -d + */ + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20), + BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), + BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16), + BPF_STMT(BPF_ST, 1), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14), + BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf), + BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2), + BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */ + BPF_STMT(BPF_LD | BPF_MEM, 1), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), + BPF_STMT(BPF_ST, 5), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), + BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26), + BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0), + BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2), + BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */ + BPF_STMT(BPF_LD | BPF_MEM, 5), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0), + BPF_STMT(BPF_LD | BPF_LEN, 0), + BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 0xffff), + BPF_STMT(BPF_RET | BPF_K, 0), + }, + CLASSIC, + { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, + 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76, + 0x08, 0x00, + 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5, + 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */ + 0x0a, 0x01, 0x01, 0x95, /* ip src */ + 0x0a, 0x01, 0x02, 0x0a, /* ip dst */ + 0xc2, 0x24, + 0x00, 0x16 /* dst port */ }, + { { 10, 0 }, { 30, 0 }, { 100, 65535 } }, + }, + { + "RET_A", + .u.insns = { + /* check that uninitialized X and A contain 
zeros */ + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0) + }, + CLASSIC, + { }, + { {1, 0}, {2, 0} }, + }, + { + "INT: ADD trivial", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_ADD, R1, 2), + BPF_ALU64_IMM(BPF_MOV, R2, 3), + BPF_ALU64_REG(BPF_SUB, R1, R2), + BPF_ALU64_IMM(BPF_ADD, R1, -1), + BPF_ALU64_IMM(BPF_MUL, R1, 3), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffd } } + }, + { + "INT: MUL_X", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_ALU64_IMM(BPF_MOV, R1, -1), + BPF_ALU64_IMM(BPF_MOV, R2, 3), + BPF_ALU64_REG(BPF_MUL, R1, R2), + BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "INT: MUL_X2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -1), + BPF_ALU32_IMM(BPF_MOV, R1, -1), + BPF_ALU32_IMM(BPF_MOV, R2, 3), + BPF_ALU64_REG(BPF_MUL, R1, R2), + BPF_ALU64_IMM(BPF_RSH, R1, 8), + BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "INT: MUL32_X", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -1), + BPF_ALU64_IMM(BPF_MOV, R1, -1), + BPF_ALU32_IMM(BPF_MOV, R2, 3), + BPF_ALU32_REG(BPF_MUL, R1, R2), + BPF_ALU64_IMM(BPF_RSH, R1, 8), + BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + /* Have to test all register combinations, since + * JITing of different registers will produce + * different asm code. + */ + "INT: ADD 64-bit", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_IMM(BPF_MOV, R3, 3), + BPF_ALU64_IMM(BPF_MOV, R4, 4), + BPF_ALU64_IMM(BPF_MOV, R5, 5), + BPF_ALU64_IMM(BPF_MOV, R6, 6), + BPF_ALU64_IMM(BPF_MOV, R7, 7), + BPF_ALU64_IMM(BPF_MOV, R8, 8), + BPF_ALU64_IMM(BPF_MOV, R9, 9), + BPF_ALU64_IMM(BPF_ADD, R0, 20), + BPF_ALU64_IMM(BPF_ADD, R1, 20), + BPF_ALU64_IMM(BPF_ADD, R2, 20), + BPF_ALU64_IMM(BPF_ADD, R3, 20), + BPF_ALU64_IMM(BPF_ADD, R4, 20), + BPF_ALU64_IMM(BPF_ADD, R5, 20), + BPF_ALU64_IMM(BPF_ADD, R6, 20), + BPF_ALU64_IMM(BPF_ADD, R7, 20), + BPF_ALU64_IMM(BPF_ADD, R8, 20), + BPF_ALU64_IMM(BPF_ADD, R9, 20), + BPF_ALU64_IMM(BPF_SUB, R0, 10), + BPF_ALU64_IMM(BPF_SUB, R1, 10), + BPF_ALU64_IMM(BPF_SUB, R2, 10), + BPF_ALU64_IMM(BPF_SUB, R3, 10), + BPF_ALU64_IMM(BPF_SUB, R4, 10), + BPF_ALU64_IMM(BPF_SUB, R5, 10), + BPF_ALU64_IMM(BPF_SUB, R6, 10), + BPF_ALU64_IMM(BPF_SUB, R7, 10), + BPF_ALU64_IMM(BPF_SUB, R8, 10), + BPF_ALU64_IMM(BPF_SUB, R9, 10), + BPF_ALU64_REG(BPF_ADD, R0, R0), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_ALU64_REG(BPF_ADD, R0, R2), + BPF_ALU64_REG(BPF_ADD, R0, R3), + BPF_ALU64_REG(BPF_ADD, R0, R4), + BPF_ALU64_REG(BPF_ADD, R0, R5), + BPF_ALU64_REG(BPF_ADD, R0, R6), + BPF_ALU64_REG(BPF_ADD, R0, R7), + BPF_ALU64_REG(BPF_ADD, R0, R8), + BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */ + BPF_JMP_IMM(BPF_JEQ, R0, 155, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R1, R0), + BPF_ALU64_REG(BPF_ADD, R1, R1), + BPF_ALU64_REG(BPF_ADD, R1, R2), + BPF_ALU64_REG(BPF_ADD, R1, R3), + BPF_ALU64_REG(BPF_ADD, R1, R4), + BPF_ALU64_REG(BPF_ADD, R1, R5), + BPF_ALU64_REG(BPF_ADD, R1, R6), + BPF_ALU64_REG(BPF_ADD, R1, R7), + BPF_ALU64_REG(BPF_ADD, R1, R8), + BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */ + BPF_JMP_IMM(BPF_JEQ, R1, 456, 1), + BPF_EXIT_INSN(), + 
BPF_ALU64_REG(BPF_ADD, R2, R0), + BPF_ALU64_REG(BPF_ADD, R2, R1), + BPF_ALU64_REG(BPF_ADD, R2, R2), + BPF_ALU64_REG(BPF_ADD, R2, R3), + BPF_ALU64_REG(BPF_ADD, R2, R4), + BPF_ALU64_REG(BPF_ADD, R2, R5), + BPF_ALU64_REG(BPF_ADD, R2, R6), + BPF_ALU64_REG(BPF_ADD, R2, R7), + BPF_ALU64_REG(BPF_ADD, R2, R8), + BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */ + BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R3, R0), + BPF_ALU64_REG(BPF_ADD, R3, R1), + BPF_ALU64_REG(BPF_ADD, R3, R2), + BPF_ALU64_REG(BPF_ADD, R3, R3), + BPF_ALU64_REG(BPF_ADD, R3, R4), + BPF_ALU64_REG(BPF_ADD, R3, R5), + BPF_ALU64_REG(BPF_ADD, R3, R6), + BPF_ALU64_REG(BPF_ADD, R3, R7), + BPF_ALU64_REG(BPF_ADD, R3, R8), + BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */ + BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R4, R0), + BPF_ALU64_REG(BPF_ADD, R4, R1), + BPF_ALU64_REG(BPF_ADD, R4, R2), + BPF_ALU64_REG(BPF_ADD, R4, R3), + BPF_ALU64_REG(BPF_ADD, R4, R4), + BPF_ALU64_REG(BPF_ADD, R4, R5), + BPF_ALU64_REG(BPF_ADD, R4, R6), + BPF_ALU64_REG(BPF_ADD, R4, R7), + BPF_ALU64_REG(BPF_ADD, R4, R8), + BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */ + BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R5, R0), + BPF_ALU64_REG(BPF_ADD, R5, R1), + BPF_ALU64_REG(BPF_ADD, R5, R2), + BPF_ALU64_REG(BPF_ADD, R5, R3), + BPF_ALU64_REG(BPF_ADD, R5, R4), + BPF_ALU64_REG(BPF_ADD, R5, R5), + BPF_ALU64_REG(BPF_ADD, R5, R6), + BPF_ALU64_REG(BPF_ADD, R5, R7), + BPF_ALU64_REG(BPF_ADD, R5, R8), + BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */ + BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R6, R0), + BPF_ALU64_REG(BPF_ADD, R6, R1), + BPF_ALU64_REG(BPF_ADD, R6, R2), + BPF_ALU64_REG(BPF_ADD, R6, R3), + BPF_ALU64_REG(BPF_ADD, R6, R4), + BPF_ALU64_REG(BPF_ADD, R6, R5), + BPF_ALU64_REG(BPF_ADD, R6, R6), + BPF_ALU64_REG(BPF_ADD, R6, R7), + BPF_ALU64_REG(BPF_ADD, R6, R8), + BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */ + BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R7, R0), + BPF_ALU64_REG(BPF_ADD, R7, R1), + BPF_ALU64_REG(BPF_ADD, R7, R2), + BPF_ALU64_REG(BPF_ADD, R7, R3), + BPF_ALU64_REG(BPF_ADD, R7, R4), + BPF_ALU64_REG(BPF_ADD, R7, R5), + BPF_ALU64_REG(BPF_ADD, R7, R6), + BPF_ALU64_REG(BPF_ADD, R7, R7), + BPF_ALU64_REG(BPF_ADD, R7, R8), + BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */ + BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R8, R0), + BPF_ALU64_REG(BPF_ADD, R8, R1), + BPF_ALU64_REG(BPF_ADD, R8, R2), + BPF_ALU64_REG(BPF_ADD, R8, R3), + BPF_ALU64_REG(BPF_ADD, R8, R4), + BPF_ALU64_REG(BPF_ADD, R8, R5), + BPF_ALU64_REG(BPF_ADD, R8, R6), + BPF_ALU64_REG(BPF_ADD, R8, R7), + BPF_ALU64_REG(BPF_ADD, R8, R8), + BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */ + BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, R9, R0), + BPF_ALU64_REG(BPF_ADD, R9, R1), + BPF_ALU64_REG(BPF_ADD, R9, R2), + BPF_ALU64_REG(BPF_ADD, R9, R3), + BPF_ALU64_REG(BPF_ADD, R9, R4), + BPF_ALU64_REG(BPF_ADD, R9, R5), + BPF_ALU64_REG(BPF_ADD, R9, R6), + BPF_ALU64_REG(BPF_ADD, R9, R7), + BPF_ALU64_REG(BPF_ADD, R9, R8), + BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */ + BPF_ALU64_REG(BPF_MOV, R0, R9), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2957380 } } + }, + { + "INT: ADD 32-bit", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 20), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R2, 2), + BPF_ALU32_IMM(BPF_MOV, R3, 3), + 
BPF_ALU32_IMM(BPF_MOV, R4, 4), + BPF_ALU32_IMM(BPF_MOV, R5, 5), + BPF_ALU32_IMM(BPF_MOV, R6, 6), + BPF_ALU32_IMM(BPF_MOV, R7, 7), + BPF_ALU32_IMM(BPF_MOV, R8, 8), + BPF_ALU32_IMM(BPF_MOV, R9, 9), + BPF_ALU64_IMM(BPF_ADD, R1, 10), + BPF_ALU64_IMM(BPF_ADD, R2, 10), + BPF_ALU64_IMM(BPF_ADD, R3, 10), + BPF_ALU64_IMM(BPF_ADD, R4, 10), + BPF_ALU64_IMM(BPF_ADD, R5, 10), + BPF_ALU64_IMM(BPF_ADD, R6, 10), + BPF_ALU64_IMM(BPF_ADD, R7, 10), + BPF_ALU64_IMM(BPF_ADD, R8, 10), + BPF_ALU64_IMM(BPF_ADD, R9, 10), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_ALU32_REG(BPF_ADD, R0, R2), + BPF_ALU32_REG(BPF_ADD, R0, R3), + BPF_ALU32_REG(BPF_ADD, R0, R4), + BPF_ALU32_REG(BPF_ADD, R0, R5), + BPF_ALU32_REG(BPF_ADD, R0, R6), + BPF_ALU32_REG(BPF_ADD, R0, R7), + BPF_ALU32_REG(BPF_ADD, R0, R8), + BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */ + BPF_JMP_IMM(BPF_JEQ, R0, 155, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R1, R0), + BPF_ALU32_REG(BPF_ADD, R1, R1), + BPF_ALU32_REG(BPF_ADD, R1, R2), + BPF_ALU32_REG(BPF_ADD, R1, R3), + BPF_ALU32_REG(BPF_ADD, R1, R4), + BPF_ALU32_REG(BPF_ADD, R1, R5), + BPF_ALU32_REG(BPF_ADD, R1, R6), + BPF_ALU32_REG(BPF_ADD, R1, R7), + BPF_ALU32_REG(BPF_ADD, R1, R8), + BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */ + BPF_JMP_IMM(BPF_JEQ, R1, 456, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R2, R0), + BPF_ALU32_REG(BPF_ADD, R2, R1), + BPF_ALU32_REG(BPF_ADD, R2, R2), + BPF_ALU32_REG(BPF_ADD, R2, R3), + BPF_ALU32_REG(BPF_ADD, R2, R4), + BPF_ALU32_REG(BPF_ADD, R2, R5), + BPF_ALU32_REG(BPF_ADD, R2, R6), + BPF_ALU32_REG(BPF_ADD, R2, R7), + BPF_ALU32_REG(BPF_ADD, R2, R8), + BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */ + BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R3, R0), + BPF_ALU32_REG(BPF_ADD, R3, R1), + BPF_ALU32_REG(BPF_ADD, R3, R2), + BPF_ALU32_REG(BPF_ADD, R3, R3), + BPF_ALU32_REG(BPF_ADD, R3, R4), + BPF_ALU32_REG(BPF_ADD, R3, R5), + BPF_ALU32_REG(BPF_ADD, R3, R6), + BPF_ALU32_REG(BPF_ADD, R3, R7), + BPF_ALU32_REG(BPF_ADD, R3, R8), + BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */ + BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R4, R0), + BPF_ALU32_REG(BPF_ADD, R4, R1), + BPF_ALU32_REG(BPF_ADD, R4, R2), + BPF_ALU32_REG(BPF_ADD, R4, R3), + BPF_ALU32_REG(BPF_ADD, R4, R4), + BPF_ALU32_REG(BPF_ADD, R4, R5), + BPF_ALU32_REG(BPF_ADD, R4, R6), + BPF_ALU32_REG(BPF_ADD, R4, R7), + BPF_ALU32_REG(BPF_ADD, R4, R8), + BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */ + BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R5, R0), + BPF_ALU32_REG(BPF_ADD, R5, R1), + BPF_ALU32_REG(BPF_ADD, R5, R2), + BPF_ALU32_REG(BPF_ADD, R5, R3), + BPF_ALU32_REG(BPF_ADD, R5, R4), + BPF_ALU32_REG(BPF_ADD, R5, R5), + BPF_ALU32_REG(BPF_ADD, R5, R6), + BPF_ALU32_REG(BPF_ADD, R5, R7), + BPF_ALU32_REG(BPF_ADD, R5, R8), + BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */ + BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R6, R0), + BPF_ALU32_REG(BPF_ADD, R6, R1), + BPF_ALU32_REG(BPF_ADD, R6, R2), + BPF_ALU32_REG(BPF_ADD, R6, R3), + BPF_ALU32_REG(BPF_ADD, R6, R4), + BPF_ALU32_REG(BPF_ADD, R6, R5), + BPF_ALU32_REG(BPF_ADD, R6, R6), + BPF_ALU32_REG(BPF_ADD, R6, R7), + BPF_ALU32_REG(BPF_ADD, R6, R8), + BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */ + BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R7, R0), + BPF_ALU32_REG(BPF_ADD, R7, R1), + BPF_ALU32_REG(BPF_ADD, R7, R2), + BPF_ALU32_REG(BPF_ADD, R7, R3), + BPF_ALU32_REG(BPF_ADD, R7, R4), + 
BPF_ALU32_REG(BPF_ADD, R7, R5), + BPF_ALU32_REG(BPF_ADD, R7, R6), + BPF_ALU32_REG(BPF_ADD, R7, R7), + BPF_ALU32_REG(BPF_ADD, R7, R8), + BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */ + BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R8, R0), + BPF_ALU32_REG(BPF_ADD, R8, R1), + BPF_ALU32_REG(BPF_ADD, R8, R2), + BPF_ALU32_REG(BPF_ADD, R8, R3), + BPF_ALU32_REG(BPF_ADD, R8, R4), + BPF_ALU32_REG(BPF_ADD, R8, R5), + BPF_ALU32_REG(BPF_ADD, R8, R6), + BPF_ALU32_REG(BPF_ADD, R8, R7), + BPF_ALU32_REG(BPF_ADD, R8, R8), + BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */ + BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_ADD, R9, R0), + BPF_ALU32_REG(BPF_ADD, R9, R1), + BPF_ALU32_REG(BPF_ADD, R9, R2), + BPF_ALU32_REG(BPF_ADD, R9, R3), + BPF_ALU32_REG(BPF_ADD, R9, R4), + BPF_ALU32_REG(BPF_ADD, R9, R5), + BPF_ALU32_REG(BPF_ADD, R9, R6), + BPF_ALU32_REG(BPF_ADD, R9, R7), + BPF_ALU32_REG(BPF_ADD, R9, R8), + BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */ + BPF_ALU32_REG(BPF_MOV, R0, R9), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2957380 } } + }, + { /* Mainly checking JIT here. */ + "INT: SUB", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_IMM(BPF_MOV, R3, 3), + BPF_ALU64_IMM(BPF_MOV, R4, 4), + BPF_ALU64_IMM(BPF_MOV, R5, 5), + BPF_ALU64_IMM(BPF_MOV, R6, 6), + BPF_ALU64_IMM(BPF_MOV, R7, 7), + BPF_ALU64_IMM(BPF_MOV, R8, 8), + BPF_ALU64_IMM(BPF_MOV, R9, 9), + BPF_ALU64_REG(BPF_SUB, R0, R0), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_ALU64_REG(BPF_SUB, R0, R2), + BPF_ALU64_REG(BPF_SUB, R0, R3), + BPF_ALU64_REG(BPF_SUB, R0, R4), + BPF_ALU64_REG(BPF_SUB, R0, R5), + BPF_ALU64_REG(BPF_SUB, R0, R6), + BPF_ALU64_REG(BPF_SUB, R0, R7), + BPF_ALU64_REG(BPF_SUB, R0, R8), + BPF_ALU64_REG(BPF_SUB, R0, R9), + BPF_ALU64_IMM(BPF_SUB, R0, 10), + BPF_JMP_IMM(BPF_JEQ, R0, -55, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R1, R0), + BPF_ALU64_REG(BPF_SUB, R1, R2), + BPF_ALU64_REG(BPF_SUB, R1, R3), + BPF_ALU64_REG(BPF_SUB, R1, R4), + BPF_ALU64_REG(BPF_SUB, R1, R5), + BPF_ALU64_REG(BPF_SUB, R1, R6), + BPF_ALU64_REG(BPF_SUB, R1, R7), + BPF_ALU64_REG(BPF_SUB, R1, R8), + BPF_ALU64_REG(BPF_SUB, R1, R9), + BPF_ALU64_IMM(BPF_SUB, R1, 10), + BPF_ALU64_REG(BPF_SUB, R2, R0), + BPF_ALU64_REG(BPF_SUB, R2, R1), + BPF_ALU64_REG(BPF_SUB, R2, R3), + BPF_ALU64_REG(BPF_SUB, R2, R4), + BPF_ALU64_REG(BPF_SUB, R2, R5), + BPF_ALU64_REG(BPF_SUB, R2, R6), + BPF_ALU64_REG(BPF_SUB, R2, R7), + BPF_ALU64_REG(BPF_SUB, R2, R8), + BPF_ALU64_REG(BPF_SUB, R2, R9), + BPF_ALU64_IMM(BPF_SUB, R2, 10), + BPF_ALU64_REG(BPF_SUB, R3, R0), + BPF_ALU64_REG(BPF_SUB, R3, R1), + BPF_ALU64_REG(BPF_SUB, R3, R2), + BPF_ALU64_REG(BPF_SUB, R3, R4), + BPF_ALU64_REG(BPF_SUB, R3, R5), + BPF_ALU64_REG(BPF_SUB, R3, R6), + BPF_ALU64_REG(BPF_SUB, R3, R7), + BPF_ALU64_REG(BPF_SUB, R3, R8), + BPF_ALU64_REG(BPF_SUB, R3, R9), + BPF_ALU64_IMM(BPF_SUB, R3, 10), + BPF_ALU64_REG(BPF_SUB, R4, R0), + BPF_ALU64_REG(BPF_SUB, R4, R1), + BPF_ALU64_REG(BPF_SUB, R4, R2), + BPF_ALU64_REG(BPF_SUB, R4, R3), + BPF_ALU64_REG(BPF_SUB, R4, R5), + BPF_ALU64_REG(BPF_SUB, R4, R6), + BPF_ALU64_REG(BPF_SUB, R4, R7), + BPF_ALU64_REG(BPF_SUB, R4, R8), + BPF_ALU64_REG(BPF_SUB, R4, R9), + BPF_ALU64_IMM(BPF_SUB, R4, 10), + BPF_ALU64_REG(BPF_SUB, R5, R0), + BPF_ALU64_REG(BPF_SUB, R5, R1), + BPF_ALU64_REG(BPF_SUB, R5, R2), + BPF_ALU64_REG(BPF_SUB, R5, R3), + BPF_ALU64_REG(BPF_SUB, R5, R4), + BPF_ALU64_REG(BPF_SUB, R5, R6), + BPF_ALU64_REG(BPF_SUB, R5, R7), + 
BPF_ALU64_REG(BPF_SUB, R5, R8), + BPF_ALU64_REG(BPF_SUB, R5, R9), + BPF_ALU64_IMM(BPF_SUB, R5, 10), + BPF_ALU64_REG(BPF_SUB, R6, R0), + BPF_ALU64_REG(BPF_SUB, R6, R1), + BPF_ALU64_REG(BPF_SUB, R6, R2), + BPF_ALU64_REG(BPF_SUB, R6, R3), + BPF_ALU64_REG(BPF_SUB, R6, R4), + BPF_ALU64_REG(BPF_SUB, R6, R5), + BPF_ALU64_REG(BPF_SUB, R6, R7), + BPF_ALU64_REG(BPF_SUB, R6, R8), + BPF_ALU64_REG(BPF_SUB, R6, R9), + BPF_ALU64_IMM(BPF_SUB, R6, 10), + BPF_ALU64_REG(BPF_SUB, R7, R0), + BPF_ALU64_REG(BPF_SUB, R7, R1), + BPF_ALU64_REG(BPF_SUB, R7, R2), + BPF_ALU64_REG(BPF_SUB, R7, R3), + BPF_ALU64_REG(BPF_SUB, R7, R4), + BPF_ALU64_REG(BPF_SUB, R7, R5), + BPF_ALU64_REG(BPF_SUB, R7, R6), + BPF_ALU64_REG(BPF_SUB, R7, R8), + BPF_ALU64_REG(BPF_SUB, R7, R9), + BPF_ALU64_IMM(BPF_SUB, R7, 10), + BPF_ALU64_REG(BPF_SUB, R8, R0), + BPF_ALU64_REG(BPF_SUB, R8, R1), + BPF_ALU64_REG(BPF_SUB, R8, R2), + BPF_ALU64_REG(BPF_SUB, R8, R3), + BPF_ALU64_REG(BPF_SUB, R8, R4), + BPF_ALU64_REG(BPF_SUB, R8, R5), + BPF_ALU64_REG(BPF_SUB, R8, R6), + BPF_ALU64_REG(BPF_SUB, R8, R7), + BPF_ALU64_REG(BPF_SUB, R8, R9), + BPF_ALU64_IMM(BPF_SUB, R8, 10), + BPF_ALU64_REG(BPF_SUB, R9, R0), + BPF_ALU64_REG(BPF_SUB, R9, R1), + BPF_ALU64_REG(BPF_SUB, R9, R2), + BPF_ALU64_REG(BPF_SUB, R9, R3), + BPF_ALU64_REG(BPF_SUB, R9, R4), + BPF_ALU64_REG(BPF_SUB, R9, R5), + BPF_ALU64_REG(BPF_SUB, R9, R6), + BPF_ALU64_REG(BPF_SUB, R9, R7), + BPF_ALU64_REG(BPF_SUB, R9, R8), + BPF_ALU64_IMM(BPF_SUB, R9, 10), + BPF_ALU64_IMM(BPF_SUB, R0, 10), + BPF_ALU64_IMM(BPF_NEG, R0, 0), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_ALU64_REG(BPF_SUB, R0, R2), + BPF_ALU64_REG(BPF_SUB, R0, R3), + BPF_ALU64_REG(BPF_SUB, R0, R4), + BPF_ALU64_REG(BPF_SUB, R0, R5), + BPF_ALU64_REG(BPF_SUB, R0, R6), + BPF_ALU64_REG(BPF_SUB, R0, R7), + BPF_ALU64_REG(BPF_SUB, R0, R8), + BPF_ALU64_REG(BPF_SUB, R0, R9), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 11 } } + }, + { /* Mainly checking JIT here. 
*/ + "INT: XOR", + .u.insns_int = { + BPF_ALU64_REG(BPF_SUB, R0, R0), + BPF_ALU64_REG(BPF_XOR, R1, R1), + BPF_JMP_REG(BPF_JEQ, R0, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, 10), + BPF_ALU64_IMM(BPF_MOV, R1, -1), + BPF_ALU64_REG(BPF_SUB, R1, R1), + BPF_ALU64_REG(BPF_XOR, R2, R2), + BPF_JMP_REG(BPF_JEQ, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R2, R2), + BPF_ALU64_REG(BPF_XOR, R3, R3), + BPF_ALU64_IMM(BPF_MOV, R0, 10), + BPF_ALU64_IMM(BPF_MOV, R1, -1), + BPF_JMP_REG(BPF_JEQ, R2, R3, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R3, R3), + BPF_ALU64_REG(BPF_XOR, R4, R4), + BPF_ALU64_IMM(BPF_MOV, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R5, -1), + BPF_JMP_REG(BPF_JEQ, R3, R4, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R4, R4), + BPF_ALU64_REG(BPF_XOR, R5, R5), + BPF_ALU64_IMM(BPF_MOV, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R7, -1), + BPF_JMP_REG(BPF_JEQ, R5, R4, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R5, 1), + BPF_ALU64_REG(BPF_SUB, R5, R5), + BPF_ALU64_REG(BPF_XOR, R6, R6), + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R8, -1), + BPF_JMP_REG(BPF_JEQ, R5, R6, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R6, R6), + BPF_ALU64_REG(BPF_XOR, R7, R7), + BPF_JMP_REG(BPF_JEQ, R7, R6, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R7, R7), + BPF_ALU64_REG(BPF_XOR, R8, R8), + BPF_JMP_REG(BPF_JEQ, R7, R8, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R8, R8), + BPF_ALU64_REG(BPF_XOR, R9, R9), + BPF_JMP_REG(BPF_JEQ, R9, R8, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R9, R9), + BPF_ALU64_REG(BPF_XOR, R0, R0), + BPF_JMP_REG(BPF_JEQ, R9, R0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, R1, R1), + BPF_ALU64_REG(BPF_XOR, R0, R0), + BPF_JMP_REG(BPF_JEQ, R9, R0, 2), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { /* Mainly checking JIT here. 
*/ + "INT: MUL", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 11), + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_IMM(BPF_MOV, R3, 3), + BPF_ALU64_IMM(BPF_MOV, R4, 4), + BPF_ALU64_IMM(BPF_MOV, R5, 5), + BPF_ALU64_IMM(BPF_MOV, R6, 6), + BPF_ALU64_IMM(BPF_MOV, R7, 7), + BPF_ALU64_IMM(BPF_MOV, R8, 8), + BPF_ALU64_IMM(BPF_MOV, R9, 9), + BPF_ALU64_REG(BPF_MUL, R0, R0), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_ALU64_REG(BPF_MUL, R0, R2), + BPF_ALU64_REG(BPF_MUL, R0, R3), + BPF_ALU64_REG(BPF_MUL, R0, R4), + BPF_ALU64_REG(BPF_MUL, R0, R5), + BPF_ALU64_REG(BPF_MUL, R0, R6), + BPF_ALU64_REG(BPF_MUL, R0, R7), + BPF_ALU64_REG(BPF_MUL, R0, R8), + BPF_ALU64_REG(BPF_MUL, R0, R9), + BPF_ALU64_IMM(BPF_MUL, R0, 10), + BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_MUL, R1, R0), + BPF_ALU64_REG(BPF_MUL, R1, R2), + BPF_ALU64_REG(BPF_MUL, R1, R3), + BPF_ALU64_REG(BPF_MUL, R1, R4), + BPF_ALU64_REG(BPF_MUL, R1, R5), + BPF_ALU64_REG(BPF_MUL, R1, R6), + BPF_ALU64_REG(BPF_MUL, R1, R7), + BPF_ALU64_REG(BPF_MUL, R1, R8), + BPF_ALU64_REG(BPF_MUL, R1, R9), + BPF_ALU64_IMM(BPF_MUL, R1, 10), + BPF_ALU64_REG(BPF_MOV, R2, R1), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_LSH, R1, 32), + BPF_ALU64_IMM(BPF_ARSH, R1, 32), + BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_MUL, R2, R0), + BPF_ALU64_REG(BPF_MUL, R2, R1), + BPF_ALU64_REG(BPF_MUL, R2, R3), + BPF_ALU64_REG(BPF_MUL, R2, R4), + BPF_ALU64_REG(BPF_MUL, R2, R5), + BPF_ALU64_REG(BPF_MUL, R2, R6), + BPF_ALU64_REG(BPF_MUL, R2, R7), + BPF_ALU64_REG(BPF_MUL, R2, R8), + BPF_ALU64_REG(BPF_MUL, R2, R9), + BPF_ALU64_IMM(BPF_MUL, R2, 10), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_ALU64_REG(BPF_MOV, R0, R2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x35d97ef2 } } + }, + { /* Mainly checking JIT here. */ + "MOV REG64", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffffffffffffLL), + BPF_MOV64_REG(R1, R0), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_MOV64_REG(R4, R3), + BPF_MOV64_REG(R5, R4), + BPF_MOV64_REG(R6, R5), + BPF_MOV64_REG(R7, R6), + BPF_MOV64_REG(R8, R7), + BPF_MOV64_REG(R9, R8), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_ALU64_IMM(BPF_MOV, R1, 0), + BPF_ALU64_IMM(BPF_MOV, R2, 0), + BPF_ALU64_IMM(BPF_MOV, R3, 0), + BPF_ALU64_IMM(BPF_MOV, R4, 0), + BPF_ALU64_IMM(BPF_MOV, R5, 0), + BPF_ALU64_IMM(BPF_MOV, R6, 0), + BPF_ALU64_IMM(BPF_MOV, R7, 0), + BPF_ALU64_IMM(BPF_MOV, R8, 0), + BPF_ALU64_IMM(BPF_MOV, R9, 0), + BPF_ALU64_REG(BPF_ADD, R0, R0), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_ALU64_REG(BPF_ADD, R0, R2), + BPF_ALU64_REG(BPF_ADD, R0, R3), + BPF_ALU64_REG(BPF_ADD, R0, R4), + BPF_ALU64_REG(BPF_ADD, R0, R5), + BPF_ALU64_REG(BPF_ADD, R0, R6), + BPF_ALU64_REG(BPF_ADD, R0, R7), + BPF_ALU64_REG(BPF_ADD, R0, R8), + BPF_ALU64_REG(BPF_ADD, R0, R9), + BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfefe } } + }, + { /* Mainly checking JIT here. 
*/ + "MOV REG32", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffffffffffffLL), + BPF_MOV64_REG(R1, R0), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_MOV64_REG(R4, R3), + BPF_MOV64_REG(R5, R4), + BPF_MOV64_REG(R6, R5), + BPF_MOV64_REG(R7, R6), + BPF_MOV64_REG(R8, R7), + BPF_MOV64_REG(R9, R8), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU32_IMM(BPF_MOV, R2, 0), + BPF_ALU32_IMM(BPF_MOV, R3, 0), + BPF_ALU32_IMM(BPF_MOV, R4, 0), + BPF_ALU32_IMM(BPF_MOV, R5, 0), + BPF_ALU32_IMM(BPF_MOV, R6, 0), + BPF_ALU32_IMM(BPF_MOV, R7, 0), + BPF_ALU32_IMM(BPF_MOV, R8, 0), + BPF_ALU32_IMM(BPF_MOV, R9, 0), + BPF_ALU64_REG(BPF_ADD, R0, R0), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_ALU64_REG(BPF_ADD, R0, R2), + BPF_ALU64_REG(BPF_ADD, R0, R3), + BPF_ALU64_REG(BPF_ADD, R0, R4), + BPF_ALU64_REG(BPF_ADD, R0, R5), + BPF_ALU64_REG(BPF_ADD, R0, R6), + BPF_ALU64_REG(BPF_ADD, R0, R7), + BPF_ALU64_REG(BPF_ADD, R0, R8), + BPF_ALU64_REG(BPF_ADD, R0, R9), + BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfefe } } + }, + { /* Mainly checking JIT here. */ + "LD IMM64", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffffffffffffLL), + BPF_MOV64_REG(R1, R0), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_MOV64_REG(R4, R3), + BPF_MOV64_REG(R5, R4), + BPF_MOV64_REG(R6, R5), + BPF_MOV64_REG(R7, R6), + BPF_MOV64_REG(R8, R7), + BPF_MOV64_REG(R9, R8), + BPF_LD_IMM64(R0, 0x0LL), + BPF_LD_IMM64(R1, 0x0LL), + BPF_LD_IMM64(R2, 0x0LL), + BPF_LD_IMM64(R3, 0x0LL), + BPF_LD_IMM64(R4, 0x0LL), + BPF_LD_IMM64(R5, 0x0LL), + BPF_LD_IMM64(R6, 0x0LL), + BPF_LD_IMM64(R7, 0x0LL), + BPF_LD_IMM64(R8, 0x0LL), + BPF_LD_IMM64(R9, 0x0LL), + BPF_ALU64_REG(BPF_ADD, R0, R0), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_ALU64_REG(BPF_ADD, R0, R2), + BPF_ALU64_REG(BPF_ADD, R0, R3), + BPF_ALU64_REG(BPF_ADD, R0, R4), + BPF_ALU64_REG(BPF_ADD, R0, R5), + BPF_ALU64_REG(BPF_ADD, R0, R6), + BPF_ALU64_REG(BPF_ADD, R0, R7), + BPF_ALU64_REG(BPF_ADD, R0, R8), + BPF_ALU64_REG(BPF_ADD, R0, R9), + BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfefe } } + }, + { + "INT: ALU MIX", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 11), + BPF_ALU64_IMM(BPF_ADD, R0, -1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_IMM(BPF_XOR, R2, 3), + BPF_ALU64_REG(BPF_DIV, R0, R2), + BPF_JMP_IMM(BPF_JEQ, R0, 10, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOD, R0, 3), + BPF_JMP_IMM(BPF_JEQ, R0, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, + { + "INT: shifts by register", + .u.insns_int = { + BPF_MOV64_IMM(R0, -1234), + BPF_MOV64_IMM(R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R0, R2), + BPF_MOV32_IMM(R4, -1234), + BPF_JMP_REG(BPF_JEQ, R0, R4, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_AND, R4, 63), + BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */ + BPF_MOV64_IMM(R3, 47), + BPF_ALU64_REG(BPF_ARSH, R0, R3), + BPF_JMP_IMM(BPF_JEQ, R0, -617, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */ + BPF_JMP_IMM(BPF_JEQ, R4, 92, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 4), + BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */ + BPF_JMP_IMM(BPF_JEQ, R4, 64, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 5), + BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */ + BPF_JMP_IMM(BPF_JEQ, R4, 160, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R0, -1), + BPF_EXIT_INSN(), 
+ }, + INTERNAL, + { }, + { { 0, -1 } } + }, +#ifdef CONFIG_32BIT + { + "INT: 32-bit context pointer word order and zero-extension", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_JMP32_IMM(BPF_JNE, R1, 0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, +#endif + { + "check: missing ret", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 1), + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { + "check: div_k_0", + .u.insns = { + BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0), + BPF_STMT(BPF_RET | BPF_K, 0) + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { + "check: unknown insn", + .u.insns = { + /* seccomp insn, rejected in socket filter */ + BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0), + BPF_STMT(BPF_RET | BPF_K, 0) + }, + CLASSIC | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { + "check: out of range spill/fill", + .u.insns = { + BPF_STMT(BPF_STX, 16), + BPF_STMT(BPF_RET | BPF_K, 0) + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { + "JUMPS + HOLES", + .u.insns = { + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15), + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3), + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15), + BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + 
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3), + BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2), + BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), + BPF_STMT(BPF_RET | BPF_A, 0), + BPF_STMT(BPF_RET | BPF_A, 0), + }, + CLASSIC, + { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8, + 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4, + 0x08, 0x00, + 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, + 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */ + 0xc0, 0xa8, 0x33, 0x01, + 0xc0, 0xa8, 0x33, 0x02, + 0xbb, 0xb6, + 0xa9, 0xfa, + 0x00, 0x14, 0x00, 0x00, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, + 0xcc, 0xcc, 0xcc, 0xcc }, + { { 88, 0x001b } } + }, + { + "check: RET X", + .u.insns = { + BPF_STMT(BPF_RET | BPF_X, 0), + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { + "check: LDX + RET X", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 42), + BPF_STMT(BPF_RET | BPF_X, 0), + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { /* Mainly checking JIT here. */ + "M[]: alt STX + LDX", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 100), + BPF_STMT(BPF_STX, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 0), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 1), + BPF_STMT(BPF_LDX | BPF_MEM, 1), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 2), + BPF_STMT(BPF_LDX | BPF_MEM, 2), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 3), + BPF_STMT(BPF_LDX | BPF_MEM, 3), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 4), + BPF_STMT(BPF_LDX | BPF_MEM, 4), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 5), + BPF_STMT(BPF_LDX | BPF_MEM, 5), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 6), + BPF_STMT(BPF_LDX | BPF_MEM, 6), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 7), + BPF_STMT(BPF_LDX | BPF_MEM, 7), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 8), + BPF_STMT(BPF_LDX | BPF_MEM, 8), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 9), + BPF_STMT(BPF_LDX | BPF_MEM, 9), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 10), + BPF_STMT(BPF_LDX | BPF_MEM, 10), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 11), + BPF_STMT(BPF_LDX | BPF_MEM, 11), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 12), + BPF_STMT(BPF_LDX | BPF_MEM, 12), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + 
BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 13), + BPF_STMT(BPF_LDX | BPF_MEM, 13), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 14), + BPF_STMT(BPF_LDX | BPF_MEM, 14), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_STX, 15), + BPF_STMT(BPF_LDX | BPF_MEM, 15), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_RET | BPF_A, 0), + }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 116 } }, + }, + { /* Mainly checking JIT here. */ + "M[]: full STX + full LDX", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb), + BPF_STMT(BPF_STX, 0), + BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae), + BPF_STMT(BPF_STX, 1), + BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf), + BPF_STMT(BPF_STX, 2), + BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc), + BPF_STMT(BPF_STX, 3), + BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb), + BPF_STMT(BPF_STX, 4), + BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda), + BPF_STMT(BPF_STX, 5), + BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb), + BPF_STMT(BPF_STX, 6), + BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade), + BPF_STMT(BPF_STX, 7), + BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec), + BPF_STMT(BPF_STX, 8), + BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc), + BPF_STMT(BPF_STX, 9), + BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac), + BPF_STMT(BPF_STX, 10), + BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea), + BPF_STMT(BPF_STX, 11), + BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb), + BPF_STMT(BPF_STX, 12), + BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf), + BPF_STMT(BPF_STX, 13), + BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde), + BPF_STMT(BPF_STX, 14), + BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad), + BPF_STMT(BPF_STX, 15), + BPF_STMT(BPF_LDX | BPF_MEM, 0), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 1), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 2), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 3), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 4), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 5), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 6), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 7), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 8), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 9), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 10), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 11), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 12), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 13), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 14), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_LDX | BPF_MEM, 15), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0), + }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x2a5a5e5 } }, + }, + { + "check: SKF_AD_MAX", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_MAX), + BPF_STMT(BPF_RET | BPF_A, 0), + }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = NULL, + .expected_errcode = -EINVAL, + }, + { /* Passes checker but fails during runtime. 
*/ + "LD [SKF_AD_OFF-1]", + .u.insns = { + BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF - 1), + BPF_STMT(BPF_RET | BPF_K, 1), + }, + CLASSIC, + { }, + { { 1, 0 } }, + }, + { + "load 64-bit immediate", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x567800001234LL), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_ALU64_IMM(BPF_LSH, R3, 32), + BPF_ALU64_IMM(BPF_RSH, R3, 32), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(R0, 0x1ffffffffLL), + BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + /* BPF_ALU | BPF_MOV | BPF_X */ + { + "ALU_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + /* BPF_ALU | BPF_MOV | BPF_K */ + { + "ALU_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_K: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x00000000ffffffffLL), + BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_MOV_K: small negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -123 } } + }, + { + "ALU_MOV_K: small negative zero extension", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU_MOV_K: large negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -123456789), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -123456789 } } + }, + { + "ALU_MOV_K: large negative zero extension", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -123456789), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_K: dst = 2147483647", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_OR_K: dst = 0x0", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0), + BPF_ALU64_IMM(BPF_MOV, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + 
BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: dst = -1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: small negative", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, -123), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -123 } } + }, + { + "ALU64_MOV_K: small negative sign extension", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, -123), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } } + }, + { + "ALU64_MOV_K: large negative", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, -123456789), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -123456789 } } + }, + { + "ALU64_MOV_K: large negative sign extension", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, -123456789), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } } + }, + /* BPF_ALU | BPF_ADD | BPF_X */ + { + "ALU_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_ADD_X: 2 + 4294967294 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_ADD_X: 2 + 4294967294 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_LD_IMM64(R2, 4294967296ULL), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_JMP_REG(BPF_JEQ, R0, R2, 2), + BPF_MOV32_IMM(R0, 0), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_ADD | BPF_K */ + { + "ALU_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_ADD_K: 4294967294 + 2 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), 
+ BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80000000 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80000000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0x80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80008000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 1 + 2147483646 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2147483646), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_ADD_K: 4294967294 + 2 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_LD_IMM64(R1, 4294967296ULL), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_ADD_K: 2147483646 + -2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_ADD, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 1 + 0 = 1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x1), + BPF_ALU64_IMM(BPF_ADD, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + 
"ALU64_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80000000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80008000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_X */ + { + "ALU_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_K */ + { + "ALU_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_SUB_K: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_SUB_K: 4294967294 - 4294967295 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 2147483646 - 2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_SUB, R0, 
2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + /* BPF_ALU | BPF_MUL | BPF_X */ + { + "ALU_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_X: -1 * -1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, -1), + BPF_ALU32_IMM(BPF_MOV, R1, -1), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_X: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_MUL_X: 64x64 multiply, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0fedcba987654321LL), + BPF_LD_IMM64(R1, 0x123456789abcdef0LL), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xe5618cf0 } } + }, + { + "ALU64_MUL_X: 64x64 multiply, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0fedcba987654321LL), + BPF_LD_IMM64(R1, 0x123456789abcdef0LL), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x2236d88f } } + }, + /* BPF_ALU | BPF_MUL | BPF_K */ + { + "ALU_MUL_K: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MUL_K: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_MUL_K: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * -2147483647 = -2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 
0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MUL_K: 64x32 multiply, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xe242d208 } } + }, + { + "ALU64_MUL_K: 64x32 multiply, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xc28f5c28 } } + }, + /* BPF_ALU | BPF_DIV | BPF_X */ + { + "ALU_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_X: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_X: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R4, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_REG(BPF_DIV, R2, R4), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_DIV | BPF_K */ + { + "ALU_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 3 / 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x1UL), + BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU64_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 3 / 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_DIV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_K: 
0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_X */ + { + "ALU_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_X: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_X: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483645), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_K */ + { + "ALU_MOD_K: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU_MOD_K: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_K: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU64_MOD_K: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_MOD, R0, 2147483645), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_AND | BPF_X */ + { + "ALU_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_AND | BPF_K */ + { + "ALU_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, 
+ { { 0, 2 } }, + }, + { + "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU_AND_K: Small immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), + BPF_ALU32_IMM(BPF_AND, R0, 15), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4 } } + }, + { + "ALU_AND_K: Large immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), + BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xa1b2c3d4 } } + }, + { + "ALU_AND_K: Zero extension", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL), + BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "ALU64_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000000000000000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: Sign extension 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x00000000090b0d0fLL), + BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "ALU64_AND_K: Sign extension 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL), + BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + /* BPF_ALU | BPF_OR | BPF_X */ + { + "ALU_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + 
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_OR | BPF_K */ + { + "ALU_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_OR, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU_OR_K: Small immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), + BPF_ALU32_IMM(BPF_OR, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x01020305 } } + }, + { + "ALU_OR_K: Large immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), + BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xa1b2c3d4 } } + }, + { + "ALU_OR_K: Zero extension", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL), + BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "ALU64_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_OR, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_OR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: Sign extension 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x012345678fafcfefLL), + BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + 
INTERNAL, + { }, + { { 0, 1 } } + }, + { + "ALU64_OR_K: Sign extension 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL), + BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + /* BPF_ALU | BPF_XOR | BPF_X */ + { + "ALU_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + /* BPF_ALU | BPF_XOR | BPF_K */ + { + "ALU_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU_XOR_K: Small immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), + BPF_ALU32_IMM(BPF_XOR, R0, 15), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x0102030b } } + }, + { + "ALU_XOR_K: Large immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), + BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x5e4d3c2b } } + }, + { + "ALU_XOR_K: Zero extension", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x00000000795b3d1fLL), + BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, 3 }, { 10, 3 } }, + { { 0, 1 } } }, { - "LD_MARK", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_MARK), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU64_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, SKB_MARK}, { 10, SKB_MARK} }, + { { 0, 3 } }, }, { - "LD_RXHASH", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_RXHASH), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_XOR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + 
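(Editorial aside, not part of the patch: the ALU64 OR/XOR immediate tests around this point all rest on one detail of the eBPF ISA — the 32-bit immediate of a 64-bit ALU instruction is sign-extended to 64 bits before the operation, while a 32-bit BPF_ALU operation zero-extends its result into the upper half of the destination register. A minimal standalone C sketch of the arithmetic, reusing the constants from the surrounding test vectors:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r2 = 0x0000ffffffff0000ULL;
	int32_t imm = -1;	/* encoded as 0xffffffff in the instruction */

	/* BPF_ALU64 | BPF_OR/XOR | BPF_K: the immediate is sign-extended first */
	uint64_t or64  = r2 | (uint64_t)(int64_t)imm;	/* 0xffffffffffffffff */
	uint64_t xor64 = r2 ^ (uint64_t)(int64_t)imm;	/* 0xffff00000000ffff */

	/* BPF_ALU (32-bit) | BPF_OR | BPF_K: the 32-bit result is zero-extended */
	uint64_t r0 = 0x0123456789abcdefULL;
	r0 = (uint32_t)r0 | 0xf0f0f0f0u;		/* 0x00000000f9fbfdff */

	printf("%#llx %#llx %#llx\n",
	       (unsigned long long)or64,
	       (unsigned long long)xor64,
	       (unsigned long long)r0);
	return 0;
}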
{ + "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffff00000000ffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: Sign extension 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL), + BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + { + "ALU64_XOR_K: Sign extension 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL), + BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, + /* BPF_ALU | BPF_LSH | BPF_X */ + { + "ALU_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU_LSH_X: 0x12345678 << 12 = 0x45678000", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x45678000 } } + }, + { + "ALU64_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU64_LSH_X: Shift < 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xbcdef000 } } + }, + { + "ALU64_LSH_X: Shift < 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x3456789a } } + }, + { + "ALU64_LSH_X: Shift > 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_LSH_X: Shift > 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_LSH, R0, R1), + 
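	/*
	 * Editorial note, not part of the original patch: the test harness only
	 * compares the low 32 bits of R0, so the 64-bit shift tests probe each
	 * half of the result separately -- the "high word" variants shift the
	 * result down by 32 before returning, as done here.  For
	 * 0x0123456789abcdefULL << 12 that yields a low word of 0xbcdef000 and
	 * a high word of 0x3456789a.  The explicit "Shift == 32" and
	 * "Shift > 32" cases are presumably aimed at 32-bit JITs that
	 * synthesize a 64-bit shift from two 32-bit halves, where a count of
	 * exactly 32 is an easy edge case to get wrong.
	 */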
BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x9abcdef0 } } + }, + { + "ALU64_LSH_X: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_LSH_X: Shift == 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, SKB_HASH}, { 10, SKB_HASH} }, + { { 0, 0x89abcdef } } }, { - "LD_QUEUE", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_QUEUE), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_X: Zero shift, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } }, - }, - { - "LD_PROTOCOL", - .u.insns = { - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 0), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PROTOCOL), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 0), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0) - }, - CLASSIC, - { 10, 20, 30 }, - { { 10, ETH_P_IP }, { 100, ETH_P_IP } }, + { { 0, 0x89abcdef } } }, { - "LD_VLAN_TAG", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_VLAN_TAG), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_X: Zero shift, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { - { 1, SKB_VLAN_TCI }, - { 10, SKB_VLAN_TCI } - }, + { { 0, 0x01234567 } } }, + /* BPF_ALU | BPF_LSH | BPF_K */ { - "LD_VLAN_TAG_PRESENT", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { - { 1, SKB_VLAN_PRESENT }, - { 10, SKB_VLAN_PRESENT } - }, + { { 0, 2 } }, }, { - "LD_IFINDEX", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_IFINDEX), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } }, + { { 0, 0x80000000 } }, }, { - "LD_HATYPE", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_HATYPE), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU_LSH_K: 0x12345678 << 12 = 0x45678000", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_LSH, R0, 12), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } }, + { { 0, 0x45678000 } } }, { - "LD_CPU", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_CPU), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_CPU), - BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) + 
"ALU_LSH_K: 0x12345678 << 0 = 0x12345678", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_LSH, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, 0 }, { 10, 0 } }, + { { 0, 0x12345678 } } }, { - "LD_NLATTR", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 2), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_LDX | BPF_IMM, 3), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), }, - CLASSIC, -#ifdef __BIG_ENDIAN - { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 }, -#else - { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 }, -#endif - { { 4, 0 }, { 20, 6 } }, + INTERNAL, + { }, + { { 0, 2 } }, }, { - "LD_NLATTR_NEST", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LDX | BPF_IMM, 3), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_NLATTR_NEST), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), }, - CLASSIC, -#ifdef __BIG_ENDIAN - { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 }, -#else - { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 }, -#endif - { { 4, 0 }, { 20, 10 } }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, }, { - "LD_PAYLOAD_OFF", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PAY_OFFSET), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PAY_OFFSET), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PAY_OFFSET), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PAY_OFFSET), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_PAY_OFFSET), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_K: Shift < 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 12), + BPF_EXIT_INSN(), }, - CLASSIC, - /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800), - * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request, - * id 9737, seq 1, length 64 - */ - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, - 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40, - 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 }, - { { 30, 0 }, { 100, 42 } }, + INTERNAL, + { }, + { { 0, 0xbcdef000 } } }, { - "LD_ANC_XOR", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 10), - BPF_STMT(BPF_LDX | BPF_IMM, 300), - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_ALU_XOR_X), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_K: Shift < 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 12), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, + 
INTERNAL, { }, - { { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } }, + { { 0, 0x3456789a } } }, { - "SPILL_FILL", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_IMM, 2), - BPF_STMT(BPF_ALU | BPF_RSH, 1), - BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), - BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */ - BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000), - BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */ - BPF_STMT(BPF_STX, 15), /* M3 = len */ - BPF_STMT(BPF_LDX | BPF_MEM, 1), - BPF_STMT(BPF_LD | BPF_MEM, 2), - BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 15), - BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_LSH_K: Shift > 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 36), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } } + { { 0, 0 } } }, { - "JEQ", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU64_LSH_K: Shift > 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 36), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, - { 3, 3, 3, 3, 3 }, - { { 1, 0 }, { 3, 1 }, { 4, MAX_K } }, + INTERNAL, + { }, + { { 0, 0x9abcdef0 } } }, { - "JGT", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), - BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU64_LSH_K: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, - { 4, 4, 4, 3, 3 }, - { { 2, 0 }, { 3, 1 }, { 4, MAX_K } }, + INTERNAL, + { }, + { { 0, 0 } } }, { - "JGE (jt 0), test 1", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU64_LSH_K: Shift == 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 32), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, - { 4, 4, 4, 3, 3 }, - { { 2, 0 }, { 3, 1 }, { 4, 1 } }, + INTERNAL, + { }, + { { 0, 0x89abcdef } } }, { - "JGE (jt 0), test 2", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU64_LSH_K: Zero shift", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_LSH, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { 4, 4, 5, 3, 3 }, - { { 4, 1 }, { 5, 1 }, { 6, MAX_K } }, + INTERNAL, + { }, + { { 0, 0x89abcdef } } }, + /* BPF_ALU | BPF_RSH | BPF_X */ { - "JGE", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 10), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 20), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 40), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + 
BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), }, - CLASSIC, - { 1, 2, 3, 4, 5 }, - { { 1, 20 }, { 3, 40 }, { 5, MAX_K } }, + INTERNAL, + { }, + { { 0, 1 } }, }, { - "JSET", - .u.insns = { - BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), - BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1), - BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), - BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0), - BPF_STMT(BPF_LDX | BPF_LEN, 0), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 10), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 20), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 30), - BPF_STMT(BPF_RET | BPF_K, MAX_K) + "ALU_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), }, - CLASSIC, - { 0, 0xAA, 0x55, 1 }, - { { 4, 10 }, { 5, 20 }, { 6, MAX_K } }, + INTERNAL, + { }, + { { 0, 1 } }, }, { - "tcpdump port 22", - .u.insns = { - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */ - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */ - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), - BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 0xffff), - BPF_STMT(BPF_RET | BPF_K, 0), + "ALU_RSH_X: 0x12345678 >> 20 = 0x123", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_MOV, R1, 20), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), }, - CLASSIC, - /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800) - * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.], - * seq 1305692979:1305693027, ack 3650467037, win 65535, - * options [nop,nop,TS val 2502645400 ecr 3971138], length 48 - */ - { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, - 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76, - 0x08, 0x00, - 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5, - 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */ - 0x0a, 0x01, 0x01, 0x95, /* ip src */ - 0x0a, 0x01, 0x02, 0x0a, /* ip dst */ - 0xc2, 0x24, - 0x00, 0x16 /* dst port */ }, - { { 10, 0 }, { 30, 0 }, { 100, 65535 } }, + INTERNAL, + { }, + { { 0, 0x123 } } }, { - "tcpdump 
complex", - .u.insns = { - /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] - - * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and - * (len > 115 or len < 30000000000)' -d - */ - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20), - BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), - BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16), - BPF_STMT(BPF_ST, 1), - BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14), - BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf), - BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2), - BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */ - BPF_STMT(BPF_LD | BPF_MEM, 1), - BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), - BPF_STMT(BPF_ST, 5), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14), - BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26), - BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0), - BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2), - BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */ - BPF_STMT(BPF_LD | BPF_MEM, 5), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0), - BPF_STMT(BPF_LD | BPF_LEN, 0), - BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0), - BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 0xffff), - BPF_STMT(BPF_RET | BPF_K, 0), + "ALU64_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: Shift < 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), }, - CLASSIC, - { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, - 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76, - 0x08, 0x00, - 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5, - 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */ - 0x0a, 0x01, 0x01, 0x95, /* ip src */ - 0x0a, 0x01, 0x02, 0x0a, /* ip dst */ - 0xc2, 0x24, - 0x00, 0x16 /* dst port */ }, - { { 10, 0 }, { 30, 0 }, { 100, 65535 } }, + INTERNAL, + { }, + { { 0, 0x56789abc } } }, { - "RET_A", - .u.insns = { - /* check that uninitialized X and A contain zeros */ - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0) + "ALU64_RSH_X: Shift < 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { {1, 0}, {2, 0} }, + { { 0, 0x00081234 } } }, { - "INT: ADD trivial", + "ALU64_RSH_X: Shift > 32, low word", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R1, 1), - BPF_ALU64_IMM(BPF_ADD, R1, 2), - BPF_ALU64_IMM(BPF_MOV, R2, 3), - BPF_ALU64_REG(BPF_SUB, R1, R2), - BPF_ALU64_IMM(BPF_ADD, R1, -1), - BPF_ALU64_IMM(BPF_MUL, R1, 3), - BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_RSH, R0, R1), BPF_EXIT_INSN(), }, 
INTERNAL, { }, - { { 0, 0xfffffffd } } + { { 0, 0x08123456 } } }, { - "INT: MUL_X", + "ALU64_RSH_X: Shift > 32, high word", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, -1), - BPF_ALU64_IMM(BPF_MOV, R1, -1), - BPF_ALU64_IMM(BPF_MOV, R2, 3), - BPF_ALU64_REG(BPF_MUL, R1, R2), - BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, 1), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_RSH_X: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_RSH, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 0x81234567 } } }, { - "INT: MUL_X2", + "ALU64_RSH_X: Shift == 32, high word", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -1), - BPF_ALU32_IMM(BPF_MOV, R1, -1), - BPF_ALU32_IMM(BPF_MOV, R2, 3), - BPF_ALU64_REG(BPF_MUL, R1, R2), - BPF_ALU64_IMM(BPF_RSH, R1, 8), - BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_RSH_X: Zero shift, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_RSH, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 0x89abcdef } } }, { - "INT: MUL32_X", + "ALU64_RSH_X: Zero shift, high word", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -1), - BPF_ALU64_IMM(BPF_MOV, R1, -1), - BPF_ALU32_IMM(BPF_MOV, R2, 3), - BPF_ALU32_REG(BPF_MUL, R1, R2), - BPF_ALU64_IMM(BPF_RSH, R1, 8), - BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + }, + INTERNAL, + { }, + { { 0, 0x81234567 } } + }, + /* BPF_ALU | BPF_RSH | BPF_K */ + { + "ALU_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_RSH, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 1 } }, }, { - /* Have to test all register combinations, since - * JITing of different registers will produce - * different asm code. 
- */ - "INT: ADD 64-bit", + "ALU_RSH_K: 0x80000000 >> 31 = 1", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 0), - BPF_ALU64_IMM(BPF_MOV, R1, 1), - BPF_ALU64_IMM(BPF_MOV, R2, 2), - BPF_ALU64_IMM(BPF_MOV, R3, 3), - BPF_ALU64_IMM(BPF_MOV, R4, 4), - BPF_ALU64_IMM(BPF_MOV, R5, 5), - BPF_ALU64_IMM(BPF_MOV, R6, 6), - BPF_ALU64_IMM(BPF_MOV, R7, 7), - BPF_ALU64_IMM(BPF_MOV, R8, 8), - BPF_ALU64_IMM(BPF_MOV, R9, 9), - BPF_ALU64_IMM(BPF_ADD, R0, 20), - BPF_ALU64_IMM(BPF_ADD, R1, 20), - BPF_ALU64_IMM(BPF_ADD, R2, 20), - BPF_ALU64_IMM(BPF_ADD, R3, 20), - BPF_ALU64_IMM(BPF_ADD, R4, 20), - BPF_ALU64_IMM(BPF_ADD, R5, 20), - BPF_ALU64_IMM(BPF_ADD, R6, 20), - BPF_ALU64_IMM(BPF_ADD, R7, 20), - BPF_ALU64_IMM(BPF_ADD, R8, 20), - BPF_ALU64_IMM(BPF_ADD, R9, 20), - BPF_ALU64_IMM(BPF_SUB, R0, 10), - BPF_ALU64_IMM(BPF_SUB, R1, 10), - BPF_ALU64_IMM(BPF_SUB, R2, 10), - BPF_ALU64_IMM(BPF_SUB, R3, 10), - BPF_ALU64_IMM(BPF_SUB, R4, 10), - BPF_ALU64_IMM(BPF_SUB, R5, 10), - BPF_ALU64_IMM(BPF_SUB, R6, 10), - BPF_ALU64_IMM(BPF_SUB, R7, 10), - BPF_ALU64_IMM(BPF_SUB, R8, 10), - BPF_ALU64_IMM(BPF_SUB, R9, 10), - BPF_ALU64_REG(BPF_ADD, R0, R0), - BPF_ALU64_REG(BPF_ADD, R0, R1), - BPF_ALU64_REG(BPF_ADD, R0, R2), - BPF_ALU64_REG(BPF_ADD, R0, R3), - BPF_ALU64_REG(BPF_ADD, R0, R4), - BPF_ALU64_REG(BPF_ADD, R0, R5), - BPF_ALU64_REG(BPF_ADD, R0, R6), - BPF_ALU64_REG(BPF_ADD, R0, R7), - BPF_ALU64_REG(BPF_ADD, R0, R8), - BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */ - BPF_JMP_IMM(BPF_JEQ, R0, 155, 1), + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_RSH, R0, 31), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R1, R0), - BPF_ALU64_REG(BPF_ADD, R1, R1), - BPF_ALU64_REG(BPF_ADD, R1, R2), - BPF_ALU64_REG(BPF_ADD, R1, R3), - BPF_ALU64_REG(BPF_ADD, R1, R4), - BPF_ALU64_REG(BPF_ADD, R1, R5), - BPF_ALU64_REG(BPF_ADD, R1, R6), - BPF_ALU64_REG(BPF_ADD, R1, R7), - BPF_ALU64_REG(BPF_ADD, R1, R8), - BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */ - BPF_JMP_IMM(BPF_JEQ, R1, 456, 1), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_RSH_K: 0x12345678 >> 20 = 0x123", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_RSH, R0, 20), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R2, R0), - BPF_ALU64_REG(BPF_ADD, R2, R1), - BPF_ALU64_REG(BPF_ADD, R2, R2), - BPF_ALU64_REG(BPF_ADD, R2, R3), - BPF_ALU64_REG(BPF_ADD, R2, R4), - BPF_ALU64_REG(BPF_ADD, R2, R5), - BPF_ALU64_REG(BPF_ADD, R2, R6), - BPF_ALU64_REG(BPF_ADD, R2, R7), - BPF_ALU64_REG(BPF_ADD, R2, R8), - BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */ - BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1), + }, + INTERNAL, + { }, + { { 0, 0x123 } } + }, + { + "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), + BPF_ALU32_IMM(BPF_RSH, R0, 0), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R3, R0), - BPF_ALU64_REG(BPF_ADD, R3, R1), - BPF_ALU64_REG(BPF_ADD, R3, R2), - BPF_ALU64_REG(BPF_ADD, R3, R3), - BPF_ALU64_REG(BPF_ADD, R3, R4), - BPF_ALU64_REG(BPF_ADD, R3, R5), - BPF_ALU64_REG(BPF_ADD, R3, R6), - BPF_ALU64_REG(BPF_ADD, R3, R7), - BPF_ALU64_REG(BPF_ADD, R3, R8), - BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */ - BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1), + }, + INTERNAL, + { }, + { { 0, 0x12345678 } } + }, + { + "ALU64_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_RSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_K: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU64_IMM(BPF_RSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { 
{ 0, 1 } }, + }, + { + "ALU64_RSH_K: Shift < 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 12), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R4, R0), - BPF_ALU64_REG(BPF_ADD, R4, R1), - BPF_ALU64_REG(BPF_ADD, R4, R2), - BPF_ALU64_REG(BPF_ADD, R4, R3), - BPF_ALU64_REG(BPF_ADD, R4, R4), - BPF_ALU64_REG(BPF_ADD, R4, R5), - BPF_ALU64_REG(BPF_ADD, R4, R6), - BPF_ALU64_REG(BPF_ADD, R4, R7), - BPF_ALU64_REG(BPF_ADD, R4, R8), - BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */ - BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1), + }, + INTERNAL, + { }, + { { 0, 0x56789abc } } + }, + { + "ALU64_RSH_K: Shift < 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 12), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R5, R0), - BPF_ALU64_REG(BPF_ADD, R5, R1), - BPF_ALU64_REG(BPF_ADD, R5, R2), - BPF_ALU64_REG(BPF_ADD, R5, R3), - BPF_ALU64_REG(BPF_ADD, R5, R4), - BPF_ALU64_REG(BPF_ADD, R5, R5), - BPF_ALU64_REG(BPF_ADD, R5, R6), - BPF_ALU64_REG(BPF_ADD, R5, R7), - BPF_ALU64_REG(BPF_ADD, R5, R8), - BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */ - BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1), + }, + INTERNAL, + { }, + { { 0, 0x00081234 } } + }, + { + "ALU64_RSH_K: Shift > 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 36), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R6, R0), - BPF_ALU64_REG(BPF_ADD, R6, R1), - BPF_ALU64_REG(BPF_ADD, R6, R2), - BPF_ALU64_REG(BPF_ADD, R6, R3), - BPF_ALU64_REG(BPF_ADD, R6, R4), - BPF_ALU64_REG(BPF_ADD, R6, R5), - BPF_ALU64_REG(BPF_ADD, R6, R6), - BPF_ALU64_REG(BPF_ADD, R6, R7), - BPF_ALU64_REG(BPF_ADD, R6, R8), - BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */ - BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1), + }, + INTERNAL, + { }, + { { 0, 0x08123456 } } + }, + { + "ALU64_RSH_K: Shift > 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 36), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R7, R0), - BPF_ALU64_REG(BPF_ADD, R7, R1), - BPF_ALU64_REG(BPF_ADD, R7, R2), - BPF_ALU64_REG(BPF_ADD, R7, R3), - BPF_ALU64_REG(BPF_ADD, R7, R4), - BPF_ALU64_REG(BPF_ADD, R7, R5), - BPF_ALU64_REG(BPF_ADD, R7, R6), - BPF_ALU64_REG(BPF_ADD, R7, R7), - BPF_ALU64_REG(BPF_ADD, R7, R8), - BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */ - BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_RSH_K: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R8, R0), - BPF_ALU64_REG(BPF_ADD, R8, R1), - BPF_ALU64_REG(BPF_ADD, R8, R2), - BPF_ALU64_REG(BPF_ADD, R8, R3), - BPF_ALU64_REG(BPF_ADD, R8, R4), - BPF_ALU64_REG(BPF_ADD, R8, R5), - BPF_ALU64_REG(BPF_ADD, R8, R6), - BPF_ALU64_REG(BPF_ADD, R8, R7), - BPF_ALU64_REG(BPF_ADD, R8, R8), - BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */ - BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1), + }, + INTERNAL, + { }, + { { 0, 0x81234567 } } + }, + { + "ALU64_RSH_K: Shift == 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, R9, R0), - BPF_ALU64_REG(BPF_ADD, R9, R1), - BPF_ALU64_REG(BPF_ADD, R9, R2), - BPF_ALU64_REG(BPF_ADD, R9, R3), - BPF_ALU64_REG(BPF_ADD, R9, R4), - BPF_ALU64_REG(BPF_ADD, R9, R5), - BPF_ALU64_REG(BPF_ADD, R9, R6), - 
BPF_ALU64_REG(BPF_ADD, R9, R7), - BPF_ALU64_REG(BPF_ADD, R9, R8), - BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */ - BPF_ALU64_REG(BPF_MOV, R0, R9), + }, + INTERNAL, + { }, + { { 0, 0 } } + }, + { + "ALU64_RSH_K: Zero shift", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2957380 } } + { { 0, 0x89abcdef } } }, + /* BPF_ALU | BPF_ARSH | BPF_X */ { - "INT: ADD 32-bit", + "ALU32_ARSH_X: -1234 >> 7 = -10", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 20), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R2, 2), - BPF_ALU32_IMM(BPF_MOV, R3, 3), - BPF_ALU32_IMM(BPF_MOV, R4, 4), - BPF_ALU32_IMM(BPF_MOV, R5, 5), - BPF_ALU32_IMM(BPF_MOV, R6, 6), - BPF_ALU32_IMM(BPF_MOV, R7, 7), - BPF_ALU32_IMM(BPF_MOV, R8, 8), - BPF_ALU32_IMM(BPF_MOV, R9, 9), - BPF_ALU64_IMM(BPF_ADD, R1, 10), - BPF_ALU64_IMM(BPF_ADD, R2, 10), - BPF_ALU64_IMM(BPF_ADD, R3, 10), - BPF_ALU64_IMM(BPF_ADD, R4, 10), - BPF_ALU64_IMM(BPF_ADD, R5, 10), - BPF_ALU64_IMM(BPF_ADD, R6, 10), - BPF_ALU64_IMM(BPF_ADD, R7, 10), - BPF_ALU64_IMM(BPF_ADD, R8, 10), - BPF_ALU64_IMM(BPF_ADD, R9, 10), - BPF_ALU32_REG(BPF_ADD, R0, R1), - BPF_ALU32_REG(BPF_ADD, R0, R2), - BPF_ALU32_REG(BPF_ADD, R0, R3), - BPF_ALU32_REG(BPF_ADD, R0, R4), - BPF_ALU32_REG(BPF_ADD, R0, R5), - BPF_ALU32_REG(BPF_ADD, R0, R6), - BPF_ALU32_REG(BPF_ADD, R0, R7), - BPF_ALU32_REG(BPF_ADD, R0, R8), - BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */ - BPF_JMP_IMM(BPF_JEQ, R0, 155, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -1234), + BPF_ALU32_IMM(BPF_MOV, R1, 7), + BPF_ALU32_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R1, R0), - BPF_ALU32_REG(BPF_ADD, R1, R1), - BPF_ALU32_REG(BPF_ADD, R1, R2), - BPF_ALU32_REG(BPF_ADD, R1, R3), - BPF_ALU32_REG(BPF_ADD, R1, R4), - BPF_ALU32_REG(BPF_ADD, R1, R5), - BPF_ALU32_REG(BPF_ADD, R1, R6), - BPF_ALU32_REG(BPF_ADD, R1, R7), - BPF_ALU32_REG(BPF_ADD, R1, R8), - BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */ - BPF_JMP_IMM(BPF_JEQ, R1, 456, 1), + }, + INTERNAL, + { }, + { { 0, -10 } } + }, + { + "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xff00ff0000000000LL), + BPF_ALU32_IMM(BPF_MOV, R1, 40), + BPF_ALU64_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R2, R0), - BPF_ALU32_REG(BPF_ADD, R2, R1), - BPF_ALU32_REG(BPF_ADD, R2, R2), - BPF_ALU32_REG(BPF_ADD, R2, R3), - BPF_ALU32_REG(BPF_ADD, R2, R4), - BPF_ALU32_REG(BPF_ADD, R2, R5), - BPF_ALU32_REG(BPF_ADD, R2, R6), - BPF_ALU32_REG(BPF_ADD, R2, R7), - BPF_ALU32_REG(BPF_ADD, R2, R8), - BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */ - BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1), + }, + INTERNAL, + { }, + { { 0, 0xffff00ff } }, + }, + { + "ALU64_ARSH_X: Shift < 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R3, R0), - BPF_ALU32_REG(BPF_ADD, R3, R1), - BPF_ALU32_REG(BPF_ADD, R3, R2), - BPF_ALU32_REG(BPF_ADD, R3, R3), - BPF_ALU32_REG(BPF_ADD, R3, R4), - BPF_ALU32_REG(BPF_ADD, R3, R5), - BPF_ALU32_REG(BPF_ADD, R3, R6), - BPF_ALU32_REG(BPF_ADD, R3, R7), - BPF_ALU32_REG(BPF_ADD, R3, R8), - BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */ - BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1), + }, + INTERNAL, + { }, + { { 0, 0x56789abc } } + }, + { + "ALU64_ARSH_X: Shift < 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 12), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + 
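	/*
	 * Editorial note, not part of the original patch: BPF_ARSH is an
	 * arithmetic right shift, so the sign bit is replicated into the
	 * vacated high bits, whereas BPF_RSH shifts in zeroes.  For
	 * 0x8123456789abcdefULL >> 12 this means a high word of 0xfff81234
	 * (ARSH) versus 0x00081234 (RSH); the low word, 0x56789abc, is the
	 * same either way, which is presumably why the "ALU64_ARSH_K:
	 * Shift < 32, low word" case further down can use a plain RSH.
	 */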
BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R4, R0), - BPF_ALU32_REG(BPF_ADD, R4, R1), - BPF_ALU32_REG(BPF_ADD, R4, R2), - BPF_ALU32_REG(BPF_ADD, R4, R3), - BPF_ALU32_REG(BPF_ADD, R4, R4), - BPF_ALU32_REG(BPF_ADD, R4, R5), - BPF_ALU32_REG(BPF_ADD, R4, R6), - BPF_ALU32_REG(BPF_ADD, R4, R7), - BPF_ALU32_REG(BPF_ADD, R4, R8), - BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */ - BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1), + }, + INTERNAL, + { }, + { { 0, 0xfff81234 } } + }, + { + "ALU64_ARSH_X: Shift > 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R5, R0), - BPF_ALU32_REG(BPF_ADD, R5, R1), - BPF_ALU32_REG(BPF_ADD, R5, R2), - BPF_ALU32_REG(BPF_ADD, R5, R3), - BPF_ALU32_REG(BPF_ADD, R5, R4), - BPF_ALU32_REG(BPF_ADD, R5, R5), - BPF_ALU32_REG(BPF_ADD, R5, R6), - BPF_ALU32_REG(BPF_ADD, R5, R7), - BPF_ALU32_REG(BPF_ADD, R5, R8), - BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */ - BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1), + }, + INTERNAL, + { }, + { { 0, 0xf8123456 } } + }, + { + "ALU64_ARSH_X: Shift > 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 36), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R6, R0), - BPF_ALU32_REG(BPF_ADD, R6, R1), - BPF_ALU32_REG(BPF_ADD, R6, R2), - BPF_ALU32_REG(BPF_ADD, R6, R3), - BPF_ALU32_REG(BPF_ADD, R6, R4), - BPF_ALU32_REG(BPF_ADD, R6, R5), - BPF_ALU32_REG(BPF_ADD, R6, R6), - BPF_ALU32_REG(BPF_ADD, R6, R7), - BPF_ALU32_REG(BPF_ADD, R6, R8), - BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */ - BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, + { + "ALU64_ARSH_X: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R7, R0), - BPF_ALU32_REG(BPF_ADD, R7, R1), - BPF_ALU32_REG(BPF_ADD, R7, R2), - BPF_ALU32_REG(BPF_ADD, R7, R3), - BPF_ALU32_REG(BPF_ADD, R7, R4), - BPF_ALU32_REG(BPF_ADD, R7, R5), - BPF_ALU32_REG(BPF_ADD, R7, R6), - BPF_ALU32_REG(BPF_ADD, R7, R7), - BPF_ALU32_REG(BPF_ADD, R7, R8), - BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */ - BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1), + }, + INTERNAL, + { }, + { { 0, 0x81234567 } } + }, + { + "ALU64_ARSH_X: Shift == 32, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 32), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R8, R0), - BPF_ALU32_REG(BPF_ADD, R8, R1), - BPF_ALU32_REG(BPF_ADD, R8, R2), - BPF_ALU32_REG(BPF_ADD, R8, R3), - BPF_ALU32_REG(BPF_ADD, R8, R4), - BPF_ALU32_REG(BPF_ADD, R8, R5), - BPF_ALU32_REG(BPF_ADD, R8, R6), - BPF_ALU32_REG(BPF_ADD, R8, R7), - BPF_ALU32_REG(BPF_ADD, R8, R8), - BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */ - BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, + { + "ALU64_ARSH_X: Zero shift, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_ARSH, R0, R1), BPF_EXIT_INSN(), - BPF_ALU32_REG(BPF_ADD, R9, R0), - BPF_ALU32_REG(BPF_ADD, R9, R1), - BPF_ALU32_REG(BPF_ADD, R9, R2), - BPF_ALU32_REG(BPF_ADD, R9, R3), - BPF_ALU32_REG(BPF_ADD, R9, R4), - BPF_ALU32_REG(BPF_ADD, R9, R5), - BPF_ALU32_REG(BPF_ADD, R9, R6), - 
BPF_ALU32_REG(BPF_ADD, R9, R7), - BPF_ALU32_REG(BPF_ADD, R9, R8), - BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */ - BPF_ALU32_REG(BPF_MOV, R0, R9), + }, + INTERNAL, + { }, + { { 0, 0x89abcdef } } + }, + { + "ALU64_ARSH_X: Zero shift, high word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU32_IMM(BPF_MOV, R1, 0), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2957380 } } + { { 0, 0x81234567 } } }, - { /* Mainly checking JIT here. */ - "INT: SUB", + /* BPF_ALU | BPF_ARSH | BPF_K */ + { + "ALU32_ARSH_K: -1234 >> 7 = -10", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 0), - BPF_ALU64_IMM(BPF_MOV, R1, 1), - BPF_ALU64_IMM(BPF_MOV, R2, 2), - BPF_ALU64_IMM(BPF_MOV, R3, 3), - BPF_ALU64_IMM(BPF_MOV, R4, 4), - BPF_ALU64_IMM(BPF_MOV, R5, 5), - BPF_ALU64_IMM(BPF_MOV, R6, 6), - BPF_ALU64_IMM(BPF_MOV, R7, 7), - BPF_ALU64_IMM(BPF_MOV, R8, 8), - BPF_ALU64_IMM(BPF_MOV, R9, 9), - BPF_ALU64_REG(BPF_SUB, R0, R0), - BPF_ALU64_REG(BPF_SUB, R0, R1), - BPF_ALU64_REG(BPF_SUB, R0, R2), - BPF_ALU64_REG(BPF_SUB, R0, R3), - BPF_ALU64_REG(BPF_SUB, R0, R4), - BPF_ALU64_REG(BPF_SUB, R0, R5), - BPF_ALU64_REG(BPF_SUB, R0, R6), - BPF_ALU64_REG(BPF_SUB, R0, R7), - BPF_ALU64_REG(BPF_SUB, R0, R8), - BPF_ALU64_REG(BPF_SUB, R0, R9), - BPF_ALU64_IMM(BPF_SUB, R0, 10), - BPF_JMP_IMM(BPF_JEQ, R0, -55, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -1234), + BPF_ALU32_IMM(BPF_ARSH, R0, 7), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R1, R0), - BPF_ALU64_REG(BPF_SUB, R1, R2), - BPF_ALU64_REG(BPF_SUB, R1, R3), - BPF_ALU64_REG(BPF_SUB, R1, R4), - BPF_ALU64_REG(BPF_SUB, R1, R5), - BPF_ALU64_REG(BPF_SUB, R1, R6), - BPF_ALU64_REG(BPF_SUB, R1, R7), - BPF_ALU64_REG(BPF_SUB, R1, R8), - BPF_ALU64_REG(BPF_SUB, R1, R9), - BPF_ALU64_IMM(BPF_SUB, R1, 10), - BPF_ALU64_REG(BPF_SUB, R2, R0), - BPF_ALU64_REG(BPF_SUB, R2, R1), - BPF_ALU64_REG(BPF_SUB, R2, R3), - BPF_ALU64_REG(BPF_SUB, R2, R4), - BPF_ALU64_REG(BPF_SUB, R2, R5), - BPF_ALU64_REG(BPF_SUB, R2, R6), - BPF_ALU64_REG(BPF_SUB, R2, R7), - BPF_ALU64_REG(BPF_SUB, R2, R8), - BPF_ALU64_REG(BPF_SUB, R2, R9), - BPF_ALU64_IMM(BPF_SUB, R2, 10), - BPF_ALU64_REG(BPF_SUB, R3, R0), - BPF_ALU64_REG(BPF_SUB, R3, R1), - BPF_ALU64_REG(BPF_SUB, R3, R2), - BPF_ALU64_REG(BPF_SUB, R3, R4), - BPF_ALU64_REG(BPF_SUB, R3, R5), - BPF_ALU64_REG(BPF_SUB, R3, R6), - BPF_ALU64_REG(BPF_SUB, R3, R7), - BPF_ALU64_REG(BPF_SUB, R3, R8), - BPF_ALU64_REG(BPF_SUB, R3, R9), - BPF_ALU64_IMM(BPF_SUB, R3, 10), - BPF_ALU64_REG(BPF_SUB, R4, R0), - BPF_ALU64_REG(BPF_SUB, R4, R1), - BPF_ALU64_REG(BPF_SUB, R4, R2), - BPF_ALU64_REG(BPF_SUB, R4, R3), - BPF_ALU64_REG(BPF_SUB, R4, R5), - BPF_ALU64_REG(BPF_SUB, R4, R6), - BPF_ALU64_REG(BPF_SUB, R4, R7), - BPF_ALU64_REG(BPF_SUB, R4, R8), - BPF_ALU64_REG(BPF_SUB, R4, R9), - BPF_ALU64_IMM(BPF_SUB, R4, 10), - BPF_ALU64_REG(BPF_SUB, R5, R0), - BPF_ALU64_REG(BPF_SUB, R5, R1), - BPF_ALU64_REG(BPF_SUB, R5, R2), - BPF_ALU64_REG(BPF_SUB, R5, R3), - BPF_ALU64_REG(BPF_SUB, R5, R4), - BPF_ALU64_REG(BPF_SUB, R5, R6), - BPF_ALU64_REG(BPF_SUB, R5, R7), - BPF_ALU64_REG(BPF_SUB, R5, R8), - BPF_ALU64_REG(BPF_SUB, R5, R9), - BPF_ALU64_IMM(BPF_SUB, R5, 10), - BPF_ALU64_REG(BPF_SUB, R6, R0), - BPF_ALU64_REG(BPF_SUB, R6, R1), - BPF_ALU64_REG(BPF_SUB, R6, R2), - BPF_ALU64_REG(BPF_SUB, R6, R3), - BPF_ALU64_REG(BPF_SUB, R6, R4), - BPF_ALU64_REG(BPF_SUB, R6, R5), - BPF_ALU64_REG(BPF_SUB, R6, R7), - BPF_ALU64_REG(BPF_SUB, R6, R8), - BPF_ALU64_REG(BPF_SUB, R6, R9), - BPF_ALU64_IMM(BPF_SUB, R6, 10), - BPF_ALU64_REG(BPF_SUB, R7, R0), - 
BPF_ALU64_REG(BPF_SUB, R7, R1), - BPF_ALU64_REG(BPF_SUB, R7, R2), - BPF_ALU64_REG(BPF_SUB, R7, R3), - BPF_ALU64_REG(BPF_SUB, R7, R4), - BPF_ALU64_REG(BPF_SUB, R7, R5), - BPF_ALU64_REG(BPF_SUB, R7, R6), - BPF_ALU64_REG(BPF_SUB, R7, R8), - BPF_ALU64_REG(BPF_SUB, R7, R9), - BPF_ALU64_IMM(BPF_SUB, R7, 10), - BPF_ALU64_REG(BPF_SUB, R8, R0), - BPF_ALU64_REG(BPF_SUB, R8, R1), - BPF_ALU64_REG(BPF_SUB, R8, R2), - BPF_ALU64_REG(BPF_SUB, R8, R3), - BPF_ALU64_REG(BPF_SUB, R8, R4), - BPF_ALU64_REG(BPF_SUB, R8, R5), - BPF_ALU64_REG(BPF_SUB, R8, R6), - BPF_ALU64_REG(BPF_SUB, R8, R7), - BPF_ALU64_REG(BPF_SUB, R8, R9), - BPF_ALU64_IMM(BPF_SUB, R8, 10), - BPF_ALU64_REG(BPF_SUB, R9, R0), - BPF_ALU64_REG(BPF_SUB, R9, R1), - BPF_ALU64_REG(BPF_SUB, R9, R2), - BPF_ALU64_REG(BPF_SUB, R9, R3), - BPF_ALU64_REG(BPF_SUB, R9, R4), - BPF_ALU64_REG(BPF_SUB, R9, R5), - BPF_ALU64_REG(BPF_SUB, R9, R6), - BPF_ALU64_REG(BPF_SUB, R9, R7), - BPF_ALU64_REG(BPF_SUB, R9, R8), - BPF_ALU64_IMM(BPF_SUB, R9, 10), - BPF_ALU64_IMM(BPF_SUB, R0, 10), - BPF_ALU64_IMM(BPF_NEG, R0, 0), - BPF_ALU64_REG(BPF_SUB, R0, R1), - BPF_ALU64_REG(BPF_SUB, R0, R2), - BPF_ALU64_REG(BPF_SUB, R0, R3), - BPF_ALU64_REG(BPF_SUB, R0, R4), - BPF_ALU64_REG(BPF_SUB, R0, R5), - BPF_ALU64_REG(BPF_SUB, R0, R6), - BPF_ALU64_REG(BPF_SUB, R0, R7), - BPF_ALU64_REG(BPF_SUB, R0, R8), - BPF_ALU64_REG(BPF_SUB, R0, R9), + }, + INTERNAL, + { }, + { { 0, -10 } } + }, + { + "ALU32_ARSH_K: -1234 >> 0 = -1234", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -1234), + BPF_ALU32_IMM(BPF_ARSH, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 11 } } + { { 0, -1234 } } }, - { /* Mainly checking JIT here. */ - "INT: XOR", + { + "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", .u.insns_int = { - BPF_ALU64_REG(BPF_SUB, R0, R0), - BPF_ALU64_REG(BPF_XOR, R1, R1), - BPF_JMP_REG(BPF_JEQ, R0, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, 10), - BPF_ALU64_IMM(BPF_MOV, R1, -1), - BPF_ALU64_REG(BPF_SUB, R1, R1), - BPF_ALU64_REG(BPF_XOR, R2, R2), - BPF_JMP_REG(BPF_JEQ, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R2, R2), - BPF_ALU64_REG(BPF_XOR, R3, R3), - BPF_ALU64_IMM(BPF_MOV, R0, 10), - BPF_ALU64_IMM(BPF_MOV, R1, -1), - BPF_JMP_REG(BPF_JEQ, R2, R3, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R3, R3), - BPF_ALU64_REG(BPF_XOR, R4, R4), - BPF_ALU64_IMM(BPF_MOV, R2, 1), - BPF_ALU64_IMM(BPF_MOV, R5, -1), - BPF_JMP_REG(BPF_JEQ, R3, R4, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R4, R4), - BPF_ALU64_REG(BPF_XOR, R5, R5), - BPF_ALU64_IMM(BPF_MOV, R3, 1), - BPF_ALU64_IMM(BPF_MOV, R7, -1), - BPF_JMP_REG(BPF_JEQ, R5, R4, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R5, 1), - BPF_ALU64_REG(BPF_SUB, R5, R5), - BPF_ALU64_REG(BPF_XOR, R6, R6), - BPF_ALU64_IMM(BPF_MOV, R1, 1), - BPF_ALU64_IMM(BPF_MOV, R8, -1), - BPF_JMP_REG(BPF_JEQ, R5, R6, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R6, R6), - BPF_ALU64_REG(BPF_XOR, R7, R7), - BPF_JMP_REG(BPF_JEQ, R7, R6, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R7, R7), - BPF_ALU64_REG(BPF_XOR, R8, R8), - BPF_JMP_REG(BPF_JEQ, R7, R8, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R8, R8), - BPF_ALU64_REG(BPF_XOR, R9, R9), - BPF_JMP_REG(BPF_JEQ, R9, R8, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R9, R9), - BPF_ALU64_REG(BPF_XOR, R0, R0), - BPF_JMP_REG(BPF_JEQ, R9, R0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, R1, R1), - BPF_ALU64_REG(BPF_XOR, R0, R0), - BPF_JMP_REG(BPF_JEQ, R9, R0, 2), - BPF_ALU64_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R0, 
0xff00ff0000000000LL), + BPF_ALU64_IMM(BPF_ARSH, R0, 40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 0xffff00ff } }, }, - { /* Mainly checking JIT here. */ - "INT: MUL", + { + "ALU64_ARSH_K: Shift < 32, low word", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 11), - BPF_ALU64_IMM(BPF_MOV, R1, 1), - BPF_ALU64_IMM(BPF_MOV, R2, 2), - BPF_ALU64_IMM(BPF_MOV, R3, 3), - BPF_ALU64_IMM(BPF_MOV, R4, 4), - BPF_ALU64_IMM(BPF_MOV, R5, 5), - BPF_ALU64_IMM(BPF_MOV, R6, 6), - BPF_ALU64_IMM(BPF_MOV, R7, 7), - BPF_ALU64_IMM(BPF_MOV, R8, 8), - BPF_ALU64_IMM(BPF_MOV, R9, 9), - BPF_ALU64_REG(BPF_MUL, R0, R0), - BPF_ALU64_REG(BPF_MUL, R0, R1), - BPF_ALU64_REG(BPF_MUL, R0, R2), - BPF_ALU64_REG(BPF_MUL, R0, R3), - BPF_ALU64_REG(BPF_MUL, R0, R4), - BPF_ALU64_REG(BPF_MUL, R0, R5), - BPF_ALU64_REG(BPF_MUL, R0, R6), - BPF_ALU64_REG(BPF_MUL, R0, R7), - BPF_ALU64_REG(BPF_MUL, R0, R8), - BPF_ALU64_REG(BPF_MUL, R0, R9), - BPF_ALU64_IMM(BPF_MUL, R0, 10), - BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_MUL, R1, R0), - BPF_ALU64_REG(BPF_MUL, R1, R2), - BPF_ALU64_REG(BPF_MUL, R1, R3), - BPF_ALU64_REG(BPF_MUL, R1, R4), - BPF_ALU64_REG(BPF_MUL, R1, R5), - BPF_ALU64_REG(BPF_MUL, R1, R6), - BPF_ALU64_REG(BPF_MUL, R1, R7), - BPF_ALU64_REG(BPF_MUL, R1, R8), - BPF_ALU64_REG(BPF_MUL, R1, R9), - BPF_ALU64_IMM(BPF_MUL, R1, 10), - BPF_ALU64_REG(BPF_MOV, R2, R1), - BPF_ALU64_IMM(BPF_RSH, R2, 32), - BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_LSH, R1, 32), - BPF_ALU64_IMM(BPF_ARSH, R1, 32), - BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_MUL, R2, R0), - BPF_ALU64_REG(BPF_MUL, R2, R1), - BPF_ALU64_REG(BPF_MUL, R2, R3), - BPF_ALU64_REG(BPF_MUL, R2, R4), - BPF_ALU64_REG(BPF_MUL, R2, R5), - BPF_ALU64_REG(BPF_MUL, R2, R6), - BPF_ALU64_REG(BPF_MUL, R2, R7), - BPF_ALU64_REG(BPF_MUL, R2, R8), - BPF_ALU64_REG(BPF_MUL, R2, R9), - BPF_ALU64_IMM(BPF_MUL, R2, 10), - BPF_ALU64_IMM(BPF_RSH, R2, 32), - BPF_ALU64_REG(BPF_MOV, R0, R2), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_RSH, R0, 12), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x35d97ef2 } } + { { 0, 0x56789abc } } }, - { /* Mainly checking JIT here. */ - "MOV REG64", + { + "ALU64_ARSH_K: Shift < 32, high word", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffffffffffffLL), - BPF_MOV64_REG(R1, R0), - BPF_MOV64_REG(R2, R1), - BPF_MOV64_REG(R3, R2), - BPF_MOV64_REG(R4, R3), - BPF_MOV64_REG(R5, R4), - BPF_MOV64_REG(R6, R5), - BPF_MOV64_REG(R7, R6), - BPF_MOV64_REG(R8, R7), - BPF_MOV64_REG(R9, R8), - BPF_ALU64_IMM(BPF_MOV, R0, 0), - BPF_ALU64_IMM(BPF_MOV, R1, 0), - BPF_ALU64_IMM(BPF_MOV, R2, 0), - BPF_ALU64_IMM(BPF_MOV, R3, 0), - BPF_ALU64_IMM(BPF_MOV, R4, 0), - BPF_ALU64_IMM(BPF_MOV, R5, 0), - BPF_ALU64_IMM(BPF_MOV, R6, 0), - BPF_ALU64_IMM(BPF_MOV, R7, 0), - BPF_ALU64_IMM(BPF_MOV, R8, 0), - BPF_ALU64_IMM(BPF_MOV, R9, 0), - BPF_ALU64_REG(BPF_ADD, R0, R0), - BPF_ALU64_REG(BPF_ADD, R0, R1), - BPF_ALU64_REG(BPF_ADD, R0, R2), - BPF_ALU64_REG(BPF_ADD, R0, R3), - BPF_ALU64_REG(BPF_ADD, R0, R4), - BPF_ALU64_REG(BPF_ADD, R0, R5), - BPF_ALU64_REG(BPF_ADD, R0, R6), - BPF_ALU64_REG(BPF_ADD, R0, R7), - BPF_ALU64_REG(BPF_ADD, R0, R8), - BPF_ALU64_REG(BPF_ADD, R0, R9), - BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 12), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xfefe } } + { { 0, 0xfff81234 } } }, - { /* Mainly checking JIT here. 
*/ - "MOV REG32", + { + "ALU64_ARSH_K: Shift > 32, low word", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffffffffffffLL), - BPF_MOV64_REG(R1, R0), - BPF_MOV64_REG(R2, R1), - BPF_MOV64_REG(R3, R2), - BPF_MOV64_REG(R4, R3), - BPF_MOV64_REG(R5, R4), - BPF_MOV64_REG(R6, R5), - BPF_MOV64_REG(R7, R6), - BPF_MOV64_REG(R8, R7), - BPF_MOV64_REG(R9, R8), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU32_IMM(BPF_MOV, R2, 0), - BPF_ALU32_IMM(BPF_MOV, R3, 0), - BPF_ALU32_IMM(BPF_MOV, R4, 0), - BPF_ALU32_IMM(BPF_MOV, R5, 0), - BPF_ALU32_IMM(BPF_MOV, R6, 0), - BPF_ALU32_IMM(BPF_MOV, R7, 0), - BPF_ALU32_IMM(BPF_MOV, R8, 0), - BPF_ALU32_IMM(BPF_MOV, R9, 0), - BPF_ALU64_REG(BPF_ADD, R0, R0), - BPF_ALU64_REG(BPF_ADD, R0, R1), - BPF_ALU64_REG(BPF_ADD, R0, R2), - BPF_ALU64_REG(BPF_ADD, R0, R3), - BPF_ALU64_REG(BPF_ADD, R0, R4), - BPF_ALU64_REG(BPF_ADD, R0, R5), - BPF_ALU64_REG(BPF_ADD, R0, R6), - BPF_ALU64_REG(BPF_ADD, R0, R7), - BPF_ALU64_REG(BPF_ADD, R0, R8), - BPF_ALU64_REG(BPF_ADD, R0, R9), - BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 36), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xfefe } } + { { 0, 0xf8123456 } } }, - { /* Mainly checking JIT here. */ - "LD IMM64", + { + "ALU64_ARSH_K: Shift > 32, high word", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffffffffffffLL), - BPF_MOV64_REG(R1, R0), - BPF_MOV64_REG(R2, R1), - BPF_MOV64_REG(R3, R2), - BPF_MOV64_REG(R4, R3), - BPF_MOV64_REG(R5, R4), - BPF_MOV64_REG(R6, R5), - BPF_MOV64_REG(R7, R6), - BPF_MOV64_REG(R8, R7), - BPF_MOV64_REG(R9, R8), - BPF_LD_IMM64(R0, 0x0LL), - BPF_LD_IMM64(R1, 0x0LL), - BPF_LD_IMM64(R2, 0x0LL), - BPF_LD_IMM64(R3, 0x0LL), - BPF_LD_IMM64(R4, 0x0LL), - BPF_LD_IMM64(R5, 0x0LL), - BPF_LD_IMM64(R6, 0x0LL), - BPF_LD_IMM64(R7, 0x0LL), - BPF_LD_IMM64(R8, 0x0LL), - BPF_LD_IMM64(R9, 0x0LL), - BPF_ALU64_REG(BPF_ADD, R0, R0), - BPF_ALU64_REG(BPF_ADD, R0, R1), - BPF_ALU64_REG(BPF_ADD, R0, R2), - BPF_ALU64_REG(BPF_ADD, R0, R3), - BPF_ALU64_REG(BPF_ADD, R0, R4), - BPF_ALU64_REG(BPF_ADD, R0, R5), - BPF_ALU64_REG(BPF_ADD, R0, R6), - BPF_ALU64_REG(BPF_ADD, R0, R7), - BPF_ALU64_REG(BPF_ADD, R0, R8), - BPF_ALU64_REG(BPF_ADD, R0, R9), - BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), + BPF_LD_IMM64(R0, 0xf123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 36), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, + { + "ALU64_ARSH_K: Shift == 32, low word", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 32), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xfefe } } + { { 0, 0x81234567 } } }, { - "INT: ALU MIX", + "ALU64_ARSH_K: Shift == 32, high word", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 11), - BPF_ALU64_IMM(BPF_ADD, R0, -1), - BPF_ALU64_IMM(BPF_MOV, R2, 2), - BPF_ALU64_IMM(BPF_XOR, R2, 3), - BPF_ALU64_REG(BPF_DIV, R0, R2), - BPF_JMP_IMM(BPF_JEQ, R0, 10, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOD, R0, 3), - BPF_JMP_IMM(BPF_JEQ, R0, 1, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 32), + BPF_ALU64_IMM(BPF_RSH, R0, 32), BPF_EXIT_INSN(), }, INTERNAL, @@ -1914,1587 +7585,1705 @@ static struct bpf_test tests[] = { { { 0, -1 } } }, { - "INT: shifts by register", + "ALU64_ARSH_K: Zero shift", .u.insns_int = { - BPF_MOV64_IMM(R0, -1234), - BPF_MOV64_IMM(R1, 1), - BPF_ALU32_REG(BPF_RSH, R0, R1), - BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(R2, 1), - BPF_ALU64_REG(BPF_LSH, 
R0, R2), - BPF_MOV32_IMM(R4, -1234), - BPF_JMP_REG(BPF_JEQ, R0, R4, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_AND, R4, 63), - BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */ - BPF_MOV64_IMM(R3, 47), - BPF_ALU64_REG(BPF_ARSH, R0, R3), - BPF_JMP_IMM(BPF_JEQ, R0, -617, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(R2, 1), - BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */ - BPF_JMP_IMM(BPF_JEQ, R4, 92, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(R4, 4), - BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */ - BPF_JMP_IMM(BPF_JEQ, R4, 64, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(R4, 5), - BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */ - BPF_JMP_IMM(BPF_JEQ, R4, 160, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(R0, -1), + BPF_LD_IMM64(R0, 0x8123456789abcdefLL), + BPF_ALU64_IMM(BPF_ARSH, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1 } } + { { 0, 0x89abcdef } } }, + /* BPF_ALU | BPF_NEG */ { - /* - * Register (non-)clobbering test, in the case where a 32-bit - * JIT implements complex ALU64 operations via function calls. - * If so, the function call must be invisible in the eBPF - * registers. The JIT must then save and restore relevant - * registers during the call. The following tests check that - * the eBPF registers retain their values after such a call. - */ - "INT: Register clobbering, R1 updated", + "ALU_NEG: -(3) = -3", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_ALU32_IMM(BPF_MOV, R1, 123456789), - BPF_ALU32_IMM(BPF_MOV, R2, 2), - BPF_ALU32_IMM(BPF_MOV, R3, 3), - BPF_ALU32_IMM(BPF_MOV, R4, 4), - BPF_ALU32_IMM(BPF_MOV, R5, 5), - BPF_ALU32_IMM(BPF_MOV, R6, 6), - BPF_ALU32_IMM(BPF_MOV, R7, 7), - BPF_ALU32_IMM(BPF_MOV, R8, 8), - BPF_ALU32_IMM(BPF_MOV, R9, 9), - BPF_ALU64_IMM(BPF_DIV, R1, 123456789), - BPF_JMP_IMM(BPF_JNE, R0, 0, 10), - BPF_JMP_IMM(BPF_JNE, R1, 1, 9), - BPF_JMP_IMM(BPF_JNE, R2, 2, 8), - BPF_JMP_IMM(BPF_JNE, R3, 3, 7), - BPF_JMP_IMM(BPF_JNE, R4, 4, 6), - BPF_JMP_IMM(BPF_JNE, R5, 5, 5), - BPF_JMP_IMM(BPF_JNE, R6, 6, 4), - BPF_JMP_IMM(BPF_JNE, R7, 7, 3), - BPF_JMP_IMM(BPF_JNE, R8, 8, 2), - BPF_JMP_IMM(BPF_JNE, R9, 9, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, -3 } }, }, { - "INT: Register clobbering, R2 updated", + "ALU_NEG: -(-3) = 3", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789), - BPF_ALU32_IMM(BPF_MOV, R3, 3), - BPF_ALU32_IMM(BPF_MOV, R4, 4), - BPF_ALU32_IMM(BPF_MOV, R5, 5), - BPF_ALU32_IMM(BPF_MOV, R6, 6), - BPF_ALU32_IMM(BPF_MOV, R7, 7), - BPF_ALU32_IMM(BPF_MOV, R8, 8), - BPF_ALU32_IMM(BPF_MOV, R9, 9), - BPF_ALU64_IMM(BPF_DIV, R2, 123456789), - BPF_JMP_IMM(BPF_JNE, R0, 0, 10), - BPF_JMP_IMM(BPF_JNE, R1, 1, 9), - BPF_JMP_IMM(BPF_JNE, R2, 2, 8), - BPF_JMP_IMM(BPF_JNE, R3, 3, 7), - BPF_JMP_IMM(BPF_JNE, R4, 4, 6), - BPF_JMP_IMM(BPF_JNE, R5, 5, 5), - BPF_JMP_IMM(BPF_JNE, R6, 6, 4), - BPF_JMP_IMM(BPF_JNE, R7, 7, 3), - BPF_JMP_IMM(BPF_JNE, R8, 8, 2), - BPF_JMP_IMM(BPF_JNE, R9, 9, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 3 } }, }, { - /* - * Test 32-bit JITs that implement complex ALU64 operations as - * function calls R0 = f(R1, R2), and must re-arrange operands. 
- */ -#define NUMER 0xfedcba9876543210ULL -#define DENOM 0x0123456789abcdefULL - "ALU64_DIV X: Operand register permutations", + "ALU64_NEG: -(3) = -3", .u.insns_int = { - /* R0 / R2 */ - BPF_LD_IMM64(R0, NUMER), - BPF_LD_IMM64(R2, DENOM), - BPF_ALU64_REG(BPF_DIV, R0, R2), - BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1), + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), BPF_EXIT_INSN(), - /* R1 / R0 */ - BPF_LD_IMM64(R1, NUMER), - BPF_LD_IMM64(R0, DENOM), - BPF_ALU64_REG(BPF_DIV, R1, R0), - BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1), + }, + INTERNAL, + { }, + { { 0, -3 } }, + }, + { + "ALU64_NEG: -(-3) = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, -3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), BPF_EXIT_INSN(), - /* R0 / R1 */ - BPF_LD_IMM64(R0, NUMER), - BPF_LD_IMM64(R1, DENOM), - BPF_ALU64_REG(BPF_DIV, R0, R1), - BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1), - BPF_EXIT_INSN(), - /* R2 / R0 */ - BPF_LD_IMM64(R2, NUMER), - BPF_LD_IMM64(R0, DENOM), - BPF_ALU64_REG(BPF_DIV, R2, R0), - BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1), - BPF_EXIT_INSN(), - /* R2 / R1 */ - BPF_LD_IMM64(R2, NUMER), - BPF_LD_IMM64(R1, DENOM), - BPF_ALU64_REG(BPF_DIV, R2, R1), - BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1), - BPF_EXIT_INSN(), - /* R1 / R2 */ - BPF_LD_IMM64(R1, NUMER), - BPF_LD_IMM64(R2, DENOM), - BPF_ALU64_REG(BPF_DIV, R1, R2), - BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1), - BPF_EXIT_INSN(), - /* R1 / R1 */ - BPF_LD_IMM64(R1, NUMER), - BPF_ALU64_REG(BPF_DIV, R1, R1), - BPF_JMP_IMM(BPF_JEQ, R1, 1, 1), - BPF_EXIT_INSN(), - /* R2 / R2 */ - BPF_LD_IMM64(R2, DENOM), - BPF_ALU64_REG(BPF_DIV, R2, R2), - BPF_JMP_IMM(BPF_JEQ, R2, 1, 1), - BPF_EXIT_INSN(), - /* R3 / R4 */ - BPF_LD_IMM64(R3, NUMER), - BPF_LD_IMM64(R4, DENOM), - BPF_ALU64_REG(BPF_DIV, R3, R4), - BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1), - BPF_EXIT_INSN(), - /* Successful return */ - BPF_LD_IMM64(R0, 1), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + /* BPF_ALU | BPF_END | BPF_FROM_BE */ + { + "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 16), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, -#undef NUMER -#undef DENOM + { { 0, cpu_to_be16(0xcdef) } }, }, -#ifdef CONFIG_32BIT { - "INT: 32-bit context pointer word order and zero-extension", + "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3), + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), BPF_ALU64_IMM(BPF_RSH, R1, 32), - BPF_JMP32_IMM(BPF_JNE, R1, 0, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, cpu_to_be32(0x89abcdef) } }, }, -#endif { - "check: missing ret", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 1), + "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, - { }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } }, }, { - "check: div_k_0", - .u.insns = { - BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0), - BPF_STMT(BPF_RET | BPF_K, 0) + "ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + 
BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, - { }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } }, }, + /* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */ { - "check: unknown insn", - .u.insns = { - /* seccomp insn, rejected in socket filter */ - BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0), - BPF_STMT(BPF_RET | BPF_K, 0) + "ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_BE, R0, 16), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_EXPECTED_FAIL, - { }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, cpu_to_be16(0x3210) } }, }, { - "check: out of range spill/fill", - .u.insns = { - BPF_STMT(BPF_STX, 16), - BPF_STMT(BPF_RET | BPF_K, 0) + "ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_BE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, - { }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, cpu_to_be32(0x76543210) } }, }, { - "JUMPS + HOLES", - .u.insns = { - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15), - BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3), - BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15), - BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ, 
0x90d2ff41, 2, 3), - BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2), - BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0), - BPF_STMT(BPF_RET | BPF_A, 0), - BPF_STMT(BPF_RET | BPF_A, 0), + "ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } }, + }, + { + "ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC, - { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8, - 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4, - 0x08, 0x00, - 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, - 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */ - 0xc0, 0xa8, 0x33, 0x01, - 0xc0, 0xa8, 0x33, 0x02, - 0xbb, 0xb6, - 0xa9, 0xfa, - 0x00, 0x14, 0x00, 0x00, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, - 0xcc, 0xcc, 0xcc, 0xcc }, - { { 88, 0x001b } } + INTERNAL, + { }, + { { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } }, }, + /* BPF_ALU | BPF_END | BPF_FROM_LE */ { - "check: RET X", - .u.insns = { - BPF_STMT(BPF_RET | BPF_X, 0), + "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 16), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + INTERNAL, { }, + { { 0, cpu_to_le16(0xcdef) } }, + }, + { + "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ + BPF_EXIT_INSN(), + }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, cpu_to_le32(0x89abcdef) } }, }, { - "check: LDX + RET X", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 42), - BPF_STMT(BPF_RET | BPF_X, 0), + "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + INTERNAL, { }, + { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } }, + }, + { + "ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } }, }, - { /* Mainly checking JIT here. 
*/ - "M[]: alt STX + LDX", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 100), - BPF_STMT(BPF_STX, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 0), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 1), - BPF_STMT(BPF_LDX | BPF_MEM, 1), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 2), - BPF_STMT(BPF_LDX | BPF_MEM, 2), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 3), - BPF_STMT(BPF_LDX | BPF_MEM, 3), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 4), - BPF_STMT(BPF_LDX | BPF_MEM, 4), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 5), - BPF_STMT(BPF_LDX | BPF_MEM, 5), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 6), - BPF_STMT(BPF_LDX | BPF_MEM, 6), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 7), - BPF_STMT(BPF_LDX | BPF_MEM, 7), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 8), - BPF_STMT(BPF_LDX | BPF_MEM, 8), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 9), - BPF_STMT(BPF_LDX | BPF_MEM, 9), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 10), - BPF_STMT(BPF_LDX | BPF_MEM, 10), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 11), - BPF_STMT(BPF_LDX | BPF_MEM, 11), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 12), - BPF_STMT(BPF_LDX | BPF_MEM, 12), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 13), - BPF_STMT(BPF_LDX | BPF_MEM, 13), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 14), - BPF_STMT(BPF_LDX | BPF_MEM, 14), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_STX, 15), - BPF_STMT(BPF_LDX | BPF_MEM, 15), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_RET | BPF_A, 0), + /* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */ + { + "ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_LE, R0, 16), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA, + INTERNAL, { }, - { { 0, 116 } }, + { { 0, cpu_to_le16(0x3210) } }, }, - { /* Mainly checking JIT here. 
*/ - "M[]: full STX + full LDX", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb), - BPF_STMT(BPF_STX, 0), - BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae), - BPF_STMT(BPF_STX, 1), - BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf), - BPF_STMT(BPF_STX, 2), - BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc), - BPF_STMT(BPF_STX, 3), - BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb), - BPF_STMT(BPF_STX, 4), - BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda), - BPF_STMT(BPF_STX, 5), - BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb), - BPF_STMT(BPF_STX, 6), - BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade), - BPF_STMT(BPF_STX, 7), - BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec), - BPF_STMT(BPF_STX, 8), - BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc), - BPF_STMT(BPF_STX, 9), - BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac), - BPF_STMT(BPF_STX, 10), - BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea), - BPF_STMT(BPF_STX, 11), - BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb), - BPF_STMT(BPF_STX, 12), - BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf), - BPF_STMT(BPF_STX, 13), - BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde), - BPF_STMT(BPF_STX, 14), - BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad), - BPF_STMT(BPF_STX, 15), - BPF_STMT(BPF_LDX | BPF_MEM, 0), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 1), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 2), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 3), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 4), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 5), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 6), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 7), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 8), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 9), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 10), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 11), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 12), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 13), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 14), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_LDX | BPF_MEM, 15), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0), + { + "ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_LE, R0, 32), + BPF_ALU64_REG(BPF_MOV, R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA, + INTERNAL, + { }, + { { 0, cpu_to_le32(0x76543210) } }, + }, + { + "ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, { }, - { { 0, 0x2a5a5e5 } }, + { { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } }, }, { - "check: SKF_AD_MAX", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF + SKF_AD_MAX), - BPF_STMT(BPF_RET | BPF_A, 0), + "ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_EXIT_INSN(), }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, - { }, + INTERNAL, { }, - .fill_helper = NULL, - .expected_errcode = -EINVAL, + { { 0, (u32) 
(cpu_to_le64(0xfedcba9876543210ULL) >> 32) } }, }, - { /* Passes checker but fails during runtime. */ - "LD [SKF_AD_OFF-1]", - .u.insns = { - BPF_STMT(BPF_LD | BPF_W | BPF_ABS, - SKF_AD_OFF - 1), - BPF_STMT(BPF_RET | BPF_K, 1), + /* BPF_LDX_MEM B/H/W/DW */ + { + "BPF_LDX_MEM | BPF_B", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x0102030405060708ULL), + BPF_LD_IMM64(R2, 0x0000000000000008ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_B, R0, R10, -1), +#else + BPF_LDX_MEM(BPF_B, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, + INTERNAL, { }, - { { 1, 0 } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "load 64-bit immediate", + "BPF_LDX_MEM | BPF_B, MSB set", .u.insns_int = { - BPF_LD_IMM64(R1, 0x567800001234LL), - BPF_MOV64_REG(R2, R1), - BPF_MOV64_REG(R3, R2), - BPF_ALU64_IMM(BPF_RSH, R2, 32), - BPF_ALU64_IMM(BPF_LSH, R3, 32), - BPF_ALU64_IMM(BPF_RSH, R3, 32), + BPF_LD_IMM64(R1, 0x8182838485868788ULL), + BPF_LD_IMM64(R2, 0x0000000000000088ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_B, R0, R10, -1), +#else + BPF_LDX_MEM(BPF_B, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), BPF_ALU64_IMM(BPF_MOV, R0, 0), - BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), - BPF_EXIT_INSN(), - BPF_LD_IMM64(R0, 0x1ffffffffLL), - BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 0 } }, + .stack_depth = 8, }, - /* BPF_ALU | BPF_MOV | BPF_X */ { - "ALU_MOV_X: dst = 2", + "BPF_LDX_MEM | BPF_H", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_LD_IMM64(R1, 0x0102030405060708ULL), + BPF_LD_IMM64(R2, 0x0000000000000708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_H, R0, R10, -2), +#else + BPF_LDX_MEM(BPF_H, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_X: dst = 4294967295", + "BPF_LDX_MEM | BPF_H, MSB set", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), - BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_LD_IMM64(R1, 0x8182838485868788ULL), + BPF_LD_IMM64(R2, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_H, R0, R10, -2), +#else + BPF_LDX_MEM(BPF_H, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU64_MOV_X: dst = 2", + "BPF_LDX_MEM | BPF_W", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_LD_IMM64(R1, 0x0102030405060708ULL), + BPF_LD_IMM64(R2, 0x0000000005060708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_W, R0, R10, -4), +#else + BPF_LDX_MEM(BPF_W, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU64_MOV_X: dst = 4294967295", + "BPF_LDX_MEM | BPF_W, MSB set", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), - BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_LD_IMM64(R1, 0x8182838485868788ULL), + BPF_LD_IMM64(R2, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_LDX_MEM(BPF_W, R0, R10, -4), +#else + 
BPF_LDX_MEM(BPF_W, R0, R10, -8), +#endif + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, + { { 0, 0 } }, + .stack_depth = 8, }, - /* BPF_ALU | BPF_MOV | BPF_K */ + /* BPF_STX_MEM B/H/W/DW */ { - "ALU_MOV_K: dst = 2", + "BPF_STX_MEM | BPF_B", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 2), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x0102030405060708ULL), + BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_B, R10, R2, -1), +#else + BPF_STX_MEM(BPF_B, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_K: dst = 4294967295", + "BPF_STX_MEM | BPF_B, MSB set", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_B, R10, R2, -1), +#else + BPF_STX_MEM(BPF_B, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff", + "BPF_STX_MEM | BPF_H", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x00000000ffffffffLL), - BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x0102030405060708ULL), + BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_H, R10, R2, -2), +#else + BPF_STX_MEM(BPF_H, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_K: small negative", + "BPF_STX_MEM | BPF_H, MSB set", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_H, R10, R2, -2), +#else + BPF_STX_MEM(BPF_H, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -123 } } + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_K: small negative zero extension", + "BPF_STX_MEM | BPF_W", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x0102030405060708ULL), + BPF_LD_IMM64(R3, 0x8090a0b005060708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_W, R10, R2, -4), +#else + BPF_STX_MEM(BPF_W, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0 } } + { { 0, 0 } }, + .stack_depth = 8, }, { - "ALU_MOV_K: large negative", + "BPF_STX_MEM | BPF_W, MSB set", .u.insns_int = { - 
BPF_ALU32_IMM(BPF_MOV, R0, -123456789), + BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL), + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x8090a0b085868788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), +#ifdef __BIG_ENDIAN + BPF_STX_MEM(BPF_W, R10, R2, -4), +#else + BPF_STX_MEM(BPF_W, R10, R2, -8), +#endif + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -123456789 } } + { { 0, 0 } }, + .stack_depth = 8, }, + /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */ { - "ALU_MOV_K: large negative zero extension", + "ST_MEM_B: Store/Load byte: max negative", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123456789), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_B, R10, -40, 0xff), + BPF_LDX_MEM(BPF_B, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0 } } + { { 0, 0xff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: dst = 2", + "ST_MEM_B: Store/Load byte: max positive", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7f), + BPF_LDX_MEM(BPF_H, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0x7f } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: dst = 2147483647", + "STX_MEM_B: Store/Load byte: max negative", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, 2147483647), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffLL), + BPF_STX_MEM(BPF_B, R10, R1, -40), + BPF_LDX_MEM(BPF_B, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2147483647 } }, + { { 0, 0xff } }, + .stack_depth = 40, }, { - "ALU64_OR_K: dst = 0x0", + "ST_MEM_H: Store/Load half word: max negative", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x0), - BPF_ALU64_IMM(BPF_MOV, R2, 0x0), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0xffff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0xffff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: dst = -1", + "ST_MEM_H: Store/Load half word: max positive", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7fff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0x7fff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: small negative", + "STX_MEM_H: Store/Load half word: max negative", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, -123), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffLL), + BPF_STX_MEM(BPF_H, R10, R1, -40), + BPF_LDX_MEM(BPF_H, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -123 } } + { { 0, 0xffff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: small negative sign extension", + "ST_MEM_W: Store/Load word: max negative", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, -123), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } } + { { 0, 0xffffffff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: large negative", + "ST_MEM_W: Store/Load word: max positive", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, 
R0, -123456789), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -123456789 } } + { { 0, 0x7fffffff } }, + .stack_depth = 40, }, { - "ALU64_MOV_K: large negative sign extension", + "STX_MEM_W: Store/Load word: max negative", .u.insns_int = { - BPF_ALU64_IMM(BPF_MOV, R0, -123456789), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffLL), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } } + { { 0, 0xffffffff } }, + .stack_depth = 40, }, - /* BPF_ALU | BPF_ADD | BPF_X */ { - "ALU_ADD_X: 1 + 2 = 3", + "ST_MEM_DW: Store/Load double word: max negative", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0xffffffff } }, + .stack_depth = 40, }, { - "ALU_ADD_X: 1 + 4294967294 = 4294967295", + "ST_MEM_DW: Store/Load double word: max negative 2", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), - BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_LD_IMM64(R2, 0xffff00000000ffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R2, R10, -40), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, + { { 0, 0x1 } }, + .stack_depth = 40, }, { - "ALU_ADD_X: 2 + 4294967294 = 0", + "ST_MEM_DW: Store/Load double word: max positive", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_LD_IMM64(R1, 4294967294U), - BPF_ALU32_REG(BPF_ADD, R0, R1), - BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, 0x7fffffff } }, + .stack_depth = 40, }, { - "ALU64_ADD_X: 1 + 2 = 3", + "STX_MEM_DW: Store/Load double word: max negative", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0xffffffff } }, + .stack_depth = 40, }, { - "ALU64_ADD_X: 1 + 4294967294 = 4294967295", + "STX_MEM_DW: Store double word: first word in memory", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), - BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0x0123456789abcdefLL), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, +#ifdef __BIG_ENDIAN + { { 0, 0x01234567 } }, +#else + { { 0, 0x89abcdef } }, +#endif + .stack_depth = 40, }, { - "ALU64_ADD_X: 2 + 4294967294 = 4294967296", + "STX_MEM_DW: Store double word: second word in memory", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_LD_IMM64(R1, 4294967294U), - BPF_LD_IMM64(R2, 4294967296ULL), - BPF_ALU64_REG(BPF_ADD, R0, R1), - BPF_JMP_REG(BPF_JEQ, R0, R2, 2), - BPF_MOV32_IMM(R0, 0), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 
0x0123456789abcdefLL), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -36), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, +#ifdef __BIG_ENDIAN + { { 0, 0x89abcdef } }, +#else + { { 0, 0x01234567 } }, +#endif + .stack_depth = 40, }, - /* BPF_ALU | BPF_ADD | BPF_K */ + /* BPF_STX | BPF_ATOMIC | BPF_W/DW */ { - "ALU_ADD_K: 1 + 2 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_ADD, R0, 2), - BPF_EXIT_INSN(), - }, + "STX_XADD_W: X + 1 + 1 + 1 + ...", + { }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 4134 } }, + .fill_helper = bpf_fill_stxw, }, { - "ALU_ADD_K: 3 + 0 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_ADD, R0, 0), - BPF_EXIT_INSN(), - }, + "STX_XADD_DW: X + 1 + 1 + 1 + ...", + { }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 4134 } }, + .fill_helper = bpf_fill_stxdw, }, + /* + * Exhaustive tests of atomic operation variants. + * Individual tests are expanded from template macros for all + * combinations of ALU operation, word size and fetching. + */ +#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0) + +#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \ +{ \ + "BPF_ATOMIC | " #width ", " #op ": Test: " \ + #old " " #logic " " #update " = " #result, \ + .u.insns_int = { \ + BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)), \ + BPF_ST_MEM(width, R10, -40, old), \ + BPF_ATOMIC_OP(width, op, R10, R5, -40), \ + BPF_LDX_MEM(width, R0, R10, -40), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, result } }, \ + .stack_depth = 40, \ +} +#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \ +{ \ + "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \ + #old " " #logic " " #update " = " #result, \ + .u.insns_int = { \ + BPF_ALU64_REG(BPF_MOV, R1, R10), \ + BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)), \ + BPF_ST_MEM(BPF_W, R10, -40, old), \ + BPF_ATOMIC_OP(width, op, R10, R0, -40), \ + BPF_ALU64_REG(BPF_MOV, R0, R10), \ + BPF_ALU64_REG(BPF_SUB, R0, R1), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + .stack_depth = 40, \ +} +#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \ +{ \ + "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \ + #old " " #logic " " #update " = " #result, \ + .u.insns_int = { \ + BPF_ALU64_REG(BPF_MOV, R0, R10), \ + BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)), \ + BPF_ST_MEM(width, R10, -40, old), \ + BPF_ATOMIC_OP(width, op, R10, R1, -40), \ + BPF_ALU64_REG(BPF_SUB, R0, R10), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + .stack_depth = 40, \ +} +#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \ +{ \ + "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \ + #old " " #logic " " #update " = " #result, \ + .u.insns_int = { \ + BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)), \ + BPF_ST_MEM(width, R10, -40, old), \ + BPF_ATOMIC_OP(width, op, R10, R3, -40), \ + BPF_ALU32_REG(BPF_MOV, R0, R3), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, (op) & BPF_FETCH ? 
old : update } }, \ + .stack_depth = 40, \ +} + /* BPF_ATOMIC | BPF_W: BPF_ADD */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), + /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + /* BPF_ATOMIC | BPF_DW: BPF_ADD */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), + /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), + /* BPF_ATOMIC | BPF_W: BPF_AND */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), + /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + /* BPF_ATOMIC | BPF_DW: BPF_AND */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), + /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), + /* BPF_ATOMIC | BPF_W: BPF_OR */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), + /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + /* BPF_ATOMIC | BPF_DW: BPF_OR */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), + /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | 
BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), + /* BPF_ATOMIC | BPF_W: BPF_XOR */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), + /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + /* BPF_ATOMIC | BPF_DW: BPF_XOR */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), + /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), + /* BPF_ATOMIC | BPF_W: BPF_XCHG */ + BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + /* BPF_ATOMIC | BPF_DW: BPF_XCHG */ + BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), + BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), +#undef BPF_ATOMIC_POISON +#undef BPF_ATOMIC_OP_TEST1 +#undef BPF_ATOMIC_OP_TEST2 +#undef BPF_ATOMIC_OP_TEST3 +#undef BPF_ATOMIC_OP_TEST4 + /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */ { - "ALU_ADD_K: 1 + 4294967294 = 4294967295", + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U), + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4294967295U } }, + { { 0, 0x01234567 } }, + .stack_depth = 40, }, { - "ALU_ADD_K: 4294967294 + 2 = 0", + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967294U), - BPF_ALU32_IMM(BPF_ADD, R0, 2), - BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, 0x89abcdef } }, + .stack_depth = 40, }, { - "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0x00000000ffffffff), - BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - 
BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0x01234567 } }, + .stack_depth = 40, }, { - "ALU_ADD_K: 0 + 0xffff = 0xffff", + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0xffff), - BPF_ALU32_IMM(BPF_ADD, R2, 0xffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0x01234567 } }, + .stack_depth = 40, }, { - "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0x7fffffff), - BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), + BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), + BPF_ALU32_REG(BPF_MOV, R0, R3), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0x89abcdef } }, + .stack_depth = 40, }, + /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */ { - "ALU_ADD_K: 0 + 0x80000000 = 0x80000000", + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0x80000000), - BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0 } }, + .stack_depth = 40, }, { - "ALU_ADD_K: 0 + 0x80008000 = 0x80008000", + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0x80008000), - BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_STX_MEM(BPF_DW, R10, R0, -40), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_REG(BPF_SUB, R0, R2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 0 } }, + .stack_depth = 40, }, { - "ALU64_ADD_K: 1 + 2 = 3", + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_ALU64_IMM(BPF_ADD, R0, 1), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), + BPF_JMP_REG(BPF_JNE, R0, 
R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0 } }, + .stack_depth = 40, }, { - "ALU64_ADD_K: 3 + 0 = 3", + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_ADD, R0, 0), + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_ALU64_IMM(BPF_ADD, R0, 1), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0 } }, + .stack_depth = 40, }, { - "ALU64_ADD_K: 1 + 2147483646 = 2147483647", + "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_ADD, R0, 2147483646), + BPF_LD_IMM64(R1, 0x0123456789abcdefULL), + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), + BPF_LD_IMM64(R0, 0xfedcba9876543210ULL), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_REG(BPF_SUB, R0, R2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2147483647 } }, + { { 0, 0 } }, + .stack_depth = 40, }, + /* BPF_JMP32 | BPF_JEQ | BPF_K */ { - "ALU64_ADD_K: 4294967294 + 2 = 4294967296", + "JMP32_JEQ_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967294U), - BPF_LD_IMM64(R1, 4294967296ULL), - BPF_ALU64_IMM(BPF_ADD, R0, 2), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1), + BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, - { }, - { { 0, 1 } }, - }, - { - "ALU64_ADD_K: 2147483646 + -2147483647 = -1", - .u.insns_int = { - BPF_LD_IMM64(R0, 2147483646), - BPF_ALU64_IMM(BPF_ADD, R0, -2147483647), - BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1 } }, + { { 0, 123 } } }, { - "ALU64_ADD_K: 1 + 0 = 1", + "JMP32_JEQ_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x1), - BPF_LD_IMM64(R3, 0x1), - BPF_ALU64_IMM(BPF_ADD, R2, 0x0), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 12345678), + BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1), + BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 12345678 } } }, { - "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff", + "JMP32_JEQ_K: negative immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), + BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, -123 } } }, + /* BPF_JMP32 | BPF_JEQ | BPF_X */ { - "ALU64_ADD_K: 0 + 0xffff = 0xffff", + "JMP32_JEQ_X", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0xffff), - BPF_ALU64_IMM(BPF_ADD, R2, 0xffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1234), + BPF_ALU32_IMM(BPF_MOV, R1, 4321), + BPF_JMP32_REG(BPF_JEQ, R0, R1, 2), + 
BPF_ALU32_IMM(BPF_MOV, R1, 1234), + BPF_JMP32_REG(BPF_JEQ, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1234 } } }, + /* BPF_JMP32 | BPF_JNE | BPF_K */ { - "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff", - .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0x7fffffff), - BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + "JMP32_JNE_K: Small immediate", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), + BPF_JMP32_IMM(BPF_JNE, R0, 321, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 123 } } }, { - "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000", + "JMP32_JNE_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0xffffffff80000000LL), - BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 12345678), + BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1), + BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 12345678 } } }, { - "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000", + "JMP32_JNE_K: negative immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0), - BPF_LD_IMM64(R3, 0xffffffff80008000LL), - BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JNE, R0, -123, 1), + BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, -123 } } }, - /* BPF_ALU | BPF_SUB | BPF_X */ + /* BPF_JMP32 | BPF_JNE | BPF_X */ { - "ALU_SUB_X: 3 - 1 = 2", + "JMP32_JNE_X", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 1234), + BPF_ALU32_IMM(BPF_MOV, R1, 1234), + BPF_JMP32_REG(BPF_JNE, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 4321), + BPF_JMP32_REG(BPF_JNE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1234 } } }, + /* BPF_JMP32 | BPF_JSET | BPF_K */ { - "ALU_SUB_X: 4294967295 - 4294967294 = 1", + "JMP32_JSET_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), - BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP32_IMM(BPF_JSET, R0, 2, 1), + BPF_JMP32_IMM(BPF_JSET, R0, 3, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, 1 } } }, { - "ALU64_SUB_X: 3 - 1 = 2", + "JMP32_JSET_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000), + BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1), + BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0x40000000 } } }, { - "ALU64_SUB_X: 4294967295 - 4294967294 = 1", + "JMP32_JSET_K: negative immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), - BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JSET, R0, -1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), 
BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, -123 } } }, - /* BPF_ALU | BPF_SUB | BPF_K */ + /* BPF_JMP32 | BPF_JSET | BPF_X */ { - "ALU_SUB_K: 3 - 1 = 2", + "JMP32_JSET_X", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_SUB, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 8), + BPF_ALU32_IMM(BPF_MOV, R1, 7), + BPF_JMP32_REG(BPF_JSET, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2), + BPF_JMP32_REG(BPF_JNE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 8 } } }, + /* BPF_JMP32 | BPF_JGT | BPF_K */ { - "ALU_SUB_K: 3 - 0 = 3", + "JMP32_JGT_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_SUB, R0, 0), + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JGT, R0, 123, 1), + BPF_JMP32_IMM(BPF_JGT, R0, 122, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 123 } } }, { - "ALU_SUB_K: 4294967295 - 4294967294 = 1", + "JMP32_JGT_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1), + BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JGT | BPF_X */ { - "ALU64_SUB_K: 3 - 1 = 2", + "JMP32_JGT_X", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_SUB, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_JMP32_REG(BPF_JGT, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), + BPF_JMP32_REG(BPF_JGT, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JGE | BPF_K */ { - "ALU64_SUB_K: 3 - 0 = 3", + "JMP32_JGE_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_SUB, R0, 0), + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JGE, R0, 124, 1), + BPF_JMP32_IMM(BPF_JGE, R0, 123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 123 } } }, { - "ALU64_SUB_K: 4294967294 - 4294967295 = -1", + "JMP32_JGE_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967294U), - BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1), + BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JGE | BPF_X */ { - "ALU64_ADD_K: 2147483646 - 2147483647 = -1", + "JMP32_JGE_X", .u.insns_int = { - BPF_LD_IMM64(R0, 2147483646), - BPF_ALU64_IMM(BPF_SUB, R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_JMP32_REG(BPF_JGE, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), + BPF_JMP32_REG(BPF_JGE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1 } }, + { { 0, 0xfffffffe } } }, - /* BPF_ALU | BPF_MUL | BPF_X */ + /* BPF_JMP32 | BPF_JLT | BPF_K */ { - "ALU_MUL_X: 2 * 3 = 6", + "JMP32_JLT_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 3), - BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JLT, R0, 123, 1), + BPF_JMP32_IMM(BPF_JLT, R0, 124, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 6 } 
}, + { { 0, 123 } } }, { - "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + "JMP32_JLT_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8), - BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1), + BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xFFFFFFF0 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JLT | BPF_X */ { - "ALU_MUL_X: -1 * -1 = 1", + "JMP32_JLT_X", .u.insns_int = { - BPF_LD_IMM64(R0, -1), - BPF_ALU32_IMM(BPF_MOV, R1, -1), - BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), + BPF_JMP32_REG(BPF_JLT, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_JMP32_REG(BPF_JLT, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JLE | BPF_K */ { - "ALU64_MUL_X: 2 * 3 = 6", + "JMP32_JLE_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 3), - BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 123), + BPF_JMP32_IMM(BPF_JLE, R0, 122, 1), + BPF_JMP32_IMM(BPF_JLE, R0, 123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 6 } }, + { { 0, 123 } } }, { - "ALU64_MUL_X: 1 * 2147483647 = 2147483647", + "JMP32_JLE_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), - BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1), + BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2147483647 } }, + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JLE | BPF_X */ { - "ALU64_MUL_X: 64x64 multiply, low word", + "JMP32_JLE_X", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0fedcba987654321LL), - BPF_LD_IMM64(R1, 0x123456789abcdef0LL), - BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), + BPF_JMP32_REG(BPF_JLE, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), + BPF_JMP32_REG(BPF_JLE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xe5618cf0 } } + { { 0, 0xfffffffe } } }, + /* BPF_JMP32 | BPF_JSGT | BPF_K */ { - "ALU64_MUL_X: 64x64 multiply, high word", + "JMP32_JSGT_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0fedcba987654321LL), - BPF_LD_IMM64(R1, 0x123456789abcdef0LL), - BPF_ALU64_REG(BPF_MUL, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1), + BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x2236d88f } } + { { 0, -123 } } }, - /* BPF_ALU | BPF_MUL | BPF_K */ { - "ALU_MUL_K: 2 * 3 = 6", + "JMP32_JSGT_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MUL, R0, 3), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1), + BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 6 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSGT | BPF_X */ { - "ALU_MUL_K: 3 * 1 = 3", + "JMP32_JSGT_X", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MUL, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_ALU32_IMM(BPF_MOV, R1, 
-12345678), + BPF_JMP32_REG(BPF_JSGT, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, -12345679), + BPF_JMP32_REG(BPF_JSGT, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSGE | BPF_K */ { - "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + "JMP32_JSGE_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1), + BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xFFFFFFF0 } }, + { { 0, -123 } } }, { - "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff", + "JMP32_JSGE_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x1), - BPF_LD_IMM64(R3, 0x00000000ffffffff), - BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1), + BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSGE | BPF_X */ { - "ALU64_MUL_K: 2 * 3 = 6", + "JMP32_JSGE_X", .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU64_IMM(BPF_MUL, R0, 3), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_ALU32_IMM(BPF_MOV, R1, -12345677), + BPF_JMP32_REG(BPF_JSGE, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), + BPF_JMP32_REG(BPF_JSGE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 6 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSLT | BPF_K */ { - "ALU64_MUL_K: 3 * 1 = 3", + "JMP32_JSLT_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_MUL, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1), + BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, -123 } } }, { - "ALU64_MUL_K: 1 * 2147483647 = 2147483647", + "JMP32_JSLT_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_MUL, R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1), + BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2147483647 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSLT | BPF_X */ { - "ALU64_MUL_K: 1 * -2147483647 = -2147483647", + "JMP32_JSLT_X", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_MUL, R0, -2147483647), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), + BPF_JMP32_REG(BPF_JSLT, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, -12345677), + BPF_JMP32_REG(BPF_JSLT, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -2147483647 } }, + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSLE | BPF_K */ { - "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff", + "JMP32_JSLE_K: Small immediate", .u.insns_int = { - BPF_LD_IMM64(R2, 0x1), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, -123), + BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1), + BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } 
}, + { { 0, -123 } } }, { - "ALU64_MUL_K: 64x32 multiply, low word", + "JMP32_JSLE_K: Large immediate", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1), + BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xe242d208 } } + { { 0, -12345678 } } }, + /* BPF_JMP32 | BPF_JSLE | BPF_K */ { - "ALU64_MUL_K: 64x32 multiply, high word", + "JMP32_JSLE_X", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU32_IMM(BPF_MOV, R0, -12345678), + BPF_ALU32_IMM(BPF_MOV, R1, -12345679), + BPF_JMP32_REG(BPF_JSLE, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R1, -12345678), + BPF_JMP32_REG(BPF_JSLE, R0, R1, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xc28f5c28 } } + { { 0, -12345678 } } }, - /* BPF_ALU | BPF_DIV | BPF_X */ + /* BPF_JMP | BPF_EXIT */ { - "ALU_DIV_X: 6 / 2 = 3", + "JMP_EXIT", .u.insns_int = { - BPF_LD_IMM64(R0, 6), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0x4711), BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0x4712), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0x4711 } }, }, + /* BPF_JMP | BPF_JA */ { - "ALU_DIV_X: 4294967295 / 4294967295 = 1", + "JMP_JA: Unconditional jump: if (true) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), - BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLT | BPF_K */ { - "ALU64_DIV_X: 6 / 2 = 3", + "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 6), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), + BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, { - "ALU64_DIV_X: 2147483647 / 2147483647 = 1", + "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0", .u.insns_int = { - BPF_LD_IMM64(R0, 2147483647), - BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), - BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSGT | BPF_K */ { - "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001", + "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0xffffffffffffffffLL), - BPF_LD_IMM64(R4, 0xffffffffffffffffLL), - BPF_LD_IMM64(R3, 0x0000000000000001LL), - BPF_ALU64_REG(BPF_DIV, R2, R4), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_DIV | BPF_K */ { - "ALU_DIV_K: 6 / 2 = 3", + "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0", .u.insns_int = { - BPF_LD_IMM64(R0, 6), - BPF_ALU32_IMM(BPF_DIV, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + 
BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLE | BPF_K */ { - "ALU_DIV_K: 3 / 1 = 3", + "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_DIV, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), + BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, { - "ALU_DIV_K: 4294967295 / 4294967295 = 1", + "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, @@ -3502,48 +9291,66 @@ static struct bpf_test tests[] = { { { 0, 1 } }, }, { - "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1", + "JMP_JSLE_K: Signed jump: value walk 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0xffffffffffffffffLL), - BPF_LD_IMM64(R3, 0x1UL), - BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 6), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, { - "ALU64_DIV_K: 6 / 2 = 3", + "JMP_JSLE_K: Signed jump: value walk 2", .u.insns_int = { - BPF_LD_IMM64(R0, 6), - BPF_ALU64_IMM(BPF_DIV, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), + BPF_ALU64_IMM(BPF_SUB, R1, 2), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), + BPF_ALU64_IMM(BPF_SUB, R1, 2), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSGE | BPF_K */ { - "ALU64_DIV_K: 3 / 1 = 3", + "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_DIV, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, { - "ALU64_DIV_K: 2147483647 / 2147483647 = 1", + "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 2147483647), - BPF_ALU64_IMM(BPF_DIV, R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, @@ -3551,109 +9358,141 @@ static struct bpf_test tests[] = { { { 0, 1 } }, }, { - "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001", + "JMP_JSGE_K: Signed jump: value walk 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0xffffffffffffffffLL), - BPF_LD_IMM64(R3, 0x0000000000000001LL), - BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), 
- BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -3), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 6), + BPF_ALU64_IMM(BPF_ADD, R1, 1), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), + BPF_ALU64_IMM(BPF_ADD, R1, 1), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), + BPF_ALU64_IMM(BPF_ADD, R1, 1), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_MOD | BPF_X */ { - "ALU_MOD_X: 3 % 2 = 1", + "JMP_JSGE_K: Signed jump: value walk 2", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -3), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), + BPF_ALU64_IMM(BPF_ADD, R1, 2), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), + BPF_ALU64_IMM(BPF_ADD, R1, 2), + BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ BPF_EXIT_INSN(), }, INTERNAL, { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JGT | BPF_K */ { - "ALU_MOD_X: 4294967295 % 4294967293 = 2", + "JMP_JGT_K: if (3 > 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U), - BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGT, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, { - "ALU64_MOD_X: 3 % 2 = 1", + "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_JMP_IMM(BPF_JGT, R1, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_K */ { - "ALU64_MOD_X: 2147483647 % 2147483645 = 2", + "JMP_JLT_K: if (2 < 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 2147483647), - BPF_ALU32_IMM(BPF_MOV, R1, 2147483645), - BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 2), + BPF_JMP_IMM(BPF_JLT, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_MOD | BPF_K */ { - "ALU_MOD_K: 3 % 2 = 1", + "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOD, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 1), + BPF_JMP_IMM(BPF_JLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JGE | BPF_K */ { - "ALU_MOD_K: 3 % 1 = 0", + "JMP_JGE_K: if (3 >= 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOD, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLE | BPF_K */ { - "ALU_MOD_K: 4294967295 % 4294967293 = 2", + "JMP_JLE_K: if (2 <= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 4294967295U), - BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 2), + BPF_JMP_IMM(BPF_JLE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } 
}, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ { - "ALU64_MOD_K: 3 % 2 = 1", + "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_MOD, R0, 2), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ + BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */ + BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */ BPF_EXIT_INSN(), }, INTERNAL, @@ -3661,4757 +9500,4555 @@ static struct bpf_test tests[] = { { { 0, 1 } }, }, { - "ALU64_MOD_K: 3 % 1 = 0", + "JMP_JGE_K: if (3 >= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_MOD, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_K jump backwards */ { - "ALU64_MOD_K: 2147483647 % 2147483645 = 2", + "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)", .u.insns_int = { - BPF_LD_IMM64(R0, 2147483647), - BPF_ALU64_IMM(BPF_MOD, R0, 2147483645), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ + BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */ + BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */ BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_AND | BPF_X */ { - "ALU_AND_X: 3 & 2 = 2", + "JMP_JLE_K: if (3 <= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JLE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JNE | BPF_K */ { - "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + "JMP_JNE_K: if (3 != 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffff), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JEQ | BPF_K */ { - "ALU64_AND_X: 3 & 2 = 2", + "JMP_JEQ_K: if (3 == 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JEQ, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSET | BPF_K */ { - "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + "JMP_JSET_K: if (0x3 & 0x2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffff), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JSET, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_AND | BPF_K */ { - "ALU_AND_K: 3 & 2 = 2", + "JMP_JSET_K: if (0x3 & 0xffffffff) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU32_IMM(BPF_AND, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + 
BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSGT | BPF_X */ { - "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffff), - BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, { - "ALU_AND_K: Small immediate", + "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), - BPF_ALU32_IMM(BPF_AND, R0, 15), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 4 } } + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLT | BPF_X */ { - "ALU_AND_K: Large immediate", + "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), - BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSLT, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xa1b2c3d4 } } + { { 0, 1 } }, }, { - "ALU_AND_K: Zero extension", + "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL), - BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSLT, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSGE | BPF_X */ { - "ALU64_AND_K: 3 & 2 = 2", + "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_AND, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 2 } }, + { { 0, 1 } }, }, { - "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0xffffffff), - BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLE | BPF_X */ { - "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000", + "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x0000000000000000LL), - BPF_ALU64_IMM(BPF_AND, R2, 0x0), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSLE, R2, R1, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { 
{ 0, 1 } }, }, { - "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000", + "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), - BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSLE, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JGT | BPF_X */ { - "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff", + "JMP_JGT_X: if (3 > 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0xffffffffffffffffLL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, { - "ALU64_AND_K: Sign extension 1", + "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x00000000090b0d0fLL), - BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, 1), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_X */ { - "ALU64_AND_K: Sign extension 2", + "JMP_JLT_X: if (2 < 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL), - BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLT, R2, R1, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 1 } }, }, - /* BPF_ALU | BPF_OR | BPF_X */ { - "ALU_OR_X: 1 | 2 = 3", + "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, 1), + BPF_JMP_REG(BPF_JLT, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JGE | BPF_X */ { - "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff", + "JMP_JGE_X: if (3 >= 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, { - "ALU64_OR_X: 1 | 2 = 3", + "JMP_JGE_X: if (3 >= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 2), - BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + 
BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLE | BPF_X */ { - "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff", + "JMP_JLE_X: if (2 <= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, - /* BPF_ALU | BPF_OR | BPF_K */ { - "ALU_OR_K: 1 | 2 = 3", + "JMP_JLE_X: if (3 <= 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_OR, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JLE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, }, { - "ALU_OR_K: 0 & 0xffffffff = 0xffffffff", + /* Mainly testing JIT + imm64 here. */ + "JMP_JGE_X: ldimm64 test 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 2), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 0xeeeeeeeeU } }, }, { - "ALU_OR_K: Small immediate", + "JMP_JGE_X: ldimm64 test 2", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), - BPF_ALU32_IMM(BPF_OR, R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 0), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x01020305 } } + { { 0, 0xffffffffU } }, }, { - "ALU_OR_K: Large immediate", + "JMP_JGE_X: ldimm64 test 3", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), - BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 4), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xa1b2c3d4 } } + { { 0, 1 } }, }, { - "ALU_OR_K: Zero extension", + "JMP_JLE_X: ldimm64 test 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL), - BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 2), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 0xeeeeeeeeU } }, }, { - "ALU64_OR_K: 1 | 2 = 3", + "JMP_JLE_X: ldimm64 test 2", .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_OR, R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 0), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 0xffffffffU } }, }, { - "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff", + "JMP_JLE_X: ldimm64 test 3", .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 4), + 
BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffffffff } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JNE | BPF_X */ { - "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000", + "JMP_JNE_X: if (3 != 2) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), - BPF_ALU64_IMM(BPF_OR, R2, 0x0), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JEQ | BPF_X */ { - "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff", + "JMP_JEQ_X: if (3 == 3) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JEQ, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSET | BPF_X */ { - "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff", + "JMP_JSET_X: if (0x3 & 0x2) return 1", .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000000000000000LL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x1 } }, + { { 0, 1 } }, }, { - "ALU64_OR_K: Sign extension 1", + "JMP_JSET_X: if (0x3 & 0xffffffff) return 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x012345678fafcfefLL), - BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 0xffffffff), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 1 } } + { { 0, 1 } }, }, { - "ALU64_OR_K: Sign extension 2", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL), - BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "JMP_JA: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_ja, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Maximum possible literals", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns1, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Single literal", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xfefefefe } }, + .fill_helper = bpf_fill_maxinsns2, + }, + { /* Mainly checking JIT here. 
*/ + "BPF_MAXINSNS: Run/add until end", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x947bf368 } }, + .fill_helper = bpf_fill_maxinsns3, + }, + { + "BPF_MAXINSNS: Too many instructions", + { }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = bpf_fill_maxinsns4, + .expected_errcode = -EINVAL, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xabababab } }, + .fill_helper = bpf_fill_maxinsns5, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Ctx heavy transformations", + { }, + CLASSIC, + { }, + { + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } + }, + .fill_helper = bpf_fill_maxinsns6, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Call heavy transformations", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 1, 0 }, { 10, 0 } }, + .fill_helper = bpf_fill_maxinsns7, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Jump heavy test", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns8, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump backwards", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xcbababab } }, + .fill_helper = bpf_fill_maxinsns9, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Edge hopping nuthouse", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xabababac } }, + .fill_helper = bpf_fill_maxinsns10, + }, + { + "BPF_MAXINSNS: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_maxinsns11, + }, + { + "BPF_MAXINSNS: jump over MSH", + { }, + CLASSIC | FLAG_EXPECTED_FAIL, + { 0xfa, 0xfb, 0xfc, 0xfd, }, + { { 4, 0xabababab } }, + .fill_helper = bpf_fill_maxinsns12, + .expected_errcode = -EINVAL, + }, + { + "BPF_MAXINSNS: exec all MSH", + { }, + CLASSIC, + { 0xfa, 0xfb, 0xfc, 0xfd, }, + { { 4, 0xababab83 } }, + .fill_helper = bpf_fill_maxinsns13, + }, + { + "BPF_MAXINSNS: ld_abs+get_processor_id", + { }, + CLASSIC, + { }, + { { 1, 0xbee } }, + .fill_helper = bpf_fill_ld_abs_get_processor_id, + }, + /* + * LD_IND / LD_ABS on fragmented SKBs + */ + { + "LD_IND byte frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x42} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_IND halfword frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_SKB_FRAG, + { }, + { {0x40, 0x4344} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, + }, + { + "LD_IND word frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, + CLASSIC | FLAG_SKB_FRAG, { }, - { { 0, 1 } } + { {0x40, 0x21071983} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, }, - /* BPF_ALU | BPF_XOR | BPF_X */ { - "ALU_XOR_X: 5 ^ 6 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 5), - BPF_ALU32_IMM(BPF_MOV, R1, 6), - BPF_ALU32_REG(BPF_XOR, R0, R1), - BPF_EXIT_INSN(), + "LD_IND halfword mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 3 } 
}, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x0519} }, + .frag_data = { 0x19, 0x82 }, }, { - "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU32_REG(BPF_XOR, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x40), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0xfffffffe } }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x25051982} }, + .frag_data = { 0x19, 0x82 }, }, { - "ALU64_XOR_X: 5 ^ 6 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 5), - BPF_ALU32_IMM(BPF_MOV, R1, 6), - BPF_ALU64_REG(BPF_XOR, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS byte frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, + CLASSIC | FLAG_SKB_FRAG, { }, - { { 0, 3 } }, + { {0x40, 0x42} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, }, { - "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_ALU64_REG(BPF_XOR, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS halfword frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, + CLASSIC | FLAG_SKB_FRAG, { }, - { { 0, 0xfffffffe } }, + { {0x40, 0x4344} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, }, - /* BPF_ALU | BPF_XOR | BPF_K */ { - "ALU_XOR_K: 5 ^ 6 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 5), - BPF_ALU32_IMM(BPF_XOR, R0, 6), - BPF_EXIT_INSN(), + "LD_ABS word frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, + CLASSIC | FLAG_SKB_FRAG, { }, - { { 0, 3 } }, + { {0x40, 0x21071983} }, + .frag_data = { + 0x42, 0x00, 0x00, 0x00, + 0x43, 0x44, 0x00, 0x00, + 0x21, 0x07, 0x19, 0x83, + }, }, { - "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff), - BPF_EXIT_INSN(), + "LD_ABS halfword mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0xfffffffe } }, + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x0519} }, + .frag_data = { 0x19, 0x82 }, }, { - "ALU_XOR_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304), - BPF_ALU32_IMM(BPF_XOR, R0, 15), - BPF_EXIT_INSN(), + "LD_ABS word mixed head/frag", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x0102030b } } + CLASSIC | FLAG_SKB_FRAG, + { [0x3e] = 0x25, [0x3f] = 0x05, }, + { {0x40, 0x25051982} }, + .frag_data = { 0x19, 0x82 }, }, + /* + * LD_IND / LD_ABS on non fragmented SKBs + */ { - "ALU_XOR_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4), - BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf), - BPF_EXIT_INSN(), + /* + * this tests that the JIT/interpreter correctly resets X + * before using it in an LD_IND instruction. 
+ */ + "LD_IND byte default X", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x5e4d3c2b } } + CLASSIC, + { [0x1] = 0x42 }, + { {0x40, 0x42 } }, }, { - "ALU_XOR_K: Zero extension", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x00000000795b3d1fLL), - BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND byte positive offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 } }, }, { - "ALU64_XOR_K: 5 ^ 6 = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, 5), - BPF_ALU64_IMM(BPF_XOR, R0, 6), - BPF_EXIT_INSN(), + "LD_IND byte negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 3 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x05 } }, }, { - "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff), - BPF_EXIT_INSN(), + "LD_IND byte positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0xfffffffe } }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xff } }, }, { - "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000", - .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), - BPF_ALU64_IMM(BPF_XOR, R2, 0x0), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND byte positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x1 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff", - .u.insns_int = { - BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), - BPF_LD_IMM64(R3, 0xffff00000000ffffLL), - BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND byte negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, + }, + { + "LD_IND byte negative offset, multiple calls", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x1 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 }, }, }, { - "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff", - .u.insns_int = { 
- BPF_LD_IMM64(R2, 0x0000000000000000LL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND halfword positive offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x1 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0xdd88 } }, }, { - "ALU64_XOR_K: Sign extension 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL), - BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND halfword negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0xbb66 } }, }, { - "ALU64_XOR_K: Sign extension 2", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL), - BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0), - BPF_JMP_REG(BPF_JEQ, R0, R1, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), + "LD_IND halfword unaligned", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + }, + { {0x40, 0x66cc } }, }, - /* BPF_ALU | BPF_LSH | BPF_X */ { - "ALU_LSH_X: 1 << 1 = 2", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU32_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND halfword positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3d), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 2 } }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffff } }, }, { - "ALU_LSH_X: 1 << 31 = 0x80000000", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 31), - BPF_ALU32_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND halfword positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x80000000 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU_LSH_X: 0x12345678 << 12 = 0x45678000", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU32_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND halfword negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x45678000 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, }, { - "ALU64_LSH_X: 1 << 1 = 2", - .u.insns_int = 
{ - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word positive offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 2 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xee99ffaa } }, }, { - "ALU64_LSH_X: 1 << 31 = 0x80000000", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_MOV, R1, 31), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word negative offset", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x80000000 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xaa55bb66 } }, }, { - "ALU64_LSH_X: Shift < 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word unaligned (addr & 3 == 2)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0xbcdef000 } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xbb66cc77 } }, }, { - "ALU64_LSH_X: Shift < 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_IND word unaligned (addr & 3 == 1)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x3456789a } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x55bb66cc } }, }, { - "ALU64_LSH_X: Shift > 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word unaligned (addr & 3 == 3)", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x20), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, }, - INTERNAL, - { }, - { { 0, 0 } } + { {0x40, 0x66cc77dd } }, }, { - "ALU64_LSH_X: Shift > 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_IND word positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - 
INTERNAL, - { }, - { { 0, 0x9abcdef0 } } + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffffffff } }, }, { - "ALU64_LSH_X: Shift == 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_IND word positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_LSH_X: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_IND word negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, }, { - "ALU64_LSH_X: Zero shift, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS byte", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xcc } }, }, { - "ALU64_LSH_X: Zero shift, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_LSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS byte positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x01234567 } } + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xff } }, }, - /* BPF_ALU | BPF_LSH | BPF_K */ { - "ALU_LSH_K: 1 << 1 = 2", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_LSH, R0, 1), - BPF_EXIT_INSN(), + "LD_ABS byte positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 2 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU_LSH_K: 1 << 31 = 0x80000000", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU32_IMM(BPF_LSH, R0, 31), - BPF_EXIT_INSN(), + "LD_ABS byte negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x80000000 } }, + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = -EINVAL, }, { - "ALU_LSH_K: 0x12345678 << 12 = 0x45678000", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_LSH, R0, 12), - BPF_EXIT_INSN(), + "LD_ABS byte negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x45678000 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 
0x82 }, }, }, { - "ALU_LSH_K: 0x12345678 << 0 = 0x12345678", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_LSH, R0, 0), - BPF_EXIT_INSN(), + "LD_ABS byte negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x12345678 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_LSH_K: 1 << 1 = 2", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_LSH, R0, 1), - BPF_EXIT_INSN(), + "LD_ABS byte negative offset, multiple calls", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 2 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 }, }, }, { - "ALU64_LSH_K: 1 << 31 = 0x80000000", - .u.insns_int = { - BPF_LD_IMM64(R0, 1), - BPF_ALU64_IMM(BPF_LSH, R0, 31), - BPF_EXIT_INSN(), + "LD_ABS halfword", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x80000000 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xdd88 } }, }, { - "ALU64_LSH_K: Shift < 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 12), - BPF_EXIT_INSN(), + "LD_ABS halfword unaligned", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0xbcdef000 } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x99ff } }, }, { - "ALU64_LSH_K: Shift < 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 12), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS halfword positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x3456789a } } + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffff } }, }, { - "ALU64_LSH_K: Shift > 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 36), - BPF_EXIT_INSN(), + "LD_ABS halfword positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_LSH_K: Shift > 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 36), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS halfword negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x9abcdef0 } } + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = -EINVAL, }, { - "ALU64_LSH_K: Shift == 
32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS halfword negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x1982 }, }, }, { - "ALU64_LSH_K: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 32), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS halfword negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_LSH_K: Zero shift", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_LSH, R0, 0), - BPF_EXIT_INSN(), + "LD_ABS word", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xaa55bb66 } }, }, - /* BPF_ALU | BPF_RSH | BPF_X */ { - "ALU_RSH_X: 2 >> 1 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU32_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word unaligned (addr & 3 == 2)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0xdd88ee99 } }, }, { - "ALU_RSH_X: 0x80000000 >> 31 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x80000000), - BPF_ALU32_IMM(BPF_MOV, R1, 31), - BPF_ALU32_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word unaligned (addr & 3 == 1)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x77dd88ee } }, }, { - "ALU_RSH_X: 0x12345678 >> 20 = 0x123", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_MOV, R1, 20), - BPF_ALU32_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word unaligned (addr & 3 == 3)", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x123 } } + CLASSIC, + { + [0x1c] = 0xaa, [0x1d] = 0x55, + [0x1e] = 0xbb, [0x1f] = 0x66, + [0x20] = 0xcc, [0x21] = 0x77, + [0x22] = 0xdd, [0x23] = 0x88, + [0x24] = 0xee, [0x25] = 0x99, + [0x26] = 0xff, [0x27] = 0xaa, + }, + { {0x40, 0x88ee99ff } }, }, { - "ALU64_RSH_X: 2 >> 1 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 1), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c), + BPF_STMT(BPF_RET | 
BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffffffff } }, }, { - "ALU64_RSH_X: 0x80000000 >> 31 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x80000000), - BPF_ALU32_IMM(BPF_MOV, R1, 31), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_RSH_X: Shift < 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x56789abc } } + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = -EINVAL, }, { - "ALU64_RSH_X: Shift < 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LD_ABS word negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x00081234 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x25051982 }, }, }, { - "ALU64_RSH_X: Shift > 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LD_ABS word negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x08123456 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, }, { - "ALU64_RSH_X: Shift > 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LDX_MSH standalone, preserved A", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0xffeebbaa }, }, }, { - "ALU64_RSH_X: Shift == 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LDX_MSH standalone, preserved A 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x81234567 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x175e9d63 }, }, }, { - "ALU64_RSH_X: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LDX_MSH standalone, test result 1", + .u.insns = { + 
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x14 }, }, }, { - "ALU64_RSH_X: Zero shift, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_EXIT_INSN(), + "LDX_MSH standalone, test result 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x24 }, }, }, { - "ALU64_RSH_X: Zero shift, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_RSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "LDX_MSH standalone, negative offset", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x81234567 } } + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0 }, }, }, - /* BPF_ALU | BPF_RSH | BPF_K */ { - "ALU_RSH_K: 2 >> 1 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU32_IMM(BPF_RSH, R0, 1), - BPF_EXIT_INSN(), + "LDX_MSH standalone, negative offset 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x24 }, }, }, { - "ALU_RSH_K: 0x80000000 >> 31 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x80000000), - BPF_ALU32_IMM(BPF_RSH, R0, 31), - BPF_EXIT_INSN(), + "LDX_MSH standalone, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0 }, }, }, + /* + * verify that the interpreter or JIT correctly sets A and X + * to 0. 
+ */ { - "ALU_RSH_K: 0x12345678 >> 20 = 0x123", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_RSH, R0, 20), - BPF_EXIT_INSN(), + "ADD default X", + .u.insns = { + /* + * A = 0x42 + * A = A + X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x123 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, }, { - "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678), - BPF_ALU32_IMM(BPF_RSH, R0, 0), - BPF_EXIT_INSN(), + "ADD default A", + .u.insns = { + /* + * A = A + 0x42 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x12345678 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, }, { - "ALU64_RSH_K: 2 >> 1 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 2), - BPF_ALU64_IMM(BPF_RSH, R0, 1), - BPF_EXIT_INSN(), + "SUB default X", + .u.insns = { + /* + * A = 0x66 + * A = A - X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x66), + BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x66 } }, }, { - "ALU64_RSH_K: 0x80000000 >> 31 = 1", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x80000000), - BPF_ALU64_IMM(BPF_RSH, R0, 31), - BPF_EXIT_INSN(), + "SUB default A", + .u.insns = { + /* + * A = A - -0x66 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 1 } }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x66 } }, }, { - "ALU64_RSH_K: Shift < 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 12), - BPF_EXIT_INSN(), + "MUL default X", + .u.insns = { + /* + * A = 0x42 + * A = A * X + * ret A + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x56789abc } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Shift < 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 12), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "MUL default A", + .u.insns = { + /* + * A = A * 0x66 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0x00081234 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Shift > 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 36), - BPF_EXIT_INSN(), + "DIV default X", + .u.insns = { + /* + * A = 0x42 + * A = A / X ; this halts the filter execution if X is 0 + * ret 0x42 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_K, 0x42), }, - INTERNAL, - { }, - { { 0, 0x08123456 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Shift > 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 36), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), - }, - INTERNAL, - { }, - { { 0, 0 } } + "DIV default A", + .u.insns = { + /* + * A = A / 1 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Shift == 32, low word", - .u.insns_int = { - 
BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "MOD default X", + .u.insns = { + /* + * A = 0x42 + * A = A mod X ; this halts the filter execution if X is 0 + * ret 0x42 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x42), + BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), + BPF_STMT(BPF_RET | BPF_K, 0x42), }, - INTERNAL, - { }, - { { 0, 0x81234567 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "MOD default A", + .u.insns = { + /* + * A = A mod 1 + * ret A + */ + BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), }, - INTERNAL, - { }, - { { 0, 0 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x0 } }, }, { - "ALU64_RSH_K: Zero shift", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 0), - BPF_EXIT_INSN(), + "JMP EQ default A", + .u.insns = { + /* + * cmp A, 0x0, 0, 1 + * ret 0x42 + * ret 0x66 + */ + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_K, 0x66), }, - INTERNAL, - { }, - { { 0, 0x89abcdef } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, }, - /* BPF_ALU | BPF_ARSH | BPF_X */ { - "ALU32_ARSH_X: -1234 >> 7 = -10", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -1234), - BPF_ALU32_IMM(BPF_MOV, R1, 7), - BPF_ALU32_REG(BPF_ARSH, R0, R1), - BPF_EXIT_INSN(), + "JMP EQ default X", + .u.insns = { + /* + * A = 0x0 + * cmp A, X, 0, 1 + * ret 0x42 + * ret 0x66 + */ + BPF_STMT(BPF_LD | BPF_IMM, 0x0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 0x42), + BPF_STMT(BPF_RET | BPF_K, 0x66), }, - INTERNAL, - { }, - { { 0, -10 } } + CLASSIC | FLAG_NO_DATA, + {}, + { {0x1, 0x42 } }, }, + /* Checking interpreter vs JIT wrt signed extended imms. 
*/ { - "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + "JNE signed compare, test 1", .u.insns_int = { - BPF_LD_IMM64(R0, 0xff00ff0000000000LL), - BPF_ALU32_IMM(BPF_MOV, R1, 40), - BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), + BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), + BPF_MOV64_REG(R2, R1), + BPF_ALU64_REG(BPF_AND, R2, R3), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffff00ff } }, + { { 0, 1 } }, }, { - "ALU64_ARSH_X: Shift < 32, low word", + "JNE signed compare, test 2", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), + BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), + BPF_MOV64_REG(R2, R1), + BPF_ALU64_REG(BPF_AND, R2, R3), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x56789abc } } + { { 0, 1 } }, }, { - "ALU64_ARSH_X: Shift < 32, high word", + "JNE signed compare, test 3", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 12), - BPF_ALU64_REG(BPF_ARSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), + BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), + BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000), + BPF_MOV64_REG(R2, R1), + BPF_ALU64_REG(BPF_AND, R2, R3), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JNE, R2, R4, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xfff81234 } } + { { 0, 2 } }, }, { - "ALU64_ARSH_X: Shift > 32, low word", + "JNE signed compare, test 4", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_LD_IMM64(R1, -17104896), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xf8123456 } } + { { 0, 2 } }, }, { - "ALU64_ARSH_X: Shift > 32, high word", + "JNE signed compare, test 5", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 36), - BPF_ALU64_REG(BPF_ARSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_LD_IMM64(R1, 0xfefb0000), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1 } } + { { 0, 1 } }, }, { - "ALU64_ARSH_X: Shift == 32, low word", + "JNE signed compare, test 6", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_LD_IMM64(R1, 0x7efb0000), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1), + BPF_ALU32_IMM(BPF_MOV, R0, 2), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x81234567 } } + { { 0, 2 } }, }, { - "ALU64_ARSH_X: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 32), - BPF_ALU64_REG(BPF_ARSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), + "JNE signed compare, test 7", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000), + BPF_STMT(BPF_MISC | BPF_TAX, 0), + BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12), + BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0), + BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, 2), }, - INTERNAL, - 
{ }, - { { 0, -1 } } + CLASSIC | FLAG_NO_DATA, + {}, + { { 0, 2 } }, }, + /* BPF_LDX_MEM with operand aliasing */ { - "ALU64_ARSH_X: Zero shift, low word", + "LDX_MEM_B: operand register aliasing", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_ST_MEM(BPF_B, R10, -8, 123), + BPF_MOV64_REG(R0, R10), + BPF_LDX_MEM(BPF_B, R0, R0, -8), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x89abcdef } } + { { 0, 123 } }, + .stack_depth = 8, }, { - "ALU64_ARSH_X: Zero shift, high word", + "LDX_MEM_H: operand register aliasing", .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU32_IMM(BPF_MOV, R1, 0), - BPF_ALU64_REG(BPF_ARSH, R0, R1), - BPF_ALU64_IMM(BPF_RSH, R0, 32), + BPF_ST_MEM(BPF_H, R10, -8, 12345), + BPF_MOV64_REG(R0, R10), + BPF_LDX_MEM(BPF_H, R0, R0, -8), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0x81234567 } } + { { 0, 12345 } }, + .stack_depth = 8, }, - /* BPF_ALU | BPF_ARSH | BPF_K */ { - "ALU32_ARSH_K: -1234 >> 7 = -10", + "LDX_MEM_W: operand register aliasing", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -1234), - BPF_ALU32_IMM(BPF_ARSH, R0, 7), + BPF_ST_MEM(BPF_W, R10, -8, 123456789), + BPF_MOV64_REG(R0, R10), + BPF_LDX_MEM(BPF_W, R0, R0, -8), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -10 } } + { { 0, 123456789 } }, + .stack_depth = 8, }, { - "ALU32_ARSH_K: -1234 >> 0 = -1234", + "LDX_MEM_DW: operand register aliasing", .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -1234), - BPF_ALU32_IMM(BPF_ARSH, R0, 0), + BPF_LD_IMM64(R1, 0x123456789abcdefULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_MOV64_REG(R0, R10), + BPF_LDX_MEM(BPF_DW, R0, R0, -8), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_MOV64_REG(R1, R0), + BPF_ALU64_IMM(BPF_RSH, R1, 32), + BPF_ALU64_REG(BPF_OR, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, -1234 } } + { { 0, 0 } }, + .stack_depth = 8, }, + /* + * Register (non-)clobbering tests for the case where a JIT implements + * complex ALU or ATOMIC operations via function calls. If so, the + * function call must be transparent to the eBPF registers. The JIT + * must therefore save and restore relevant registers across the call. + * The following tests check that the eBPF registers retain their + * values after such an operation. Mainly intended for complex ALU + * and atomic operations, but we run it for all. You never know... + * + * Note that each operation should be tested twice with different + * destinations, to check preservation for all registers. 
+ */ +#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src) \ + { \ + #alu "_" #op " to " #dst ": no clobbering", \ + .u.insns_int = { \ + BPF_ALU64_IMM(BPF_MOV, R0, R0), \ + BPF_ALU64_IMM(BPF_MOV, R1, R1), \ + BPF_ALU64_IMM(BPF_MOV, R2, R2), \ + BPF_ALU64_IMM(BPF_MOV, R3, R3), \ + BPF_ALU64_IMM(BPF_MOV, R4, R4), \ + BPF_ALU64_IMM(BPF_MOV, R5, R5), \ + BPF_ALU64_IMM(BPF_MOV, R6, R6), \ + BPF_ALU64_IMM(BPF_MOV, R7, R7), \ + BPF_ALU64_IMM(BPF_MOV, R8, R8), \ + BPF_ALU64_IMM(BPF_MOV, R9, R9), \ + BPF_##alu(BPF_ ##op, dst, src), \ + BPF_ALU32_IMM(BPF_MOV, dst, dst), \ + BPF_JMP_IMM(BPF_JNE, R0, R0, 10), \ + BPF_JMP_IMM(BPF_JNE, R1, R1, 9), \ + BPF_JMP_IMM(BPF_JNE, R2, R2, 8), \ + BPF_JMP_IMM(BPF_JNE, R3, R3, 7), \ + BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \ + BPF_JMP_IMM(BPF_JNE, R5, R5, 5), \ + BPF_JMP_IMM(BPF_JNE, R6, R6, 4), \ + BPF_JMP_IMM(BPF_JNE, R7, R7, 3), \ + BPF_JMP_IMM(BPF_JNE, R8, R8, 2), \ + BPF_JMP_IMM(BPF_JNE, R9, R9, 1), \ + BPF_ALU64_IMM(BPF_MOV, R0, 1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 1 } } \ + } + /* ALU64 operations, register clobbering */ + BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789), + /* ALU32 immediate operations, register clobbering */ + BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789), + BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789), + /* ALU64 register operations, register clobbering */ + 
BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1), + /* ALU32 register operations, register clobbering */ + BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1), + BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1), +#undef BPF_TEST_CLOBBER_ALU +#define BPF_TEST_CLOBBER_ATOMIC(width, op) \ + { \ + "Atomic_" #width " " #op ": no clobbering", \ + .u.insns_int = { \ + BPF_ALU64_IMM(BPF_MOV, R0, 0), \ + BPF_ALU64_IMM(BPF_MOV, R1, 1), \ + BPF_ALU64_IMM(BPF_MOV, R2, 2), \ + BPF_ALU64_IMM(BPF_MOV, R3, 3), \ + BPF_ALU64_IMM(BPF_MOV, R4, 4), \ + BPF_ALU64_IMM(BPF_MOV, R5, 5), \ + BPF_ALU64_IMM(BPF_MOV, R6, 6), \ + BPF_ALU64_IMM(BPF_MOV, R7, 7), \ + BPF_ALU64_IMM(BPF_MOV, R8, 8), \ + BPF_ALU64_IMM(BPF_MOV, R9, 9), \ + BPF_ST_MEM(width, R10, -8, \ + (op) == BPF_CMPXCHG ? 0 : \ + (op) & BPF_FETCH ? 
1 : 0), \ + BPF_ATOMIC_OP(width, op, R10, R1, -8), \ + BPF_JMP_IMM(BPF_JNE, R0, 0, 10), \ + BPF_JMP_IMM(BPF_JNE, R1, 1, 9), \ + BPF_JMP_IMM(BPF_JNE, R2, 2, 8), \ + BPF_JMP_IMM(BPF_JNE, R3, 3, 7), \ + BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \ + BPF_JMP_IMM(BPF_JNE, R5, 5, 5), \ + BPF_JMP_IMM(BPF_JNE, R6, 6, 4), \ + BPF_JMP_IMM(BPF_JNE, R7, 7, 3), \ + BPF_JMP_IMM(BPF_JNE, R8, 8, 2), \ + BPF_JMP_IMM(BPF_JNE, R9, 9, 1), \ + BPF_ALU64_IMM(BPF_MOV, R0, 1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 1 } }, \ + .stack_depth = 8, \ + } + /* 64-bit atomic operations, register clobbering */ + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG), + BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG), + /* 32-bit atomic operations, register clobbering */ + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG), + BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG), +#undef BPF_TEST_CLOBBER_ATOMIC + /* Checking that ALU32 src is not zero extended in place */ +#define BPF_ALU32_SRC_ZEXT(op) \ + { \ + "ALU32_" #op "_X: src preserved in zext", \ + .u.insns_int = { \ + BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\ + BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\ + BPF_ALU64_REG(BPF_MOV, R0, R1), \ + BPF_ALU32_REG(BPF_##op, R2, R1), \ + BPF_ALU64_REG(BPF_SUB, R0, R1), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + } + BPF_ALU32_SRC_ZEXT(MOV), + BPF_ALU32_SRC_ZEXT(AND), + BPF_ALU32_SRC_ZEXT(OR), + BPF_ALU32_SRC_ZEXT(XOR), + BPF_ALU32_SRC_ZEXT(ADD), + BPF_ALU32_SRC_ZEXT(SUB), + BPF_ALU32_SRC_ZEXT(MUL), + BPF_ALU32_SRC_ZEXT(DIV), + BPF_ALU32_SRC_ZEXT(MOD), +#undef BPF_ALU32_SRC_ZEXT + /* Checking that ATOMIC32 src is not zero extended in place */ +#define BPF_ATOMIC32_SRC_ZEXT(op) \ + { \ + "ATOMIC_W_" #op ": src preserved in zext", \ + .u.insns_int = { \ + BPF_LD_IMM64(R0, 0x0123456789acbdefULL), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ST_MEM(BPF_W, R10, -4, 0), \ + BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4), \ + BPF_ALU64_REG(BPF_SUB, R0, R1), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + .stack_depth = 8, \ + } + BPF_ATOMIC32_SRC_ZEXT(ADD), + BPF_ATOMIC32_SRC_ZEXT(AND), + BPF_ATOMIC32_SRC_ZEXT(OR), + BPF_ATOMIC32_SRC_ZEXT(XOR), +#undef BPF_ATOMIC32_SRC_ZEXT + /* Checking that CMPXCHG32 src is not zero extended in place */ { - "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + "ATOMIC_W_CMPXCHG: src preserved in zext", .u.insns_int = { - BPF_LD_IMM64(R0, 0xff00ff0000000000LL), - BPF_ALU64_IMM(BPF_ARSH, R0, 40), + BPF_LD_IMM64(R1, 0x0123456789acbdefULL), + BPF_ALU64_REG(BPF_MOV, R2, R1), + 
BPF_ALU64_REG(BPF_MOV, R0, 0), + BPF_ST_MEM(BPF_W, R10, -4, 0), + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4), + BPF_ALU64_REG(BPF_SUB, R1, R2), + BPF_ALU64_REG(BPF_MOV, R2, R1), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_ALU64_REG(BPF_OR, R1, R2), + BPF_ALU64_REG(BPF_MOV, R0, R1), BPF_EXIT_INSN(), }, INTERNAL, { }, - { { 0, 0xffff00ff } }, - }, + { { 0, 0 } }, + .stack_depth = 8, + }, + /* Checking that JMP32 immediate src is not zero extended in place */ +#define BPF_JMP32_IMM_ZEXT(op) \ + { \ + "JMP32_" #op "_K: operand preserved in zext", \ + .u.insns_int = { \ + BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_JMP32_IMM(BPF_##op, R0, 1234, 1), \ + BPF_JMP_A(0), /* Nop */ \ + BPF_ALU64_REG(BPF_SUB, R0, R1), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + } + BPF_JMP32_IMM_ZEXT(JEQ), + BPF_JMP32_IMM_ZEXT(JNE), + BPF_JMP32_IMM_ZEXT(JSET), + BPF_JMP32_IMM_ZEXT(JGT), + BPF_JMP32_IMM_ZEXT(JGE), + BPF_JMP32_IMM_ZEXT(JLT), + BPF_JMP32_IMM_ZEXT(JLE), + BPF_JMP32_IMM_ZEXT(JSGT), + BPF_JMP32_IMM_ZEXT(JSGE), + BPF_JMP32_IMM_ZEXT(JSGT), + BPF_JMP32_IMM_ZEXT(JSLT), + BPF_JMP32_IMM_ZEXT(JSLE), +#undef BPF_JMP2_IMM_ZEXT + /* Checking that JMP32 dst & src are not zero extended in place */ +#define BPF_JMP32_REG_ZEXT(op) \ + { \ + "JMP32_" #op "_X: operands preserved in zext", \ + .u.insns_int = { \ + BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\ + BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\ + BPF_ALU64_REG(BPF_MOV, R2, R0), \ + BPF_ALU64_REG(BPF_MOV, R3, R1), \ + BPF_JMP32_IMM(BPF_##op, R0, R1, 1), \ + BPF_JMP_A(0), /* Nop */ \ + BPF_ALU64_REG(BPF_SUB, R0, R2), \ + BPF_ALU64_REG(BPF_SUB, R1, R3), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_ALU64_REG(BPF_MOV, R1, R0), \ + BPF_ALU64_IMM(BPF_RSH, R1, 32), \ + BPF_ALU64_REG(BPF_OR, R0, R1), \ + BPF_EXIT_INSN(), \ + }, \ + INTERNAL, \ + { }, \ + { { 0, 0 } }, \ + } + BPF_JMP32_REG_ZEXT(JEQ), + BPF_JMP32_REG_ZEXT(JNE), + BPF_JMP32_REG_ZEXT(JSET), + BPF_JMP32_REG_ZEXT(JGT), + BPF_JMP32_REG_ZEXT(JGE), + BPF_JMP32_REG_ZEXT(JLT), + BPF_JMP32_REG_ZEXT(JLE), + BPF_JMP32_REG_ZEXT(JSGT), + BPF_JMP32_REG_ZEXT(JSGE), + BPF_JMP32_REG_ZEXT(JSGT), + BPF_JMP32_REG_ZEXT(JSLT), + BPF_JMP32_REG_ZEXT(JSLE), +#undef BPF_JMP2_REG_ZEXT + /* ALU64 K register combinations */ { - "ALU64_ARSH_K: Shift < 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_RSH, R0, 12), - BPF_EXIT_INSN(), - }, + "ALU64_MOV_K: registers", + { }, INTERNAL, { }, - { { 0, 0x56789abc } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mov_imm_regs, }, { - "ALU64_ARSH_K: Shift < 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_ARSH, R0, 12), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), - }, + "ALU64_AND_K: registers", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_and_imm_regs, + }, + { + "ALU64_OR_K: registers", + { }, INTERNAL, { }, - { { 0, 0xfff81234 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_or_imm_regs, }, { - "ALU64_ARSH_K: Shift > 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_ARSH, R0, 36), - BPF_EXIT_INSN(), - }, + "ALU64_XOR_K: registers", + { }, INTERNAL, { }, - { { 0, 0xf8123456 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_xor_imm_regs, }, { - "ALU64_ARSH_K: Shift > 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0xf123456789abcdefLL), - 
BPF_ALU64_IMM(BPF_ARSH, R0, 36), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), - }, + "ALU64_LSH_K: registers", + { }, INTERNAL, { }, - { { 0, -1 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_lsh_imm_regs, }, { - "ALU64_ARSH_K: Shift == 32, low word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_ARSH, R0, 32), - BPF_EXIT_INSN(), - }, + "ALU64_RSH_K: registers", + { }, INTERNAL, { }, - { { 0, 0x81234567 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_rsh_imm_regs, }, { - "ALU64_ARSH_K: Shift == 32, high word", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_ARSH, R0, 32), - BPF_ALU64_IMM(BPF_RSH, R0, 32), - BPF_EXIT_INSN(), - }, + "ALU64_ARSH_K: registers", + { }, INTERNAL, { }, - { { 0, -1 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_arsh_imm_regs, }, { - "ALU64_ARSH_K: Zero shift", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x8123456789abcdefLL), - BPF_ALU64_IMM(BPF_ARSH, R0, 0), - BPF_EXIT_INSN(), - }, + "ALU64_ADD_K: registers", + { }, INTERNAL, { }, - { { 0, 0x89abcdef } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_add_imm_regs, }, - /* BPF_ALU | BPF_NEG */ { - "ALU_NEG: -(3) = -3", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 3), - BPF_ALU32_IMM(BPF_NEG, R0, 0), - BPF_EXIT_INSN(), - }, + "ALU64_SUB_K: registers", + { }, INTERNAL, { }, - { { 0, -3 } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_sub_imm_regs, }, { - "ALU_NEG: -(-3) = 3", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -3), - BPF_ALU32_IMM(BPF_NEG, R0, 0), - BPF_EXIT_INSN(), - }, + "ALU64_MUL_K: registers", + { }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mul_imm_regs, }, { - "ALU64_NEG: -(3) = -3", - .u.insns_int = { - BPF_LD_IMM64(R0, 3), - BPF_ALU64_IMM(BPF_NEG, R0, 0), - BPF_EXIT_INSN(), - }, + "ALU64_DIV_K: registers", + { }, INTERNAL, { }, - { { 0, -3 } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_div_imm_regs, }, { - "ALU64_NEG: -(-3) = 3", - .u.insns_int = { - BPF_LD_IMM64(R0, -3), - BPF_ALU64_IMM(BPF_NEG, R0, 0), - BPF_EXIT_INSN(), - }, + "ALU64_MOD_K: registers", + { }, INTERNAL, { }, - { { 0, 3 } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mod_imm_regs, }, - /* BPF_ALU | BPF_END | BPF_FROM_BE */ + /* ALU32 K registers */ { - "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_BE, R0, 16), - BPF_EXIT_INSN(), - }, + "ALU32_MOV_K: registers", + { }, INTERNAL, { }, - { { 0, cpu_to_be16(0xcdef) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mov_imm_regs, }, { - "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_BE, R0, 32), - BPF_ALU64_REG(BPF_MOV, R1, R0), - BPF_ALU64_IMM(BPF_RSH, R1, 32), - BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ - BPF_EXIT_INSN(), - }, + "ALU32_AND_K: registers", + { }, INTERNAL, { }, - { { 0, cpu_to_be32(0x89abcdef) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_and_imm_regs, }, { - "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_BE, R0, 64), - BPF_EXIT_INSN(), - }, + "ALU32_OR_K: registers", + { }, INTERNAL, { }, - { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_or_imm_regs, }, - /* BPF_ALU | BPF_END | BPF_FROM_LE */ { - "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd", - .u.insns_int = { - BPF_LD_IMM64(R0, 
0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_LE, R0, 16), - BPF_EXIT_INSN(), - }, + "ALU32_XOR_K: registers", + { }, INTERNAL, { }, - { { 0, cpu_to_le16(0xcdef) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_xor_imm_regs, }, { - "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_LE, R0, 32), - BPF_ALU64_REG(BPF_MOV, R1, R0), - BPF_ALU64_IMM(BPF_RSH, R1, 32), - BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */ - BPF_EXIT_INSN(), - }, + "ALU32_LSH_K: registers", + { }, INTERNAL, { }, - { { 0, cpu_to_le32(0x89abcdef) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_lsh_imm_regs, }, { - "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301", - .u.insns_int = { - BPF_LD_IMM64(R0, 0x0123456789abcdefLL), - BPF_ENDIAN(BPF_FROM_LE, R0, 64), - BPF_EXIT_INSN(), - }, + "ALU32_RSH_K: registers", + { }, INTERNAL, { }, - { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_rsh_imm_regs, }, - /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */ { - "ST_MEM_B: Store/Load byte: max negative", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_B, R10, -40, 0xff), - BPF_LDX_MEM(BPF_B, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_ARSH_K: registers", + { }, INTERNAL, { }, - { { 0, 0xff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_arsh_imm_regs, }, { - "ST_MEM_B: Store/Load byte: max positive", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_H, R10, -40, 0x7f), - BPF_LDX_MEM(BPF_H, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_ADD_K: registers", + { }, INTERNAL, { }, - { { 0, 0x7f } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_add_imm_regs, }, { - "STX_MEM_B: Store/Load byte: max negative", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0xffLL), - BPF_STX_MEM(BPF_B, R10, R1, -40), - BPF_LDX_MEM(BPF_B, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_SUB_K: registers", + { }, INTERNAL, { }, - { { 0, 0xff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_sub_imm_regs, }, { - "ST_MEM_H: Store/Load half word: max negative", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_H, R10, -40, 0xffff), - BPF_LDX_MEM(BPF_H, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_MUL_K: registers", + { }, INTERNAL, { }, - { { 0, 0xffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mul_imm_regs, }, { - "ST_MEM_H: Store/Load half word: max positive", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_H, R10, -40, 0x7fff), - BPF_LDX_MEM(BPF_H, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_DIV_K: registers", + { }, INTERNAL, { }, - { { 0, 0x7fff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_div_imm_regs, }, { - "STX_MEM_H: Store/Load half word: max negative", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0xffffLL), - BPF_STX_MEM(BPF_H, R10, R1, -40), - BPF_LDX_MEM(BPF_H, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_MOD_K: registers", + { }, INTERNAL, { }, - { { 0, 0xffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mod_imm_regs, }, + /* ALU64 X register combinations */ { - "ST_MEM_W: Store/Load word: max negative", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_MOV_X: register combinations", + { }, INTERNAL, { }, - { { 0, 
0xffffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mov_reg_pairs, }, { - "ST_MEM_W: Store/Load word: max positive", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_AND_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x7fffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_and_reg_pairs, }, { - "STX_MEM_W: Store/Load word: max negative", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0xffffffffLL), - BPF_STX_MEM(BPF_W, R10, R1, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_OR_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0xffffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_or_reg_pairs, }, { - "ST_MEM_DW: Store/Load double word: max negative", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_XOR_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0xffffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_xor_reg_pairs, }, { - "ST_MEM_DW: Store/Load double word: max negative 2", - .u.insns_int = { - BPF_LD_IMM64(R2, 0xffff00000000ffffLL), - BPF_LD_IMM64(R3, 0xffffffffffffffffLL), - BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), - BPF_LDX_MEM(BPF_DW, R2, R10, -40), - BPF_JMP_REG(BPF_JEQ, R2, R3, 2), - BPF_MOV32_IMM(R0, 2), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 1), - BPF_EXIT_INSN(), - }, + "ALU64_LSH_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x1 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_lsh_reg_pairs, }, { - "ST_MEM_DW: Store/Load double word: max positive", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_RSH_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x7fffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_rsh_reg_pairs, }, { - "STX_MEM_DW: Store/Load double word: max negative", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_ARSH_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0xffffffff } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_arsh_reg_pairs, }, { - "STX_MEM_DW: Store double word: first word in memory", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0x0123456789abcdefLL), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU64_ADD_X: register combinations", + { }, INTERNAL, { }, -#ifdef __BIG_ENDIAN - { { 0, 0x01234567 } }, -#else - { { 0, 0x89abcdef } }, -#endif - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_add_reg_pairs, }, { - "STX_MEM_DW: Store double word: second word in memory", - .u.insns_int = { - BPF_LD_IMM64(R0, 0), - BPF_LD_IMM64(R1, 0x0123456789abcdefLL), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -36), - BPF_EXIT_INSN(), - }, + "ALU64_SUB_X: register combinations", + { }, INTERNAL, { }, -#ifdef __BIG_ENDIAN - { { 0, 0x89abcdef } }, -#else - { { 0, 0x01234567 } }, -#endif - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_sub_reg_pairs, }, - /* BPF_STX 
| BPF_ATOMIC | BPF_W/DW */ { - "STX_XADD_W: X + 1 + 1 + 1 + ...", + "ALU64_MUL_X: register combinations", { }, INTERNAL, { }, - { { 0, 4134 } }, - .fill_helper = bpf_fill_stxw, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mul_reg_pairs, }, { - "STX_XADD_DW: X + 1 + 1 + 1 + ...", + "ALU64_DIV_X: register combinations", { }, INTERNAL, { }, - { { 0, 4134 } }, - .fill_helper = bpf_fill_stxdw, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_div_reg_pairs, }, - /* - * Exhaustive tests of atomic operation variants. - * Individual tests are expanded from template macros for all - * combinations of ALU operation, word size and fetching. - */ -#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \ -{ \ - "BPF_ATOMIC | " #width ", " #op ": Test: " \ - #old " " #logic " " #update " = " #result, \ - .u.insns_int = { \ - BPF_ALU32_IMM(BPF_MOV, R5, update), \ - BPF_ST_MEM(width, R10, -40, old), \ - BPF_ATOMIC_OP(width, op, R10, R5, -40), \ - BPF_LDX_MEM(width, R0, R10, -40), \ - BPF_EXIT_INSN(), \ - }, \ - INTERNAL, \ - { }, \ - { { 0, result } }, \ - .stack_depth = 40, \ -} -#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \ -{ \ - "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \ - #old " " #logic " " #update " = " #result, \ - .u.insns_int = { \ - BPF_ALU64_REG(BPF_MOV, R1, R10), \ - BPF_ALU32_IMM(BPF_MOV, R0, update), \ - BPF_ST_MEM(BPF_W, R10, -40, old), \ - BPF_ATOMIC_OP(width, op, R10, R0, -40), \ - BPF_ALU64_REG(BPF_MOV, R0, R10), \ - BPF_ALU64_REG(BPF_SUB, R0, R1), \ - BPF_EXIT_INSN(), \ - }, \ - INTERNAL, \ - { }, \ - { { 0, 0 } }, \ - .stack_depth = 40, \ -} -#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \ -{ \ - "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \ - #old " " #logic " " #update " = " #result, \ - .u.insns_int = { \ - BPF_ALU64_REG(BPF_MOV, R0, R10), \ - BPF_ALU32_IMM(BPF_MOV, R1, update), \ - BPF_ST_MEM(width, R10, -40, old), \ - BPF_ATOMIC_OP(width, op, R10, R1, -40), \ - BPF_ALU64_REG(BPF_SUB, R0, R10), \ - BPF_EXIT_INSN(), \ - }, \ - INTERNAL, \ - { }, \ - { { 0, 0 } }, \ - .stack_depth = 40, \ -} -#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \ -{ \ - "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \ - #old " " #logic " " #update " = " #result, \ - .u.insns_int = { \ - BPF_ALU32_IMM(BPF_MOV, R3, update), \ - BPF_ST_MEM(width, R10, -40, old), \ - BPF_ATOMIC_OP(width, op, R10, R3, -40), \ - BPF_ALU64_REG(BPF_MOV, R0, R3), \ - BPF_EXIT_INSN(), \ - }, \ - INTERNAL, \ - { }, \ - { { 0, (op) & BPF_FETCH ? 
old : update } }, \ - .stack_depth = 40, \ -} - /* BPF_ATOMIC | BPF_W: BPF_ADD */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), - /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - /* BPF_ATOMIC | BPF_DW: BPF_ADD */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd), - /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd), - /* BPF_ATOMIC | BPF_W: BPF_AND */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02), - /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - /* BPF_ATOMIC | BPF_DW: BPF_AND */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02), - /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02), - /* BPF_ATOMIC | BPF_W: BPF_OR */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb), - /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - /* BPF_ATOMIC | BPF_DW: BPF_OR */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb), - /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | 
BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb), - /* BPF_ATOMIC | BPF_W: BPF_XOR */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9), - /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - /* BPF_ATOMIC | BPF_DW: BPF_XOR */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9), - /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9), - /* BPF_ATOMIC | BPF_W: BPF_XCHG */ - BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - /* BPF_ATOMIC | BPF_DW: BPF_XCHG */ - BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), - BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab), -#undef BPF_ATOMIC_OP_TEST1 -#undef BPF_ATOMIC_OP_TEST2 -#undef BPF_ATOMIC_OP_TEST3 -#undef BPF_ATOMIC_OP_TEST4 - /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */ { - "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return", - .u.insns_int = { - BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - BPF_EXIT_INSN(), - }, + "ALU64_MOD_X: register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mod_reg_pairs, + }, + /* ALU32 X register combinations */ + { + "ALU32_MOV_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x01234567 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mov_reg_pairs, }, { - "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store", - .u.insns_int = { - BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_AND_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x89abcdef } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_and_reg_pairs, }, { - "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return", - .u.insns_int = { - BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), - BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - 
BPF_EXIT_INSN(), - }, + "ALU32_OR_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x01234567 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_or_reg_pairs, }, { - "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store", - .u.insns_int = { - BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210), - BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -40), - BPF_EXIT_INSN(), - }, + "ALU32_XOR_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x01234567 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_xor_reg_pairs, }, { - "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects", - .u.insns_int = { - BPF_ST_MEM(BPF_W, R10, -40, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567), - BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40), - BPF_ALU32_REG(BPF_MOV, R0, R3), - BPF_EXIT_INSN(), - }, + "ALU32_LSH_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0x89abcdef } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_lsh_reg_pairs, }, - /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */ { - "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x0123456789abcdefULL), - BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), - BPF_ALU64_REG(BPF_MOV, R0, R1), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), - BPF_JMP_REG(BPF_JNE, R0, R1, 1), - BPF_ALU64_REG(BPF_SUB, R0, R1), - BPF_EXIT_INSN(), - }, + "ALU32_RSH_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_rsh_reg_pairs, }, { - "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x0123456789abcdefULL), - BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), - BPF_ALU64_REG(BPF_MOV, R0, R1), - BPF_STX_MEM(BPF_DW, R10, R0, -40), - BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), - BPF_JMP_REG(BPF_JNE, R0, R2, 1), - BPF_ALU64_REG(BPF_SUB, R0, R2), - BPF_EXIT_INSN(), - }, + "ALU32_ARSH_X: register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_arsh_reg_pairs, + }, + { + "ALU32_ADD_X: register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_add_reg_pairs, + }, + { + "ALU32_SUB_X: register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_sub_reg_pairs, + }, + { + "ALU32_MUL_X: register combinations", + { }, INTERNAL, { }, - { { 0, 0 } }, - .stack_depth = 40, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mul_reg_pairs, + }, + { + "ALU32_DIV_X: register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_div_reg_pairs, + }, + { + "ALU32_MOD_X register combinations", + { }, + INTERNAL, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mod_reg_pairs, + }, + /* Exhaustive test of ALU64 shift operations */ + { + "ALU64_LSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_lsh_imm, + }, + { + "ALU64_RSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_rsh_imm, + }, + { + "ALU64_ARSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = 
bpf_fill_alu64_arsh_imm, + }, + { + "ALU64_LSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_lsh_reg, + }, + { + "ALU64_RSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_rsh_reg, + }, + { + "ALU64_ARSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_arsh_reg, + }, + /* Exhaustive test of ALU32 shift operations */ + { + "ALU32_LSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_lsh_imm, + }, + { + "ALU32_RSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_rsh_imm, + }, + { + "ALU32_ARSH_K: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_arsh_imm, + }, + { + "ALU32_LSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_lsh_reg, + }, + { + "ALU32_RSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_rsh_reg, + }, + { + "ALU32_ARSH_X: all shift values", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_arsh_reg, + }, + /* + * Exhaustive test of ALU64 shift operations when + * source and destination register are the same. + */ + { + "ALU64_LSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_lsh_same_reg, + }, + { + "ALU64_RSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_rsh_same_reg, + }, + { + "ALU64_ARSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_arsh_same_reg, + }, + /* + * Exhaustive test of ALU32 shift operations when + * source and destination register are the same. 
+ */ + { + "ALU32_LSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_lsh_same_reg, + }, + { + "ALU32_RSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_rsh_same_reg, + }, + { + "ALU32_ARSH_X: all shift values with the same register", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_arsh_same_reg, + }, + /* ALU64 immediate magnitudes */ + { + "ALU64_MOV_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mov_imm, + .nr_testruns = NR_PATTERN_RUNS, + }, + { + "ALU64_AND_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_and_imm, + .nr_testruns = NR_PATTERN_RUNS, + }, + { + "ALU64_OR_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_or_imm, + .nr_testruns = NR_PATTERN_RUNS, + }, + { + "ALU64_XOR_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_xor_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x0123456789abcdefULL), - BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), - BPF_ALU64_REG(BPF_MOV, R0, R1), - BPF_ALU64_IMM(BPF_ADD, R0, 1), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), - BPF_JMP_REG(BPF_JNE, R0, R1, 1), - BPF_ALU64_REG(BPF_SUB, R0, R1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_ADD_K: all immediate value magnitudes", { }, - { { 0, 0 } }, - .stack_depth = 40, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_add_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x0123456789abcdefULL), - BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), - BPF_ALU64_REG(BPF_MOV, R0, R1), - BPF_ALU64_IMM(BPF_ADD, R0, 1), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), - BPF_JMP_REG(BPF_JNE, R0, R1, 1), - BPF_ALU64_REG(BPF_SUB, R0, R1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_SUB_K: all immediate value magnitudes", { }, - { { 0, 0 } }, - .stack_depth = 40, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_sub_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x0123456789abcdefULL), - BPF_LD_IMM64(R2, 0xfecdba9876543210ULL), - BPF_ALU64_REG(BPF_MOV, R0, R1), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40), - BPF_LD_IMM64(R0, 0xfecdba9876543210ULL), - BPF_JMP_REG(BPF_JNE, R0, R2, 1), - BPF_ALU64_REG(BPF_SUB, R0, R2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_MUL_K: all immediate value magnitudes", { }, - { { 0, 0 } }, - .stack_depth = 40, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mul_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JEQ | BPF_K */ { - "JMP32_JEQ_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1), - BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - 
INTERNAL, + "ALU64_DIV_K: all immediate value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_div_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JEQ_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 12345678), - BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1), - BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_MOD_K: all immediate value magnitudes", { }, - { { 0, 12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mod_imm, + .nr_testruns = NR_PATTERN_RUNS, }, + /* ALU32 immediate magnitudes */ { - "JMP32_JEQ_K: negative immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1), - BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MOV_K: all immediate value magnitudes", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mov_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JEQ | BPF_X */ { - "JMP32_JEQ_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1234), - BPF_ALU32_IMM(BPF_MOV, R1, 4321), - BPF_JMP32_REG(BPF_JEQ, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 1234), - BPF_JMP32_REG(BPF_JEQ, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_AND_K: all immediate value magnitudes", { }, - { { 0, 1234 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_and_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JNE | BPF_K */ { - "JMP32_JNE_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), - BPF_JMP32_IMM(BPF_JNE, R0, 321, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_OR_K: all immediate value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_or_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JNE_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 12345678), - BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1), - BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_XOR_K: all immediate value magnitudes", { }, - { { 0, 12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_xor_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JNE_K: negative immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JNE, R0, -123, 1), - BPF_JMP32_IMM(BPF_JNE, R0, 123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_ADD_K: all immediate value magnitudes", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_add_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JNE | BPF_X */ { - "JMP32_JNE_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1234), - BPF_ALU32_IMM(BPF_MOV, R1, 1234), - BPF_JMP32_REG(BPF_JNE, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 4321), - BPF_JMP32_REG(BPF_JNE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_SUB_K: all immediate value magnitudes", { }, - { { 0, 1234 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_sub_imm, + .nr_testruns = 
NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JSET | BPF_K */ { - "JMP32_JSET_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP32_IMM(BPF_JSET, R0, 2, 1), - BPF_JMP32_IMM(BPF_JSET, R0, 3, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MUL_K: all immediate value magnitudes", { }, - { { 0, 1 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mul_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JSET_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000), - BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1), - BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_DIV_K: all immediate value magnitudes", { }, - { { 0, 0x40000000 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_div_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JSET_K: negative immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JSET, R0, -1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MOD_K: all immediate value magnitudes", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mod_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JSET | BPF_X */ + /* ALU64 register magnitudes */ { - "JMP32_JSET_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 8), - BPF_ALU32_IMM(BPF_MOV, R1, 7), - BPF_JMP32_REG(BPF_JSET, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2), - BPF_JMP32_REG(BPF_JNE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_MOV_X: all register value magnitudes", { }, - { { 0, 8 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mov_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JGT | BPF_K */ { - "JMP32_JGT_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JGT, R0, 123, 1), - BPF_JMP32_IMM(BPF_JGT, R0, 122, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_AND_X: all register value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_and_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JGT_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1), - BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_OR_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_or_reg, + .nr_testruns = NR_PATTERN_RUNS, + }, + { + "ALU64_XOR_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xfffffffe } } + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_xor_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JGT | BPF_X */ { - "JMP32_JGT_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_JMP32_REG(BPF_JGT, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), - BPF_JMP32_REG(BPF_JGT, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_ADD_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_add_reg, + .nr_testruns = 
NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JGE | BPF_K */ { - "JMP32_JGE_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JGE, R0, 124, 1), - BPF_JMP32_IMM(BPF_JGE, R0, 123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_SUB_X: all register value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_sub_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JGE_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1), - BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_MUL_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mul_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JGE | BPF_X */ { - "JMP32_JGE_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_JMP32_REG(BPF_JGE, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), - BPF_JMP32_REG(BPF_JGE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_DIV_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_div_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JLT | BPF_K */ { - "JMP32_JLT_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JLT, R0, 123, 1), - BPF_JMP32_IMM(BPF_JLT, R0, 124, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU64_MOD_X: all register value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu64_mod_reg, + .nr_testruns = NR_PATTERN_RUNS, }, + /* ALU32 register magnitudes */ { - "JMP32_JLT_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1), - BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MOV_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mov_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JLT | BPF_X */ { - "JMP32_JLT_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), - BPF_JMP32_REG(BPF_JLT, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), - BPF_JMP32_REG(BPF_JLT, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_AND_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_and_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JLE | BPF_K */ { - "JMP32_JLE_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 123), - BPF_JMP32_IMM(BPF_JLE, R0, 122, 1), - BPF_JMP32_IMM(BPF_JLE, R0, 123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_OR_X: all register value magnitudes", { }, - { { 0, 123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_or_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JLE_K: Large immediate", - .u.insns_int = { - 
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1), - BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_XOR_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_xor_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JLE | BPF_X */ { - "JMP32_JLE_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe), - BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd), - BPF_JMP32_REG(BPF_JLE, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe), - BPF_JMP32_REG(BPF_JLE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_ADD_X: all register value magnitudes", { }, - { { 0, 0xfffffffe } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_add_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JSGT | BPF_K */ { - "JMP32_JSGT_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1), - BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_SUB_X: all register value magnitudes", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_sub_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP32_JSGT_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1), - BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MUL_X: all register value magnitudes", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mul_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JSGT | BPF_X */ { - "JMP32_JSGT_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_ALU32_IMM(BPF_MOV, R1, -12345678), - BPF_JMP32_REG(BPF_JSGT, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, -12345679), - BPF_JMP32_REG(BPF_JSGT, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_DIV_X: all register value magnitudes", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_div_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP32 | BPF_JSGE | BPF_K */ { - "JMP32_JSGE_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1), - BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ALU32_MOD_X: all register value magnitudes", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_alu32_mod_reg, + .nr_testruns = NR_PATTERN_RUNS, }, + /* LD_IMM64 immediate magnitudes and byte patterns */ { - "JMP32_JSGE_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1), - BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "LD_IMM64: all immediate value magnitudes", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_ld_imm64_magn, }, - /* BPF_JMP32 | BPF_JSGE | BPF_X */ { - "JMP32_JSGE_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_ALU32_IMM(BPF_MOV, R1, -12345677), - 
BPF_JMP32_REG(BPF_JSGE, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, -12345678), - BPF_JMP32_REG(BPF_JSGE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "LD_IMM64: checker byte patterns", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_ld_imm64_checker, }, - /* BPF_JMP32 | BPF_JSLT | BPF_K */ { - "JMP32_JSLT_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1), - BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "LD_IMM64: random positive and zero byte patterns", { }, - { { 0, -123 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_ld_imm64_pos_zero, }, { - "JMP32_JSLT_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1), - BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "LD_IMM64: random negative and zero byte patterns", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_ld_imm64_neg_zero, }, - /* BPF_JMP32 | BPF_JSLT | BPF_X */ { - "JMP32_JSLT_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_ALU32_IMM(BPF_MOV, R1, -12345678), - BPF_JMP32_REG(BPF_JSLT, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, -12345677), - BPF_JMP32_REG(BPF_JSLT, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "LD_IMM64: random positive and negative byte patterns", { }, - { { 0, -12345678 } } + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_ld_imm64_pos_neg, }, - /* BPF_JMP32 | BPF_JSLE | BPF_K */ + /* 64-bit ATOMIC register combinations */ { - "JMP32_JSLE_K: Small immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -123), - BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1), - BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_ADD: register combinations", + { }, INTERNAL, { }, - { { 0, -123 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_add_reg_pairs, + .stack_depth = 8, }, { - "JMP32_JSLE_K: Large immediate", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1), - BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_AND: register combinations", + { }, INTERNAL, { }, - { { 0, -12345678 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_and_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP32 | BPF_JSLE | BPF_K */ { - "JMP32_JSLE_X", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, -12345678), - BPF_ALU32_IMM(BPF_MOV, R1, -12345679), - BPF_JMP32_REG(BPF_JSLE, R0, R1, 2), - BPF_ALU32_IMM(BPF_MOV, R1, -12345678), - BPF_JMP32_REG(BPF_JSLE, R0, R1, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_OR: register combinations", + { }, INTERNAL, { }, - { { 0, -12345678 } } + { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_or_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_EXIT */ { - "JMP_EXIT", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0x4711), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0x4712), - }, + "ATOMIC_DW_XOR: register combinations", + { }, INTERNAL, { }, - { { 0, 0x4711 } }, + { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_xor_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JA */ { - "JMP_JA: 
Unconditional jump: if (true) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_ADD_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JSLT | BPF_K */ { - "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), - BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_AND_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_OR_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JSGT | BPF_K */ { - "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSGT, R1, -2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_XOR_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSGT, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_XCHG: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_xchg_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JSLE | BPF_K */ { - "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), - BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_DW_CMPXCHG: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs, + .stack_depth = 8, }, + /* 32-bit ATOMIC register combinations */ { - "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_ADD: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_add_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSLE_K: Signed jump: value walk 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 6), - BPF_ALU64_IMM(BPF_SUB, R1, 1), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), - BPF_ALU64_IMM(BPF_SUB, R1, 1), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), - BPF_ALU64_IMM(BPF_SUB, R1, 1), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), - BPF_EXIT_INSN(), /* bad exit */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ - BPF_EXIT_INSN(), - }, + "ATOMIC_W_AND: register combinations", + { }, INTERNAL, { }, { 
{ 0, 1 } }, + .fill_helper = bpf_fill_atomic32_and_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSLE_K: Signed jump: value walk 2", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), - BPF_ALU64_IMM(BPF_SUB, R1, 2), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), - BPF_ALU64_IMM(BPF_SUB, R1, 2), - BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), - BPF_EXIT_INSN(), /* bad exit */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ - BPF_EXIT_INSN(), - }, + "ATOMIC_W_OR: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_or_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JSGE | BPF_K */ { - "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSGE, R1, -2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_XOR: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_xor_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_JMP_IMM(BPF_JSGE, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_ADD_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSGE_K: Signed jump: value walk 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -3), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 6), - BPF_ALU64_IMM(BPF_ADD, R1, 1), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), - BPF_ALU64_IMM(BPF_ADD, R1, 1), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), - BPF_ALU64_IMM(BPF_ADD, R1, 1), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), - BPF_EXIT_INSN(), /* bad exit */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ - BPF_EXIT_INSN(), - }, + "ATOMIC_W_AND_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs, + .stack_depth = 8, }, { - "JMP_JSGE_K: Signed jump: value walk 2", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -3), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 4), - BPF_ALU64_IMM(BPF_ADD, R1, 2), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 2), - BPF_ALU64_IMM(BPF_ADD, R1, 2), - BPF_JMP_IMM(BPF_JSGE, R1, 0, 1), - BPF_EXIT_INSN(), /* bad exit */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ - BPF_EXIT_INSN(), - }, + "ATOMIC_W_OR_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JGT | BPF_K */ { - "JMP_JGT_K: if (3 > 2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JGT, R1, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_XOR_FETCH: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs, + .stack_depth = 8, }, { - "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_JMP_IMM(BPF_JGT, R1, 1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_XCHG: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_xchg_reg_pairs, + .stack_depth = 8, }, - /* BPF_JMP | BPF_JLT | BPF_K */ { - "JMP_JLT_K: if (2 
< 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 2), - BPF_JMP_IMM(BPF_JLT, R1, 3, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, + "ATOMIC_W_CMPXCHG: register combinations", + { }, INTERNAL, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs, + .stack_depth = 8, + }, + /* 64-bit ATOMIC magnitudes */ + { + "ATOMIC_DW_ADD: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_add, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 1), - BPF_JMP_IMM(BPF_JLT, R1, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_AND: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_and, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JGE | BPF_K */ { - "JMP_JGE_K: if (3 >= 2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JGE, R1, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_OR: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_or, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JLE | BPF_K */ { - "JMP_JLE_K: if (2 <= 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 2), - BPF_JMP_IMM(BPF_JLE, R1, 3, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_XOR: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_xor, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ { - "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", - .u.insns_int = { - BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ - BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */ - BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */ - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_ADD_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_add_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JGE_K: if (3 >= 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JGE, R1, 3, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_AND_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic64_and_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JLT | BPF_K jump backwards */ { - "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)", - .u.insns_int = { - BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ - BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ - BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */ - BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */ - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_DW_OR_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + 
.fill_helper = bpf_fill_atomic64_or_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLE_K: if (3 <= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JNE | BPF_K */
 	{
-		"JMP_JNE_K: if (3 != 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_XCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xchg,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JEQ | BPF_K */
 	{
-		"JMP_JEQ_K: if (3 == 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_CMPXCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_cmpxchg64,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSET | BPF_K */
+	/* 32-bit ATOMIC magnitudes */
 	{
-		"JMP_JSET_K: if (0x3 & 0x2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_ADD: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_AND: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSGT | BPF_X */
 	{
-		"JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -2),
-			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_OR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -1),
-			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_XOR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSLT | BPF_X */
 	{
-		"JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1,
-1), - BPF_LD_IMM64(R2, -2), - BPF_JMP_REG(BPF_JSLT, R2, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_ADD_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_add_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, -1), - BPF_JMP_REG(BPF_JSLT, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_AND_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_and_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JSGE | BPF_X */ { - "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, -2), - BPF_JMP_REG(BPF_JSGE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_OR_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_or_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, -1), - BPF_JMP_REG(BPF_JSGE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_XOR_FETCH: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_xor_fetch, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JSLE | BPF_X */ { - "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, -2), - BPF_JMP_REG(BPF_JSLE, R2, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_XCHG: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_atomic32_xchg, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, -1), - BPF_JMP_REG(BPF_JSLE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "ATOMIC_W_CMPXCHG: all operand magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_cmpxchg32, + .stack_depth = 8, + .nr_testruns = NR_PATTERN_RUNS, + }, + /* JMP immediate magnitudes */ + { + "JMP_JSET_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jset_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JGT | BPF_X */ { - "JMP_JGT_X: if (3 > 2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JGT, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JEQ_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jeq_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JGT_X: Unsigned jump: if (-1 > 1) 
return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, 1), - BPF_JMP_REG(BPF_JGT, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JNE_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jne_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JLT | BPF_X */ { - "JMP_JLT_X: if (2 < 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JLT, R2, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JGT_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jgt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, -1), - BPF_LD_IMM64(R2, 1), - BPF_JMP_REG(BPF_JLT, R2, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JGE_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jge_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JGE | BPF_X */ { - "JMP_JGE_X: if (3 >= 2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JGE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JLT_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jlt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JGE_X: if (3 >= 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 3), - BPF_JMP_REG(BPF_JGE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JLE_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jle_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JLE | BPF_X */ { - "JMP_JLE_X: if (2 <= 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JLE, R2, R1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSGT_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsgt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JLE_X: if (3 <= 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 3), - BPF_JMP_REG(BPF_JLE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSGE_K: all immediate value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsge_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - /* Mainly testing JIT + imm64 here. 
*/ - "JMP_JGE_X: ldimm64 test 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JGE, R1, R2, 2), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSLT_K: all immediate value magnitudes", { }, - { { 0, 0xeeeeeeeeU } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jslt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JGE_X: ldimm64 test 2", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JGE, R1, R2, 0), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSLE_K: all immediate value magnitudes", { }, - { { 0, 0xffffffffU } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsle_imm, + .nr_testruns = NR_PATTERN_RUNS, }, + /* JMP register magnitudes */ { - "JMP_JGE_X: ldimm64 test 3", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JGE, R1, R2, 4), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSET_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jset_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JLE_X: ldimm64 test 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JLE, R2, R1, 2), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JEQ_X: all register value magnitudes", { }, - { { 0, 0xeeeeeeeeU } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jeq_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JLE_X: ldimm64 test 2", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JLE, R2, R1, 0), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JNE_X: all register value magnitudes", { }, - { { 0, 0xffffffffU } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jne_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JLE_X: ldimm64 test 3", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JLE, R2, R1, 4), - BPF_LD_IMM64(R0, 0xffffffffffffffffULL), - BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JGT_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jgt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JNE | BPF_X */ { - "JMP_JNE_X: if (3 != 2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JNE, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JGE_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jge_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JEQ | BPF_X */ { - "JMP_JEQ_X: if (3 == 3) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 3), - BPF_JMP_REG(BPF_JEQ, R1, R2, 1), - BPF_EXIT_INSN(), - 
BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JLT_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jlt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* BPF_JMP | BPF_JSET | BPF_X */ { - "JMP_JSET_X: if (0x3 & 0x2) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JSET, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JLE_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jle_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JSET_X: if (0x3 & 0xffffffff) return 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R0, 0), - BPF_LD_IMM64(R1, 3), - BPF_LD_IMM64(R2, 0xffffffff), - BPF_JMP_REG(BPF_JSET, R1, R2, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_EXIT_INSN(), - }, - INTERNAL, + "JMP_JSGT_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsgt_reg, + .nr_testruns = NR_PATTERN_RUNS, + }, + { + "JMP_JSGE_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsge_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Very long conditional jump", + { + "JMP_JSLT_X: all register value magnitudes", { }, INTERNAL | FLAG_NO_DATA, { }, { { 0, 1 } }, - .fill_helper = bpf_fill_long_jmp, + .fill_helper = bpf_fill_jmp_jslt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "JMP_JA: Jump, gap, jump, ...", + "JMP_JSLE_X: all register value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xababcbac } }, - .fill_helper = bpf_fill_ja, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp_jsle_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Maximum possible literals", + /* JMP32 immediate magnitudes */ + { + "JMP32_JSET_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xffffffff } }, - .fill_helper = bpf_fill_maxinsns1, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jset_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Single literal", + { + "JMP32_JEQ_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xfefefefe } }, - .fill_helper = bpf_fill_maxinsns2, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jeq_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Run/add until end", + { + "JMP32_JNE_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0x947bf368 } }, - .fill_helper = bpf_fill_maxinsns3, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jne_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_MAXINSNS: Too many instructions", - { }, - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + "JMP32_JGT_K: all immediate value magnitudes", { }, + INTERNAL | FLAG_NO_DATA, { }, - .fill_helper = bpf_fill_maxinsns4, - .expected_errcode = -EINVAL, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jgt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. 
*/ - "BPF_MAXINSNS: Very long jump", + { + "JMP32_JGE_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xabababab } }, - .fill_helper = bpf_fill_maxinsns5, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jge_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Ctx heavy transformations", + { + "JMP32_JLT_K: all immediate value magnitudes", { }, - CLASSIC, + INTERNAL | FLAG_NO_DATA, { }, - { - { 1, SKB_VLAN_PRESENT }, - { 10, SKB_VLAN_PRESENT } - }, - .fill_helper = bpf_fill_maxinsns6, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jlt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Call heavy transformations", + { + "JMP32_JLE_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 1, 0 }, { 10, 0 } }, - .fill_helper = bpf_fill_maxinsns7, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jle_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Jump heavy test", + { + "JMP32_JSGT_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xffffffff } }, - .fill_helper = bpf_fill_maxinsns8, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsgt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Very long jump backwards", + { + "JMP32_JSGE_K: all immediate value magnitudes", { }, INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xcbababab } }, - .fill_helper = bpf_fill_maxinsns9, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsge_imm, + .nr_testruns = NR_PATTERN_RUNS, }, - { /* Mainly checking JIT here. */ - "BPF_MAXINSNS: Edge hopping nuthouse", + { + "JMP32_JSLT_K: all immediate value magnitudes", { }, INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xabababac } }, - .fill_helper = bpf_fill_maxinsns10, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jslt_imm, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_MAXINSNS: Jump, gap, jump, ...", + "JMP32_JSLE_K: all immediate value magnitudes", { }, - CLASSIC | FLAG_NO_DATA, + INTERNAL | FLAG_NO_DATA, { }, - { { 0, 0xababcbac } }, - .fill_helper = bpf_fill_maxinsns11, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsle_imm, + .nr_testruns = NR_PATTERN_RUNS, }, + /* JMP32 register magnitudes */ { - "BPF_MAXINSNS: jump over MSH", + "JMP32_JSET_X: all register value magnitudes", { }, - CLASSIC | FLAG_EXPECTED_FAIL, - { 0xfa, 0xfb, 0xfc, 0xfd, }, - { { 4, 0xabababab } }, - .fill_helper = bpf_fill_maxinsns12, - .expected_errcode = -EINVAL, - }, - { - "BPF_MAXINSNS: exec all MSH", + INTERNAL | FLAG_NO_DATA, { }, - CLASSIC, - { 0xfa, 0xfb, 0xfc, 0xfd, }, - { { 4, 0xababab83 } }, - .fill_helper = bpf_fill_maxinsns13, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jset_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "BPF_MAXINSNS: ld_abs+get_processor_id", + "JMP32_JEQ_X: all register value magnitudes", { }, - CLASSIC, + INTERNAL | FLAG_NO_DATA, { }, - { { 1, 0xbee } }, - .fill_helper = bpf_fill_ld_abs_get_processor_id, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jeq_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* - * LD_IND / LD_ABS on fragmented SKBs - */ { - "LD_IND byte frag", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x40), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + "JMP32_JNE_X: all register value magnitudes", { }, - { {0x40, 0x42} }, - .frag_data = { - 0x42, 0x00, 0x00, 
0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, - }, - { - "LD_IND halfword frag", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x40), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + INTERNAL | FLAG_NO_DATA, { }, - { {0x40, 0x4344} }, - .frag_data = { - 0x42, 0x00, 0x00, 0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jne_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_IND word frag", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x40), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + "JMP32_JGT_X: all register value magnitudes", { }, - { {0x40, 0x21071983} }, - .frag_data = { - 0x42, 0x00, 0x00, 0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, - }, - { - "LD_IND halfword mixed head/frag", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x40), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, - { [0x3e] = 0x25, [0x3f] = 0x05, }, - { {0x40, 0x0519} }, - .frag_data = { 0x19, 0x82 }, - }, - { - "LD_IND word mixed head/frag", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x40), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, - { [0x3e] = 0x25, [0x3f] = 0x05, }, - { {0x40, 0x25051982} }, - .frag_data = { 0x19, 0x82 }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jgt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_ABS byte frag", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + "JMP32_JGE_X: all register value magnitudes", { }, - { {0x40, 0x42} }, - .frag_data = { - 0x42, 0x00, 0x00, 0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jge_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_ABS halfword frag", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + "JMP32_JLT_X: all register value magnitudes", { }, - { {0x40, 0x4344} }, - .frag_data = { - 0x42, 0x00, 0x00, 0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jlt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_ABS word frag", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, + "JMP32_JLE_X: all register value magnitudes", { }, - { {0x40, 0x21071983} }, - .frag_data = { - 0x42, 0x00, 0x00, 0x00, - 0x43, 0x44, 0x00, 0x00, - 0x21, 0x07, 0x19, 0x83, - }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jle_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_ABS halfword mixed head/frag", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_SKB_FRAG, - { [0x3e] = 0x25, [0x3f] = 0x05, }, - { {0x40, 0x0519} }, - .frag_data = { 0x19, 0x82 }, + "JMP32_JSGT_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsgt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_ABS word mixed head/frag", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | 
FLAG_SKB_FRAG, - { [0x3e] = 0x25, [0x3f] = 0x05, }, - { {0x40, 0x25051982} }, - .frag_data = { 0x19, 0x82 }, + "JMP32_JSGE_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsge_reg, + .nr_testruns = NR_PATTERN_RUNS, }, - /* - * LD_IND / LD_ABS on non fragmented SKBs - */ { - /* - * this tests that the JIT/interpreter correctly resets X - * before using it in an LD_IND instruction. - */ - "LD_IND byte default X", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x1] = 0x42 }, - { {0x40, 0x42 } }, + "JMP32_JSLT_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jslt_reg, + .nr_testruns = NR_PATTERN_RUNS, }, { - "LD_IND byte positive offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x82 } }, + "JMP32_JSLE_X: all register value magnitudes", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_jmp32_jsle_reg, + .nr_testruns = NR_PATTERN_RUNS, }, + /* Conditional jumps with constant decision */ { - "LD_IND byte negative offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JSET_K: imm = 0 -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JSET, R1, 0, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x05 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND byte positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JLT_K: imm = 0 -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JLT, R1, 0, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xff } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND byte positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JGE_K: imm = 0 -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JGE, R1, 0, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND byte negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JGT_K: imm = 0xffffffff -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND byte negative offset, multiple calls", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, 
SKF_LL_OFF + 2), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3), - BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JLE_K: imm = 0xffffffff -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x82 }, }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND halfword positive offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, + "JMP32_JSGT_K: imm = 0x7fffffff -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0xdd88 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND halfword negative offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, + "JMP32_JSGE_K: imm = -0x80000000 -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0xbb66 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND halfword unaligned", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, + "JMP32_JSLT_K: imm = -0x80000000 -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0x66cc } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND halfword positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3d), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP32_JSLE_K: imm = 0x7fffffff -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xffff } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND halfword positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JEQ_X: dst = src -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JEQ, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND halfword negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JGE_X: dst = src -> always 
taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JGE, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND word positive offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "JMP_JLE_X: dst = src -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JLE, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0xee99ffaa } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND word negative offset", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "JMP_JSGE_X: dst = src -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JSGE, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0xaa55bb66 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND word unaligned (addr & 3 == 2)", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "JMP_JSLE_X: dst = src -> always taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JSLE, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0xbb66cc77 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, }, { - "LD_IND word unaligned (addr & 3 == 1)", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "JMP_JNE_X: dst = src -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JNE, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0x55bb66cc } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND word unaligned (addr & 3 == 3)", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x20), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "JMP_JGT_X: dst = src -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JGT, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - { {0x40, 0x66cc77dd } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND word 
positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JLT_X: dst = src -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JLT, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xffffffff } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND word positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JSGT_X: dst = src -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JSGT, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, { - "LD_IND word negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), - BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "JMP_JSLT_X: dst = src -> never taken", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_JMP_REG(BPF_JSLT, R1, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0 } }, }, + /* Short relative jumps */ { - "LD_ABS byte", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, + "Short relative jump: offset=0", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, -1), }, - { {0x40, 0xcc } }, + INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT, + { }, + { { 0, 0 } }, }, { - "LD_ABS byte positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "Short relative jump: offset=1", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, -1), }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xff } }, + INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT, + { }, + { { 0, 0 } }, }, { - "LD_ABS byte positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "Short relative jump: offset=2", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, -1), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT, + { }, + { { 0, 0 } }, }, { - "LD_ABS byte negative offset, out of bounds load", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "Short relative jump: offset=3", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 3), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), 
+ BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, -1), }, - CLASSIC | FLAG_EXPECTED_FAIL, - .expected_errcode = -EINVAL, + INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT, + { }, + { { 0, 0 } }, }, { - "LD_ABS byte negative offset, in bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), + "Short relative jump: offset=4", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 4), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, -1), }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x82 }, }, + INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT, + { }, + { { 0, 0 } }, }, + /* Conditional branch conversions */ { - "LD_ABS byte negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + "Long conditional jump: taken at runtime", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_max_jmp_taken, }, { - "LD_ABS byte negative offset, multiple calls", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c), - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d), - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e), - BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x82 }, }, + "Long conditional jump: not taken at runtime", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 2 } }, + .fill_helper = bpf_fill_max_jmp_not_taken, }, { - "LD_ABS halfword", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0xdd88 } }, + "Long conditional jump: always taken, known at JIT time", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 1 } }, + .fill_helper = bpf_fill_max_jmp_always_taken, }, { - "LD_ABS halfword unaligned", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0x99ff } }, + "Long conditional jump: never taken, known at JIT time", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 2 } }, + .fill_helper = bpf_fill_max_jmp_never_taken, + }, + /* Staggered jump sequences, immediate */ + { + "Staggered jumps: JMP_JA", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_ja, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS halfword positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xffff } }, + "Staggered jumps: JMP_JEQ_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + 
.fill_helper = bpf_fill_staggered_jeq_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS halfword positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + "Staggered jumps: JMP_JNE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jne_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS halfword negative offset, out of bounds load", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_EXPECTED_FAIL, - .expected_errcode = -EINVAL, + "Staggered jumps: JMP_JSET_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jset_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS halfword negative offset, in bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x1982 }, }, + "Staggered jumps: JMP_JGT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jgt_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS halfword negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + "Staggered jumps: JMP_JGE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jge_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0xaa55bb66 } }, + "Staggered jumps: JMP_JLT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jlt_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word unaligned (addr & 3 == 2)", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0xdd88ee99 } }, + "Staggered jumps: JMP_JLE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jle_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word unaligned (addr & 3 == 1)", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0x77dd88ee } }, + "Staggered jumps: JMP_JSGT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = 
bpf_fill_staggered_jsgt_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word unaligned (addr & 3 == 3)", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { - [0x1c] = 0xaa, [0x1d] = 0x55, - [0x1e] = 0xbb, [0x1f] = 0x66, - [0x20] = 0xcc, [0x21] = 0x77, - [0x22] = 0xdd, [0x23] = 0x88, - [0x24] = 0xee, [0x25] = 0x99, - [0x26] = 0xff, [0x27] = 0xaa, - }, - { {0x40, 0x88ee99ff } }, + "Staggered jumps: JMP_JSGE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsge_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word positive offset, all ff", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, - { {0x40, 0xffffffff } }, + "Staggered jumps: JMP_JSLT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jslt_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word positive offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + "Staggered jumps: JMP_JSLE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsle_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, + /* Staggered jump sequences, register */ { - "LD_ABS word negative offset, out of bounds load", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_EXPECTED_FAIL, - .expected_errcode = -EINVAL, + "Staggered jumps: JMP_JEQ_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jeq_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word negative offset, in bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x25051982 }, }, + "Staggered jumps: JMP_JNE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jne_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LD_ABS word negative offset, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x3f, 0 }, }, + "Staggered jumps: JMP_JSET_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jset_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, preserved A", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0xffeebbaa }, }, + "Staggered jumps: JMP_JGT_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jgt_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, preserved A 2", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63), - BPF_STMT(BPF_LDX | BPF_B | 
BPF_MSH, 0x3c), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x175e9d63 }, }, + "Staggered jumps: JMP_JGE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jge_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, test result 1", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x14 }, }, + "Staggered jumps: JMP_JLT_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jlt_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, test result 2", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x24 }, }, + "Staggered jumps: JMP_JLE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jle_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, negative offset", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0 }, }, + "Staggered jumps: JMP_JSGT_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsgt_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, + }, + { + "Staggered jumps: JMP_JSGE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsge_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, + }, + { + "Staggered jumps: JMP_JSLT_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jslt_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "LDX_MSH standalone, negative offset 2", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0x24 }, }, + "Staggered jumps: JMP_JSLE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsle_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, + /* Staggered jump sequences, JMP32 immediate */ { - "LDX_MSH standalone, out of bounds", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), - BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40), - BPF_STMT(BPF_MISC | BPF_TXA, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC, - { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, - { {0x40, 0 }, }, + "Staggered jumps: JMP32_JEQ_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jeq32_imm, + .nr_testruns = 
NR_STAGGERED_JMP_RUNS, }, - /* - * verify that the interpreter or JIT correctly sets A and X - * to 0. - */ { - "ADD default X", - .u.insns = { - /* - * A = 0x42 - * A = A + X - * ret A - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x42), - BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x42 } }, + "Staggered jumps: JMP32_JNE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jne32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "ADD default A", - .u.insns = { - /* - * A = A + 0x42 - * ret A - */ - BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x42 } }, + "Staggered jumps: JMP32_JSET_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jset32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "SUB default X", - .u.insns = { - /* - * A = 0x66 - * A = A - X - * ret A - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x66), - BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x66 } }, + "Staggered jumps: JMP32_JGT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jgt32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "SUB default A", - .u.insns = { - /* - * A = A - -0x66 - * ret A - */ - BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x66 } }, + "Staggered jumps: JMP32_JGE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jge32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "MUL default X", - .u.insns = { - /* - * A = 0x42 - * A = A * X - * ret A - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x42), - BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JLT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jlt32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "MUL default A", - .u.insns = { - /* - * A = A * 0x66 - * ret A - */ - BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JLE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jle32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "DIV default X", - .u.insns = { - /* - * A = 0x42 - * A = A / X ; this halt the filter execution if X is 0 - * ret 0x42 - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x42), - BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_K, 0x42), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JSGT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsgt32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "DIV default A", - .u.insns = { - /* - * A = A / 1 - * ret A - */ - BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JSGE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } 
}, + .fill_helper = bpf_fill_staggered_jsge32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "MOD default X", - .u.insns = { - /* - * A = 0x42 - * A = A mod X ; this halt the filter execution if X is 0 - * ret 0x42 - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x42), - BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0), - BPF_STMT(BPF_RET | BPF_K, 0x42), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JSLT_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jslt32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "MOD default A", - .u.insns = { - /* - * A = A mod 1 - * ret A - */ - BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1), - BPF_STMT(BPF_RET | BPF_A, 0x0), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x0 } }, + "Staggered jumps: JMP32_JSLE_K", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsle32_imm, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, + /* Staggered jump sequences, JMP32 register */ { - "JMP EQ default A", - .u.insns = { - /* - * cmp A, 0x0, 0, 1 - * ret 0x42 - * ret 0x66 - */ - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 0x42), - BPF_STMT(BPF_RET | BPF_K, 0x66), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x42 } }, + "Staggered jumps: JMP32_JEQ_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jeq32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JMP EQ default X", - .u.insns = { - /* - * A = 0x0 - * cmp A, X, 0, 1 - * ret 0x42 - * ret 0x66 - */ - BPF_STMT(BPF_LD | BPF_IMM, 0x0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1), - BPF_STMT(BPF_RET | BPF_K, 0x42), - BPF_STMT(BPF_RET | BPF_K, 0x66), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { {0x1, 0x42 } }, + "Staggered jumps: JMP32_JNE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jne32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, - /* Checking interpreter vs JIT wrt signed extended imms. 
*/ { - "JNE signed compare, test 1", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), - BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), - BPF_MOV64_REG(R2, R1), - BPF_ALU64_REG(BPF_AND, R2, R3), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "Staggered jumps: JMP32_JSET_X", { }, - { { 0, 1 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jset32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, - { - "JNE signed compare, test 2", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), - BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), - BPF_MOV64_REG(R2, R1), - BPF_ALU64_REG(BPF_AND, R2, R3), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + { + "Staggered jumps: JMP32_JGT_X", { }, - { { 0, 1 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jgt32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JNE signed compare, test 3", - .u.insns_int = { - BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12), - BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000), - BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000), - BPF_MOV64_REG(R2, R1), - BPF_ALU64_REG(BPF_AND, R2, R3), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_REG(BPF_JNE, R2, R4, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "Staggered jumps: JMP32_JGE_X", { }, - { { 0, 2 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jge32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JNE signed compare, test 4", - .u.insns_int = { - BPF_LD_IMM64(R1, -17104896), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "Staggered jumps: JMP32_JLT_X", { }, - { { 0, 2 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jlt32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JNE signed compare, test 5", - .u.insns_int = { - BPF_LD_IMM64(R1, 0xfefb0000), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "Staggered jumps: JMP32_JLE_X", { }, - { { 0, 1 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jle32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JNE signed compare, test 6", - .u.insns_int = { - BPF_LD_IMM64(R1, 0x7efb0000), - BPF_ALU32_IMM(BPF_MOV, R0, 1), - BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1), - BPF_ALU32_IMM(BPF_MOV, R0, 2), - BPF_EXIT_INSN(), - }, - INTERNAL, + "Staggered jumps: JMP32_JSGT_X", { }, - { { 0, 2 } }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsgt32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, { - "JNE signed compare, test 7", - .u.insns = { - BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000), - BPF_STMT(BPF_MISC | BPF_TAX, 0), - BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12), - BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0), - BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0), - BPF_STMT(BPF_RET | BPF_K, 1), - BPF_STMT(BPF_RET | BPF_K, 2), - }, - CLASSIC | FLAG_NO_DATA, - {}, - { { 0, 2 } }, + "Staggered jumps: JMP32_JSGE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 
MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsge32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, + }, + { + "Staggered jumps: JMP32_JSLT_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jslt32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, + }, + { + "Staggered jumps: JMP32_JSLE_X", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, MAX_STAGGERED_JMP_SIZE + 1 } }, + .fill_helper = bpf_fill_staggered_jsle32_reg, + .nr_testruns = NR_STAGGERED_JMP_RUNS, }, }; @@ -8576,6 +14213,8 @@ static struct bpf_prog *generate_filter(int which, int *err) fp->type = BPF_PROG_TYPE_SOCKET_FILTER; memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); fp->aux->stack_depth = tests[which].stack_depth; + fp->aux->verifier_zext = !!(tests[which].aux & + FLAG_VERIFIER_ZEXT); /* We cannot error here as we don't need type compatibility * checks. @@ -8631,6 +14270,9 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) { int err_cnt = 0, i, runs = MAX_TESTRUNS; + if (test->nr_testruns) + runs = min(test->nr_testruns, MAX_TESTRUNS); + for (i = 0; i < MAX_SUBTESTS; i++) { void *data; u64 duration; @@ -8674,86 +14316,9 @@ module_param_string(test_name, test_name, sizeof(test_name), 0); static int test_id = -1; module_param(test_id, int, 0); -static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 }; +static int test_range[2] = { 0, INT_MAX }; module_param_array(test_range, int, NULL, 0); -static __init int find_test_index(const char *test_name) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(tests); i++) { - if (!strcmp(tests[i].descr, test_name)) - return i; - } - return -1; -} - -static __init int prepare_bpf_tests(void) -{ - int i; - - if (test_id >= 0) { - /* - * if a test_id was specified, use test_range to - * cover only that test. - */ - if (test_id >= ARRAY_SIZE(tests)) { - pr_err("test_bpf: invalid test_id specified.\n"); - return -EINVAL; - } - - test_range[0] = test_id; - test_range[1] = test_id; - } else if (*test_name) { - /* - * if a test_name was specified, find it and setup - * test_range to cover only that test. - */ - int idx = find_test_index(test_name); - - if (idx < 0) { - pr_err("test_bpf: no test named '%s' found.\n", - test_name); - return -EINVAL; - } - test_range[0] = idx; - test_range[1] = idx; - } else { - /* - * check that the supplied test_range is valid. 
- */ - if (test_range[0] >= ARRAY_SIZE(tests) || - test_range[1] >= ARRAY_SIZE(tests) || - test_range[0] < 0 || test_range[1] < 0) { - pr_err("test_bpf: test_range is out of bound.\n"); - return -EINVAL; - } - - if (test_range[1] < test_range[0]) { - pr_err("test_bpf: test_range is ending before it starts.\n"); - return -EINVAL; - } - } - - for (i = 0; i < ARRAY_SIZE(tests); i++) { - if (tests[i].fill_helper && - tests[i].fill_helper(&tests[i]) < 0) - return -ENOMEM; - } - - return 0; -} - -static __init void destroy_bpf_tests(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(tests); i++) { - if (tests[i].fill_helper) - kfree(tests[i].u.ptr.insns); - } -} - static bool exclude_test(int test_id) { return test_id < test_range[0] || test_id > test_range[1]; @@ -8800,6 +14365,7 @@ static __init struct sk_buff *build_test_skb(void) skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb[0])->gso_segs = 0; skb_shinfo(skb[0])->frag_list = skb[1]; + skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000; /* adjust skb[0]'s len */ skb[0]->len += skb[1]->len; @@ -8924,6 +14490,10 @@ static __init int test_skb_segment(void) for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) { const struct skb_segment_test *test = &skb_segment_tests[i]; + cond_resched(); + if (exclude_test(i)) + continue; + pr_info("#%d %s ", i, test->descr); if (test_skb_segment_single(test)) { @@ -8955,7 +14525,19 @@ static __init int test_bpf(void) pr_info("#%d %s ", i, tests[i].descr); + if (tests[i].fill_helper && + tests[i].fill_helper(&tests[i]) < 0) { + pr_cont("FAIL to prog_fill\n"); + continue; + } + fp = generate_filter(i, &err); + + if (tests[i].fill_helper) { + kfree(tests[i].u.ptr.insns); + tests[i].u.ptr.insns = NULL; + } + if (fp == NULL) { if (err == 0) { pass_cnt++; @@ -8992,10 +14574,15 @@ static __init int test_bpf(void) struct tail_call_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; + int flags; int result; int stack_depth; }; +/* Flags that can be passed to tail call test cases */ +#define FLAG_NEED_STATE BIT(0) +#define FLAG_RESULT_IN_STATE BIT(1) + /* * Magic marker used in test snippets for tail calls below. * BPF_LD/MOV to R2 and R2 with this immediate value is replaced @@ -9015,6 +14602,30 @@ struct tail_call_test { offset, TAIL_CALL_MARKER), \ BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0) +/* + * A test function to be called from a BPF program, clobbering a lot of + * CPU registers in the process. A JITed BPF program calling this function + * must save and restore any caller-saved registers it uses for internal + * state, for example the current tail call count. + */ +BPF_CALL_1(bpf_test_func, u64, arg) +{ + char buf[64]; + long a = 0; + long b = 1; + long c = 2; + long d = 3; + long e = 4; + long f = 5; + long g = 6; + long h = 7; + + return snprintf(buf, sizeof(buf), + "%ld %lu %lx %ld %lu %lx %ld %lu %x", + a, b, c, d, e, f, g, h, (int)arg); +} +#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID + /* * Tail call tests. 
Each test case may call any other test in the table, * including itself, specified as a relative index offset from the calling @@ -9065,32 +14676,60 @@ static struct tail_call_test tail_call_tests[] = { { "Tail call error path, max count reached", .insns = { - BPF_ALU64_IMM(BPF_ADD, R1, 1), - BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_LDX_MEM(BPF_W, R2, R1, 0), + BPF_ALU64_IMM(BPF_ADD, R2, 1), + BPF_STX_MEM(BPF_W, R1, R2, 0), + TAIL_CALL(0), + BPF_EXIT_INSN(), + }, + .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, + .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + }, + { + "Tail call count preserved across function calls", + .insns = { + BPF_LDX_MEM(BPF_W, R2, R1, 0), + BPF_ALU64_IMM(BPF_ADD, R2, 1), + BPF_STX_MEM(BPF_W, R1, R2, 0), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_CALL_REL(BPF_FUNC_get_numa_node_id), + BPF_CALL_REL(BPF_FUNC_ktime_get_ns), + BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns), + BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns), + BPF_CALL_REL(BPF_FUNC_jiffies64), + BPF_CALL_REL(BPF_FUNC_test_func), + BPF_LDX_MEM(BPF_DW, R1, R10, -8), + BPF_ALU32_REG(BPF_MOV, R0, R1), TAIL_CALL(0), BPF_EXIT_INSN(), }, - .result = MAX_TAIL_CALL_CNT + 1, + .stack_depth = 8, + .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, + .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, }, { "Tail call error path, NULL target", .insns = { - BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_LDX_MEM(BPF_W, R2, R1, 0), + BPF_ALU64_IMM(BPF_ADD, R2, 1), + BPF_STX_MEM(BPF_W, R1, R2, 0), TAIL_CALL(TAIL_CALL_NULL), - BPF_ALU64_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, - .result = 1, + .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, + .result = MAX_TESTRUNS, }, { "Tail call error path, index out of range", .insns = { - BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_LDX_MEM(BPF_W, R2, R1, 0), + BPF_ALU64_IMM(BPF_ADD, R2, 1), + BPF_STX_MEM(BPF_W, R1, R2, 0), TAIL_CALL(TAIL_CALL_INVALID), - BPF_ALU64_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), }, - .result = 1, + .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, + .result = MAX_TESTRUNS, }, }; @@ -9146,17 +14785,19 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs) /* Relocate runtime tail call offsets and addresses */ for (i = 0; i < len; i++) { struct bpf_insn *insn = &fp->insnsi[i]; - - if (insn->imm != TAIL_CALL_MARKER) - continue; + long addr = 0; switch (insn->code) { case BPF_LD | BPF_DW | BPF_IMM: + if (insn->imm != TAIL_CALL_MARKER) + break; insn[0].imm = (u32)(long)progs; insn[1].imm = ((u64)(long)progs) >> 32; break; case BPF_ALU | BPF_MOV | BPF_K: + if (insn->imm != TAIL_CALL_MARKER) + break; if (insn->off == TAIL_CALL_NULL) insn->imm = ntests; else if (insn->off == TAIL_CALL_INVALID) @@ -9164,6 +14805,38 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs) else insn->imm = which + insn->off; insn->off = 0; + break; + + case BPF_JMP | BPF_CALL: + if (insn->src_reg != BPF_PSEUDO_CALL) + break; + switch (insn->imm) { + case BPF_FUNC_get_numa_node_id: + addr = (long)&numa_node_id; + break; + case BPF_FUNC_ktime_get_ns: + addr = (long)&ktime_get_ns; + break; + case BPF_FUNC_ktime_get_boot_ns: + addr = (long)&ktime_get_boot_fast_ns; + break; + case BPF_FUNC_ktime_get_coarse_ns: + addr = (long)&ktime_get_coarse_ns; + break; + case BPF_FUNC_jiffies64: + addr = (long)&get_jiffies_64; + break; + case BPF_FUNC_test_func: + addr = (long)&bpf_test_func; + break; + default: + err = -EFAULT; + goto out_err; + } + *insn = BPF_EMIT_CALL(addr); + if ((long)__bpf_call_base + insn->imm != addr) + *insn = BPF_JMP_A(0); /* Skip: NOP */ + break; } } @@ -9196,10 
+14869,14 @@ static __init int test_tail_calls(struct bpf_array *progs) for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) { struct tail_call_test *test = &tail_call_tests[i]; struct bpf_prog *fp = progs->ptrs[i]; + int *data = NULL; + int state = 0; u64 duration; int ret; cond_resched(); + if (exclude_test(i)) + continue; pr_info("#%d %s ", i, test->descr); if (!fp) { @@ -9212,7 +14889,11 @@ static __init int test_tail_calls(struct bpf_array *progs) if (fp->jited) jit_cnt++; - ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration); + if (test->flags & FLAG_NEED_STATE) + data = &state; + ret = __run_one(fp, data, MAX_TESTRUNS, &duration); + if (test->flags & FLAG_RESULT_IN_STATE) + ret = state; if (ret == test->result) { pr_cont("%lld PASS", duration); pass_cnt++; @@ -9228,29 +14909,144 @@ static __init int test_tail_calls(struct bpf_array *progs) return err_cnt ? -EINVAL : 0; } +static char test_suite[32]; +module_param_string(test_suite, test_suite, sizeof(test_suite), 0); + +static __init int find_test_index(const char *test_name) +{ + int i; + + if (!strcmp(test_suite, "test_bpf")) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!strcmp(tests[i].descr, test_name)) + return i; + } + } + + if (!strcmp(test_suite, "test_tail_calls")) { + for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) { + if (!strcmp(tail_call_tests[i].descr, test_name)) + return i; + } + } + + if (!strcmp(test_suite, "test_skb_segment")) { + for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) { + if (!strcmp(skb_segment_tests[i].descr, test_name)) + return i; + } + } + + return -1; +} + +static __init int prepare_test_range(void) +{ + int valid_range; + + if (!strcmp(test_suite, "test_bpf")) + valid_range = ARRAY_SIZE(tests); + else if (!strcmp(test_suite, "test_tail_calls")) + valid_range = ARRAY_SIZE(tail_call_tests); + else if (!strcmp(test_suite, "test_skb_segment")) + valid_range = ARRAY_SIZE(skb_segment_tests); + else + return 0; + + if (test_id >= 0) { + /* + * if a test_id was specified, use test_range to + * cover only that test. + */ + if (test_id >= valid_range) { + pr_err("test_bpf: invalid test_id specified for '%s' suite.\n", + test_suite); + return -EINVAL; + } + + test_range[0] = test_id; + test_range[1] = test_id; + } else if (*test_name) { + /* + * if a test_name was specified, find it and setup + * test_range to cover only that test. + */ + int idx = find_test_index(test_name); + + if (idx < 0) { + pr_err("test_bpf: no test named '%s' found for '%s' suite.\n", + test_name, test_suite); + return -EINVAL; + } + test_range[0] = idx; + test_range[1] = idx; + } else if (test_range[0] != 0 || test_range[1] != INT_MAX) { + /* + * check that the supplied test_range is valid. + */ + if (test_range[0] < 0 || test_range[1] >= valid_range) { + pr_err("test_bpf: test_range is out of bound for '%s' suite.\n", + test_suite); + return -EINVAL; + } + + if (test_range[1] < test_range[0]) { + pr_err("test_bpf: test_range is ending before it starts.\n"); + return -EINVAL; + } + } + + return 0; +} + static int __init test_bpf_init(void) { struct bpf_array *progs = NULL; int ret; - ret = prepare_bpf_tests(); + if (strlen(test_suite) && + strcmp(test_suite, "test_bpf") && + strcmp(test_suite, "test_tail_calls") && + strcmp(test_suite, "test_skb_segment")) { + pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite); + return -EINVAL; + } + + /* + * if test_suite is not specified, but test_id, test_name or test_range + * is specified, set 'test_bpf' as the default test suite. 
+ */ + if (!strlen(test_suite) && + (test_id != -1 || strlen(test_name) || + (test_range[0] != 0 || test_range[1] != INT_MAX))) { + pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n"); + strscpy(test_suite, "test_bpf", sizeof(test_suite)); + } + + ret = prepare_test_range(); if (ret < 0) return ret; - ret = test_bpf(); - destroy_bpf_tests(); - if (ret) - return ret; + if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) { + ret = test_bpf(); + if (ret) + return ret; + } - ret = prepare_tail_call_tests(&progs); - if (ret) - return ret; - ret = test_tail_calls(progs); - destroy_tail_call_tests(progs); - if (ret) - return ret; + if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) { + ret = prepare_tail_call_tests(&progs); + if (ret) + return ret; + ret = test_tail_calls(progs); + destroy_tail_call_tests(progs); + if (ret) + return ret; + } - return test_skb_segment(); + if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment")) + return test_skb_segment(); + + return 0; } static void __exit test_bpf_exit(void) diff --git a/net/802/hippi.c b/net/802/hippi.c index f80b33a8f7e0dd..887e73d520e497 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -121,7 +121,7 @@ int hippi_mac_addr(struct net_device *dev, void *p) struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev_addr_set(dev, addr->sa_data); return 0; } EXPORT_SYMBOL(hippi_mac_addr); diff --git a/net/802/p8022.c b/net/802/p8022.c index a6585627051d5b..79c23173116c61 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c @@ -23,7 +23,7 @@ #include static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb, - unsigned char *dest) + const unsigned char *dest) { llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap); return 0; diff --git a/net/802/psnap.c b/net/802/psnap.c index 4492e8d7ad2068..1406bfdbda13b7 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -79,7 +79,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, * Put a SNAP header on a frame and pass to 802.2 */ static int snap_request(struct datalink_proto *dl, - struct sk_buff *skb, u8 *dest) + struct sk_buff *skb, const u8 *dest) { memcpy(skb_push(skb, 5), dl->type, 5); llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 0c21d1fec8522b..90330b893134f8 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -250,7 +250,7 @@ bool vlan_dev_inherit_address(struct net_device *dev, if (dev->addr_assign_type != NET_ADDR_STOLEN) return false; - ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + eth_hw_addr_set(dev, real_dev->dev_addr); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return true; } @@ -349,7 +349,7 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) dev_uc_del(real_dev, dev->dev_addr); out: - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -586,7 +586,7 @@ static int vlan_dev_init(struct net_device *dev) dev->dev_id = real_dev->dev_id; if (is_zero_ether_addr(dev->dev_addr)) { - ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + eth_hw_addr_set(dev, real_dev->dev_addr); dev->addr_assign_type = NET_ADDR_STOLEN; } if (is_zero_ether_addr(dev->broadcast)) diff --git a/net/Kconfig b/net/Kconfig index fb13460c6dab37..074472dfa94ae7 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -294,7 +294,7 @@ config CGROUP_NET_CLASSID config NET_RX_BUSY_POLL bool - default 
y + default y if !PREEMPT_RT config BQL bool diff --git a/net/atm/br2684.c b/net/atm/br2684.c index dd2a8dabed849b..f666f2f98ba509 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -577,10 +577,12 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc); if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { unsigned char *esi = atmvcc->dev->esi; + const u8 one = 1; + if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) - memcpy(net_dev->dev_addr, esi, net_dev->addr_len); + dev_addr_set(net_dev, esi); else - net_dev->dev_addr[2] = 1; + dev_addr_mod(net_dev, 2, &one, 1); } list_add(&brvcc->brvccs, &brdev->brvccs); write_unlock_irq(&devs_lock); diff --git a/net/atm/lec.c b/net/atm/lec.c index 7226c784dbe0c7..6257bf12e5a00b 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -340,12 +340,12 @@ static int lec_close(struct net_device *dev) static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) { + static const u8 zero_addr[ETH_ALEN] = {}; unsigned long flags; struct net_device *dev = (struct net_device *)vcc->proto_data; struct lec_priv *priv = netdev_priv(dev); struct atmlec_msg *mesg; struct lec_arp_table *entry; - int i; char *tmp; /* FIXME */ WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc)); @@ -355,12 +355,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); switch (mesg->type) { case l_set_mac_addr: - for (i = 0; i < 6; i++) - dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; + eth_hw_addr_set(dev, mesg->content.normal.mac_addr); break; case l_del_mac_addr: - for (i = 0; i < 6; i++) - dev->dev_addr[i] = 0; + eth_hw_addr_set(dev, zero_addr); break; case l_addr_delete: lec_addr_delete(priv, mesg->content.normal.atm_addr, diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 2631efc6e359fb..2f34bbdde0e8fe 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -202,7 +202,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr, * Find an AX.25 control block given both ends. It will only pick up * floating AX.25 control blocks or non Raw socket bound control blocks. 
*/ -ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr, +ax25_cb *ax25_find_cb(const ax25_address *src_addr, ax25_address *dest_addr, ax25_digi *digi, struct net_device *dev) { ax25_cb *s; diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 4ac2e0847652a9..d0a043a51848b3 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -35,7 +35,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr) spin_lock_bh(&ax25_dev_lock); for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) - if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { + if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; } spin_unlock_bh(&ax25_dev_lock); diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c index b4083f30af0de3..979bc4b828a007 100644 --- a/net/ax25/ax25_iface.c +++ b/net/ax25/ax25_iface.c @@ -98,7 +98,7 @@ void ax25_linkfail_release(struct ax25_linkfail *lf) EXPORT_SYMBOL(ax25_linkfail_release); -int ax25_listen_register(ax25_address *callsign, struct net_device *dev) +int ax25_listen_register(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *listen; @@ -121,7 +121,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev) EXPORT_SYMBOL(ax25_listen_register); -void ax25_listen_release(ax25_address *callsign, struct net_device *dev) +void ax25_listen_release(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *s, *listen; @@ -171,7 +171,7 @@ int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) return res; } -int ax25_listen_mine(ax25_address *callsign, struct net_device *dev) +int ax25_listen_mine(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *listen; diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index cd6afe895db991..1cac25aca63784 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -181,7 +181,7 @@ static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, i } static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, - ax25_address *dev_addr, struct packet_type *ptype) + const ax25_address *dev_addr, struct packet_type *ptype) { ax25_address src, dest, *next_digi = NULL; int type = 0, mine = 0, dama; @@ -447,5 +447,5 @@ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ - return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); + return ax25_rcv(skb, dev, (const ax25_address *)dev->dev_addr, ptype); } diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 22f2f66c6e0a76..3db76d2470e954 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(ax25_frag_lock); -ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev) +ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *ax25; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 17687848daec56..2ed9496fc41fcb 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -254,7 +254,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv, * Return: backbone gateway if found or NULL otherwise */ static struct batadv_bla_backbone_gw * -batadv_backbone_hash_find(struct batadv_priv 
*bat_priv, u8 *addr, +batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; @@ -336,7 +336,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) */ -static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, +static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, unsigned short vid, int claimtype) { struct sk_buff *skb; @@ -488,7 +488,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work) * Return: the (possibly created) backbone gateway or NULL on error */ static struct batadv_bla_backbone_gw * -batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, +batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig, unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; @@ -926,7 +926,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv, */ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@ -964,7 +964,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, */ static bool batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@ -2130,7 +2130,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, struct batadv_hard_iface *primary_if, struct batadv_bla_claim *claim) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; void *hdr; @@ -2298,7 +2298,7 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, struct batadv_hard_iface *primary_if, struct batadv_bla_backbone_gw *backbone_gw) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; int msecs; diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index a3b6658ed78991..433901dcf0c37c 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -89,7 +89,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) rcu_read_lock(); do { upper = netdev_master_upper_dev_get_rcu(upper); - } while (upper && !(upper->priv_flags & IFF_EBRIDGE)); + } while (upper && !netif_is_bridge_master(upper)); dev_hold(upper); rcu_read_unlock(); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 970d0d7ccc981a..83f31494ea4d92 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -747,7 +747,8 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node *orig_node = NULL; struct batadv_hard_iface *primary_if = NULL; bool ret = false; - u8 *orig_addr, orig_ttvn; + const u8 *orig_addr; + u8 orig_ttvn; if (batadv_is_my_client(bat_priv, dst_addr, vid)) { primary_if = batadv_primary_if_get_selected(bat_priv); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0604b02795731c..7ee09337fc4011 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ 
-134,7 +134,7 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) return -EADDRNOTAVAIL; ether_addr_copy(old_addr, dev->dev_addr); - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); /* only modify transtable if it has been initialized before */ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 56b9fe97b3b44c..fbcb15c7c29b6e 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -631,9 +631,9 @@ static void batadv_tp_recv_ack(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node = NULL; const struct batadv_icmp_tp_packet *icmp; struct batadv_tp_vars *tp_vars; + const unsigned char *dev_addr; size_t packet_len, mss; u32 rtt, recv_ack, cwnd; - unsigned char *dev_addr; packet_len = BATADV_TP_PLEN; mss = BATADV_TP_PLEN; diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 992773376e51d4..0cb58eb04093ae 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -587,8 +587,8 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv, * @tvlv_value: tvlv content * @tvlv_value_len: tvlv content length */ -void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src, - u8 *dst, u8 type, u8 version, +void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src, + const u8 *dst, u8 type, u8 version, void *tvlv_value, u16 tvlv_value_len) { struct batadv_unicast_tvlv_packet *unicast_tvlv_packet; diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h index 54f2a35653d0f1..4cf8af00fc11a0 100644 --- a/net/batman-adv/tvlv.h +++ b/net/batman-adv/tvlv.h @@ -42,8 +42,8 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, u8 *src, u8 *dst, void *tvlv_buff, u16 tvlv_buff_len); -void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src, - u8 *dst, u8 type, u8 version, +void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src, + const u8 *dst, u8 type, u8 version, void *tvlv_value, u16 tvlv_value_len); #endif /* _NET_BATMAN_ADV_TVLV_H_ */ diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index fd164a248569c3..133d7ea063fb13 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -663,6 +663,7 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) { struct net_device *netdev; + bdaddr_t addr; int err; netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)), @@ -672,7 +673,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) return -ENOMEM; netdev->addr_assign_type = NET_ADDR_PERM; - baswap((void *)netdev->dev_addr, &chan->src); + baswap(&addr, &chan->src); + __dev_addr_set(netdev, &addr, sizeof(addr)); netdev->netdev_ops = &netdev_ops; SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index cc0995301f93f0..291770fc95515e 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -14,7 +14,8 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o + ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ + eir.o bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff 
--git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 72f47b372705da..c9add7753b9f23 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -594,7 +594,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) * ie. eh.h_dest is our local address. */ memcpy(s->eh.h_dest, &src, ETH_ALEN); memcpy(s->eh.h_source, &dst, ETH_ALEN); - memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); + eth_hw_addr_set(dev, s->eh.h_dest); s->dev = dev; s->sock = sock; diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c new file mode 100644 index 00000000000000..7e930f77ecab55 --- /dev/null +++ b/net/bluetooth/eir.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2021 Intel Corporation + */ + +#include +#include +#include + +#include "eir.h" + +#define PNP_INFO_SVCLASS_ID 0x1200 + +u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) +{ + size_t short_len; + size_t complete_len; + + /* no space left for name (+ NULL + type + len) */ + if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) + return ad_len; + + /* use complete name if present and fits */ + complete_len = strlen(hdev->dev_name); + if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) + return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, + hdev->dev_name, complete_len + 1); + + /* use short name if present */ + short_len = strlen(hdev->short_name); + if (short_len) + return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, + hdev->short_name, short_len + 1); + + /* use shortened full name if present, we already know that name + * is longer then HCI_MAX_SHORT_NAME_LENGTH + */ + if (complete_len) { + u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; + + memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); + name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; + + return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, + sizeof(name)); + } + + return ad_len; +} + +u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) +{ + return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); +} + +static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 4) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + u16 uuid16; + + if (uuid->size != 16) + continue; + + uuid16 = get_unaligned_le16(&uuid->uuid[12]); + if (uuid16 < 0x1100) + continue; + + if (uuid16 == PNP_INFO_SVCLASS_ID) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID16_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + sizeof(u16) > len) { + uuids_start[1] = EIR_UUID16_SOME; + break; + } + + *ptr++ = (uuid16 & 0x00ff); + *ptr++ = (uuid16 & 0xff00) >> 8; + uuids_start[0] += sizeof(uuid16); + } + + return ptr; +} + +static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 6) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + if (uuid->size != 32) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID32_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + sizeof(u32) > len) { + uuids_start[1] = EIR_UUID32_SOME; + break; + } + + memcpy(ptr, &uuid->uuid[12], sizeof(u32)); + ptr += sizeof(u32); + uuids_start[0] += 
sizeof(u32); + } + + return ptr; +} + +static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 18) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + if (uuid->size != 128) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID128_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + 16 > len) { + uuids_start[1] = EIR_UUID128_SOME; + break; + } + + memcpy(ptr, uuid->uuid, 16); + ptr += 16; + uuids_start[0] += 16; + } + + return ptr; +} + +void eir_create(struct hci_dev *hdev, u8 *data) +{ + u8 *ptr = data; + size_t name_len; + + name_len = strlen(hdev->dev_name); + + if (name_len > 0) { + /* EIR Data type */ + if (name_len > 48) { + name_len = 48; + ptr[1] = EIR_NAME_SHORT; + } else { + ptr[1] = EIR_NAME_COMPLETE; + } + + /* EIR Data length */ + ptr[0] = name_len + 1; + + memcpy(ptr + 2, hdev->dev_name, name_len); + + ptr += (name_len + 2); + } + + if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { + ptr[0] = 2; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8)hdev->inq_tx_power; + + ptr += 3; + } + + if (hdev->devid_source > 0) { + ptr[0] = 9; + ptr[1] = EIR_DEVICE_ID; + + put_unaligned_le16(hdev->devid_source, ptr + 2); + put_unaligned_le16(hdev->devid_vendor, ptr + 4); + put_unaligned_le16(hdev->devid_product, ptr + 6); + put_unaligned_le16(hdev->devid_version, ptr + 8); + + ptr += 10; + } + + ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); + ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); + ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); +} + +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv = NULL; + u8 ad_len = 0, flags = 0; + u32 instance_flags; + + /* Return 0 when the current instance identifier is invalid. */ + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + } + + instance_flags = hci_adv_instance_flags(hdev, instance); + + /* If instance already has the flags set skip adding it once + * again. + */ + if (adv && eir_get_data(adv->adv_data, adv->adv_data_len, EIR_FLAGS, + NULL)) + goto skip_flags; + + /* The Add Advertising command allows userspace to set both the general + * and limited discoverable flags. + */ + if (instance_flags & MGMT_ADV_FLAG_DISCOV) + flags |= LE_AD_GENERAL; + + if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) + flags |= LE_AD_LIMITED; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + flags |= LE_AD_NO_BREDR; + + if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { + /* If a discovery flag wasn't provided, simply use the global + * settings. + */ + if (!flags) + flags |= mgmt_get_adv_discov_flags(hdev); + + /* If flags would still be empty, then there is no need to + * include the "Flags" AD field". 
+ */ + if (flags) { + ptr[0] = 0x02; + ptr[1] = EIR_FLAGS; + ptr[2] = flags; + + ad_len += 3; + ptr += 3; + } + } + +skip_flags: + if (adv) { + memcpy(ptr, adv->adv_data, adv->adv_data_len); + ad_len += adv->adv_data_len; + ptr += adv->adv_data_len; + } + + if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { + s8 adv_tx_power; + + if (ext_adv_capable(hdev)) { + if (adv) + adv_tx_power = adv->tx_power; + else + adv_tx_power = hdev->adv_tx_power; + } else { + adv_tx_power = hdev->adv_tx_power; + } + + /* Provide Tx Power only if we can provide a valid value for it */ + if (adv_tx_power != HCI_TX_POWER_INVALID) { + ptr[0] = 0x02; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8)adv_tx_power; + + ad_len += 3; + ptr += 3; + } + } + + return ad_len; +} + +static u8 create_default_scan_rsp(struct hci_dev *hdev, u8 *ptr) +{ + u8 scan_rsp_len = 0; + + if (hdev->appearance) + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); + + return eir_append_local_name(hdev, ptr, scan_rsp_len); +} + +u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv; + u8 scan_rsp_len = 0; + + if (!instance) + return create_default_scan_rsp(hdev, ptr); + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + + if ((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); + + memcpy(&ptr[scan_rsp_len], adv->scan_rsp_data, adv->scan_rsp_len); + + scan_rsp_len += adv->scan_rsp_len; + + if (adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + scan_rsp_len = eir_append_local_name(hdev, ptr, scan_rsp_len); + + return scan_rsp_len; +} diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h new file mode 100644 index 00000000000000..724662f8f8b155 --- /dev/null +++ b/net/bluetooth/eir.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2021 Intel Corporation + */ + +void eir_create(struct hci_dev *hdev, u8 *data); + +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); + +u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); +u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); + +static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, + u8 *data, u8 data_len) +{ + eir[eir_len++] = sizeof(type) + data_len; + eir[eir_len++] = type; + memcpy(&eir[eir_len], data, data_len); + eir_len += data_len; + + return eir_len; +} + +static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) +{ + eir[eir_len++] = sizeof(type) + sizeof(data); + eir[eir_len++] = type; + put_unaligned_le16(data, &eir[eir_len]); + eir_len += sizeof(data); + + return eir_len; +} + +static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, + size_t *data_len) +{ + size_t parsed = 0; + + if (eir_len < 2) + return NULL; + + while (parsed < eir_len - 1) { + u8 field_len = eir[0]; + + if (field_len == 0) + break; + + parsed += field_len + 1; + + if (parsed > eir_len) + break; + + if (eir[1] != type) { + eir += field_len + 1; + continue; + } + + /* Zero length data */ + if (field_len == 1) + return NULL; + + if (data_len) + *data_len = field_len - 1; + + return &eir[2]; + } + + return NULL; +} diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c new file mode 100644 index 00000000000000..f0421d0edaa377 --- /dev/null +++ b/net/bluetooth/hci_codec.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 
Intel Corporation */ + +#include +#include +#include "hci_codec.h" + +static int hci_codec_list_add(struct list_head *list, + struct hci_op_read_local_codec_caps *sent, + struct hci_rp_read_local_codec_caps *rp, + void *caps, + __u32 len) +{ + struct codec_list *entry; + + entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->id = sent->id; + if (sent->id == 0xFF) { + entry->cid = __le16_to_cpu(sent->cid); + entry->vid = __le16_to_cpu(sent->vid); + } + entry->transport = sent->transport; + entry->len = len; + entry->num_caps = rp->num_caps; + if (rp->num_caps) + memcpy(entry->caps, caps, len); + list_add(&entry->list, list); + + return 0; +} + +void hci_codec_list_clear(struct list_head *codec_list) +{ + struct codec_list *c, *n; + + list_for_each_entry_safe(c, n, codec_list, list) { + list_del(&c->list); + kfree(c); + } +} + +static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, + struct hci_op_read_local_codec_caps + *cmd) +{ + __u8 i; + + for (i = 0; i < TRANSPORT_TYPE_MAX; i++) { + if (transport & BIT(i)) { + struct hci_rp_read_local_codec_caps *rp; + struct hci_codec_caps *caps; + struct sk_buff *skb; + __u8 j; + __u32 len; + + cmd->transport = i; + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, + sizeof(*cmd), cmd, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read codec capabilities (%ld)", + PTR_ERR(skb)); + continue; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + if (!rp->num_caps) { + len = 0; + /* this codec doesn't have capabilities */ + goto skip_caps_parse; + } + + skb_pull(skb, sizeof(*rp)); + + for (j = 0, len = 0; j < rp->num_caps; j++) { + caps = (void *)skb->data; + if (skb->len < sizeof(*caps)) + goto error; + if (skb->len < caps->len) + goto error; + len += sizeof(caps->len) + caps->len; + skb_pull(skb, sizeof(caps->len) + caps->len); + } + +skip_caps_parse: + hci_dev_lock(hdev); + hci_codec_list_add(&hdev->local_codecs, cmd, rp, + (__u8 *)rp + sizeof(*rp), len); + hci_dev_unlock(hdev); +error: + kfree_skb(skb); + } + } +} + +void hci_read_supported_codecs(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_supported_codecs *rp; + struct hci_std_codecs *std_codecs; + struct hci_vnd_codecs *vnd_codecs; + struct hci_op_read_local_codec_caps caps; + __u8 i; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, + HCI_CMD_TIMEOUT); + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", + PTR_ERR(skb)); + return; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + skb_pull(skb, sizeof(rp->status)); + + std_codecs = (void *)skb->data; + + /* validate codecs length before accessing */ + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)) + goto error; + + /* enumerate codec capabilities of standard codecs */ + memset(&caps, 0, sizeof(caps)); + for (i = 0; i < std_codecs->num; i++) { + caps.id = std_codecs->codec[i]; + caps.direction = 0x00; + hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); + } + + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)); + + vnd_codecs = (void *)skb->data; + + /* validate vendor codecs length before accessing */ + if (skb->len < + flex_array_size(vnd_codecs, codec, vnd_codecs->num) + + sizeof(vnd_codecs->num)) + goto error; + + /* enumerate vendor 
codec capabilities */ + for (i = 0; i < vnd_codecs->num; i++) { + caps.id = 0xFF; + caps.cid = vnd_codecs->codec[i].cid; + caps.vid = vnd_codecs->codec[i].vid; + caps.direction = 0x00; + hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); + } + +error: + kfree_skb(skb); +} + +void hci_read_supported_codecs_v2(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_supported_codecs_v2 *rp; + struct hci_std_codecs_v2 *std_codecs; + struct hci_vnd_codecs_v2 *vnd_codecs; + struct hci_op_read_local_codec_caps caps; + __u8 i; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, + HCI_CMD_TIMEOUT); + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", + PTR_ERR(skb)); + return; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + skb_pull(skb, sizeof(rp->status)); + + std_codecs = (void *)skb->data; + + /* check for payload data length before accessing */ + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)) + goto error; + + memset(&caps, 0, sizeof(caps)); + + for (i = 0; i < std_codecs->num; i++) { + caps.id = std_codecs->codec[i].id; + hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport, + &caps); + } + + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)); + + vnd_codecs = (void *)skb->data; + + /* check for payload data length before accessing */ + if (skb->len < + flex_array_size(vnd_codecs, codec, vnd_codecs->num) + + sizeof(vnd_codecs->num)) + goto error; + + for (i = 0; i < vnd_codecs->num; i++) { + caps.id = 0xFF; + caps.cid = vnd_codecs->codec[i].cid; + caps.vid = vnd_codecs->codec[i].vid; + hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport, + &caps); + } + +error: + kfree_skb(skb); +} diff --git a/net/bluetooth/hci_codec.h b/net/bluetooth/hci_codec.h new file mode 100644 index 00000000000000..a2751930f1235e --- /dev/null +++ b/net/bluetooth/hci_codec.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (C) 2014 Intel Corporation */ + +void hci_read_supported_codecs(struct hci_dev *hdev); +void hci_read_supported_codecs_v2(struct hci_dev *hdev); +void hci_codec_list_clear(struct list_head *codec_list); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 2b5059a56cdaaa..bd669c95b9a7e9 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -307,13 +307,133 @@ static bool find_next_esco_param(struct hci_conn *conn, return conn->attempt <= size; } -bool hci_setup_sync(struct hci_conn *conn, __u16 handle) +static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_enhanced_setup_sync_conn cp; + const struct sco_param *param; + + bt_dev_dbg(hdev, "hcon %p", conn); + + /* for offload use case, codec needs to configured before opening SCO */ + if (conn->codec.data_path) + hci_req_configure_datapath(hdev, &conn->codec); + + conn->state = BT_CONNECT; + conn->out = true; + + conn->attempt++; + + memset(&cp, 0x00, sizeof(cp)); + + cp.handle = cpu_to_le16(handle); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + + switch (conn->codec.id) { + case BT_CODEC_MSBC: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) + return false; + + param = &esco_param_msbc[conn->attempt - 1]; + cp.tx_coding_format.id = 0x05; + cp.rx_coding_format.id = 
0x05; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(32000); + cp.out_bandwidth = __cpu_to_le32(32000); + cp.in_coding_format.id = 0x04; + cp.out_coding_format.id = 0x04; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 1; + cp.out_transport_unit_size = 1; + break; + + case BT_CODEC_TRANSPARENT: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) + return false; + param = &esco_param_msbc[conn->attempt - 1]; + cp.tx_coding_format.id = 0x03; + cp.rx_coding_format.id = 0x03; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(0x1f40); + cp.out_bandwidth = __cpu_to_le32(0x1f40); + cp.in_coding_format.id = 0x03; + cp.out_coding_format.id = 0x03; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 1; + cp.out_transport_unit_size = 1; + break; + + case BT_CODEC_CVSD: + if (lmp_esco_capable(conn->link)) { + if (!find_next_esco_param(conn, esco_param_cvsd, + ARRAY_SIZE(esco_param_cvsd))) + return false; + param = &esco_param_cvsd[conn->attempt - 1]; + } else { + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) + return false; + param = &sco_param_cvsd[conn->attempt - 1]; + } + cp.tx_coding_format.id = 2; + cp.rx_coding_format.id = 2; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(16000); + cp.out_bandwidth = __cpu_to_le32(16000); + cp.in_coding_format.id = 4; + cp.out_coding_format.id = 4; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 16; + cp.out_transport_unit_size = 16; + break; + default: + return false; + } + + cp.retrans_effort = param->retrans_effort; + cp.pkt_type = __cpu_to_le16(param->pkt_type); + cp.max_latency = __cpu_to_le16(param->max_latency); + + if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) + return false; + + return true; +} + +static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) { struct hci_dev *hdev = conn->hdev; struct hci_cp_setup_sync_conn cp; const struct sco_param *param; - BT_DBG("hcon %p", conn); + bt_dev_dbg(hdev, "hcon %p", conn); conn->state = BT_CONNECT; conn->out = true; @@ -359,6 +479,14 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle) return true; } +bool hci_setup_sync(struct hci_conn *conn, __u16 handle) +{ + if (enhanced_sco_capable(conn->hdev)) + return hci_enhanced_setup_sync_conn(conn, handle); + + return hci_setup_sync_conn(conn, handle); +} + u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier) { @@ -1040,8 +1168,8 @@ 
static void hci_req_directed_advertising(struct hci_request *req, } struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, - u8 dst_type, u8 sec_level, u16 conn_timeout, - u8 role, bdaddr_t *direct_rpa) + u8 dst_type, bool dst_resolved, u8 sec_level, + u16 conn_timeout, u8 role, bdaddr_t *direct_rpa) { struct hci_conn_params *params; struct hci_conn *conn; @@ -1078,19 +1206,24 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, return ERR_PTR(-EBUSY); } - /* When given an identity address with existing identity - * resolving key, the connection needs to be established - * to a resolvable random address. - * - * Storing the resolvable random address is required here - * to handle connection failures. The address will later - * be resolved back into the original identity address - * from the connect request. + /* Check if the destination address has been resolved by the controller + * since if it did then the identity address shall be used. */ - irk = hci_find_irk_by_addr(hdev, dst, dst_type); - if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { - dst = &irk->rpa; - dst_type = ADDR_LE_DEV_RANDOM; + if (!dst_resolved) { + /* When given an identity address with existing identity + * resolving key, the connection needs to be established + * to a resolvable random address. + * + * Storing the resolvable random address is required here + * to handle connection failures. The address will later + * be resolved back into the original identity address + * from the connect request. + */ + irk = hci_find_irk_by_addr(hdev, dst, dst_type); + if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { + dst = &irk->rpa; + dst_type = ADDR_LE_DEV_RANDOM; + } } if (conn) { @@ -1319,7 +1452,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, } struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, - __u16 setting) + __u16 setting, struct bt_codec *codec) { struct hci_conn *acl; struct hci_conn *sco; @@ -1344,6 +1477,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, hci_conn_hold(sco); sco->setting = setting; + sco->codec = *codec; if (acl->state == BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a47a3017d61de..8d33aa64846b1c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -45,6 +45,7 @@ #include "leds.h" #include "msft.h" #include "aosp.h" +#include "hci_codec.h" static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); @@ -61,130 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); -/* ---- HCI debugfs entries ---- */ - -static ssize_t dut_mode_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - char buf[3]; - - buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 
'Y' : 'N'; - buf[1] = '\n'; - buf[2] = '\0'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - struct sk_buff *skb; - bool enable; - int err; - - if (!test_bit(HCI_UP, &hdev->flags)) - return -ENETDOWN; - - err = kstrtobool_from_user(user_buf, count, &enable); - if (err) - return err; - - if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) - return -EALREADY; - - hci_req_sync_lock(hdev); - if (enable) - skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, - HCI_CMD_TIMEOUT); - else - skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, - HCI_CMD_TIMEOUT); - hci_req_sync_unlock(hdev); - - if (IS_ERR(skb)) - return PTR_ERR(skb); - - kfree_skb(skb); - - hci_dev_change_flag(hdev, HCI_DUT_MODE); - - return count; -} - -static const struct file_operations dut_mode_fops = { - .open = simple_open, - .read = dut_mode_read, - .write = dut_mode_write, - .llseek = default_llseek, -}; - -static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - char buf[3]; - - buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; - buf[1] = '\n'; - buf[2] = '\0'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - bool enable; - int err; - - err = kstrtobool_from_user(user_buf, count, &enable); - if (err) - return err; - - /* When the diagnostic flags are not persistent and the transport - * is not active or in user channel operation, then there is no need - * for the vendor callback. Instead just store the desired value and - * the setting will be programmed when the controller gets powered on. 
- */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && - (!test_bit(HCI_RUNNING, &hdev->flags) || - hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) - goto done; - - hci_req_sync_lock(hdev); - err = hdev->set_diag(hdev, enable); - hci_req_sync_unlock(hdev); - - if (err < 0) - return err; - -done: - if (enable) - hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); - else - hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); - - return count; -} - -static const struct file_operations vendor_diag_fops = { - .open = simple_open, - .read = vendor_diag_read, - .write = vendor_diag_write, - .llseek = default_llseek, -}; - -static void hci_debugfs_create_basic(struct hci_dev *hdev) -{ - debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, - &dut_mode_fops); - - if (hdev->set_diag) - debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, - &vendor_diag_fops); -} - static int hci_reset_req(struct hci_request *req, unsigned long opt) { BT_DBG("%s %ld", req->hdev->name, opt); @@ -838,10 +715,6 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) if (hdev->commands[22] & 0x04) hci_set_event_mask_page_2(req); - /* Read local codec list if the HCI command is supported */ - if (hdev->commands[29] & 0x20) - hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL); - /* Read local pairing options if the HCI command is supported */ if (hdev->commands[41] & 0x08) hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL); @@ -937,6 +810,12 @@ static int __hci_init(struct hci_dev *hdev) if (err < 0) return err; + /* Read local codec list if the HCI command is supported */ + if (hdev->commands[45] & 0x04) + hci_read_supported_codecs_v2(hdev); + else if (hdev->commands[29] & 0x20) + hci_read_supported_codecs(hdev); + /* This function is only called when the controller is actually in * configured state. When the controller is marked as unconfigured, * this initialization procedure is not run. @@ -1848,6 +1727,7 @@ int hci_dev_do_close(struct hci_dev *hdev) memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); bacpy(&hdev->random_addr, BDADDR_ANY); + hci_codec_list_clear(&hdev->local_codecs); hci_req_sync_unlock(hdev); @@ -3080,6 +2960,60 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, return 0; } +/* This function requires the caller holds hdev->lock */ +u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) +{ + u32 flags; + struct adv_info *adv; + + if (instance == 0x00) { + /* Instance 0 always manages the "Tx Power" and "Flags" + * fields + */ + flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; + + /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting + * corresponds to the "connectable" instance flag. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) + flags |= MGMT_ADV_FLAG_CONNECTABLE; + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_DISCOV; + + return flags; + } + + adv = hci_find_adv_instance(hdev, instance); + + /* Return 0 when we got an invalid instance identifier. 
*/ + if (!adv) + return 0; + + return adv->flags; +} + +bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv; + + /* Instance 0x00 always set local name */ + if (instance == 0x00) + return true; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return false; + + if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + return true; + + return adv->scan_rsp_len ? true : false; +} + /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { @@ -3487,15 +3421,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, { struct hci_conn_params *param; - switch (addr_type) { - case ADDR_LE_DEV_PUBLIC_RESOLVED: - addr_type = ADDR_LE_DEV_PUBLIC; - break; - case ADDR_LE_DEV_RANDOM_RESOLVED: - addr_type = ADDR_LE_DEV_RANDOM; - break; - } - list_for_each_entry(param, list, action) { if (bacmp(¶m->addr, addr) == 0 && param->addr_type == addr_type) @@ -3701,55 +3626,12 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, struct hci_dev *hdev = container_of(nb, struct hci_dev, suspend_notifier); int ret = 0; - u8 state = BT_RUNNING; - /* If powering down, wait for completion. */ - if (mgmt_powering_down(hdev)) { - set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); - ret = hci_suspend_wait_event(hdev); - if (ret) - goto done; - } - - /* Suspend notifier should only act on events when powered. */ - if (!hdev_is_powered(hdev) || - hci_dev_test_flag(hdev, HCI_UNREGISTER)) - goto done; + if (action == PM_SUSPEND_PREPARE) + ret = hci_suspend_dev(hdev); + else if (action == PM_POST_SUSPEND) + ret = hci_resume_dev(hdev); - if (action == PM_SUSPEND_PREPARE) { - /* Suspend consists of two actions: - * - First, disconnect everything and make the controller not - * connectable (disabling scanning) - * - Second, program event filter/accept list and enable scan - */ - ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); - if (!ret) - state = BT_SUSPEND_DISCONNECT; - - /* Only configure accept list if disconnect succeeded and wake - * isn't being prevented. - */ - if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) { - ret = hci_change_suspend_state(hdev, - BT_SUSPEND_CONFIGURE_WAKE); - if (!ret) - state = BT_SUSPEND_CONFIGURE_WAKE; - } - - hci_clear_wake_reason(hdev); - mgmt_suspending(hdev, state); - - } else if (action == PM_POST_SUSPEND) { - ret = hci_change_suspend_state(hdev, BT_RUNNING); - - mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, - hdev->wake_addr_type); - } - -done: - /* We always allow suspend even if suspend preparation failed and - * attempt to recover in resume. 
- */ if (ret) bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); @@ -3857,6 +3739,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_LIST_HEAD(&hdev->adv_instances); INIT_LIST_HEAD(&hdev->blocked_keys); + INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); @@ -3994,6 +3877,7 @@ int hci_register_dev(struct hci_dev *hdev) queue_work(hdev->req_workqueue, &hdev->power_on); idr_init(&hdev->adv_monitors_idr); + msft_register(hdev); return id; @@ -4026,6 +3910,8 @@ void hci_unregister_dev(struct hci_dev *hdev) cancel_work_sync(&hdev->suspend_prepare); } + msft_unregister(hdev); + hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && @@ -4088,16 +3974,78 @@ EXPORT_SYMBOL(hci_release_dev); /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { + int ret; + u8 state = BT_RUNNING; + + bt_dev_dbg(hdev, ""); + + /* Suspend should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down, wait for completion. */ + if (mgmt_powering_down(hdev)) { + set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); + ret = hci_suspend_wait_event(hdev); + if (ret) + goto done; + } + + /* Suspend consists of two actions: + * - First, disconnect everything and make the controller not + * connectable (disabling scanning) + * - Second, program event filter/accept list and enable scan + */ + ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); + if (ret) + goto clear; + + state = BT_SUSPEND_DISCONNECT; + + /* Only configure accept list if device may wakeup. */ + if (hdev->wakeup && hdev->wakeup(hdev)) { + ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE); + if (!ret) + state = BT_SUSPEND_CONFIGURE_WAKE; + } + +clear: + hci_clear_wake_reason(hdev); + mgmt_suspending(hdev, state); + +done: + /* We always allow suspend even if suspend preparation failed and + * attempt to recover in resume. + */ hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); - return 0; + return ret; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { + int ret; + + bt_dev_dbg(hdev, ""); + + /* Resume should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down don't attempt to resume */ + if (mgmt_powering_down(hdev)) + return 0; + + ret = hci_change_suspend_state(hdev, BT_RUNNING); + + mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, + hdev->wake_addr_type); + hci_sock_dev_event(hdev, HCI_DEV_RESUME); - return 0; + return ret; } EXPORT_SYMBOL(hci_resume_dev); diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 841393389f7b91..902b40a90b912f 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -27,6 +27,7 @@ #include #include "smp.h" +#include "hci_request.h" #include "hci_debugfs.h" #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ @@ -1250,3 +1251,125 @@ void hci_debugfs_create_conn(struct hci_conn *conn) snprintf(name, sizeof(name), "%u", conn->handle); conn->debugfs = debugfs_create_dir(name, hdev->debugfs); } + +static ssize_t dut_mode_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + struct sk_buff *skb; + bool enable; + int err; + + if (!test_bit(HCI_UP, &hdev->flags)) + return -ENETDOWN; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) + return -EALREADY; + + hci_req_sync_lock(hdev); + if (enable) + skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, + HCI_CMD_TIMEOUT); + else + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, + HCI_CMD_TIMEOUT); + hci_req_sync_unlock(hdev); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + hci_dev_change_flag(hdev, HCI_DUT_MODE); + + return count; +} + +static const struct file_operations dut_mode_fops = { + .open = simple_open, + .read = dut_mode_read, + .write = dut_mode_write, + .llseek = default_llseek, +}; + +static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + /* When the diagnostic flags are not persistent and the transport + * is not active or in user channel operation, then there is no need + * for the vendor callback. Instead just store the desired value and + * the setting will be programmed when the controller gets powered on. 
+ */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + (!test_bit(HCI_RUNNING, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) + goto done; + + hci_req_sync_lock(hdev); + err = hdev->set_diag(hdev, enable); + hci_req_sync_unlock(hdev); + + if (err < 0) + return err; + +done: + if (enable) + hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); + else + hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); + + return count; +} + +static const struct file_operations vendor_diag_fops = { + .open = simple_open, + .read = vendor_diag_read, + .write = vendor_diag_write, + .llseek = default_llseek, +}; + +void hci_debugfs_create_basic(struct hci_dev *hdev) +{ + debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, + &dut_mode_fops); + + if (hdev->set_diag) + debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, + &vendor_diag_fops); +} diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h index 4444dc8cedc21a..9a8a7c93bb12a3 100644 --- a/net/bluetooth/hci_debugfs.h +++ b/net/bluetooth/hci_debugfs.h @@ -26,6 +26,7 @@ void hci_debugfs_create_common(struct hci_dev *hdev); void hci_debugfs_create_bredr(struct hci_dev *hdev); void hci_debugfs_create_le(struct hci_dev *hdev); void hci_debugfs_create_conn(struct hci_conn *conn); +void hci_debugfs_create_basic(struct hci_dev *hdev); #else @@ -45,4 +46,8 @@ static inline void hci_debugfs_create_conn(struct hci_conn *conn) { } +static inline void hci_debugfs_create_basic(struct hci_dev *hdev) +{ +} + #endif diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0bca035bf2dcc2..7d0db1ca124829 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -36,6 +36,7 @@ #include "amp.h" #include "smp.h" #include "msft.h" +#include "eir.h" #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" @@ -2278,6 +2279,41 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } +static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_enhanced_setup_sync_conn *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + if (!cp) + return; + + handle = __le16_to_cpu(cp->handle); + + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { + sco = acl->link; + if (sco) { + sco->state = BT_CLOSED; + + hci_connect_cfm(sco, status); + hci_conn_del(sco); + } + } + + hci_dev_unlock(hdev); +} + static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) { struct hci_cp_sniff_mode *cp; @@ -2351,7 +2387,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); - if (conn->type == LE_LINK) { + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_req_reenable_advertising(hdev); } @@ -2367,6 +2403,28 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_dev_unlock(hdev); } +static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) +{ + /* When using controller based address resolution, then the new + * address types 0x02 and 0x03 are used. 
These types need to be + * converted back into either public address or random address type + */ + switch (type) { + case ADDR_LE_DEV_PUBLIC_RESOLVED: + if (resolved) + *resolved = true; + return ADDR_LE_DEV_PUBLIC; + case ADDR_LE_DEV_RANDOM_RESOLVED: + if (resolved) + *resolved = true; + return ADDR_LE_DEV_RANDOM; + } + + if (resolved) + *resolved = false; + return type; +} + static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, u8 peer_addr_type, u8 own_address_type, u8 filter_policy) @@ -2378,21 +2436,7 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, if (!conn) return; - /* When using controller based address resolution, then the new - * address types 0x02 and 0x03 are used. These types need to be - * converted back into either public address or random address type - */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { - switch (own_address_type) { - case ADDR_LE_DEV_PUBLIC_RESOLVED: - own_address_type = ADDR_LE_DEV_PUBLIC; - break; - case ADDR_LE_DEV_RANDOM_RESOLVED: - own_address_type = ADDR_LE_DEV_RANDOM; - break; - } - } + own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); /* Store the initiator and responder address information which * is needed for SMP. These values will not change during the @@ -2961,7 +3005,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) * or until a connection is created or until the Advertising * is timed out due to Directed Advertising." */ - if (conn->type == LE_LINK) { + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_req_reenable_advertising(hdev); } @@ -3756,6 +3800,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cs_setup_sync_conn(hdev, ev->status); break; + case HCI_OP_ENHANCED_SETUP_SYNC_CONN: + hci_cs_enhanced_setup_sync_conn(hdev, ev->status); + break; + case HCI_OP_SNIFF_MODE: hci_cs_sniff_mode(hdev, ev->status); break; @@ -4397,6 +4445,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; + unsigned int notify_evt; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -4471,15 +4520,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, switch (ev->air_mode) { case 0x02: - if (hdev->notify) - hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); + notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD; break; case 0x03: - if (hdev->notify) - hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); + notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP; break; } + /* Notify only in case of SCO over HCI transport data path which + * is zero and non-zero value shall be non-HCI transport data path + */ + if (conn->codec.data_path == 0) { + if (hdev->notify) + hdev->notify(hdev, notify_evt); + } + hci_connect_cfm(conn, ev->status); if (ev->status) hci_conn_del(conn); @@ -5282,22 +5337,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, conn->dst_type = irk->addr_type; } - /* When using controller based address resolution, then the new - * address types 0x02 and 0x03 are used. 
These types need to be - * converted back into either public address or random address type - */ - if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { - switch (conn->dst_type) { - case ADDR_LE_DEV_PUBLIC_RESOLVED: - conn->dst_type = ADDR_LE_DEV_PUBLIC; - break; - case ADDR_LE_DEV_RANDOM_RESOLVED: - conn->dst_type = ADDR_LE_DEV_RANDOM; - break; - } - } + conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); if (status) { hci_le_conn_failed(conn, status); @@ -5479,8 +5519,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, /* This function requires the caller holds hdev->lock */ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, - u8 addr_type, u8 adv_type, - bdaddr_t *direct_rpa) + u8 addr_type, bool addr_resolved, + u8 adv_type, bdaddr_t *direct_rpa) { struct hci_conn *conn; struct hci_conn_params *params; @@ -5532,9 +5572,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, } } - conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, - hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, - direct_rpa); + conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, + BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, + HCI_ROLE_MASTER, direct_rpa); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned * by higher layer that tried to connect, if no then @@ -5575,7 +5615,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; struct hci_conn *conn; - bool match; + bool match, bdaddr_resolved; u32 flags; u8 *ptr; @@ -5619,6 +5659,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * controller address. */ if (direct_addr) { + direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, + &bdaddr_resolved); + /* Only resolvable random addresses are valid for these * kind of reports and others can be ignored. */ @@ -5646,13 +5689,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, bdaddr_type = irk->addr_type; } + bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); + /* Check if we have been requested to connect to this device. * * direct_addr is set only for directed advertising reports (it is NULL * for advertising reports) and is already verified to be RPA above. 
*/ - conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, - direct_addr); + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, + type, direct_addr); if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index f15626607b2d61..92611bfc0b9e16 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -30,6 +30,7 @@ #include "smp.h" #include "hci_request.h" #include "msft.h" +#include "eir.h" #define HCI_REQ_DONE 0 #define HCI_REQ_PEND 1 @@ -521,164 +522,6 @@ void __hci_req_update_name(struct hci_request *req) hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); } -#define PNP_INFO_SVCLASS_ID 0x1200 - -static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) -{ - u8 *ptr = data, *uuids_start = NULL; - struct bt_uuid *uuid; - - if (len < 4) - return ptr; - - list_for_each_entry(uuid, &hdev->uuids, list) { - u16 uuid16; - - if (uuid->size != 16) - continue; - - uuid16 = get_unaligned_le16(&uuid->uuid[12]); - if (uuid16 < 0x1100) - continue; - - if (uuid16 == PNP_INFO_SVCLASS_ID) - continue; - - if (!uuids_start) { - uuids_start = ptr; - uuids_start[0] = 1; - uuids_start[1] = EIR_UUID16_ALL; - ptr += 2; - } - - /* Stop if not enough space to put next UUID */ - if ((ptr - data) + sizeof(u16) > len) { - uuids_start[1] = EIR_UUID16_SOME; - break; - } - - *ptr++ = (uuid16 & 0x00ff); - *ptr++ = (uuid16 & 0xff00) >> 8; - uuids_start[0] += sizeof(uuid16); - } - - return ptr; -} - -static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) -{ - u8 *ptr = data, *uuids_start = NULL; - struct bt_uuid *uuid; - - if (len < 6) - return ptr; - - list_for_each_entry(uuid, &hdev->uuids, list) { - if (uuid->size != 32) - continue; - - if (!uuids_start) { - uuids_start = ptr; - uuids_start[0] = 1; - uuids_start[1] = EIR_UUID32_ALL; - ptr += 2; - } - - /* Stop if not enough space to put next UUID */ - if ((ptr - data) + sizeof(u32) > len) { - uuids_start[1] = EIR_UUID32_SOME; - break; - } - - memcpy(ptr, &uuid->uuid[12], sizeof(u32)); - ptr += sizeof(u32); - uuids_start[0] += sizeof(u32); - } - - return ptr; -} - -static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) -{ - u8 *ptr = data, *uuids_start = NULL; - struct bt_uuid *uuid; - - if (len < 18) - return ptr; - - list_for_each_entry(uuid, &hdev->uuids, list) { - if (uuid->size != 128) - continue; - - if (!uuids_start) { - uuids_start = ptr; - uuids_start[0] = 1; - uuids_start[1] = EIR_UUID128_ALL; - ptr += 2; - } - - /* Stop if not enough space to put next UUID */ - if ((ptr - data) + 16 > len) { - uuids_start[1] = EIR_UUID128_SOME; - break; - } - - memcpy(ptr, uuid->uuid, 16); - ptr += 16; - uuids_start[0] += 16; - } - - return ptr; -} - -static void create_eir(struct hci_dev *hdev, u8 *data) -{ - u8 *ptr = data; - size_t name_len; - - name_len = strlen(hdev->dev_name); - - if (name_len > 0) { - /* EIR Data type */ - if (name_len > 48) { - name_len = 48; - ptr[1] = EIR_NAME_SHORT; - } else - ptr[1] = EIR_NAME_COMPLETE; - - /* EIR Data length */ - ptr[0] = name_len + 1; - - memcpy(ptr + 2, hdev->dev_name, name_len); - - ptr += (name_len + 2); - } - - if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { - ptr[0] = 2; - ptr[1] = EIR_TX_POWER; - ptr[2] = (u8) hdev->inq_tx_power; - - ptr += 3; - } - - if (hdev->devid_source > 0) { - ptr[0] = 9; - ptr[1] = EIR_DEVICE_ID; - - 
put_unaligned_le16(hdev->devid_source, ptr + 2); - put_unaligned_le16(hdev->devid_vendor, ptr + 4); - put_unaligned_le16(hdev->devid_product, ptr + 6); - put_unaligned_le16(hdev->devid_version, ptr + 8); - - ptr += 10; - } - - ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); - ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); - ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); -} - void __hci_req_update_eir(struct hci_request *req) { struct hci_dev *hdev = req->hdev; @@ -698,7 +541,7 @@ void __hci_req_update_eir(struct hci_request *req) memset(&cp, 0, sizeof(cp)); - create_eir(hdev, cp.data); + eir_create(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return; @@ -1134,25 +977,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req) addr_resolv); } -static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) -{ - struct adv_info *adv_instance; - - /* Instance 0x00 always set local name */ - if (instance == 0x00) - return true; - - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return false; - - if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE || - adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME) - return true; - - return adv_instance->scan_rsp_len ? true : false; -} - static void hci_req_clear_event_filter(struct hci_request *req) { struct hci_cp_set_event_filter f; @@ -1281,21 +1105,24 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) } } -static void hci_req_add_set_adv_filter_enable(struct hci_request *req, - bool enable) +static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req, + bool suspending) { struct hci_dev *hdev = req->hdev; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: - msft_req_add_set_filter_enable(req, enable); + if (suspending) + msft_suspend(hdev); + else + msft_resume(hdev); break; default: return; } /* No need to block when enabling since it's on resume path */ - if (hdev->suspended && !enable) + if (hdev->suspended && suspending) set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); } @@ -1362,7 +1189,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) } /* Disable advertisement filters */ - hci_req_add_set_adv_filter_enable(&req, false); + hci_req_prepare_adv_monitor_suspend(&req, true); /* Prevent disconnects from causing scanning to be re-enabled */ hdev->scanning_paused = true; @@ -1404,7 +1231,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) /* Reset passive/background scanning to normal */ __hci_update_background_scan(&req); /* Enable all of the advertisement filters */ - hci_req_add_set_adv_filter_enable(&req, true); + hci_req_prepare_adv_monitor_suspend(&req, false); /* Unpause directed advertising */ hdev->advertising_paused = false; @@ -1442,7 +1269,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { - return adv_instance_is_scannable(hdev, hdev->cur_adv_instance); + return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); } void __hci_req_disable_advertising(struct hci_request *req) @@ -1457,40 +1284,6 @@ void __hci_req_disable_advertising(struct hci_request *req) } } -static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) -{ - u32 flags; - struct adv_info *adv_instance; - - if (instance == 0x00) { - /* Instance 0 always manages the "Tx Power" and "Flags" - 
* fields - */ - flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; - - /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting - * corresponds to the "connectable" instance flag. - */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) - flags |= MGMT_ADV_FLAG_CONNECTABLE; - - if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) - flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; - else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) - flags |= MGMT_ADV_FLAG_DISCOV; - - return flags; - } - - adv_instance = hci_find_adv_instance(hdev, instance); - - /* Return 0 when we got an invalid instance identifier. */ - if (!adv_instance) - return 0; - - return adv_instance->flags; -} - static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) { /* If privacy is not enabled don't use RPA */ @@ -1555,15 +1348,15 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) void __hci_req_enable_advertising(struct hci_request *req) { struct hci_dev *hdev = req->hdev; - struct adv_info *adv_instance; + struct adv_info *adv; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; u16 adv_min_interval, adv_max_interval; u32 flags; - flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); - adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); + adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. @@ -1595,9 +1388,9 @@ void __hci_req_enable_advertising(struct hci_request *req) memset(&cp, 0, sizeof(cp)); - if (adv_instance) { - adv_min_interval = adv_instance->min_interval; - adv_max_interval = adv_instance->max_interval; + if (adv) { + adv_min_interval = adv->min_interval; + adv_max_interval = adv->max_interval; } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; @@ -1628,85 +1421,6 @@ void __hci_req_enable_advertising(struct hci_request *req) hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); } -u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) -{ - size_t short_len; - size_t complete_len; - - /* no space left for name (+ NULL + type + len) */ - if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) - return ad_len; - - /* use complete name if present and fits */ - complete_len = strlen(hdev->dev_name); - if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) - return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, - hdev->dev_name, complete_len + 1); - - /* use short name if present */ - short_len = strlen(hdev->short_name); - if (short_len) - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, - hdev->short_name, short_len + 1); - - /* use shortened full name if present, we already know that name - * is longer then HCI_MAX_SHORT_NAME_LENGTH - */ - if (complete_len) { - u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; - - memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); - name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; - - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, - sizeof(name)); - } - - return ad_len; -} - -static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) -{ - return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); -} - -static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) -{ - u8 scan_rsp_len = 0; - - if (hdev->appearance) - scan_rsp_len = 
append_appearance(hdev, ptr, scan_rsp_len); - - return append_local_name(hdev, ptr, scan_rsp_len); -} - -static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, - u8 *ptr) -{ - struct adv_info *adv_instance; - u32 instance_flags; - u8 scan_rsp_len = 0; - - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return 0; - - instance_flags = adv_instance->flags; - - if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) - scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); - - memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data, - adv_instance->scan_rsp_len); - - scan_rsp_len += adv_instance->scan_rsp_len; - - if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME) - scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); - - return scan_rsp_len; -} - void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; @@ -1723,11 +1437,7 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) memset(&pdu, 0, sizeof(pdu)); - if (instance) - len = create_instance_scan_rsp_data(hdev, instance, - pdu.data); - else - len = create_default_scan_rsp_data(hdev, pdu.data); + len = eir_create_scan_rsp(hdev, instance, pdu.data); if (hdev->scan_rsp_data_len == len && !memcmp(pdu.data, hdev->scan_rsp_data, len)) @@ -1748,11 +1458,7 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - if (instance) - len = create_instance_scan_rsp_data(hdev, instance, - cp.data); - else - len = create_default_scan_rsp_data(hdev, cp.data); + len = eir_create_scan_rsp(hdev, instance, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) @@ -1767,95 +1473,6 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) } } -static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) -{ - struct adv_info *adv_instance = NULL; - u8 ad_len = 0, flags = 0; - u32 instance_flags; - - /* Return 0 when the current instance identifier is invalid. */ - if (instance) { - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return 0; - } - - instance_flags = get_adv_instance_flags(hdev, instance); - - /* If instance already has the flags set skip adding it once - * again. - */ - if (adv_instance && eir_get_data(adv_instance->adv_data, - adv_instance->adv_data_len, EIR_FLAGS, - NULL)) - goto skip_flags; - - /* The Add Advertising command allows userspace to set both the general - * and limited discoverable flags. - */ - if (instance_flags & MGMT_ADV_FLAG_DISCOV) - flags |= LE_AD_GENERAL; - - if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) - flags |= LE_AD_LIMITED; - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - flags |= LE_AD_NO_BREDR; - - if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { - /* If a discovery flag wasn't provided, simply use the global - * settings. - */ - if (!flags) - flags |= mgmt_get_adv_discov_flags(hdev); - - /* If flags would still be empty, then there is no need to - * include the "Flags" AD field". 
- */ - if (flags) { - ptr[0] = 0x02; - ptr[1] = EIR_FLAGS; - ptr[2] = flags; - - ad_len += 3; - ptr += 3; - } - } - -skip_flags: - if (adv_instance) { - memcpy(ptr, adv_instance->adv_data, - adv_instance->adv_data_len); - ad_len += adv_instance->adv_data_len; - ptr += adv_instance->adv_data_len; - } - - if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { - s8 adv_tx_power; - - if (ext_adv_capable(hdev)) { - if (adv_instance) - adv_tx_power = adv_instance->tx_power; - else - adv_tx_power = hdev->adv_tx_power; - } else { - adv_tx_power = hdev->adv_tx_power; - } - - /* Provide Tx Power only if we can provide a valid value for it */ - if (adv_tx_power != HCI_TX_POWER_INVALID) { - ptr[0] = 0x02; - ptr[1] = EIR_TX_POWER; - ptr[2] = (u8)adv_tx_power; - - ad_len += 3; - ptr += 3; - } - } - - return ad_len; -} - void __hci_req_update_adv_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; @@ -1872,7 +1489,7 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance) memset(&pdu, 0, sizeof(pdu)); - len = create_instance_adv_data(hdev, instance, pdu.data); + len = eir_create_adv_data(hdev, instance, pdu.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && @@ -1894,7 +1511,7 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - len = create_instance_adv_data(hdev, instance, cp.data); + len = eir_create_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && @@ -2183,7 +1800,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) adv_instance = NULL; } - flags = get_adv_instance_flags(hdev, instance); + flags = hci_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 
@@ -2223,7 +1840,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); - } else if (adv_instance_is_scannable(hdev, instance) || + } else if (hci_adv_instance_is_scannable(hdev, instance) || (flags & MGMT_ADV_PARAM_SCAN_RSP)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); @@ -3327,6 +2944,53 @@ bool hci_req_stop_discovery(struct hci_request *req) return ret; } +static void config_data_path_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + bt_dev_dbg(hdev, "status %u", status); +} + +int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec) +{ + struct hci_request req; + int err; + __u8 vnd_len, *vnd_data = NULL; + struct hci_op_configure_data_path *cmd = NULL; + + hci_req_init(&req, hdev); + + err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, + &vnd_data); + if (err < 0) + goto error; + + cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); + if (!cmd) { + err = -ENOMEM; + goto error; + } + + err = hdev->get_data_path_id(hdev, &cmd->data_path_id); + if (err < 0) + goto error; + + cmd->vnd_len = vnd_len; + memcpy(cmd->vnd_data, vnd_data, vnd_len); + + cmd->direction = 0x00; + hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); + + cmd->direction = 0x01; + hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); + + err = hci_req_run(&req, config_data_path_complete); +error: + + kfree(cmd); + kfree(vnd_data); + return err; +} + static int stop_discovery(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 39ee8a18087a28..f31420f5852563 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -101,6 +101,8 @@ void __hci_req_update_class(struct hci_request *req); /* Returns true if HCI commands were queued */ bool hci_req_stop_discovery(struct hci_request *req); +int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec); + static inline void hci_req_update_scan(struct hci_dev *hdev) { queue_work(hdev->req_workqueue, &hdev->scan_update); @@ -122,26 +124,3 @@ static inline void hci_update_background_scan(struct hci_dev *hdev) void hci_request_setup(struct hci_dev *hdev); void hci_request_cancel_all(struct hci_dev *hdev); - -u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len); - -static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, - u8 *data, u8 data_len) -{ - eir[eir_len++] = sizeof(type) + data_len; - eir[eir_len++] = type; - memcpy(&eir[eir_len], data, data_len); - eir_len += data_len; - - return eir_len; -} - -static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) -{ - eir[eir_len++] = sizeof(type) + sizeof(data); - eir[eir_len++] = type; - put_unaligned_le16(data, &eir[eir_len]); - eir_len += sizeof(data); - - return eir_len; -} diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f1128c2134f027..d0dad1fafe0795 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -57,6 +57,7 @@ struct hci_pinfo { unsigned long flags; __u32 cookie; char comm[TASK_COMM_LEN]; + __u16 mtu; }; static struct hci_dev *hci_hdev_from_sock(struct sock *sk) @@ -1374,6 +1375,10 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, break; } + /* Default MTU to HCI_MAX_FRAME_SIZE if not set */ + if (!hci_pi(sk)->mtu) + hci_pi(sk)->mtu = 
HCI_MAX_FRAME_SIZE; + sk->sk_state = BT_BOUND; done: @@ -1506,9 +1511,8 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, } static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, - struct msghdr *msg, size_t msglen) + struct sk_buff *skb) { - void *buf; u8 *cp; struct mgmt_hdr *hdr; u16 opcode, index, len; @@ -1517,40 +1521,31 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, bool var_len, no_hdev; int err; - BT_DBG("got %zu bytes", msglen); + BT_DBG("got %d bytes", skb->len); - if (msglen < sizeof(*hdr)) + if (skb->len < sizeof(*hdr)) return -EINVAL; - buf = kmalloc(msglen, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (memcpy_from_msg(buf, msg, msglen)) { - err = -EFAULT; - goto done; - } - - hdr = buf; + hdr = (void *)skb->data; opcode = __le16_to_cpu(hdr->opcode); index = __le16_to_cpu(hdr->index); len = __le16_to_cpu(hdr->len); - if (len != msglen - sizeof(*hdr)) { + if (len != skb->len - sizeof(*hdr)) { err = -EINVAL; goto done; } if (chan->channel == HCI_CHANNEL_CONTROL) { - struct sk_buff *skb; + struct sk_buff *cmd; /* Send event to monitor */ - skb = create_monitor_ctrl_command(sk, index, opcode, len, - buf + sizeof(*hdr)); - if (skb) { - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + cmd = create_monitor_ctrl_command(sk, index, opcode, len, + skb->data + sizeof(*hdr)); + if (cmd) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd, HCI_SOCK_TRUSTED, NULL); - kfree_skb(skb); + kfree_skb(cmd); } } @@ -1615,26 +1610,25 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, if (hdev && chan->hdev_init) chan->hdev_init(sk, hdev); - cp = buf + sizeof(*hdr); + cp = skb->data + sizeof(*hdr); err = handler->func(sk, hdev, cp, len); if (err < 0) goto done; - err = msglen; + err = skb->len; done: if (hdev) hci_dev_put(hdev); - kfree(buf); return err; } -static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len) +static int hci_logging_frame(struct sock *sk, struct sk_buff *skb, + unsigned int flags) { struct hci_mon_hdr *hdr; - struct sk_buff *skb; struct hci_dev *hdev; u16 index; int err; @@ -1643,24 +1637,13 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len) * the priority byte, the ident length byte and at least one string * terminator NUL byte. Anything shorter are invalid packets. */ - if (len < sizeof(*hdr) + 3) + if (skb->len < sizeof(*hdr) + 3) return -EINVAL; - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); - if (!skb) - return err; - - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { - err = -EFAULT; - goto drop; - } - hdr = (void *)skb->data; - if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) { - err = -EINVAL; - goto drop; - } + if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr)) + return -EINVAL; if (__le16_to_cpu(hdr->opcode) == 0x0000) { __u8 priority = skb->data[sizeof(*hdr)]; @@ -1679,25 +1662,20 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len) * The message follows the ident string (if present) and * must be NUL terminated. Otherwise it is not a valid packet. 
*/ - if (priority > 7 || skb->data[len - 1] != 0x00 || - ident_len > len - sizeof(*hdr) - 3 || - skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) { - err = -EINVAL; - goto drop; - } + if (priority > 7 || skb->data[skb->len - 1] != 0x00 || + ident_len > skb->len - sizeof(*hdr) - 3 || + skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) + return -EINVAL; } else { - err = -EINVAL; - goto drop; + return -EINVAL; } index = __le16_to_cpu(hdr->index); if (index != MGMT_INDEX_NONE) { hdev = hci_dev_get(index); - if (!hdev) { - err = -ENODEV; - goto drop; - } + if (!hdev) + return -ENODEV; } else { hdev = NULL; } @@ -1705,13 +1683,11 @@ static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len) hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING); hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); - err = len; + err = skb->len; if (hdev) hci_dev_put(hdev); -drop: - kfree_skb(skb); return err; } @@ -1723,19 +1699,23 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, struct hci_dev *hdev; struct sk_buff *skb; int err; + const unsigned int flags = msg->msg_flags; BT_DBG("sock %p sk %p", sock, sk); - if (msg->msg_flags & MSG_OOB) + if (flags & MSG_OOB) return -EOPNOTSUPP; - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE| - MSG_CMSG_COMPAT)) + if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) return -EINVAL; - if (len < 4 || len > HCI_MAX_FRAME_SIZE) + if (len < 4 || len > hci_pi(sk)->mtu) return -EINVAL; + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + lock_sock(sk); switch (hci_pi(sk)->channel) { @@ -1744,39 +1724,30 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, break; case HCI_CHANNEL_MONITOR: err = -EOPNOTSUPP; - goto done; + goto drop; case HCI_CHANNEL_LOGGING: - err = hci_logging_frame(sk, msg, len); - goto done; + err = hci_logging_frame(sk, skb, flags); + goto drop; default: mutex_lock(&mgmt_chan_list_lock); chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); if (chan) - err = hci_mgmt_cmd(chan, sk, msg, len); + err = hci_mgmt_cmd(chan, sk, skb); else err = -EINVAL; mutex_unlock(&mgmt_chan_list_lock); - goto done; + goto drop; } hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) { err = PTR_ERR(hdev); - goto done; + goto drop; } if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; - goto done; - } - - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); - if (!skb) - goto done; - - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { - err = -EFAULT; goto drop; } @@ -1857,8 +1828,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, goto done; } -static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int len) +static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -1866,9 +1837,6 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, BT_DBG("sk %p, opt %d", sk, optname); - if (level != SOL_HCI) - return -ENOPROTOOPT; - lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { @@ -1943,18 +1911,63 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, return err; } -static int hci_sock_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +static int hci_sock_setsockopt(struct socket *sock, int level, int optname, + 
sockptr_t optval, unsigned int len) { - struct hci_ufilter uf; struct sock *sk = sock->sk; - int len, opt, err = 0; + int err = 0, opt = 0; BT_DBG("sk %p, opt %d", sk, optname); - if (level != SOL_HCI) + if (level == SOL_HCI) + return hci_sock_setsockopt_old(sock, level, optname, optval, + len); + + if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; + lock_sock(sk); + + switch (optname) { + case BT_SNDMTU: + case BT_RCVMTU: + switch (hci_pi(sk)->channel) { + /* Don't allow changing MTU for channels that are meant for HCI + * traffic only. + */ + case HCI_CHANNEL_RAW: + case HCI_CHANNEL_USER: + err = -ENOPROTOOPT; + goto done; + } + + if (copy_from_sockptr(&opt, optval, sizeof(u16))) { + err = -EFAULT; + break; + } + + hci_pi(sk)->mtu = opt; + break; + + default: + err = -ENOPROTOOPT; + break; + } + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct hci_ufilter uf; + struct sock *sk = sock->sk; + int len, opt, err = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + if (get_user(len, optlen)) return -EFAULT; @@ -2012,6 +2025,39 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, return err; } +static int hci_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + if (level == SOL_HCI) + return hci_sock_getsockopt_old(sock, level, optname, optval, + optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SNDMTU: + case BT_RCVMTU: + if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval)) + err = -EFAULT; + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 77ba68209dbd89..4f8f3759996251 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7902,7 +7902,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, dst_type = ADDR_LE_DEV_RANDOM; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) - hcon = hci_connect_le(hdev, dst, dst_type, + hcon = hci_connect_le(hdev, dst, dst_type, false, chan->sec_level, HCI_LE_CONN_TIMEOUT, HCI_ROLE_SLAVE, NULL); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index c99d65ef13b1e2..160c016a5dfb9e 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1508,6 +1508,9 @@ static void l2cap_sock_close_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; + if (!sk) + return; + l2cap_sock_kill(sk); } @@ -1516,6 +1519,9 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) struct sock *sk = chan->data; struct sock *parent; + if (!sk) + return; + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); /* This callback can be called both for server (BT_LISTEN) @@ -1707,8 +1713,10 @@ static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); - if (l2cap_pi(sk)->chan) + if (l2cap_pi(sk)->chan) { + l2cap_pi(sk)->chan->data = NULL; l2cap_chan_put(l2cap_pi(sk)->chan); + } if (l2cap_pi(sk)->rx_busy_skb) { kfree_skb(l2cap_pi(sk)->rx_busy_skb); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index cea01e275f1ea9..3e5283607b97c1 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -38,6 
+38,7 @@ #include "mgmt_util.h" #include "mgmt_config.h" #include "msft.h" +#include "eir.h" #define MGMT_VERSION 1 #define MGMT_REVISION 21 @@ -3791,6 +3792,18 @@ static const u8 debug_uuid[16] = { }; #endif +/* 330859bc-7506-492d-9370-9a6f0614037f */ +static const u8 quality_report_uuid[16] = { + 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93, + 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33, +}; + +/* a6695ace-ee7f-4fb9-881a-5fac66c629af */ +static const u8 offload_codecs_uuid[16] = { + 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88, + 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6, +}; + /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ static const u8 simult_central_periph_uuid[16] = { 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, @@ -3806,7 +3819,7 @@ static const u8 rpa_resolution_uuid[16] = { static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[62]; /* Enough space for 3 features */ + char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */ struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; u32 flags; @@ -3850,6 +3863,28 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } + if (hdev && hdev->set_quality_report) { + if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, quality_report_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && hdev->get_data_path_id) { + if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable @@ -3892,150 +3927,341 @@ static int exp_debug_feature_changed(bool enabled, struct sock *skip) } #endif -static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, - void *data, u16 data_len) +static int exp_quality_report_feature_changed(bool enabled, struct sock *skip) { - struct mgmt_cp_set_exp_feature *cp = data; - struct mgmt_rp_set_exp_feature rp; + struct mgmt_ev_exp_feature_changed ev; - bt_dev_dbg(hdev, "sock %p", sk); + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, quality_report_uuid, 16); + ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); +} + +#define EXP_FEAT(_uuid, _set_func) \ +{ \ + .uuid = _uuid, \ + .set_func = _set_func, \ +} + +/* The zero key uuid is special. Multiple exp features are set through it. 
*/ +static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; - if (!memcmp(cp->uuid, ZERO_KEY, 16)) { - memset(rp.uuid, 0, 16); - rp.flags = cpu_to_le32(0); + memset(rp.uuid, 0, 16); + rp.flags = cpu_to_le32(0); #ifdef CONFIG_BT_FEATURE_DEBUG - if (!hdev) { - bool changed = bt_dbg_get(); + if (!hdev) { + bool changed = bt_dbg_get(); - bt_dbg_set(false); + bt_dbg_set(false); - if (changed) - exp_debug_feature_changed(false, sk); - } + if (changed) + exp_debug_feature_changed(false, sk); + } #endif - if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { - bool changed = hci_dev_test_flag(hdev, - HCI_ENABLE_LL_PRIVACY); + if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { + bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); - if (changed) - exp_ll_privacy_feature_changed(false, hdev, sk); - } + if (changed) + exp_ll_privacy_feature_changed(false, hdev, sk); + } - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); - return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, - MGMT_OP_SET_EXP_FEATURE, 0, - &rp, sizeof(rp)); - } + return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); +} #ifdef CONFIG_BT_FEATURE_DEBUG - if (!memcmp(cp->uuid, debug_uuid, 16)) { - bool val, changed; - int err; +static int set_debug_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; - /* Command requires to use the non-controller index */ - if (hdev) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_INDEX); + bool val, changed; + int err; - /* Parameters are limited to a single octet */ - if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_PARAMS); + /* Command requires to use the non-controller index */ + if (hdev) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); - /* Only boolean on/off is supported */ - if (cp->param[0] != 0x00 && cp->param[0] != 0x01) - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_PARAMS); + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); - val = !!cp->param[0]; - changed = val ? !bt_dbg_get() : bt_dbg_get(); - bt_dbg_set(val); + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); - memcpy(rp.uuid, debug_uuid, 16); - rp.flags = cpu_to_le32(val ? BIT(0) : 0); + val = !!cp->param[0]; + changed = val ? !bt_dbg_get() : bt_dbg_get(); + bt_dbg_set(val); - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + memcpy(rp.uuid, debug_uuid, 16); + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); - err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, - MGMT_OP_SET_EXP_FEATURE, 0, - &rp, sizeof(rp)); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); - if (changed) - exp_debug_feature_changed(val, sk); + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); - return err; - } + if (changed) + exp_debug_feature_changed(val, sk); + + return err; +} #endif - if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) { - bool val, changed; - int err; - u32 flags; +static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed; + int err; + u32 flags; + + /* Command requires to use the controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); - /* Command requires to use the controller index */ - if (!hdev) - return mgmt_cmd_status(sk, MGMT_INDEX_NONE, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_INDEX); + /* Changes can only be made when controller is powered down */ + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_REJECTED); - /* Changes can only be made when controller is powered down */ - if (hdev_is_powered(hdev)) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_REJECTED); + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); - /* Parameters are limited to a single octet */ - if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_PARAMS); + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); - /* Only boolean on/off is supported */ - if (cp->param[0] != 0x00 && cp->param[0] != 0x01) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_INVALID_PARAMS); + val = !!cp->param[0]; - val = !!cp->param[0]; + if (val) { + changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); - if (val) { - changed = !hci_dev_test_flag(hdev, - HCI_ENABLE_LL_PRIVACY); - hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); - hci_dev_clear_flag(hdev, HCI_ADVERTISING); + /* Enable LL privacy + supported settings changed */ + flags = BIT(0) | BIT(1); + } else { + changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); - /* Enable LL privacy + supported settings changed */ - flags = BIT(0) | BIT(1); - } else { - changed = hci_dev_test_flag(hdev, - HCI_ENABLE_LL_PRIVACY); - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + /* Disable LL privacy + supported settings changed */ + flags = BIT(1); + } - /* Disable LL privacy + supported settings changed */ - flags = BIT(1); + memcpy(rp.uuid, rpa_resolution_uuid, 16); + rp.flags = cpu_to_le32(flags); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_ll_privacy_feature_changed(val, hdev, sk); + + return err; +} + +static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, + struct 
mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed; + int err; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + hci_req_sync_lock(hdev); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); + + if (!hdev->set_quality_report) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + goto unlock_quality_report; + } + + if (changed) { + err = hdev->set_quality_report(hdev, val); + if (err) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_FAILED); + goto unlock_quality_report; } + if (val) + hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); + else + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); + } - memcpy(rp.uuid, rpa_resolution_uuid, 16); - rp.flags = cpu_to_le32(flags); + bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed); - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + memcpy(rp.uuid, quality_report_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); - err = mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, 0, - &rp, sizeof(rp)); + if (changed) + exp_quality_report_feature_changed(val, sk); - if (changed) - exp_ll_privacy_feature_changed(val, hdev, sk); +unlock_quality_report: + hci_req_sync_unlock(hdev); + return err; +} - return err; +static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip) +{ + struct mgmt_ev_exp_feature_changed ev; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, offload_codecs_uuid, 16); + ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); +} + +static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + bool val, changed; + int err; + struct mgmt_rp_set_exp_feature rp; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)); + + if (!hdev->get_data_path_id) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + } + + if (changed) { + if (val) + hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); + } + + bt_dev_info(hdev, "offload codecs enable %d changed %d", + val, changed); + + memcpy(rp.uuid, offload_codecs_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_offload_codec_feature_changed(val, sk); + + return err; +} + +static const struct mgmt_exp_feature { + const u8 *uuid; + int (*set_func)(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len); +} exp_features[] = { + EXP_FEAT(ZERO_KEY, set_zero_key_func), +#ifdef CONFIG_BT_FEATURE_DEBUG + EXP_FEAT(debug_uuid, set_debug_func), +#endif + EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), + EXP_FEAT(quality_report_uuid, set_quality_report_func), + EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), + + /* end with a null feature */ + EXP_FEAT(NULL, NULL) +}; + +static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_set_exp_feature *cp = data; + size_t i = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + for (i = 0; exp_features[i].uuid; i++) { + if (!memcmp(cp->uuid, exp_features[i].uuid, 16)) + return exp_features[i].set_func(sk, hdev, cp, data_len); } return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, @@ -7315,6 +7541,11 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, if (!rp) return -ENOMEM; + if (!status && !lmp_ssp_capable(hdev)) { + status = MGMT_STATUS_NOT_SUPPORTED; + eir_len = 0; + } + if (status) goto complete; @@ -7526,7 +7757,7 @@ static u8 calculate_name_len(struct hci_dev *hdev) { u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; - return append_local_name(hdev, buf, 0); + return eir_append_local_name(hdev, buf, 0); } static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, @@ -8222,7 +8453,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, * advertising. 
*/ if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index b4bfae41e8a558..255cffa554ee5b 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -94,11 +94,14 @@ struct msft_data { __u16 pending_add_handle; __u16 pending_remove_handle; __u8 reregistering; + __u8 suspending; __u8 filter_enabled; }; static int __msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); +static int __msft_remove_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor, u16 handle); bool msft_monitor_supported(struct hci_dev *hdev) { @@ -154,7 +157,7 @@ static bool read_supported_features(struct hci_dev *hdev, } /* This function requires the caller holds hdev->lock */ -static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle) +static void reregister_monitor(struct hci_dev *hdev, int handle) { struct adv_monitor *monitor; struct msft_data *msft = hdev->msft_data; @@ -182,31 +185,102 @@ static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle) } } +/* This function requires the caller holds hdev->lock */ +static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle) +{ + struct adv_monitor *monitor; + struct msft_data *msft = hdev->msft_data; + int err; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) { + /* All monitors have been removed */ + msft->suspending = false; + hci_update_background_scan(hdev); + return; + } + + msft->pending_remove_handle = (u16)handle; + err = __msft_remove_monitor(hdev, monitor, handle); + + /* If success, return and wait for monitor removed callback */ + if (!err) + return; + + /* Otherwise free the monitor and keep removing */ + hci_free_adv_monitor(hdev, monitor); + handle++; + } +} + +/* This function requires the caller holds hdev->lock */ +void msft_suspend(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return; + + if (msft_monitor_supported(hdev)) { + msft->suspending = true; + /* Quitely remove all monitors on suspend to avoid waking up + * the system. + */ + remove_monitor_on_suspend(hdev, 0); + } +} + +/* This function requires the caller holds hdev->lock */ +void msft_resume(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return; + + if (msft_monitor_supported(hdev)) { + msft->reregistering = true; + /* Monitors are removed on suspend, so we need to add all + * monitors on resume. 
+ */ + reregister_monitor(hdev, 0); + } +} + void msft_do_open(struct hci_dev *hdev) { - struct msft_data *msft; + struct msft_data *msft = hdev->msft_data; if (hdev->msft_opcode == HCI_OP_NOP) return; + if (!msft) { + bt_dev_err(hdev, "MSFT extension not registered"); + return; + } + bt_dev_dbg(hdev, "Initialize MSFT extension"); - msft = kzalloc(sizeof(*msft), GFP_KERNEL); - if (!msft) - return; + /* Reset existing MSFT data before re-reading */ + kfree(msft->evt_prefix); + msft->evt_prefix = NULL; + msft->evt_prefix_len = 0; + msft->features = 0; if (!read_supported_features(hdev, msft)) { + hdev->msft_data = NULL; kfree(msft); return; } - INIT_LIST_HEAD(&msft->handle_map); - hdev->msft_data = msft; - if (msft_monitor_supported(hdev)) { msft->reregistering = true; msft_set_filter_enable(hdev, true); - reregister_monitor_on_restart(hdev, 0); + /* Monitors get removed on power off, so we need to explicitly + * tell the controller to re-monitor. + */ + reregister_monitor(hdev, 0); } } @@ -221,8 +295,9 @@ void msft_do_close(struct hci_dev *hdev) bt_dev_dbg(hdev, "Cleanup of MSFT extension"); - hdev->msft_data = NULL; - + /* The controller will silently remove all monitors on power off. + * Therefore, remove handle_data mapping and reset monitor state. + */ list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) { monitor = idr_find(&hdev->adv_monitors_idr, handle_data->mgmt_handle); @@ -233,6 +308,34 @@ void msft_do_close(struct hci_dev *hdev) list_del(&handle_data->list); kfree(handle_data); } +} + +void msft_register(struct hci_dev *hdev) +{ + struct msft_data *msft = NULL; + + bt_dev_dbg(hdev, "Register MSFT extension"); + + msft = kzalloc(sizeof(*msft), GFP_KERNEL); + if (!msft) { + bt_dev_err(hdev, "Failed to register MSFT extension"); + return; + } + + INIT_LIST_HEAD(&msft->handle_map); + hdev->msft_data = msft; +} + +void msft_unregister(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return; + + bt_dev_dbg(hdev, "Unregister MSFT extension"); + + hdev->msft_data = NULL; kfree(msft->evt_prefix); kfree(msft); @@ -345,8 +448,7 @@ static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, /* If in restart/reregister sequence, keep registering. */ if (msft->reregistering) - reregister_monitor_on_restart(hdev, - msft->pending_add_handle + 1); + reregister_monitor(hdev, msft->pending_add_handle + 1); hci_dev_unlock(hdev); @@ -383,13 +485,25 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, if (handle_data) { monitor = idr_find(&hdev->adv_monitors_idr, handle_data->mgmt_handle); - if (monitor) + + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + + /* Do not free the monitor if it is being removed due to + * suspend. It will be re-monitored on resume. + */ + if (monitor && !msft->suspending) hci_free_adv_monitor(hdev, monitor); list_del(&handle_data->list); kfree(handle_data); } + /* If in suspend/remove sequence, keep removing. */ + if (msft->suspending) + remove_monitor_on_suspend(hdev, + msft->pending_remove_handle + 1); + /* If remove all monitors is required, we need to continue the process * here because the earlier it was paused when waiting for the * response from controller. 
@@ -408,7 +522,8 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, hci_dev_unlock(hdev); done: - hci_remove_adv_monitor_complete(hdev, status); + if (!msft->suspending) + hci_remove_adv_monitor_complete(hdev, status); } static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, @@ -541,15 +656,15 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) if (!msft) return -EOPNOTSUPP; - if (msft->reregistering) + if (msft->reregistering || msft->suspending) return -EBUSY; return __msft_add_monitor_pattern(hdev, monitor); } /* This function requires the caller holds hdev->lock */ -int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - u16 handle) +static int __msft_remove_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor, u16 handle) { struct msft_cp_le_cancel_monitor_advertisement cp; struct msft_monitor_advertisement_handle_data *handle_data; @@ -557,12 +672,6 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, struct msft_data *msft = hdev->msft_data; int err = 0; - if (!msft) - return -EOPNOTSUPP; - - if (msft->reregistering) - return -EBUSY; - handle_data = msft_find_handle_data(hdev, monitor->handle, true); /* If no matched handle, just remove without telling controller */ @@ -582,6 +691,21 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, return err; } +/* This function requires the caller holds hdev->lock */ +int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, + u16 handle) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return -EOPNOTSUPP; + + if (msft->reregistering || msft->suspending) + return -EBUSY; + + return __msft_remove_monitor(hdev, monitor, handle); +} + void msft_req_add_set_filter_enable(struct hci_request *req, bool enable) { struct hci_dev *hdev = req->hdev; diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 6e56d94b88d868..59c6e081c789b6 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -13,6 +13,8 @@ #if IS_ENABLED(CONFIG_BT_MSFTEXT) bool msft_monitor_supported(struct hci_dev *hdev); +void msft_register(struct hci_dev *hdev); +void msft_unregister(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); @@ -22,6 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, u16 handle); void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); int msft_set_filter_enable(struct hci_dev *hdev, bool enable); +void msft_suspend(struct hci_dev *hdev); +void msft_resume(struct hci_dev *hdev); bool msft_curve_validity(struct hci_dev *hdev); #else @@ -31,6 +35,8 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev) return false; } +static inline void msft_register(struct hci_dev *hdev) {} +static inline void msft_unregister(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} @@ -55,6 +61,9 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable) return -EOPNOTSUPP; } +static inline void msft_suspend(struct hci_dev *hdev) {} +static inline void msft_resume(struct hci_dev *hdev) {} + static inline bool msft_curve_validity(struct hci_dev *hdev) { return false; diff --git a/net/bluetooth/rfcomm/core.c 
b/net/bluetooth/rfcomm/core.c index f2bacb464ccf3b..7324764384b677 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -549,22 +549,58 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) return dlc; } +static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag) +{ + int len = frag->len; + + BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + + if (len > d->mtu) + return -EINVAL; + + rfcomm_make_uih(frag, d->addr); + __skb_queue_tail(&d->tx_queue, frag); + + return len; +} + int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) { - int len = skb->len; + unsigned long flags; + struct sk_buff *frag, *next; + int len; if (d->state != BT_CONNECTED) return -ENOTCONN; - BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; - if (len > d->mtu) - return -EINVAL; + /* Queue all fragments atomically. */ + spin_lock_irqsave(&d->tx_queue.lock, flags); - rfcomm_make_uih(skb, d->addr); - skb_queue_tail(&d->tx_queue, skb); + len = rfcomm_dlc_send_frag(d, skb); + if (len < 0 || !frag) + goto unlock; + + for (; frag; frag = next) { + int ret; + + next = frag->next; + + ret = rfcomm_dlc_send_frag(d, frag); + if (ret < 0) { + kfree_skb(frag); + goto unlock; + } + + len += ret; + } + +unlock: + spin_unlock_irqrestore(&d->tx_queue.lock, flags); - if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) rfcomm_schedule(); return len; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 2c95bb58f901a5..4bf4ea6cbb5eee 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -575,46 +575,20 @@ static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); sent = bt_sock_wait_ready(sk, msg->msg_flags); - if (sent) - goto done; - - while (len) { - size_t size = min_t(size_t, len, d->mtu); - int err; - - skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, - msg->msg_flags & MSG_DONTWAIT, &err); - if (!skb) { - if (sent == 0) - sent = err; - break; - } - skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); - - err = memcpy_from_msg(skb_put(skb, size), msg, size); - if (err) { - kfree_skb(skb); - if (sent == 0) - sent = err; - break; - } - skb->priority = sk->sk_priority; + release_sock(sk); - err = rfcomm_dlc_send(d, skb); - if (err < 0) { - kfree_skb(skb); - if (sent == 0) - sent = err; - break; - } + if (sent) + return sent; - sent += size; - len -= size; - } + skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, + RFCOMM_SKB_TAIL_RESERVE); + if (IS_ERR(skb)) + return PTR_ERR(skb); -done: - release_sock(sk); + sent = rfcomm_dlc_send(d, skb); + if (sent < 0) + kfree_skb(skb); return sent; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 98a88158651281..8eabf41b299394 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -69,6 +69,7 @@ struct sco_pinfo { __u32 flags; __u16 setting; __u8 cmsg_mask; + struct bt_codec codec; struct sco_conn *conn; }; @@ -133,6 +134,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) return NULL; spin_lock_init(&conn->lock); + INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); hcon->sco_data = conn; conn->hcon = hcon; @@ -187,20 +189,21 @@ static void sco_conn_del(struct hci_conn *hcon, int err) /* Kill socket */ sco_conn_lock(conn); sk = conn->sk; + if (sk) + sock_hold(sk); sco_conn_unlock(conn); if (sk) { - sock_hold(sk); lock_sock(sk); sco_sock_clear_timer(sk); 
sco_chan_del(sk, err); release_sock(sk); sock_put(sk); - - /* Ensure no more work items will run before freeing conn. */ - cancel_delayed_work_sync(&conn->timeout_work); } + /* Ensure no more work items will run before freeing conn. */ + cancel_delayed_work_sync(&conn->timeout_work); + hcon->sco_data = NULL; kfree(conn); } @@ -213,8 +216,6 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, sco_pi(sk)->conn = conn; conn->sk = sk; - INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); - if (parent) bt_accept_enqueue(parent, sk, true); } @@ -252,7 +253,7 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk) return -EOPNOTSUPP; hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, - sco_pi(sk)->setting); + sco_pi(sk)->setting, &sco_pi(sk)->codec); if (IS_ERR(hcon)) return PTR_ERR(hcon); @@ -280,11 +281,10 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk) return err; } -static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) +static int sco_send_frame(struct sock *sk, struct sk_buff *skb) { struct sco_conn *conn = sco_pi(sk)->conn; - struct sk_buff *skb; - int err; + int len = skb->len; /* Check outgoing MTU */ if (len > conn->mtu) @@ -292,15 +292,6 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) BT_DBG("sk %p len %d", sk, len); - skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); - if (!skb) - return err; - - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { - kfree_skb(skb); - return -EFAULT; - } - hci_send_sco(conn->hcon, skb); return len; @@ -444,6 +435,7 @@ static void __sco_sock_close(struct sock *sk) sock_set_flag(sk, SOCK_ZAPPED); break; } + } /* Must be called on unlocked socket. */ @@ -504,6 +496,10 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, sk->sk_state = BT_OPEN; sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; + sco_pi(sk)->codec.id = BT_CODEC_CVSD; + sco_pi(sk)->codec.cid = 0xffff; + sco_pi(sk)->codec.vid = 0xffff; + sco_pi(sk)->codec.data_path = 0x00; bt_sock_link(&sco_sk_list, sk); return sk; @@ -725,6 +721,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; + struct sk_buff *skb; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -736,14 +733,21 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = sco_send_frame(sk, msg, len); + err = sco_send_frame(sk, skb); else err = -ENOTCONN; release_sock(sk); + + if (err < 0) + kfree_skb(skb); return err; } @@ -825,6 +829,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, int len, err = 0; struct bt_voice voice; u32 opt; + struct bt_codecs *codecs; + struct hci_dev *hdev; + __u8 buffer[255]; BT_DBG("sk %p", sk); @@ -872,6 +879,16 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, } sco_pi(sk)->setting = voice.setting; + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, + BDADDR_BREDR); + if (!hdev) { + err = -EBADFD; + break; + } + if (enhanced_sco_capable(hdev) && + voice.setting == BT_VOICE_TRANSPARENT) + sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; + hci_dev_put(hdev); break; case BT_PKT_STATUS: @@ -886,6 +903,57 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; break; + case BT_CODEC: 
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, + BDADDR_BREDR); + if (!hdev) { + err = -EBADFD; + break; + } + + if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + if (!hdev->get_data_path_id) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + if (optlen < sizeof(struct bt_codecs) || + optlen > sizeof(buffer)) { + hci_dev_put(hdev); + err = -EINVAL; + break; + } + + if (copy_from_sockptr(buffer, optval, optlen)) { + hci_dev_put(hdev); + err = -EFAULT; + break; + } + + codecs = (void *)buffer; + + if (codecs->num_codecs > 1) { + hci_dev_put(hdev); + err = -EINVAL; + break; + } + + sco_pi(sk)->codec = codecs->codecs[0]; + hci_dev_put(hdev); + break; + default: err = -ENOPROTOOPT; break; @@ -964,6 +1032,12 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, struct bt_voice voice; u32 phys; int pkt_status; + int buf_len; + struct codec_list *c; + u8 num_codecs, i, __user *ptr; + struct hci_dev *hdev; + struct hci_codec_caps *caps; + struct bt_codec codec; BT_DBG("sk %p", sk); @@ -1028,6 +1102,101 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, err = -EFAULT; break; + case BT_CODEC: + num_codecs = 0; + buf_len = 0; + + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); + if (!hdev) { + err = -EBADFD; + break; + } + + if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + if (!hdev->get_data_path_id) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + /* find total buffer size required to copy codec + caps */ + hci_dev_lock(hdev); + list_for_each_entry(c, &hdev->local_codecs, list) { + if (c->transport != HCI_TRANSPORT_SCO_ESCO) + continue; + num_codecs++; + for (i = 0, caps = c->caps; i < c->num_caps; i++) { + buf_len += 1 + caps->len; + caps = (void *)&caps->data[caps->len]; + } + buf_len += sizeof(struct bt_codec); + } + hci_dev_unlock(hdev); + + buf_len += sizeof(struct bt_codecs); + if (buf_len > len) { + hci_dev_put(hdev); + err = -ENOBUFS; + break; + } + ptr = optval; + + if (put_user(num_codecs, ptr)) { + hci_dev_put(hdev); + err = -EFAULT; + break; + } + ptr += sizeof(num_codecs); + + /* Iterate all the codecs supported over SCO and populate + * codec data + */ + hci_dev_lock(hdev); + list_for_each_entry(c, &hdev->local_codecs, list) { + if (c->transport != HCI_TRANSPORT_SCO_ESCO) + continue; + + codec.id = c->id; + codec.cid = c->cid; + codec.vid = c->vid; + err = hdev->get_data_path_id(hdev, &codec.data_path); + if (err < 0) + break; + codec.num_caps = c->num_caps; + if (copy_to_user(ptr, &codec, sizeof(codec))) { + err = -EFAULT; + break; + } + ptr += sizeof(codec); + + /* find codec capabilities data length */ + len = 0; + for (i = 0, caps = c->caps; i < c->num_caps; i++) { + len += 1 + caps->len; + caps = (void *)&caps->data[caps->len]; + } + + /* copy codec capabilities data */ + if (len && copy_to_user(ptr, c->caps, len)) { + err = -EFAULT; + break; + } + ptr += len; + } + + if (!err && put_user(buf_len, optlen)) + err = -EFAULT; + + hci_dev_unlock(hdev); + hci_dev_put(hdev); + + break; + default: err = -ENOPROTOOPT; break; diff --git a/net/bpf/Makefile b/net/bpf/Makefile index 1c0a98d8c28f08..1ebe270bde2383 100644 --- a/net/bpf/Makefile +++ b/net/bpf/Makefile @@ -1,2 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only 
obj-$(CONFIG_BPF_SYSCALL) := test_run.o +ifeq ($(CONFIG_BPF_JIT),y) +obj-$(CONFIG_BPF_SYSCALL) += bpf_dummy_struct_ops.o +endif diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c new file mode 100644 index 00000000000000..fbc896323bec91 --- /dev/null +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021. Huawei Technologies Co., Ltd + */ +#include +#include +#include +#include + +extern struct bpf_struct_ops bpf_bpf_dummy_ops; + +/* A common type for test_N with return value in bpf_dummy_ops */ +typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...); + +struct bpf_dummy_ops_test_args { + u64 args[MAX_BPF_FUNC_ARGS]; + struct bpf_dummy_ops_state state; +}; + +static struct bpf_dummy_ops_test_args * +dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr) +{ + __u32 size_in; + struct bpf_dummy_ops_test_args *args; + void __user *ctx_in; + void __user *u_state; + + size_in = kattr->test.ctx_size_in; + if (size_in != sizeof(u64) * nr) + return ERR_PTR(-EINVAL); + + args = kzalloc(sizeof(*args), GFP_KERNEL); + if (!args) + return ERR_PTR(-ENOMEM); + + ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + if (copy_from_user(args->args, ctx_in, size_in)) + goto out; + + /* args[0] is 0 means state argument of test_N will be NULL */ + u_state = u64_to_user_ptr(args->args[0]); + if (u_state && copy_from_user(&args->state, u_state, + sizeof(args->state))) + goto out; + + return args; +out: + kfree(args); + return ERR_PTR(-EFAULT); +} + +static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args) +{ + void __user *u_state; + + u_state = u64_to_user_ptr(args->args[0]); + if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state))) + return -EFAULT; + + return 0; +} + +static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args) +{ + dummy_ops_test_ret_fn test = (void *)image; + struct bpf_dummy_ops_state *state = NULL; + + /* state needs to be NULL if args[0] is 0 */ + if (args->args[0]) + state = &args->state; + return test(state, args->args[1], args->args[2], + args->args[3], args->args[4]); +} + +int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops; + const struct btf_type *func_proto; + struct bpf_dummy_ops_test_args *args; + struct bpf_tramp_progs *tprogs; + void *image = NULL; + unsigned int op_idx; + int prog_ret; + int err; + + if (prog->aux->attach_btf_id != st_ops->type_id) + return -EOPNOTSUPP; + + func_proto = prog->aux->attach_func_proto; + args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto)); + if (IS_ERR(args)) + return PTR_ERR(args); + + tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); + if (!tprogs) { + err = -ENOMEM; + goto out; + } + + image = bpf_jit_alloc_exec(PAGE_SIZE); + if (!image) { + err = -ENOMEM; + goto out; + } + set_vm_flush_reset_perms(image); + + op_idx = prog->expected_attach_type; + err = bpf_struct_ops_prepare_trampoline(tprogs, prog, + &st_ops->func_models[op_idx], + image, image + PAGE_SIZE); + if (err < 0) + goto out; + + set_memory_ro((long)image, 1); + set_memory_x((long)image, 1); + prog_ret = dummy_ops_call_op(image, args); + + err = dummy_ops_copy_args(args); + if (err) + goto out; + if (put_user(prog_ret, &uattr->test.retval)) + err = -EFAULT; +out: + kfree(args); + bpf_jit_free_exec(image); + kfree(tprogs); + return err; +} + +static int bpf_dummy_init(struct btf 
*btf) +{ + return 0; +} + +static bool bpf_dummy_ops_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log, + const struct btf *btf, + const struct btf_type *t, int off, + int size, enum bpf_access_type atype, + u32 *next_btf_id) +{ + const struct btf_type *state; + s32 type_id; + int err; + + type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state", + BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + + state = btf_type_by_id(btf, type_id); + if (t != state) { + bpf_log(log, "only access to bpf_dummy_ops_state is supported\n"); + return -EACCES; + } + + err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id); + if (err < 0) + return err; + + return atype == BPF_READ ? err : NOT_INIT; +} + +static const struct bpf_verifier_ops bpf_dummy_verifier_ops = { + .is_valid_access = bpf_dummy_ops_is_valid_access, + .btf_struct_access = bpf_dummy_ops_btf_struct_access, +}; + +static int bpf_dummy_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return -EOPNOTSUPP; +} + +static int bpf_dummy_reg(void *kdata) +{ + return -EOPNOTSUPP; +} + +static void bpf_dummy_unreg(void *kdata) +{ +} + +struct bpf_struct_ops bpf_bpf_dummy_ops = { + .verifier_ops = &bpf_dummy_verifier_ops, + .init = bpf_dummy_init, + .init_member = bpf_dummy_init_member, + .reg = bpf_dummy_reg, + .unreg = bpf_dummy_unreg, + .name = "bpf_dummy_ops", +}; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index b5f4ef35357c80..46dd9575596724 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017 Facebook */ #include +#include #include #include #include @@ -241,9 +242,11 @@ BTF_ID(func, bpf_kfunc_call_test2) BTF_ID(func, bpf_kfunc_call_test3) BTF_SET_END(test_sk_kfunc_ids) -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) +bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner) { - return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id); + if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id)) + return true; + return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner); } static void *bpf_test_init(const union bpf_attr *kattr, u32 size, @@ -355,13 +358,9 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, return -EINVAL; if (ctx_size_in) { - info.ctx = kzalloc(ctx_size_in, GFP_USER); - if (!info.ctx) - return -ENOMEM; - if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) { - err = -EFAULT; - goto out; - } + info.ctx = memdup_user(ctx_in, ctx_size_in); + if (IS_ERR(info.ctx)) + return PTR_ERR(info.ctx); } else { info.ctx = NULL; } @@ -389,7 +388,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32))) err = -EFAULT; -out: kfree(info.ctx); return err; } @@ -483,11 +481,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) return -EINVAL; /* priority is allowed */ - - if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority), - offsetof(struct __sk_buff, ifindex))) - return -EINVAL; - + /* ingress_ifindex is allowed */ /* ifindex is allowed */ if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex), @@ -511,11 +505,18 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) /* gso_size is allowed */ if (!range_is_zero(__skb, 
offsetofend(struct __sk_buff, gso_size), + offsetof(struct __sk_buff, hwtstamp))) + return -EINVAL; + + /* hwtstamp is allowed */ + + if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp), sizeof(struct __sk_buff))) return -EINVAL; skb->mark = __skb->mark; skb->priority = __skb->priority; + skb->skb_iif = __skb->ingress_ifindex; skb->tstamp = __skb->tstamp; memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN); @@ -532,6 +533,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) return -EINVAL; skb_shinfo(skb)->gso_segs = __skb->gso_segs; skb_shinfo(skb)->gso_size = __skb->gso_size; + skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp; return 0; } @@ -545,11 +547,13 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb) __skb->mark = skb->mark; __skb->priority = skb->priority; + __skb->ingress_ifindex = skb->skb_iif; __skb->ifindex = skb->dev->ifindex; __skb->tstamp = skb->tstamp; memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN); __skb->wire_len = cb->pkt_len; __skb->gso_segs = skb_shinfo(skb)->gso_segs; + __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp; } static struct proto bpf_dummy_proto = { @@ -801,7 +805,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (ret) goto free_data; - bpf_prog_change_xdp(NULL, prog); + if (repeat > 1) + bpf_prog_change_xdp(NULL, prog); ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); /* We convert the xdp_buff back to an xdp_md before checking the return * code so the reference count of any held netdevice will be decremented @@ -822,7 +827,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, sizeof(struct xdp_md)); out: - bpf_prog_change_xdp(prog, NULL); + if (repeat > 1) + bpf_prog_change_xdp(prog, NULL); free_data: kfree(data); free_ctx: @@ -1041,13 +1047,9 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, return -EINVAL; if (ctx_size_in) { - ctx = kzalloc(ctx_size_in, GFP_USER); - if (!ctx) - return -ENOMEM; - if (copy_from_user(ctx, ctx_in, ctx_size_in)) { - err = -EFAULT; - goto out; - } + ctx = memdup_user(ctx_in, ctx_size_in); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); } rcu_read_lock_trace(); diff --git a/net/bridge/br.c b/net/bridge/br.c index d3a32c6813e028..1fac72cc617ff7 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -36,7 +36,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v bool changed_addr; int err; - if (dev->priv_flags & IFF_EBRIDGE) { + if (netif_is_bridge_master(dev)) { err = br_vlan_bridge_event(dev, event, ptr); if (err) return notifier_from_errno(err); @@ -349,7 +349,7 @@ static void __net_exit br_net_exit(struct net *net) rtnl_lock(); for_each_netdev(net, dev) - if (dev->priv_flags & IFF_EBRIDGE) + if (netif_is_bridge_master(dev)) br_dev_delete(dev, &list); unregister_netdevice_many(&list); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 46812b659710a0..6ccda68bd473b4 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -32,10 +32,6 @@ static const struct rhashtable_params br_fdb_rht_params = { }; static struct kmem_cache *br_fdb_cache __read_mostly; -static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, - const unsigned char *addr, u16 vid); -static void fdb_notify(struct net_bridge *br, - const struct net_bridge_fdb_entry *, int, bool); int __init br_fdb_init(void) { @@ -87,6 +83,128 @@ static void fdb_rcu_free(struct rcu_head *head) kmem_cache_free(br_fdb_cache, ent); } +static int 
fdb_to_nud(const struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb) +{ + if (test_bit(BR_FDB_LOCAL, &fdb->flags)) + return NUD_PERMANENT; + else if (test_bit(BR_FDB_STATIC, &fdb->flags)) + return NUD_NOARP; + else if (has_expired(br, fdb)) + return NUD_STALE; + else + return NUD_REACHABLE; +} + +static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb, + u32 portid, u32 seq, int type, unsigned int flags) +{ + const struct net_bridge_port *dst = READ_ONCE(fdb->dst); + unsigned long now = jiffies; + struct nda_cacheinfo ci; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); + if (nlh == NULL) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = 0; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex; + ndm->ndm_state = fdb_to_nud(br, fdb); + + if (test_bit(BR_FDB_OFFLOADED, &fdb->flags)) + ndm->ndm_flags |= NTF_OFFLOADED; + if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) + ndm->ndm_flags |= NTF_EXT_LEARNED; + if (test_bit(BR_FDB_STICKY, &fdb->flags)) + ndm->ndm_flags |= NTF_STICKY; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr)) + goto nla_put_failure; + if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex)) + goto nla_put_failure; + ci.ndm_used = jiffies_to_clock_t(now - fdb->used); + ci.ndm_confirmed = 0; + ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); + ci.ndm_refcnt = 0; + if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; + + if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), + &fdb->key.vlan_id)) + goto nla_put_failure; + + if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { + struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); + u8 notify_bits = FDB_NOTIFY_BIT; + + if (!nest) + goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + notify_bits |= FDB_NOTIFY_INACTIVE_BIT; + + if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { + nla_nest_cancel(skb, nest); + goto nla_put_failure; + } + + nla_nest_end(skb, nest); + } + + nlmsg_end(skb, nlh); + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static inline size_t fdb_nlmsg_size(void) +{ + return NLMSG_ALIGN(sizeof(struct ndmsg)) + + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + + nla_total_size(sizeof(u16)) /* NDA_VLAN */ + + nla_total_size(sizeof(struct nda_cacheinfo)) + + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ + + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ +} + +static void fdb_notify(struct net_bridge *br, + const struct net_bridge_fdb_entry *fdb, int type, + bool swdev_notify) +{ + struct net *net = dev_net(br->dev); + struct sk_buff *skb; + int err = -ENOBUFS; + + if (swdev_notify) + br_switchdev_fdb_notify(br, fdb, type); + + skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); + if (skb == NULL) + goto errout; + + err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0); + if (err < 0) { + /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); + return; +errout: + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); +} + static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl, const unsigned char *addr, __u16 vid) @@ -257,6 +375,66 @@ void br_fdb_find_delete_local(struct net_bridge *br, 
spin_unlock_bh(&br->hash_lock); } +static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, + struct net_bridge_port *source, + const unsigned char *addr, + __u16 vid, + unsigned long flags) +{ + struct net_bridge_fdb_entry *fdb; + int err; + + fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC); + if (!fdb) + return NULL; + + memcpy(fdb->key.addr.addr, addr, ETH_ALEN); + WRITE_ONCE(fdb->dst, source); + fdb->key.vlan_id = vid; + fdb->flags = flags; + fdb->updated = fdb->used = jiffies; + err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode, + br_fdb_rht_params); + if (err) { + kmem_cache_free(br_fdb_cache, fdb); + return NULL; + } + + hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list); + + return fdb; +} + +static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source, + const unsigned char *addr, u16 vid) +{ + struct net_bridge_fdb_entry *fdb; + + if (!is_valid_ether_addr(addr)) + return -EINVAL; + + fdb = br_fdb_find(br, addr, vid); + if (fdb) { + /* it is okay to have multiple ports with same + * address, just use the first one. + */ + if (test_bit(BR_FDB_LOCAL, &fdb->flags)) + return 0; + br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n", + source ? source->dev->name : br->dev->name, addr, vid); + fdb_delete(br, fdb, true); + } + + fdb = fdb_create(br, source, addr, vid, + BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC)); + if (!fdb) + return -ENOMEM; + + fdb_add_hw_addr(br, addr); + fdb_notify(br, fdb, RTM_NEWNEIGH, true); + return 0; +} + void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) { struct net_bridge_vlan_group *vg; @@ -283,7 +461,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) insert: /* insert new address, may fail if invalid address or dup. */ - fdb_insert(br, p, newaddr, 0); + fdb_add_local(br, p, newaddr, 0); if (!vg || !vg->num_vlans) goto done; @@ -293,7 +471,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) * from under us. 
*/ list_for_each_entry(v, &vg->vlan_list, vlist) - fdb_insert(br, p, newaddr, v->vid); + fdb_add_local(br, p, newaddr, v->vid); done: spin_unlock_bh(&br->hash_lock); @@ -313,7 +491,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) fdb_delete_local(br, NULL, f); - fdb_insert(br, NULL, newaddr, 0); + fdb_add_local(br, NULL, newaddr, 0); vg = br_vlan_group(br); if (!vg || !vg->num_vlans) goto out; @@ -328,7 +506,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) if (f && test_bit(BR_FDB_LOCAL, &f->flags) && !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) fdb_delete_local(br, NULL, f); - fdb_insert(br, NULL, newaddr, v->vid); + fdb_add_local(br, NULL, newaddr, v->vid); } out: spin_unlock_bh(&br->hash_lock); @@ -503,71 +681,14 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, return num; } -static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, - struct net_bridge_port *source, - const unsigned char *addr, - __u16 vid, - unsigned long flags) -{ - struct net_bridge_fdb_entry *fdb; - - fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC); - if (fdb) { - memcpy(fdb->key.addr.addr, addr, ETH_ALEN); - WRITE_ONCE(fdb->dst, source); - fdb->key.vlan_id = vid; - fdb->flags = flags; - fdb->updated = fdb->used = jiffies; - if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, - &fdb->rhnode, - br_fdb_rht_params)) { - kmem_cache_free(br_fdb_cache, fdb); - fdb = NULL; - } else { - hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list); - } - } - return fdb; -} - -static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, - const unsigned char *addr, u16 vid) -{ - struct net_bridge_fdb_entry *fdb; - - if (!is_valid_ether_addr(addr)) - return -EINVAL; - - fdb = br_fdb_find(br, addr, vid); - if (fdb) { - /* it is okay to have multiple ports with same - * address, just use the first one. - */ - if (test_bit(BR_FDB_LOCAL, &fdb->flags)) - return 0; - br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n", - source ? 
source->dev->name : br->dev->name, addr, vid); - fdb_delete(br, fdb, true); - } - - fdb = fdb_create(br, source, addr, vid, - BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC)); - if (!fdb) - return -ENOMEM; - - fdb_add_hw_addr(br, addr); - fdb_notify(br, fdb, RTM_NEWNEIGH, true); - return 0; -} - /* Add entry for local address of interface */ -int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, - const unsigned char *addr, u16 vid) +int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source, + const unsigned char *addr, u16 vid) { int ret; spin_lock_bh(&br->hash_lock); - ret = fdb_insert(br, source, addr, vid); + ret = fdb_add_local(br, source, addr, vid); spin_unlock_bh(&br->hash_lock); return ret; } @@ -638,182 +759,6 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, } } -static int fdb_to_nud(const struct net_bridge *br, - const struct net_bridge_fdb_entry *fdb) -{ - if (test_bit(BR_FDB_LOCAL, &fdb->flags)) - return NUD_PERMANENT; - else if (test_bit(BR_FDB_STATIC, &fdb->flags)) - return NUD_NOARP; - else if (has_expired(br, fdb)) - return NUD_STALE; - else - return NUD_REACHABLE; -} - -static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, - const struct net_bridge_fdb_entry *fdb, - u32 portid, u32 seq, int type, unsigned int flags) -{ - const struct net_bridge_port *dst = READ_ONCE(fdb->dst); - unsigned long now = jiffies; - struct nda_cacheinfo ci; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); - if (nlh == NULL) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = 0; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex; - ndm->ndm_state = fdb_to_nud(br, fdb); - - if (test_bit(BR_FDB_OFFLOADED, &fdb->flags)) - ndm->ndm_flags |= NTF_OFFLOADED; - if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) - ndm->ndm_flags |= NTF_EXT_LEARNED; - if (test_bit(BR_FDB_STICKY, &fdb->flags)) - ndm->ndm_flags |= NTF_STICKY; - - if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr)) - goto nla_put_failure; - if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex)) - goto nla_put_failure; - ci.ndm_used = jiffies_to_clock_t(now - fdb->used); - ci.ndm_confirmed = 0; - ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); - ci.ndm_refcnt = 0; - if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) - goto nla_put_failure; - - if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), - &fdb->key.vlan_id)) - goto nla_put_failure; - - if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { - struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); - u8 notify_bits = FDB_NOTIFY_BIT; - - if (!nest) - goto nla_put_failure; - if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) - notify_bits |= FDB_NOTIFY_INACTIVE_BIT; - - if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { - nla_nest_cancel(skb, nest); - goto nla_put_failure; - } - - nla_nest_end(skb, nest); - } - - nlmsg_end(skb, nlh); - return 0; - -nla_put_failure: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; -} - -static inline size_t fdb_nlmsg_size(void) -{ - return NLMSG_ALIGN(sizeof(struct ndmsg)) - + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ - + nla_total_size(sizeof(u32)) /* NDA_MASTER */ - + nla_total_size(sizeof(u16)) /* NDA_VLAN */ - + nla_total_size(sizeof(struct nda_cacheinfo)) - + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ - + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ -} - -static int 
br_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb, - const struct net_bridge_fdb_entry *fdb, - unsigned long action, const void *ctx) -{ - const struct net_bridge_port *p = READ_ONCE(fdb->dst); - struct switchdev_notifier_fdb_info item; - int err; - - item.addr = fdb->key.addr.addr; - item.vid = fdb->key.vlan_id; - item.added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); - item.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags); - item.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags); - item.info.dev = (!p || item.is_local) ? br->dev : p->dev; - item.info.ctx = ctx; - - err = nb->notifier_call(nb, action, &item); - return notifier_to_errno(err); -} - -int br_fdb_replay(const struct net_device *br_dev, const void *ctx, bool adding, - struct notifier_block *nb) -{ - struct net_bridge_fdb_entry *fdb; - struct net_bridge *br; - unsigned long action; - int err = 0; - - if (!nb) - return 0; - - if (!netif_is_bridge_master(br_dev)) - return -EINVAL; - - br = netdev_priv(br_dev); - - if (adding) - action = SWITCHDEV_FDB_ADD_TO_DEVICE; - else - action = SWITCHDEV_FDB_DEL_TO_DEVICE; - - rcu_read_lock(); - - hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) { - err = br_fdb_replay_one(br, nb, fdb, action, ctx); - if (err) - break; - } - - rcu_read_unlock(); - - return err; -} - -static void fdb_notify(struct net_bridge *br, - const struct net_bridge_fdb_entry *fdb, int type, - bool swdev_notify) -{ - struct net *net = dev_net(br->dev); - struct sk_buff *skb; - int err = -ENOBUFS; - - if (swdev_notify) - br_switchdev_fdb_notify(br, fdb, type); - - skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); - if (skb == NULL) - goto errout; - - err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0); - if (err < 0) { - /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(skb); - goto errout; - } - rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); - return; -errout: - rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); -} - /* Dump information about entries, in response to GETNEIGH */ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, @@ -825,7 +770,7 @@ int br_fdb_dump(struct sk_buff *skb, struct net_bridge_fdb_entry *f; int err = 0; - if (!(dev->priv_flags & IFF_EBRIDGE)) + if (!netif_is_bridge_master(dev)) return err; if (!filter_dev) { @@ -1076,7 +1021,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } - if (dev->priv_flags & IFF_EBRIDGE) { + if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); vg = br_vlan_group(br); } else { @@ -1173,7 +1118,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_bridge *br; int err; - if (dev->priv_flags & IFF_EBRIDGE) { + if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); vg = br_vlan_group(br); } else { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 4a02f8bb278a12..c1183fef1f21f7 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -471,7 +471,7 @@ int br_del_bridge(struct net *net, const char *name) if (dev == NULL) ret = -ENXIO; /* Could not find device */ - else if (!(dev->priv_flags & IFF_EBRIDGE)) { + else if (!netif_is_bridge_master(dev)) { /* Attempt to delete non bridge device! 
*/ ret = -EPERM; } @@ -670,7 +670,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, else netdev_set_rx_headroom(dev, br_hr); - if (br_fdb_insert(br, p, dev->dev_addr, 0)) + if (br_fdb_add_local(br, p, dev->dev_addr, 0)) netdev_err(dev, "failed insert local address bridge forwarding table\n"); if (br->dev->addr_assign_type != NET_ADDR_SET) { diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 793b0db9d9a36f..db4ab2c2ce18b0 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -26,7 +26,7 @@ static int get_bridge_ifindices(struct net *net, int *indices, int num) for_each_netdev_rcu(net, dev) { if (i >= num) break; - if (dev->priv_flags & IFF_EBRIDGE) + if (netif_is_bridge_master(dev)) indices[i++] = dev->ifindex; } rcu_read_unlock(); @@ -71,7 +71,8 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, num = br_fdb_fillbuf(br, buf, maxnum, offset); if (num > 0) { - if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry))) + if (copy_to_user(userbuf, buf, + array_size(num, sizeof(struct __fdb_entry)))) num = -EFAULT; } kfree(buf); @@ -188,7 +189,7 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user return -ENOMEM; get_port_ifindices(br, indices, num); - if (copy_to_user(argp, indices, num * sizeof(int))) + if (copy_to_user(argp, indices, array_size(num, sizeof(int)))) num = -EFAULT; kfree(indices); return num; @@ -336,7 +337,8 @@ static int old_deviceless(struct net *net, void __user *uarg) args[2] = get_bridge_ifindices(net, indices, args[2]); - ret = copy_to_user(uarg, indices, args[2]*sizeof(int)) + ret = copy_to_user(uarg, indices, + array_size(args[2], sizeof(int))) ? -EFAULT : args[2]; kfree(indices); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 0281453f776637..4556d913955bf1 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -422,7 +422,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->seq = net->dev_base_seq; for_each_netdev_rcu(net, dev) { - if (dev->priv_flags & IFF_EBRIDGE) { + if (netif_is_bridge_master(dev)) { struct net_bridge *br = netdev_priv(dev); struct br_port_msg *bpm; @@ -552,252 +552,16 @@ static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg) return nlmsg_size; } -struct br_mdb_complete_info { - struct net_bridge_port *port; - struct br_ip ip; -}; - -static void br_mdb_complete(struct net_device *dev, int err, void *priv) -{ - struct br_mdb_complete_info *data = priv; - struct net_bridge_port_group __rcu **pp; - struct net_bridge_port_group *p; - struct net_bridge_mdb_entry *mp; - struct net_bridge_port *port = data->port; - struct net_bridge *br = port->br; - - if (err) - goto err; - - spin_lock_bh(&br->multicast_lock); - mp = br_mdb_ip_get(br, &data->ip); - if (!mp) - goto out; - for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; - pp = &p->next) { - if (p->key.port != port) - continue; - p->flags |= MDB_PG_FLAGS_OFFLOAD; - } -out: - spin_unlock_bh(&br->multicast_lock); -err: - kfree(priv); -} - -static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb, - const struct net_bridge_mdb_entry *mp) -{ - if (mp->addr.proto == htons(ETH_P_IP)) - ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr); -#if IS_ENABLED(CONFIG_IPV6) - else if (mp->addr.proto == htons(ETH_P_IPV6)) - ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr); -#endif - else - ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr); - - mdb->vid = mp->addr.vid; -} - -static int br_mdb_replay_one(struct notifier_block *nb, struct 
net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - unsigned long action, const void *ctx, - struct netlink_ext_ack *extack) -{ - struct switchdev_notifier_port_obj_info obj_info = { - .info = { - .dev = dev, - .extack = extack, - .ctx = ctx, - }, - .obj = &mdb->obj, - }; - int err; - - err = nb->notifier_call(nb, action, &obj_info); - return notifier_to_errno(err); -} - -static int br_mdb_queue_one(struct list_head *mdb_list, - enum switchdev_obj_id id, - const struct net_bridge_mdb_entry *mp, - struct net_device *orig_dev) -{ - struct switchdev_obj_port_mdb *mdb; - - mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC); - if (!mdb) - return -ENOMEM; - - mdb->obj.id = id; - mdb->obj.orig_dev = orig_dev; - br_switchdev_mdb_populate(mdb, mp); - list_add_tail(&mdb->obj.list, mdb_list); - - return 0; -} - -int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - const struct net_bridge_mdb_entry *mp; - struct switchdev_obj *obj, *tmp; - struct net_bridge *br; - unsigned long action; - LIST_HEAD(mdb_list); - int err = 0; - - ASSERT_RTNL(); - - if (!nb) - return 0; - - if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev)) - return -EINVAL; - - br = netdev_priv(br_dev); - - if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) - return 0; - - /* We cannot walk over br->mdb_list protected just by the rtnl_mutex, - * because the write-side protection is br->multicast_lock. But we - * need to emulate the [ blocking ] calling context of a regular - * switchdev event, so since both br->multicast_lock and RCU read side - * critical sections are atomic, we have no choice but to pick the RCU - * read side lock, queue up all our events, leave the critical section - * and notify switchdev from blocking context. 
- */ - rcu_read_lock(); - - hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { - struct net_bridge_port_group __rcu * const *pp; - const struct net_bridge_port_group *p; - - if (mp->host_joined) { - err = br_mdb_queue_one(&mdb_list, - SWITCHDEV_OBJ_ID_HOST_MDB, - mp, br_dev); - if (err) { - rcu_read_unlock(); - goto out_free_mdb; - } - } - - for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; - pp = &p->next) { - if (p->key.port->dev != dev) - continue; - - err = br_mdb_queue_one(&mdb_list, - SWITCHDEV_OBJ_ID_PORT_MDB, - mp, dev); - if (err) { - rcu_read_unlock(); - goto out_free_mdb; - } - } - } - - rcu_read_unlock(); - - if (adding) - action = SWITCHDEV_PORT_OBJ_ADD; - else - action = SWITCHDEV_PORT_OBJ_DEL; - - list_for_each_entry(obj, &mdb_list, list) { - err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj), - action, ctx, extack); - if (err) - goto out_free_mdb; - } - -out_free_mdb: - list_for_each_entry_safe(obj, tmp, &mdb_list, list) { - list_del(&obj->list); - kfree(SWITCHDEV_OBJ_PORT_MDB(obj)); - } - - return err; -} - -static void br_mdb_switchdev_host_port(struct net_device *dev, - struct net_device *lower_dev, - struct net_bridge_mdb_entry *mp, - int type) -{ - struct switchdev_obj_port_mdb mdb = { - .obj = { - .id = SWITCHDEV_OBJ_ID_HOST_MDB, - .flags = SWITCHDEV_F_DEFER, - .orig_dev = dev, - }, - }; - - br_switchdev_mdb_populate(&mdb, mp); - - switch (type) { - case RTM_NEWMDB: - switchdev_port_obj_add(lower_dev, &mdb.obj, NULL); - break; - case RTM_DELMDB: - switchdev_port_obj_del(lower_dev, &mdb.obj); - break; - } -} - -static void br_mdb_switchdev_host(struct net_device *dev, - struct net_bridge_mdb_entry *mp, int type) -{ - struct net_device *lower_dev; - struct list_head *iter; - - netdev_for_each_lower_dev(dev, lower_dev, iter) - br_mdb_switchdev_host_port(dev, lower_dev, mp, type); -} - void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp, struct net_bridge_port_group *pg, int type) { - struct br_mdb_complete_info *complete_info; - struct switchdev_obj_port_mdb mdb = { - .obj = { - .id = SWITCHDEV_OBJ_ID_PORT_MDB, - .flags = SWITCHDEV_F_DEFER, - }, - }; struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; - if (pg) { - br_switchdev_mdb_populate(&mdb, mp); - - mdb.obj.orig_dev = pg->key.port->dev; - switch (type) { - case RTM_NEWMDB: - complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); - if (!complete_info) - break; - complete_info->port = pg->key.port; - complete_info->ip = mp->addr; - mdb.obj.complete_priv = complete_info; - mdb.obj.complete = br_mdb_complete; - if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL)) - kfree(complete_info); - break; - case RTM_DELMDB: - switchdev_port_obj_del(pg->key.port->dev, &mdb.obj); - break; - } - } else { - br_mdb_switchdev_host(dev, mp, type); - } + br_switchdev_mdb_notify(dev, mp, pg, type); skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC); if (!skb) @@ -1016,7 +780,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh, return -ENODEV; } - if (!(dev->priv_flags & IFF_EBRIDGE)) { + if (!netif_is_bridge_master(dev)) { NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge"); return -EOPNOTSUPP; } diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 8edfb98ae1d583..b5af68c105a83a 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -968,7 +968,7 @@ static int brnf_device_event(struct notifier_block *unused, unsigned long event, struct net *net; int ret; - if (event != 
NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE)) + if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev)) return NOTIFY_DONE; ASSERT_RTNL(); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5c6c4305ed2358..0c8b5f1a15bc0b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -106,7 +106,7 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev, p = br_port_get_check_rcu(dev); if (p) vg = nbp_vlan_group_rcu(p); - } else if (dev->priv_flags & IFF_EBRIDGE) { + } else if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); vg = br_vlan_group_rcu(br); } @@ -1050,7 +1050,7 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) p = br_port_get_rtnl(dev); /* We want to accept dev as bridge itself as well */ - if (!p && !(dev->priv_flags & IFF_EBRIDGE)) + if (!p && !netif_is_bridge_master(dev)) return -EINVAL; err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 37ca76406f1e82..c0efd697865abe 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -767,8 +767,8 @@ struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br, int br_fdb_test_addr(struct net_device *dev, unsigned char *addr); int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count, unsigned long off); -int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, - const unsigned char *addr, u16 vid); +int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source, + const unsigned char *addr, u16 vid); void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags); @@ -792,8 +792,6 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, bool swdev_notify); void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid, bool offloaded); -int br_fdb_replay(const struct net_device *br_dev, const void *ctx, bool adding, - struct notifier_block *nb); /* br_forward.c */ enum br_pkt_type { @@ -958,9 +956,6 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, struct netlink_ext_ack *extack); bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on); -int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack); int br_rports_fill_info(struct sk_buff *skb, const struct net_bridge_mcast *brmctx); int br_multicast_dump_querier_state(struct sk_buff *skb, @@ -1396,14 +1391,6 @@ static inline bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, return false; } -static inline int br_mdb_replay(struct net_device *br_dev, - struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} - static inline bool br_multicast_ctx_options_equal(const struct net_bridge_mcast *brmctx1, const struct net_bridge_mcast *brmctx2) @@ -1461,9 +1448,6 @@ void br_vlan_notify(const struct net_bridge *br, const struct net_bridge_port *p, u16 vid, u16 vid_range, int cmd); -int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack); bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *range_end); @@ -1710,13 +1694,11 @@ static inline 
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, return true; } -static inline int br_vlan_replay(struct net_device *br_dev, - struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) +static inline u16 br_vlan_flags(const struct net_bridge_vlan *v, u16 pvid) { - return -EOPNOTSUPP; + return 0; } + #endif /* br_vlan_options.c */ @@ -1911,11 +1893,13 @@ static inline int br_cfm_status_fill_info(struct sk_buff *skb, static inline int br_cfm_mep_count(struct net_bridge *br, u32 *count) { + *count = 0; return -EOPNOTSUPP; } static inline int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count) { + *count = 0; return -EOPNOTSUPP; } #endif @@ -1989,6 +1973,10 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p, struct netlink_ext_ack *extack); void br_switchdev_fdb_notify(struct net_bridge *br, const struct net_bridge_fdb_entry *fdb, int type); +void br_switchdev_mdb_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg, + int type); int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags, struct netlink_ext_ack *extack); int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid); @@ -2075,6 +2063,13 @@ br_switchdev_fdb_notify(struct net_bridge *br, { } +static inline void br_switchdev_mdb_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg, + int type) +{ +} + static inline void br_switchdev_frame_unmark(struct sk_buff *skb) { } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index ba55851fe132c8..75204d36d7f906 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -233,7 +233,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN); memcpy(br->bridge_id.addr, addr, ETH_ALEN); - memcpy(br->dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(br->dev, addr); list_for_each_entry(p, &br->port_list, list) { if (ether_addr_equal(p->designated_bridge.addr, oldaddr)) diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c index 6bf518d78f0292..f8fbaaa7c50167 100644 --- a/net/bridge/br_switchdev.c +++ b/net/bridge/br_switchdev.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include "br_private.h" @@ -122,28 +123,38 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p, return 0; } +static void br_switchdev_fdb_populate(struct net_bridge *br, + struct switchdev_notifier_fdb_info *item, + const struct net_bridge_fdb_entry *fdb, + const void *ctx) +{ + const struct net_bridge_port *p = READ_ONCE(fdb->dst); + + item->addr = fdb->key.addr.addr; + item->vid = fdb->key.vlan_id; + item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); + item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags); + item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags); + item->info.dev = (!p || item->is_local) ? br->dev : p->dev; + item->info.ctx = ctx; +} + void br_switchdev_fdb_notify(struct net_bridge *br, const struct net_bridge_fdb_entry *fdb, int type) { - const struct net_bridge_port *dst = READ_ONCE(fdb->dst); - struct switchdev_notifier_fdb_info info = { - .addr = fdb->key.addr.addr, - .vid = fdb->key.vlan_id, - .added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags), - .is_local = test_bit(BR_FDB_LOCAL, &fdb->flags), - .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags), - }; - struct net_device *dev = (!dst || info.is_local) ? 
br->dev : dst->dev; + struct switchdev_notifier_fdb_info item; + + br_switchdev_fdb_populate(br, &item, fdb, NULL); switch (type) { case RTM_DELNEIGH: call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE, - dev, &info.info, NULL); + item.info.dev, &item.info, NULL); break; case RTM_NEWNEIGH: call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, - dev, &info.info, NULL); + item.info.dev, &item.info, NULL); break; } } @@ -270,6 +281,397 @@ static void nbp_switchdev_del(struct net_bridge_port *p) } } +static int +br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb, + const struct net_bridge_fdb_entry *fdb, + unsigned long action, const void *ctx) +{ + struct switchdev_notifier_fdb_info item; + int err; + + br_switchdev_fdb_populate(br, &item, fdb, ctx); + + err = nb->notifier_call(nb, action, &item); + return notifier_to_errno(err); +} + +static int +br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx, + bool adding, struct notifier_block *nb) +{ + struct net_bridge_fdb_entry *fdb; + struct net_bridge *br; + unsigned long action; + int err = 0; + + if (!nb) + return 0; + + if (!netif_is_bridge_master(br_dev)) + return -EINVAL; + + br = netdev_priv(br_dev); + + if (adding) + action = SWITCHDEV_FDB_ADD_TO_DEVICE; + else + action = SWITCHDEV_FDB_DEL_TO_DEVICE; + + rcu_read_lock(); + + hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) { + err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx); + if (err) + break; + } + + rcu_read_unlock(); + + return err; +} + +static int +br_switchdev_vlan_replay_one(struct notifier_block *nb, + struct net_device *dev, + struct switchdev_obj_port_vlan *vlan, + const void *ctx, unsigned long action, + struct netlink_ext_ack *extack) +{ + struct switchdev_notifier_port_obj_info obj_info = { + .info = { + .dev = dev, + .extack = extack, + .ctx = ctx, + }, + .obj = &vlan->obj, + }; + int err; + + err = nb->notifier_call(nb, action, &obj_info); + return notifier_to_errno(err); +} + +static int br_switchdev_vlan_replay(struct net_device *br_dev, + struct net_device *dev, + const void *ctx, bool adding, + struct notifier_block *nb, + struct netlink_ext_ack *extack) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + struct net_bridge_port *p; + struct net_bridge *br; + unsigned long action; + int err = 0; + u16 pvid; + + ASSERT_RTNL(); + + if (!nb) + return 0; + + if (!netif_is_bridge_master(br_dev)) + return -EINVAL; + + if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) + return -EINVAL; + + if (netif_is_bridge_master(dev)) { + br = netdev_priv(dev); + vg = br_vlan_group(br); + p = NULL; + } else { + p = br_port_get_rtnl(dev); + if (WARN_ON(!p)) + return -EINVAL; + vg = nbp_vlan_group(p); + br = p->br; + } + + if (!vg) + return 0; + + if (adding) + action = SWITCHDEV_PORT_OBJ_ADD; + else + action = SWITCHDEV_PORT_OBJ_DEL; + + pvid = br_get_pvid(vg); + + list_for_each_entry(v, &vg->vlan_list, vlist) { + struct switchdev_obj_port_vlan vlan = { + .obj.orig_dev = dev, + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .flags = br_vlan_flags(v, pvid), + .vid = v->vid, + }; + + if (!br_vlan_should_use(v)) + continue; + + err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx, + action, extack); + if (err) + return err; + } + + return err; +} + +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING +struct br_switchdev_mdb_complete_info { + struct net_bridge_port *port; + struct br_ip ip; +}; + +static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv) +{ + struct 
br_switchdev_mdb_complete_info *data = priv; + struct net_bridge_port_group __rcu **pp; + struct net_bridge_port_group *p; + struct net_bridge_mdb_entry *mp; + struct net_bridge_port *port = data->port; + struct net_bridge *br = port->br; + + if (err) + goto err; + + spin_lock_bh(&br->multicast_lock); + mp = br_mdb_ip_get(br, &data->ip); + if (!mp) + goto out; + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; + pp = &p->next) { + if (p->key.port != port) + continue; + p->flags |= MDB_PG_FLAGS_OFFLOAD; + } +out: + spin_unlock_bh(&br->multicast_lock); +err: + kfree(priv); +} + +static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb, + const struct net_bridge_mdb_entry *mp) +{ + if (mp->addr.proto == htons(ETH_P_IP)) + ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr); +#if IS_ENABLED(CONFIG_IPV6) + else if (mp->addr.proto == htons(ETH_P_IPV6)) + ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr); +#endif + else + ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr); + + mdb->vid = mp->addr.vid; +} + +static void br_switchdev_host_mdb_one(struct net_device *dev, + struct net_device *lower_dev, + struct net_bridge_mdb_entry *mp, + int type) +{ + struct switchdev_obj_port_mdb mdb = { + .obj = { + .id = SWITCHDEV_OBJ_ID_HOST_MDB, + .flags = SWITCHDEV_F_DEFER, + .orig_dev = dev, + }, + }; + + br_switchdev_mdb_populate(&mdb, mp); + + switch (type) { + case RTM_NEWMDB: + switchdev_port_obj_add(lower_dev, &mdb.obj, NULL); + break; + case RTM_DELMDB: + switchdev_port_obj_del(lower_dev, &mdb.obj); + break; + } +} + +static void br_switchdev_host_mdb(struct net_device *dev, + struct net_bridge_mdb_entry *mp, int type) +{ + struct net_device *lower_dev; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, lower_dev, iter) + br_switchdev_host_mdb_one(dev, lower_dev, mp, type); +} + +static int +br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + unsigned long action, const void *ctx, + struct netlink_ext_ack *extack) +{ + struct switchdev_notifier_port_obj_info obj_info = { + .info = { + .dev = dev, + .extack = extack, + .ctx = ctx, + }, + .obj = &mdb->obj, + }; + int err; + + err = nb->notifier_call(nb, action, &obj_info); + return notifier_to_errno(err); +} + +static int br_switchdev_mdb_queue_one(struct list_head *mdb_list, + enum switchdev_obj_id id, + const struct net_bridge_mdb_entry *mp, + struct net_device *orig_dev) +{ + struct switchdev_obj_port_mdb *mdb; + + mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC); + if (!mdb) + return -ENOMEM; + + mdb->obj.id = id; + mdb->obj.orig_dev = orig_dev; + br_switchdev_mdb_populate(mdb, mp); + list_add_tail(&mdb->obj.list, mdb_list); + + return 0; +} + +void br_switchdev_mdb_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg, + int type) +{ + struct br_switchdev_mdb_complete_info *complete_info; + struct switchdev_obj_port_mdb mdb = { + .obj = { + .id = SWITCHDEV_OBJ_ID_PORT_MDB, + .flags = SWITCHDEV_F_DEFER, + }, + }; + + if (!pg) + return br_switchdev_host_mdb(dev, mp, type); + + br_switchdev_mdb_populate(&mdb, mp); + + mdb.obj.orig_dev = pg->key.port->dev; + switch (type) { + case RTM_NEWMDB: + complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); + if (!complete_info) + break; + complete_info->port = pg->key.port; + complete_info->ip = mp->addr; + mdb.obj.complete_priv = complete_info; + mdb.obj.complete = br_switchdev_mdb_complete; + if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL)) + 
kfree(complete_info); + break; + case RTM_DELMDB: + switchdev_port_obj_del(pg->key.port->dev, &mdb.obj); + break; + } +} +#endif + +static int +br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev, + const void *ctx, bool adding, struct notifier_block *nb, + struct netlink_ext_ack *extack) +{ +#ifdef CONFIG_BRIDGE_IGMP_SNOOPING + const struct net_bridge_mdb_entry *mp; + struct switchdev_obj *obj, *tmp; + struct net_bridge *br; + unsigned long action; + LIST_HEAD(mdb_list); + int err = 0; + + ASSERT_RTNL(); + + if (!nb) + return 0; + + if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev)) + return -EINVAL; + + br = netdev_priv(br_dev); + + if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) + return 0; + + /* We cannot walk over br->mdb_list protected just by the rtnl_mutex, + * because the write-side protection is br->multicast_lock. But we + * need to emulate the [ blocking ] calling context of a regular + * switchdev event, so since both br->multicast_lock and RCU read side + * critical sections are atomic, we have no choice but to pick the RCU + * read side lock, queue up all our events, leave the critical section + * and notify switchdev from blocking context. + */ + rcu_read_lock(); + + hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { + struct net_bridge_port_group __rcu * const *pp; + const struct net_bridge_port_group *p; + + if (mp->host_joined) { + err = br_switchdev_mdb_queue_one(&mdb_list, + SWITCHDEV_OBJ_ID_HOST_MDB, + mp, br_dev); + if (err) { + rcu_read_unlock(); + goto out_free_mdb; + } + } + + for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; + pp = &p->next) { + if (p->key.port->dev != dev) + continue; + + err = br_switchdev_mdb_queue_one(&mdb_list, + SWITCHDEV_OBJ_ID_PORT_MDB, + mp, dev); + if (err) { + rcu_read_unlock(); + goto out_free_mdb; + } + } + } + + rcu_read_unlock(); + + if (adding) + action = SWITCHDEV_PORT_OBJ_ADD; + else + action = SWITCHDEV_PORT_OBJ_DEL; + + list_for_each_entry(obj, &mdb_list, list) { + err = br_switchdev_mdb_replay_one(nb, dev, + SWITCHDEV_OBJ_PORT_MDB(obj), + action, ctx, extack); + if (err) + goto out_free_mdb; + } + +out_free_mdb: + list_for_each_entry_safe(obj, tmp, &mdb_list, list) { + list_del(&obj->list); + kfree(SWITCHDEV_OBJ_PORT_MDB(obj)); + } + + if (err) + return err; +#endif + + return 0; +} + static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb, @@ -279,15 +681,17 @@ static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx, struct net_device *dev = p->dev; int err; - err = br_vlan_replay(br_dev, dev, ctx, true, blocking_nb, extack); + err = br_switchdev_vlan_replay(br_dev, dev, ctx, true, blocking_nb, + extack); if (err && err != -EOPNOTSUPP) return err; - err = br_mdb_replay(br_dev, dev, ctx, true, blocking_nb, extack); + err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb, + extack); if (err && err != -EOPNOTSUPP) return err; - err = br_fdb_replay(br_dev, ctx, true, atomic_nb); + err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb); if (err && err != -EOPNOTSUPP) return err; @@ -302,11 +706,11 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p, struct net_device *br_dev = p->br->dev; struct net_device *dev = p->dev; - br_vlan_replay(br_dev, dev, ctx, false, blocking_nb, NULL); + br_switchdev_vlan_replay(br_dev, dev, ctx, false, blocking_nb, NULL); - br_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL); + 
br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL); - br_fdb_replay(br_dev, ctx, false, atomic_nb); + br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb); } /* Let the bridge know that this port is offloaded, so that it can assign a diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 19f65ab91a0273..49e105e0a4479e 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -293,7 +293,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, /* Add the dev mac and count the vlan only if it's usable */ if (br_vlan_should_use(v)) { - err = br_fdb_insert(br, p, dev->dev_addr, v->vid); + err = br_fdb_add_local(br, p, dev->dev_addr, v->vid); if (err) { br_err(br, "failed insert local address into bridge forwarding table\n"); goto out_filt; @@ -683,8 +683,7 @@ static int br_vlan_add_existing(struct net_bridge *br, goto err_flags; } /* It was only kept for port vlans, now make it real */ - err = br_fdb_insert(br, NULL, br->dev->dev_addr, - vlan->vid); + err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid); if (err) { br_err(br, "failed to insert local address into bridge forwarding table\n"); goto err_fdb_insert; @@ -1861,90 +1860,6 @@ void br_vlan_notify(const struct net_bridge *br, kfree_skb(skb); } -static int br_vlan_replay_one(struct notifier_block *nb, - struct net_device *dev, - struct switchdev_obj_port_vlan *vlan, - const void *ctx, unsigned long action, - struct netlink_ext_ack *extack) -{ - struct switchdev_notifier_port_obj_info obj_info = { - .info = { - .dev = dev, - .extack = extack, - .ctx = ctx, - }, - .obj = &vlan->obj, - }; - int err; - - err = nb->notifier_call(nb, action, &obj_info); - return notifier_to_errno(err); -} - -int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - struct net_bridge_vlan_group *vg; - struct net_bridge_vlan *v; - struct net_bridge_port *p; - struct net_bridge *br; - unsigned long action; - int err = 0; - u16 pvid; - - ASSERT_RTNL(); - - if (!nb) - return 0; - - if (!netif_is_bridge_master(br_dev)) - return -EINVAL; - - if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) - return -EINVAL; - - if (netif_is_bridge_master(dev)) { - br = netdev_priv(dev); - vg = br_vlan_group(br); - p = NULL; - } else { - p = br_port_get_rtnl(dev); - if (WARN_ON(!p)) - return -EINVAL; - vg = nbp_vlan_group(p); - br = p->br; - } - - if (!vg) - return 0; - - if (adding) - action = SWITCHDEV_PORT_OBJ_ADD; - else - action = SWITCHDEV_PORT_OBJ_DEL; - - pvid = br_get_pvid(vg); - - list_for_each_entry(v, &vg->vlan_list, vlist) { - struct switchdev_obj_port_vlan vlan = { - .obj.orig_dev = dev, - .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - .flags = br_vlan_flags(v, pvid), - .vid = v->vid, - }; - - if (!br_vlan_should_use(v)) - continue; - - err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack); - if (err) - return err; - } - - return err; -} - /* check if v_curr can enter a range ending in range_end */ bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *range_end) diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index a7af4eaff17d30..1a11064f999071 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -66,7 +66,7 @@ static unsigned int ebt_broute(void *priv, struct sk_buff *skb, NFPROTO_BRIDGE, s->in, NULL, NULL, s->net, NULL); - ret = ebt_do_table(skb, &state, priv); + ret 
= ebt_do_table(priv, skb, &state); if (ret != NF_DROP) return ret; diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index c0b121df4a9af5..cb949436bc0e34 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -58,28 +58,21 @@ static const struct ebt_table frame_filter = { .me = THIS_MODULE, }; -static unsigned int -ebt_filter_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ebt_do_table(skb, state, priv); -} - static const struct nf_hook_ops ebt_ops_filter[] = { { - .hook = ebt_filter_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { - .hook = ebt_filter_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { - .hook = ebt_filter_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority = NF_BR_PRI_FILTER_OTHER, diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 4078151c224fb7..5ee0531ae50610 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -58,27 +58,21 @@ static const struct ebt_table frame_nat = { .me = THIS_MODULE, }; -static unsigned int ebt_nat_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ebt_do_table(skb, state, priv); -} - static const struct nf_hook_ops ebt_ops_nat[] = { { - .hook = ebt_nat_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority = NF_BR_PRI_NAT_DST_OTHER, }, { - .hook = ebt_nat_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_NAT_SRC, }, { - .hook = ebt_nat_hook, + .hook = ebt_do_table, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_NAT_DST_BRIDGED, diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index ba045f35114dd9..f2dbefb61ce836 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -189,10 +189,10 @@ ebt_get_target_c(const struct ebt_entry *e) } /* Do some firewalling */ -unsigned int ebt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct ebt_table *table) +unsigned int ebt_do_table(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) { + struct ebt_table *table = priv; unsigned int hook = state->hook; int i, nentries; struct ebt_entry *point; @@ -1073,7 +1073,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, */ if (repl->num_counters && copy_to_user(repl->counters, counterstmp, - repl->num_counters * sizeof(struct ebt_counter))) { + array_size(repl->num_counters, sizeof(struct ebt_counter)))) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); } @@ -1401,7 +1401,8 @@ static int do_update_counters(struct net *net, const char *name, goto unlock_mutex; } - if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) { + if (copy_from_user(tmp, counters, + array_size(num_counters, sizeof(*counters)))) { ret = -EFAULT; goto unlock_mutex; } @@ -1534,7 +1535,7 @@ static int copy_counters_to_user(struct ebt_table *t, write_unlock_bh(&t->lock); if (copy_to_user(user, counterstmp, - nentries * sizeof(struct ebt_counter))) + array_size(nentries, sizeof(struct ebt_counter)))) ret = -EFAULT; vfree(counterstmp); 
return ret; diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index b02e1292f7f192..4be6b04879a160 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -81,7 +81,7 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, layr->up->ctrlcmd(layr->up, ctrl, layr->id); } -static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], +static struct cflayer *cfusbl_create(int phyid, const u8 ethaddr[ETH_ALEN], u8 braddr[ETH_ALEN]) { struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); diff --git a/net/can/bcm.c b/net/can/bcm.c index 508f67de0b8013..bc88d901a1c0c8 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -625,7 +625,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); if (bcm_rx_thr_flush(op)) { - hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); + hrtimer_forward_now(hrtimer, op->kt_ival2); return HRTIMER_RESTART; } else { /* rearm throttle handling */ diff --git a/net/core/Makefile b/net/core/Makefile index 35ced6201814c1..4268846f2f4759 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -36,3 +36,4 @@ obj-$(CONFIG_FAILOVER) += failover.o obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o obj-$(CONFIG_BPF_SYSCALL) += sock_map.o obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o +obj-$(CONFIG_OF) += of_net.o diff --git a/net/core/dev.c b/net/core/dev.c index eb3a366bf212cf..edeb811c454e6c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -140,7 +140,7 @@ #include #include #include -#include +#include #include #include #include @@ -303,6 +303,12 @@ static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, return NULL; } +bool netdev_name_in_use(struct net *net, const char *name) +{ + return netdev_name_node_lookup(net, name); +} +EXPORT_SYMBOL(netdev_name_in_use); + int netdev_name_node_alt_create(struct net_device *dev, const char *name) { struct netdev_name_node *name_node; @@ -1133,7 +1139,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf) } snprintf(buf, IFNAMSIZ, name, i); - if (!__dev_get_by_name(net, buf)) + if (!netdev_name_in_use(net, buf)) return i; /* It is possible to run out of possible slots @@ -1187,7 +1193,7 @@ static int dev_get_valid_name(struct net *net, struct net_device *dev, if (strchr(name, '%')) return dev_alloc_name_ns(net, dev, name); - else if (__dev_get_by_name(net, name)) + else if (netdev_name_in_use(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); @@ -1290,8 +1296,8 @@ int dev_change_name(struct net_device *dev, const char *newname) old_assign_type = NET_NAME_RENAMED; goto rollback; } else { - pr_err("%s: name change rollback failed: %d\n", - dev->name, ret); + netdev_err(dev, "name change rollback failed: %d\n", + ret); } } @@ -2345,7 +2351,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq) /* If TC0 is invalidated disable TC mapping */ if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); + netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); dev->num_tc = 0; return; } @@ -2356,8 +2362,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq) tc = &dev->tc_to_txq[q]; if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", - i, q); + netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", + i, q); netdev_set_prio_tc_map(dev, i, 0); } } @@ -2921,6 +2927,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (dev->num_tc) netif_setup_tc(dev, txq); + dev_qdisc_change_real_num_tx(dev, txq); + dev->real_num_tx_queues = txq; if (disabling) { @@ -3414,7 +3422,7 @@ EXPORT_SYMBOL(__skb_gso_segment); #ifdef CONFIG_BUG static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) { - pr_err("%s: hw csum failure\n", dev ? dev->name : ""); + netdev_err(dev, "hw csum failure\n"); skb_dump(KERN_ERR, skb, true); dump_stack(); } @@ -3925,6 +3933,7 @@ EXPORT_SYMBOL(dev_loopback_xmit); static struct sk_buff * sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { +#ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); struct tcf_result cl_res; @@ -3960,6 +3969,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) default: break; } +#endif /* CONFIG_NET_CLS_ACT */ return skb; } @@ -4153,13 +4163,20 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT skb->tc_at_ingress = 0; -# ifdef CONFIG_NET_EGRESS +#endif +#ifdef CONFIG_NET_EGRESS if (static_branch_unlikely(&egress_needed_key)) { + if (nf_hook_egress_active()) { + skb = nf_hook_egress(skb, &rc, dev); + if (!skb) + goto out; + } + nf_skip_egress(skb, true); skb = sch_handle_egress(skb, &rc, dev); if (!skb) goto out; + nf_skip_egress(skb, false); } -# endif #endif /* If device/qdisc don't need skb->dst, release it right now while * its hot in this cpu cache. 
@@ -5301,6 +5318,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, if (static_branch_unlikely(&ingress_needed_key)) { bool another = false; + nf_skip_egress(skb, true); skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, &another); if (another) @@ -5308,6 +5326,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, if (!skb) goto out; + nf_skip_egress(skb, false); if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) goto out; } @@ -5844,7 +5863,7 @@ static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int se gro_normal_list(napi); } -static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) +static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; @@ -5873,12 +5892,11 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); - return NET_RX_SUCCESS; + return; } out: gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); - return NET_RX_SUCCESS; } static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, @@ -6905,19 +6923,25 @@ EXPORT_SYMBOL(netif_napi_add); void napi_disable(struct napi_struct *n) { + unsigned long val, new; + might_sleep(); set_bit(NAPI_STATE_DISABLE, &n->state); - while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) - msleep(1); + do { + val = READ_ONCE(n->state); + if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { + usleep_range(20, 200); + continue; + } + + new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; + new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); + } while (cmpxchg(&n->state, val, new) != val); hrtimer_cancel(&n->timer); - clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); clear_bit(NAPI_STATE_DISABLE, &n->state); - clear_bit(NAPI_STATE_THREADED, &n->state); } EXPORT_SYMBOL(napi_disable); @@ -6995,8 +7019,8 @@ static int __napi_poll(struct napi_struct *n, bool *repoll) } if (unlikely(work > weight)) - pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", - n->poll, work, weight); + netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", + n->poll, work, weight); if (likely(work < weight)) return work; @@ -8550,8 +8574,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; - pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); return -EOVERFLOW; } } @@ -8621,8 +8644,7 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; - pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); return -EOVERFLOW; } } @@ -9159,14 +9181,11 @@ int dev_get_port_parent_id(struct net_device *dev, } err = devlink_compat_switch_id_get(dev, ppid); - if (!err || err != -EOPNOTSUPP) + if (!recurse || err != -EOPNOTSUPP) return err; - if (!recurse) - return -EOPNOTSUPP; - netdev_for_each_lower_dev(dev, lower_dev, iter) { - err = dev_get_port_parent_id(lower_dev, ppid, recurse); + err = dev_get_port_parent_id(lower_dev, ppid, true); if (err) break; if (!first.id_len) @@ -9910,6 +9929,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, } } + if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { + netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); + features &= ~NETIF_F_LRO; + } + if (features & NETIF_F_HW_TLS_TX) { bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@ -10867,7 +10891,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, if (!dev->ethtool_ops) dev->ethtool_ops = &default_ethtool_ops; - nf_hook_ingress_init(dev); + nf_hook_netdev_init(dev); return dev; @@ -11153,7 +11177,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, * we can use it in the destination network namespace. */ err = -EEXIST; - if (__dev_get_by_name(net, dev->name)) { + if (netdev_name_in_use(net, dev->name)) { /* We get here if we can't use the current device name */ if (!pat) goto out; @@ -11506,7 +11530,7 @@ static void __net_exit default_device_exit(struct net *net) /* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); - if (__dev_get_by_name(&init_net, fb_name)) + if (netdev_name_in_use(&init_net, fb_name)) snprintf(fb_name, IFNAMSIZ, "dev%%d"); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 0e87237fd8712d..cbab5fec64b12d 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -518,9 +518,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, case SIOCETHTOOL: dev_load(net, ifr->ifr_name); - rtnl_lock(); ret = dev_ethtool(net, ifr, data); - rtnl_unlock(); if (colon) *colon = ':'; return ret; diff --git a/net/core/devlink.c b/net/core/devlink.c index a856ae401ea5c1..6b5ee862429ebe 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -30,6 +30,63 @@ #define CREATE_TRACE_POINTS #include +#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \ + (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX) + +struct devlink_dev_stats { + u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; + u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; +}; + +struct devlink { + u32 index; + struct list_head port_list; + struct list_head rate_list; + struct list_head sb_list; + struct list_head dpipe_table_list; + struct list_head resource_list; + struct list_head param_list; + struct list_head region_list; + struct list_head reporter_list; + struct mutex reporters_lock; /* protects reporter_list */ + struct devlink_dpipe_headers *dpipe_headers; + struct list_head trap_list; + struct list_head trap_group_list; + struct list_head trap_policer_list; + const struct devlink_ops *ops; + u64 features; + struct xarray snapshot_ids; + struct devlink_dev_stats stats; + struct device *dev; + possible_net_t _net; + /* Serializes access to devlink instance specific objects such as + * port, sb, dpipe, resource, params, region, traps and more. 
+ */ + struct mutex lock; + u8 reload_failed:1; + refcount_t refcount; + struct completion comp; + char priv[0] __aligned(NETDEV_ALIGN); +}; + +void *devlink_priv(struct devlink *devlink) +{ + return &devlink->priv; +} +EXPORT_SYMBOL_GPL(devlink_priv); + +struct devlink *priv_to_devlink(void *priv) +{ + return container_of(priv, struct devlink, priv); +} +EXPORT_SYMBOL_GPL(priv_to_devlink); + +struct device *devlink_to_dev(const struct devlink *devlink) +{ + return devlink->dev; +} +EXPORT_SYMBOL_GPL(devlink_to_dev); + static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = { { .name = "destination mac", @@ -45,7 +102,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ethernet = { .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet), .global = true, }; -EXPORT_SYMBOL(devlink_dpipe_header_ethernet); +EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet); static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = { { @@ -62,7 +119,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ipv4 = { .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4), .global = true, }; -EXPORT_SYMBOL(devlink_dpipe_header_ipv4); +EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4); static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = { { @@ -79,7 +136,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ipv6 = { .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6), .global = true, }; -EXPORT_SYMBOL(devlink_dpipe_header_ipv6); +EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr); @@ -95,6 +152,22 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC); #define DEVLINK_REGISTERED XA_MARK_1 +/* devlink instances are open to the access from the user space after + * devlink_register() call. Such logical barrier allows us to have certain + * expectations related to locking. + * + * Before *_register() - we are in initialization stage and no parallel + * access possible to the devlink instance. All drivers perform that phase + * by implicitly holding device_lock. + * + * After *_register() - users and driver can access devlink instance at + * the same time. + */ +#define ASSERT_DEVLINK_REGISTERED(d) \ + WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED)) +#define ASSERT_DEVLINK_NOT_REGISTERED(d) \ + WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED)) + /* devlink_mutex * * An overall lock guarding every operation coming from userspace. 
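The comment and assertions above change the devlink lifetime model: everything hanging off the instance (ports, params, regions, traps) is expected to be set up between devlink_alloc() and devlink_register(), and torn down only after devlink_unregister(), since user space can reach the instance as soon as it is registered. A minimal driver-side sketch of that ordering follows, using only calls visible in this patch; sample_ops, sample_probe()/sample_remove() and struct sample_priv are invented for illustration, and the devlink_alloc() signature is assumed rather than shown here.

    #include <net/devlink.h>

    struct sample_priv {
    	int resets;			/* driver-private state, illustrative */
    };

    /* reload_down/reload_up would live here; DEVLINK_F_RELOAD asserts they exist */
    static const struct devlink_ops sample_ops;

    static struct devlink *sample_probe(struct device *dev)
    {
    	struct devlink *dl;
    	struct sample_priv *priv;

    	/* assumed signature: ops, private size, parent device */
    	dl = devlink_alloc(&sample_ops, sizeof(*priv), dev);
    	if (!dl)
    		return NULL;
    	priv = devlink_priv(dl);	/* now a real function, not an inline */
    	priv->resets = 0;

    	/* ports, params, regions, traps ... all registered here, while
    	 * ASSERT_DEVLINK_NOT_REGISTERED() still holds
    	 */
    	devlink_set_features(dl, DEVLINK_F_RELOAD);
    	devlink_register(dl);		/* void now; user space may see us */
    	return dl;
    }

    static void sample_remove(struct devlink *dl)
    {
    	devlink_unregister(dl);		/* waits for outstanding devlink_put() */
    	/* ... unregister params/ports/etc. here, then free */
    	devlink_free(dl);
    }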
@@ -109,15 +182,17 @@ struct net *devlink_net(const struct devlink *devlink) } EXPORT_SYMBOL_GPL(devlink_net); -static void devlink_put(struct devlink *devlink) +void devlink_put(struct devlink *devlink) { if (refcount_dec_and_test(&devlink->refcount)) complete(&devlink->comp); } -static bool __must_check devlink_try_get(struct devlink *devlink) +struct devlink *__must_check devlink_try_get(struct devlink *devlink) { - return refcount_inc_not_zero(&devlink->refcount); + if (refcount_inc_not_zero(&devlink->refcount)) + return devlink; + return NULL; } static struct devlink *devlink_get_from_attrs(struct net *net, @@ -742,6 +817,7 @@ static void devlink_notify(struct devlink *devlink, enum devlink_command cmd) int err; WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL); + WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -1040,11 +1116,15 @@ static int devlink_nl_port_fill(struct sk_buff *msg, static void devlink_port_notify(struct devlink_port *devlink_port, enum devlink_command cmd) { + struct devlink *devlink = devlink_port->devlink; struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; @@ -1055,19 +1135,22 @@ static void devlink_port_notify(struct devlink_port *devlink_port, return; } - genlmsg_multicast_netns(&devlink_nl_family, - devlink_net(devlink_port->devlink), msg, 0, - DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg, + 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); } static void devlink_rate_notify(struct devlink_rate *devlink_rate, enum devlink_command cmd) { + struct devlink *devlink = devlink_rate->devlink; struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; @@ -1078,9 +1161,8 @@ static void devlink_rate_notify(struct devlink_rate *devlink_rate, return; } - genlmsg_multicast_netns(&devlink_nl_family, - devlink_net(devlink_rate->devlink), msg, 0, - DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg, + 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); } static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg, @@ -3285,7 +3367,7 @@ void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry) kfree(value[value_index].mask); } } -EXPORT_SYMBOL(devlink_dpipe_entry_clear); +EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear); static int devlink_dpipe_entries_fill(struct genl_info *info, enum devlink_command cmd, int flags, @@ -3952,9 +4034,6 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net, struct net *curr_net; int err; - if (!devlink->reload_enabled) - return -EOPNOTSUPP; - memcpy(remote_reload_stats, devlink->stats.remote_reload_stats, sizeof(remote_reload_stats)); @@ -4022,7 +4101,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) u32 actions_performed; int err; - if (!devlink_reload_supported(devlink->ops)) + if (!(devlink->features & DEVLINK_F_RELOAD)) return -EOPNOTSUPP; err = devlink_resources_validate(devlink, NULL, info); @@ -4150,6 +4229,7 @@ static void __devlink_flash_update_notify(struct devlink *devlink, WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE && cmd 
!= DEVLINK_CMD_FLASH_UPDATE_END && cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS); + WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -4522,8 +4602,6 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink, return -EOPNOTSUPP; param_value[i] = param_item->driverinit_value; } else { - if (!param_item->published) - continue; ctx.cmode = i; err = devlink_param_get(devlink, param, &ctx); if (err) @@ -4599,6 +4677,7 @@ static void devlink_param_notify(struct devlink *devlink, WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL && cmd != DEVLINK_CMD_PORT_PARAM_NEW && cmd != DEVLINK_CMD_PORT_PARAM_DEL); + ASSERT_DEVLINK_REGISTERED(devlink); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -4848,47 +4927,6 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, info, DEVLINK_CMD_PARAM_NEW); } -static int devlink_param_register_one(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, - const struct devlink_param *param, - enum devlink_command cmd) -{ - struct devlink_param_item *param_item; - - if (devlink_param_find_by_name(param_list, param->name)) - return -EEXIST; - - if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT)) - WARN_ON(param->get || param->set); - else - WARN_ON(!param->get || !param->set); - - param_item = kzalloc(sizeof(*param_item), GFP_KERNEL); - if (!param_item) - return -ENOMEM; - param_item->param = param; - - list_add_tail(¶m_item->list, param_list); - devlink_param_notify(devlink, port_index, param_item, cmd); - return 0; -} - -static void devlink_param_unregister_one(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, - const struct devlink_param *param, - enum devlink_command cmd) -{ - struct devlink_param_item *param_item; - - param_item = devlink_param_find_by_name(param_list, param->name); - WARN_ON(!param_item); - devlink_param_notify(devlink, port_index, param_item, cmd); - list_del(¶m_item->list); - kfree(param_item); -} - static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { @@ -5070,6 +5108,11 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink, if (err) goto nla_put_failure; + err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS, + region->max_snapshots); + if (err) + goto nla_put_failure; + err = devlink_nl_region_snapshots_id_put(msg, devlink, region); if (err) goto nla_put_failure; @@ -5145,17 +5188,19 @@ static void devlink_nl_region_notify(struct devlink_region *region, struct devlink_snapshot *snapshot, enum devlink_command cmd) { + struct devlink *devlink = region->devlink; struct sk_buff *msg; WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0); if (IS_ERR(msg)) return; - genlmsg_multicast_netns(&devlink_nl_family, - devlink_net(region->devlink), msg, 0, - DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg, + 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); } /** @@ -6269,23 +6314,21 @@ static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg, return 0; } -int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value) +static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value) { if (fmsg->putting_binary) return -EINVAL; return 
devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG); } -EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put); -int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value) +static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value) { if (fmsg->putting_binary) return -EINVAL; return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8); } -EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put); int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value) { @@ -6296,14 +6339,13 @@ int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value) } EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put); -int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value) +static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value) { if (fmsg->putting_binary) return -EINVAL; return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64); } -EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put); int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value) { @@ -6923,10 +6965,12 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg, static void devlink_recover_notify(struct devlink_health_reporter *reporter, enum devlink_command cmd) { + struct devlink *devlink = reporter->devlink; struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER); + WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -6938,9 +6982,8 @@ static void devlink_recover_notify(struct devlink_health_reporter *reporter, return; } - genlmsg_multicast_netns(&devlink_nl_family, - devlink_net(reporter->devlink), - msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg, + 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); } void @@ -8899,6 +8942,25 @@ static bool devlink_reload_actions_valid(const struct devlink_ops *ops) return true; } +/** + * devlink_set_features - Set devlink supported features + * + * @devlink: devlink + * @features: devlink support features + * + * This interface allows us to set reload ops separatelly from + * the devlink_alloc. 
+ */ +void devlink_set_features(struct devlink *devlink, u64 features) +{ + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + + WARN_ON(features & DEVLINK_F_RELOAD && + !devlink_reload_supported(devlink->ops)); + devlink->features = features; +} +EXPORT_SYMBOL_GPL(devlink_set_features); + /** * devlink_alloc_ns - Allocate new devlink instance resources * in specific namespace @@ -8958,18 +9020,104 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, } EXPORT_SYMBOL_GPL(devlink_alloc_ns); +static void +devlink_trap_policer_notify(struct devlink *devlink, + const struct devlink_trap_policer_item *policer_item, + enum devlink_command cmd); +static void +devlink_trap_group_notify(struct devlink *devlink, + const struct devlink_trap_group_item *group_item, + enum devlink_command cmd); +static void devlink_trap_notify(struct devlink *devlink, + const struct devlink_trap_item *trap_item, + enum devlink_command cmd); + +static void devlink_notify_register(struct devlink *devlink) +{ + struct devlink_trap_policer_item *policer_item; + struct devlink_trap_group_item *group_item; + struct devlink_param_item *param_item; + struct devlink_trap_item *trap_item; + struct devlink_port *devlink_port; + struct devlink_rate *rate_node; + struct devlink_region *region; + + devlink_notify(devlink, DEVLINK_CMD_NEW); + list_for_each_entry(devlink_port, &devlink->port_list, list) + devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); + + list_for_each_entry(policer_item, &devlink->trap_policer_list, list) + devlink_trap_policer_notify(devlink, policer_item, + DEVLINK_CMD_TRAP_POLICER_NEW); + + list_for_each_entry(group_item, &devlink->trap_group_list, list) + devlink_trap_group_notify(devlink, group_item, + DEVLINK_CMD_TRAP_GROUP_NEW); + + list_for_each_entry(trap_item, &devlink->trap_list, list) + devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW); + + list_for_each_entry(rate_node, &devlink->rate_list, list) + devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW); + + list_for_each_entry(region, &devlink->region_list, list) + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW); + + list_for_each_entry(param_item, &devlink->param_list, list) + devlink_param_notify(devlink, 0, param_item, + DEVLINK_CMD_PARAM_NEW); +} + +static void devlink_notify_unregister(struct devlink *devlink) +{ + struct devlink_trap_policer_item *policer_item; + struct devlink_trap_group_item *group_item; + struct devlink_param_item *param_item; + struct devlink_trap_item *trap_item; + struct devlink_port *devlink_port; + struct devlink_rate *rate_node; + struct devlink_region *region; + + list_for_each_entry_reverse(param_item, &devlink->param_list, list) + devlink_param_notify(devlink, 0, param_item, + DEVLINK_CMD_PARAM_DEL); + + list_for_each_entry_reverse(region, &devlink->region_list, list) + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL); + + list_for_each_entry_reverse(rate_node, &devlink->rate_list, list) + devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL); + + list_for_each_entry_reverse(trap_item, &devlink->trap_list, list) + devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL); + + list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list) + devlink_trap_group_notify(devlink, group_item, + DEVLINK_CMD_TRAP_GROUP_DEL); + list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list, + list) + devlink_trap_policer_notify(devlink, policer_item, + DEVLINK_CMD_TRAP_POLICER_DEL); + + list_for_each_entry_reverse(devlink_port, &devlink->port_list, list) + 
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); + devlink_notify(devlink, DEVLINK_CMD_DEL); +} + /** * devlink_register - Register devlink instance * * @devlink: devlink */ -int devlink_register(struct devlink *devlink) +void devlink_register(struct devlink *devlink) { + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + /* Make sure that we are in .probe() routine */ + mutex_lock(&devlink_mutex); xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); - devlink_notify(devlink, DEVLINK_CMD_NEW); + devlink_notify_register(devlink); mutex_unlock(&devlink_mutex); - return 0; } EXPORT_SYMBOL_GPL(devlink_register); @@ -8980,53 +9128,19 @@ EXPORT_SYMBOL_GPL(devlink_register); */ void devlink_unregister(struct devlink *devlink) { + ASSERT_DEVLINK_REGISTERED(devlink); + /* Make sure that we are in .remove() routine */ + devlink_put(devlink); wait_for_completion(&devlink->comp); mutex_lock(&devlink_mutex); - WARN_ON(devlink_reload_supported(devlink->ops) && - devlink->reload_enabled); - devlink_notify(devlink, DEVLINK_CMD_DEL); + devlink_notify_unregister(devlink); xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); mutex_unlock(&devlink_mutex); } EXPORT_SYMBOL_GPL(devlink_unregister); -/** - * devlink_reload_enable - Enable reload of devlink instance - * - * @devlink: devlink - * - * Should be called at end of device initialization - * process when reload operation is supported. - */ -void devlink_reload_enable(struct devlink *devlink) -{ - mutex_lock(&devlink_mutex); - devlink->reload_enabled = true; - mutex_unlock(&devlink_mutex); -} -EXPORT_SYMBOL_GPL(devlink_reload_enable); - -/** - * devlink_reload_disable - Disable reload of devlink instance - * - * @devlink: devlink - * - * Should be called at the beginning of device cleanup - * process when reload operation is supported. - */ -void devlink_reload_disable(struct devlink *devlink) -{ - mutex_lock(&devlink_mutex); - /* Mutex is taken which ensures that no reload operation is in - * progress while setting up forbidded flag. 
- */ - devlink->reload_enabled = false; - mutex_unlock(&devlink_mutex); -} -EXPORT_SYMBOL_GPL(devlink_reload_disable); - /** * devlink_free - Free devlink instance resources * @@ -9034,6 +9148,8 @@ EXPORT_SYMBOL_GPL(devlink_reload_disable); */ void devlink_free(struct devlink *devlink) { + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + mutex_destroy(&devlink->reporters_lock); mutex_destroy(&devlink->lock); WARN_ON(!list_empty(&devlink->trap_policer_list)); @@ -9939,73 +10055,6 @@ static int devlink_param_verify(const struct devlink_param *param) return devlink_param_driver_verify(param); } -static int __devlink_param_register_one(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, - const struct devlink_param *param, - enum devlink_command reg_cmd) -{ - int err; - - err = devlink_param_verify(param); - if (err) - return err; - - return devlink_param_register_one(devlink, port_index, - param_list, param, reg_cmd); -} - -static int __devlink_params_register(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, - const struct devlink_param *params, - size_t params_count, - enum devlink_command reg_cmd, - enum devlink_command unreg_cmd) -{ - const struct devlink_param *param = params; - int i; - int err; - - mutex_lock(&devlink->lock); - for (i = 0; i < params_count; i++, param++) { - err = __devlink_param_register_one(devlink, port_index, - param_list, param, reg_cmd); - if (err) - goto rollback; - } - - mutex_unlock(&devlink->lock); - return 0; - -rollback: - if (!i) - goto unlock; - for (param--; i > 0; i--, param--) - devlink_param_unregister_one(devlink, port_index, param_list, - param, unreg_cmd); -unlock: - mutex_unlock(&devlink->lock); - return err; -} - -static void __devlink_params_unregister(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, - const struct devlink_param *params, - size_t params_count, - enum devlink_command cmd) -{ - const struct devlink_param *param = params; - int i; - - mutex_lock(&devlink->lock); - for (i = 0; i < params_count; i++, param++) - devlink_param_unregister_one(devlink, 0, param_list, param, - cmd); - mutex_unlock(&devlink->lock); -} - /** * devlink_params_register - register configuration parameters * @@ -10019,10 +10068,25 @@ int devlink_params_register(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { - return __devlink_params_register(devlink, 0, &devlink->param_list, - params, params_count, - DEVLINK_CMD_PARAM_NEW, - DEVLINK_CMD_PARAM_DEL); + const struct devlink_param *param = params; + int i, err; + + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + + for (i = 0; i < params_count; i++, param++) { + err = devlink_param_register(devlink, param); + if (err) + goto rollback; + } + return 0; + +rollback: + if (!i) + return err; + + for (param--; i > 0; i--, param--) + devlink_param_unregister(devlink, param); + return err; } EXPORT_SYMBOL_GPL(devlink_params_register); @@ -10036,9 +10100,13 @@ void devlink_params_unregister(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { - return __devlink_params_unregister(devlink, 0, &devlink->param_list, - params, params_count, - DEVLINK_CMD_PARAM_DEL); + const struct devlink_param *param = params; + int i; + + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + + for (i = 0; i < params_count; i++, param++) + devlink_param_unregister(devlink, param); } EXPORT_SYMBOL_GPL(devlink_params_unregister); @@ -10054,13 +10122,26 @@ EXPORT_SYMBOL_GPL(devlink_params_unregister); int 
devlink_param_register(struct devlink *devlink, const struct devlink_param *param) { - int err; + struct devlink_param_item *param_item; - mutex_lock(&devlink->lock); - err = __devlink_param_register_one(devlink, 0, &devlink->param_list, - param, DEVLINK_CMD_PARAM_NEW); - mutex_unlock(&devlink->lock); - return err; + ASSERT_DEVLINK_NOT_REGISTERED(devlink); + + WARN_ON(devlink_param_verify(param)); + WARN_ON(devlink_param_find_by_name(&devlink->param_list, param->name)); + + if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT)) + WARN_ON(param->get || param->set); + else + WARN_ON(!param->get || !param->set); + + param_item = kzalloc(sizeof(*param_item), GFP_KERNEL); + if (!param_item) + return -ENOMEM; + + param_item->param = param; + + list_add_tail(¶m_item->list, &devlink->param_list); + return 0; } EXPORT_SYMBOL_GPL(devlink_param_register); @@ -10071,153 +10152,39 @@ EXPORT_SYMBOL_GPL(devlink_param_register); */ void devlink_param_unregister(struct devlink *devlink, const struct devlink_param *param) -{ - mutex_lock(&devlink->lock); - devlink_param_unregister_one(devlink, 0, &devlink->param_list, param, - DEVLINK_CMD_PARAM_DEL); - mutex_unlock(&devlink->lock); -} -EXPORT_SYMBOL_GPL(devlink_param_unregister); - -/** - * devlink_params_publish - publish configuration parameters - * - * @devlink: devlink - * - * Publish previously registered configuration parameters. - */ -void devlink_params_publish(struct devlink *devlink) -{ - struct devlink_param_item *param_item; - - list_for_each_entry(param_item, &devlink->param_list, list) { - if (param_item->published) - continue; - param_item->published = true; - devlink_param_notify(devlink, 0, param_item, - DEVLINK_CMD_PARAM_NEW); - } -} -EXPORT_SYMBOL_GPL(devlink_params_publish); - -/** - * devlink_params_unpublish - unpublish configuration parameters - * - * @devlink: devlink - * - * Unpublish previously registered configuration parameters. - */ -void devlink_params_unpublish(struct devlink *devlink) { struct devlink_param_item *param_item; - list_for_each_entry(param_item, &devlink->param_list, list) { - if (!param_item->published) - continue; - param_item->published = false; - devlink_param_notify(devlink, 0, param_item, - DEVLINK_CMD_PARAM_DEL); - } -} -EXPORT_SYMBOL_GPL(devlink_params_unpublish); - -/** - * devlink_param_publish - publish one configuration parameter - * - * @devlink: devlink - * @param: one configuration parameter - * - * Publish previously registered configuration parameter. 
- */ -void devlink_param_publish(struct devlink *devlink, - const struct devlink_param *param) -{ - struct devlink_param_item *param_item; + ASSERT_DEVLINK_NOT_REGISTERED(devlink); - list_for_each_entry(param_item, &devlink->param_list, list) { - if (param_item->param != param || param_item->published) - continue; - param_item->published = true; - devlink_param_notify(devlink, 0, param_item, - DEVLINK_CMD_PARAM_NEW); - break; - } + param_item = + devlink_param_find_by_name(&devlink->param_list, param->name); + WARN_ON(!param_item); + list_del(¶m_item->list); + kfree(param_item); } -EXPORT_SYMBOL_GPL(devlink_param_publish); +EXPORT_SYMBOL_GPL(devlink_param_unregister); /** - * devlink_param_unpublish - unpublish one configuration parameter + * devlink_param_driverinit_value_get - get configuration parameter + * value for driver initializing * - * @devlink: devlink - * @param: one configuration parameter + * @devlink: devlink + * @param_id: parameter ID + * @init_val: value of parameter in driverinit configuration mode * - * Unpublish previously registered configuration parameter. + * This function should be used by the driver to get driverinit + * configuration for initialization after reload command. */ -void devlink_param_unpublish(struct devlink *devlink, - const struct devlink_param *param) +int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val) { struct devlink_param_item *param_item; - list_for_each_entry(param_item, &devlink->param_list, list) { - if (param_item->param != param || !param_item->published) - continue; - param_item->published = false; - devlink_param_notify(devlink, 0, param_item, - DEVLINK_CMD_PARAM_DEL); - break; - } -} -EXPORT_SYMBOL_GPL(devlink_param_unpublish); - -/** - * devlink_port_params_register - register port configuration parameters - * - * @devlink_port: devlink port - * @params: configuration parameters array - * @params_count: number of parameters provided - * - * Register the configuration parameters supported by the port. 
- */ -int devlink_port_params_register(struct devlink_port *devlink_port, - const struct devlink_param *params, - size_t params_count) -{ - return __devlink_params_register(devlink_port->devlink, - devlink_port->index, - &devlink_port->param_list, params, - params_count, - DEVLINK_CMD_PORT_PARAM_NEW, - DEVLINK_CMD_PORT_PARAM_DEL); -} -EXPORT_SYMBOL_GPL(devlink_port_params_register); - -/** - * devlink_port_params_unregister - unregister port configuration - * parameters - * - * @devlink_port: devlink port - * @params: configuration parameters array - * @params_count: number of parameters provided - */ -void devlink_port_params_unregister(struct devlink_port *devlink_port, - const struct devlink_param *params, - size_t params_count) -{ - return __devlink_params_unregister(devlink_port->devlink, - devlink_port->index, - &devlink_port->param_list, - params, params_count, - DEVLINK_CMD_PORT_PARAM_DEL); -} -EXPORT_SYMBOL_GPL(devlink_port_params_unregister); - -static int -__devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id, - union devlink_param_value *init_val) -{ - struct devlink_param_item *param_item; + if (!devlink_reload_supported(devlink->ops)) + return -EOPNOTSUPP; - param_item = devlink_param_find_by_id(param_list, param_id); + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); if (!param_item) return -EINVAL; @@ -10233,54 +10200,6 @@ __devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id, return 0; } - -static int -__devlink_param_driverinit_value_set(struct devlink *devlink, - unsigned int port_index, - struct list_head *param_list, u32 param_id, - union devlink_param_value init_val, - enum devlink_command cmd) -{ - struct devlink_param_item *param_item; - - param_item = devlink_param_find_by_id(param_list, param_id); - if (!param_item) - return -EINVAL; - - if (!devlink_param_cmode_is_supported(param_item->param, - DEVLINK_PARAM_CMODE_DRIVERINIT)) - return -EOPNOTSUPP; - - if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) - strcpy(param_item->driverinit_value.vstr, init_val.vstr); - else - param_item->driverinit_value = init_val; - param_item->driverinit_value_valid = true; - - devlink_param_notify(devlink, port_index, param_item, cmd); - return 0; -} - -/** - * devlink_param_driverinit_value_get - get configuration parameter - * value for driver initializing - * - * @devlink: devlink - * @param_id: parameter ID - * @init_val: value of parameter in driverinit configuration mode - * - * This function should be used by the driver to get driverinit - * configuration for initialization after reload command. 
- */ -int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, - union devlink_param_value *init_val) -{ - if (!devlink_reload_supported(devlink->ops)) - return -EOPNOTSUPP; - - return __devlink_param_driverinit_value_get(&devlink->param_list, - param_id, init_val); -} EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get); /** @@ -10298,61 +10217,26 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get); int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val) { - return __devlink_param_driverinit_value_set(devlink, 0, - &devlink->param_list, - param_id, init_val, - DEVLINK_CMD_PARAM_NEW); -} -EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set); + struct devlink_param_item *param_item; -/** - * devlink_port_param_driverinit_value_get - get configuration parameter - * value for driver initializing - * - * @devlink_port: devlink_port - * @param_id: parameter ID - * @init_val: value of parameter in driverinit configuration mode - * - * This function should be used by the driver to get driverinit - * configuration for initialization after reload command. - */ -int devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port, - u32 param_id, - union devlink_param_value *init_val) -{ - struct devlink *devlink = devlink_port->devlink; + ASSERT_DEVLINK_NOT_REGISTERED(devlink); - if (!devlink_reload_supported(devlink->ops)) - return -EOPNOTSUPP; + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + if (!param_item) + return -EINVAL; - return __devlink_param_driverinit_value_get(&devlink_port->param_list, - param_id, init_val); -} -EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_get); + if (!devlink_param_cmode_is_supported(param_item->param, + DEVLINK_PARAM_CMODE_DRIVERINIT)) + return -EOPNOTSUPP; -/** - * devlink_port_param_driverinit_value_set - set value of configuration - * parameter for driverinit - * configuration mode - * - * @devlink_port: devlink_port - * @param_id: parameter ID - * @init_val: value of parameter to set for driverinit configuration mode - * - * This function should be used by the driver to set driverinit - * configuration mode default value. - */ -int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port, - u32 param_id, - union devlink_param_value init_val) -{ - return __devlink_param_driverinit_value_set(devlink_port->devlink, - devlink_port->index, - &devlink_port->param_list, - param_id, init_val, - DEVLINK_CMD_PORT_PARAM_NEW); + if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) + strcpy(param_item->driverinit_value.vstr, init_val.vstr); + else + param_item->driverinit_value = init_val; + param_item->driverinit_value_valid = true; + return 0; } -EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_set); +EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set); /** * devlink_param_value_changed - notify devlink on a parameter's value @@ -10377,50 +10261,6 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id) } EXPORT_SYMBOL_GPL(devlink_param_value_changed); -/** - * devlink_port_param_value_changed - notify devlink on a parameter's value - * change. Should be called by the driver - * right after the change. - * - * @devlink_port: devlink_port - * @param_id: parameter ID - * - * This function should be used by the driver to notify devlink on value - * change, excluding driverinit configuration mode. 
- * For driverinit configuration mode driver should use the function - * devlink_port_param_driverinit_value_set() instead. - */ -void devlink_port_param_value_changed(struct devlink_port *devlink_port, - u32 param_id) -{ - struct devlink_param_item *param_item; - - param_item = devlink_param_find_by_id(&devlink_port->param_list, - param_id); - WARN_ON(!param_item); - - devlink_param_notify(devlink_port->devlink, devlink_port->index, - param_item, DEVLINK_CMD_PORT_PARAM_NEW); -} -EXPORT_SYMBOL_GPL(devlink_port_param_value_changed); - -/** - * devlink_param_value_str_fill - Safely fill-up the string preventing - * from overflow of the preallocated buffer - * - * @dst_val: destination devlink_param_value - * @src: source buffer - */ -void devlink_param_value_str_fill(union devlink_param_value *dst_val, - const char *src) -{ - size_t len; - - len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE); - WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE); -} -EXPORT_SYMBOL_GPL(devlink_param_value_str_fill); - /** * devlink_region_create - create a new address region * @@ -10839,6 +10679,8 @@ devlink_trap_group_notify(struct devlink *devlink, WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW && cmd != DEVLINK_CMD_TRAP_GROUP_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -10880,6 +10722,8 @@ static void devlink_trap_notify(struct devlink *devlink, WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW && cmd != DEVLINK_CMD_TRAP_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -11261,6 +11105,8 @@ devlink_trap_policer_notify(struct devlink *devlink, WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW && cmd != DEVLINK_CMD_TRAP_POLICER_DEL); + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -11429,45 +11275,36 @@ static void __devlink_compat_running_version(struct devlink *devlink, nlmsg_free(msg); } -void devlink_compat_running_version(struct net_device *dev, - char *buf, size_t len) +static struct devlink_port *netdev_to_devlink_port(struct net_device *dev) { - struct devlink *devlink; + if (!dev->netdev_ops->ndo_get_devlink_port) + return NULL; - dev_hold(dev); - rtnl_unlock(); + return dev->netdev_ops->ndo_get_devlink_port(dev); +} - devlink = netdev_to_devlink(dev); - if (!devlink || !devlink->ops->info_get) - goto out; +void devlink_compat_running_version(struct devlink *devlink, + char *buf, size_t len) +{ + if (!devlink->ops->info_get) + return; mutex_lock(&devlink->lock); __devlink_compat_running_version(devlink, buf, len); mutex_unlock(&devlink->lock); - -out: - rtnl_lock(); - dev_put(dev); } -int devlink_compat_flash_update(struct net_device *dev, const char *file_name) +int devlink_compat_flash_update(struct devlink *devlink, const char *file_name) { struct devlink_flash_update_params params = {}; - struct devlink *devlink; int ret; - dev_hold(dev); - rtnl_unlock(); - - devlink = netdev_to_devlink(dev); - if (!devlink || !devlink->ops->flash_update) { - ret = -EOPNOTSUPP; - goto out; - } + if (!devlink->ops->flash_update) + return -EOPNOTSUPP; ret = request_firmware(¶ms.fw, file_name, devlink->dev); if (ret) - goto out; + return ret; mutex_lock(&devlink->lock); devlink_flash_update_begin_notify(devlink); @@ -11477,10 +11314,6 @@ int devlink_compat_flash_update(struct net_device *dev, const char *file_name) 
release_firmware(params.fw); -out: - rtnl_lock(); - dev_put(dev); - return ret; } @@ -11538,7 +11371,7 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net) if (!net_eq(devlink_net(devlink), net)) goto retry; - WARN_ON(!devlink_reload_supported(devlink->ops)); + WARN_ON(!(devlink->features & DEVLINK_F_RELOAD)); err = devlink_reload(devlink, &init_net, DEVLINK_RELOAD_ACTION_DRIVER_REINIT, DEVLINK_RELOAD_LIMIT_UNSPEC, diff --git a/net/core/filter.c b/net/core/filter.c index 2e32cee2c46900..8e8d3b49c29767 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7765,6 +7765,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type break; case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; + case bpf_ctx_range(struct __sk_buff, hwtstamp): + if (type == BPF_WRITE || size != sizeof(__u64)) + return false; + break; case bpf_ctx_range(struct __sk_buff, tstamp): if (size != sizeof(__u64)) return false; @@ -7774,6 +7778,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; + case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1: + /* Explicitly prohibit access to padding in __sk_buff. */ + return false; default: /* Only narrow read access allowed for now. */ if (type == BPF_WRITE) { @@ -7802,6 +7809,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): + case bpf_ctx_range(struct __sk_buff, hwtstamp): return false; } @@ -7872,6 +7880,7 @@ static bool lwt_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): + case bpf_ctx_range(struct __sk_buff, hwtstamp): return false; } @@ -8373,6 +8382,7 @@ static bool sk_skb_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): + case bpf_ctx_range(struct __sk_buff, hwtstamp): return false; } @@ -8884,6 +8894,17 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); break; + case offsetof(struct __sk_buff, hwtstamp): + BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8); + BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0); + + insn = bpf_convert_shinfo_access(si, insn); + *insn++ = BPF_LDX_MEM(BPF_DW, + si->dst_reg, si->dst_reg, + bpf_target_off(struct skb_shared_info, + hwtstamps, 8, + target_size)); + break; } return insn - insn_buf; @@ -10702,6 +10723,26 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], }; +BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk) +{ + /* unix_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
+ */ + BTF_TYPE_EMIT(struct unix_sock); + if (sk && sk_fullsock(sk) && sk->sk_family == AF_UNIX) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_unix_sock_proto = { + .func = bpf_skc_to_unix_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX], +}; + BPF_CALL_1(bpf_sock_from_file, struct file *, file) { return (unsigned long)sock_from_file(file); @@ -10741,6 +10782,9 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skc_to_udp6_sock: func = &bpf_skc_to_udp6_sock_proto; break; + case BPF_FUNC_skc_to_unix_sock: + func = &bpf_skc_to_unix_sock_proto; + break; default: return bpf_base_func_proto(func_id); } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index bac0184cf3de77..3255f57f5131af 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1196,9 +1196,8 @@ bool __skb_flow_dissect(const struct net *net, break; } - proto = hdr->proto; nhoff += PPPOE_SES_HLEN; - switch (proto) { + switch (hdr->proto) { case htons(PPP_IP): proto = htons(ETH_P_IP); fdret = FLOW_DISSECT_RET_PROTO_AGAIN; @@ -1307,6 +1306,11 @@ bool __skb_flow_dissect(const struct net *net, switch (ip_proto) { case IPPROTO_GRE: + if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) { + fdret = FLOW_DISSECT_RET_OUT_GOOD; + break; + } + fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector, target_container, data, &proto, &nhoff, &hlen, flags); @@ -1364,6 +1368,11 @@ bool __skb_flow_dissect(const struct net *net, break; } case IPPROTO_IPIP: + if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) { + fdret = FLOW_DISSECT_RET_OUT_GOOD; + break; + } + proto = htons(ETH_P_IP); key_control->flags |= FLOW_DIS_ENCAPSULATION; @@ -1376,6 +1385,11 @@ bool __skb_flow_dissect(const struct net *net, break; case IPPROTO_IPV6: + if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) { + fdret = FLOW_DISSECT_RET_OUT_GOOD; + break; + } + proto = htons(ETH_P_IPV6); key_control->flags |= FLOW_DIS_ENCAPSULATION; diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 8e582e29a41e39..4fcbdd71c59fa4 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -40,10 +40,10 @@ */ struct net_rate_estimator { - struct gnet_stats_basic_packed *bstats; + struct gnet_stats_basic_sync *bstats; spinlock_t *stats_lock; - seqcount_t *running; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; + bool running; + struct gnet_stats_basic_sync __percpu *cpu_bstats; u8 ewma_log; u8 intvl_log; /* period : (250ms << intvl_log) */ @@ -60,13 +60,13 @@ struct net_rate_estimator { }; static void est_fetch_counters(struct net_rate_estimator *e, - struct gnet_stats_basic_packed *b) + struct gnet_stats_basic_sync *b) { - memset(b, 0, sizeof(*b)); + gnet_stats_basic_sync_init(b); if (e->stats_lock) spin_lock(e->stats_lock); - __gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats); + gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running); if (e->stats_lock) spin_unlock(e->stats_lock); @@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e, static void est_timer(struct timer_list *t) { struct net_rate_estimator *est = from_timer(est, t, timer); - struct gnet_stats_basic_packed b; + struct gnet_stats_basic_sync b; + u64 b_bytes, b_packets; u64 rate, brate; est_fetch_counters(est, &b); - brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log); + b_bytes = u64_stats_read(&b.bytes); + b_packets = 
u64_stats_read(&b.packets); + + brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log); brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log); - rate = (b.packets - est->last_packets) << (10 - est->intvl_log); + rate = (b_packets - est->last_packets) << (10 - est->intvl_log); rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log); write_seqcount_begin(&est->seq); @@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t) est->avpps += rate; write_seqcount_end(&est->seq); - est->last_bytes = b.bytes; - est->last_packets = b.packets; + est->last_bytes = b_bytes; + est->last_packets = b_packets; est->next_jiffies += ((HZ/4) << est->intvl_log); @@ -109,7 +113,9 @@ static void est_timer(struct timer_list *t) * @cpu_bstats: bstats per cpu * @rate_est: rate estimator statistics * @lock: lock for statistics and control path - * @running: qdisc running seqcount + * @running: true if @bstats represents a running qdisc, thus @bstats' + * internal values might change during basic reads. Only used + * if @bstats_cpu is NULL * @opt: rate estimator configuration TLV * * Creates a new rate estimator with &bstats as source and &rate_est @@ -121,16 +127,16 @@ static void est_timer(struct timer_list *t) * Returns 0 on success or a negative error code. * */ -int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, +int gen_new_estimator(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, + bool running, struct nlattr *opt) { struct gnet_estimator *parm = nla_data(opt); struct net_rate_estimator *old, *est; - struct gnet_stats_basic_packed b; + struct gnet_stats_basic_sync b; int intvl_log; if (nla_len(opt) < sizeof(*parm)) @@ -164,8 +170,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, est_fetch_counters(est, &b); if (lock) local_bh_enable(); - est->last_bytes = b.bytes; - est->last_packets = b.packets; + est->last_bytes = u64_stats_read(&b.bytes); + est->last_packets = u64_stats_read(&b.packets); if (lock) spin_lock_bh(lock); @@ -214,7 +220,9 @@ EXPORT_SYMBOL(gen_kill_estimator); * @cpu_bstats: bstats per cpu * @rate_est: rate estimator statistics * @lock: lock for statistics and control path - * @running: qdisc running seqcount (might be NULL) + * @running: true if @bstats represents a running qdisc, thus @bstats' + * internal values might change during basic reads. Only used + * if @cpu_bstats is NULL * @opt: rate estimator configuration TLV * * Replaces the configuration of a rate estimator by calling @@ -222,11 +230,11 @@ EXPORT_SYMBOL(gen_kill_estimator); * * Returns 0 on success or a negative error code. 
*/ -int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, +int gen_replace_estimator(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt) + bool running, struct nlattr *opt) { return gen_new_estimator(bstats, cpu_bstats, rate_est, lock, running, opt); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index e491b083b34854..a10335b4ba2d0b 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -18,7 +18,7 @@ #include #include #include - +#include static inline int gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr) @@ -114,63 +114,112 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, } EXPORT_SYMBOL(gnet_stats_start_copy); -static void -__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu) +/* Must not be inlined, due to u64_stats seqcount_t lockdep key */ +void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b) { + u64_stats_set(&b->bytes, 0); + u64_stats_set(&b->packets, 0); + u64_stats_init(&b->syncp); +} +EXPORT_SYMBOL(gnet_stats_basic_sync_init); + +static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu) +{ + u64 t_bytes = 0, t_packets = 0; int i; for_each_possible_cpu(i) { - struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); + struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i); unsigned int start; u64 bytes, packets; do { start = u64_stats_fetch_begin_irq(&bcpu->syncp); - bytes = bcpu->bstats.bytes; - packets = bcpu->bstats.packets; + bytes = u64_stats_read(&bcpu->bytes); + packets = u64_stats_read(&bcpu->packets); } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); - bstats->bytes += bytes; - bstats->packets += packets; + t_bytes += bytes; + t_packets += packets; + } + _bstats_update(bstats, t_bytes, t_packets); +} + +void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, bool running) +{ + unsigned int start; + u64 bytes = 0; + u64 packets = 0; + + WARN_ON_ONCE((cpu || running) && in_hardirq()); + + if (cpu) { + gnet_stats_add_basic_cpu(bstats, cpu); + return; } + do { + if (running) + start = u64_stats_fetch_begin_irq(&b->syncp); + bytes = u64_stats_read(&b->bytes); + packets = u64_stats_read(&b->packets); + } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); + + _bstats_update(bstats, bytes, packets); } +EXPORT_SYMBOL(gnet_stats_add_basic); -void -__gnet_stats_copy_basic(const seqcount_t *running, - struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b) +static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, bool running) { - unsigned int seq; + unsigned int start; if (cpu) { - __gnet_stats_copy_basic_cpu(bstats, cpu); + u64 t_bytes = 0, t_packets = 0; + int i; + + for_each_possible_cpu(i) { + struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i); + unsigned int start; + u64 bytes, packets; + + do { + start = u64_stats_fetch_begin_irq(&bcpu->syncp); + bytes = u64_stats_read(&bcpu->bytes); + packets = u64_stats_read(&bcpu->packets); + } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); + + 
t_bytes += bytes; + t_packets += packets; + } + *ret_bytes = t_bytes; + *ret_packets = t_packets; return; } do { if (running) - seq = read_seqcount_begin(running); - bstats->bytes = b->bytes; - bstats->packets = b->packets; - } while (running && read_seqcount_retry(running, seq)); + start = u64_stats_fetch_begin_irq(&b->syncp); + *ret_bytes = u64_stats_read(&b->bytes); + *ret_packets = u64_stats_read(&b->packets); + } while (running && u64_stats_fetch_retry_irq(&b->syncp, start)); } -EXPORT_SYMBOL(__gnet_stats_copy_basic); static int -___gnet_stats_copy_basic(const seqcount_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b, - int type) +___gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, + int type, bool running) { - struct gnet_stats_basic_packed bstats = {0}; + u64 bstats_bytes, bstats_packets; - __gnet_stats_copy_basic(running, &bstats, cpu, b); + gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running); if (d->compat_tc_stats && type == TCA_STATS_BASIC) { - d->tc_stats.bytes = bstats.bytes; - d->tc_stats.packets = bstats.packets; + d->tc_stats.bytes = bstats_bytes; + d->tc_stats.packets = bstats_packets; } if (d->tail) { @@ -178,24 +227,28 @@ ___gnet_stats_copy_basic(const seqcount_t *running, int res; memset(&sb, 0, sizeof(sb)); - sb.bytes = bstats.bytes; - sb.packets = bstats.packets; + sb.bytes = bstats_bytes; + sb.packets = bstats_packets; res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD); - if (res < 0 || sb.packets == bstats.packets) + if (res < 0 || sb.packets == bstats_packets) return res; /* emit 64bit stats only if needed */ - return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets, - sizeof(bstats.packets), TCA_STATS_PAD); + return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets, + sizeof(bstats_packets), TCA_STATS_PAD); } return 0; } /** * gnet_stats_copy_basic - copy basic statistics into statistic TLV - * @running: seqcount_t pointer * @d: dumping handle * @cpu: copy statistic per cpu * @b: basic statistics + * @running: true if @b represents a running qdisc, thus @b's + * internal values might change during basic reads. + * Only used if @cpu is NULL + * + * Context: task; must not be run from IRQ or BH contexts * * Appends the basic statistics to the top level TLV created by * gnet_stats_start_copy(). @@ -204,22 +257,25 @@ ___gnet_stats_copy_basic(const seqcount_t *running, * if the room in the socket buffer was not sufficient. */ int -gnet_stats_copy_basic(const seqcount_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b) +gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, + bool running) { - return ___gnet_stats_copy_basic(running, d, cpu, b, - TCA_STATS_BASIC); + return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running); } EXPORT_SYMBOL(gnet_stats_copy_basic); /** * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV - * @running: seqcount_t pointer * @d: dumping handle * @cpu: copy statistic per cpu * @b: basic statistics + * @running: true if @b represents a running qdisc, thus @b's + * internal values might change during basic reads. + * Only used if @cpu is NULL + * + * Context: task; must not be run from IRQ or BH contexts * * Appends the basic statistics to the top level TLV created by * gnet_stats_start_copy(). 
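The helpers above replace gnet_stats_basic_packed plus the per-qdisc running seqcount with a u64_stats-backed gnet_stats_basic_sync, and the copy functions now take a plain "running" flag instead of a seqcount_t pointer. A small sketch of how a producer and a dump path fit together under the new API follows; struct my_sched_priv and the my_*() wrappers are invented, while gnet_stats_basic_sync_init() and gnet_stats_copy_basic() are the calls this hunk introduces or re-signatures, and _bstats_update() is assumed to be converted to the new struct elsewhere in the series.

    #include <net/gen_stats.h>
    #include <net/sch_generic.h>	/* _bstats_update(), assumed updated by this series */

    struct my_sched_priv {
    	struct gnet_stats_basic_sync bstats;	/* was gnet_stats_basic_packed */
    };

    static void my_stats_init(struct my_sched_priv *p)
    {
    	/* must not be open-coded: it also initialises the u64_stats syncp */
    	gnet_stats_basic_sync_init(&p->bstats);
    }

    static void my_account_skb(struct my_sched_priv *p, unsigned int len)
    {
    	/* writer side: the syncp inside bstats replaces the qdisc running
    	 * seqcount that the old __gnet_stats_copy_basic() had to be given
    	 */
    	_bstats_update(&p->bstats, len, 1);
    }

    static int my_dump_stats(struct my_sched_priv *p, struct gnet_dump *d,
    			 bool running)
    {
    	/* no percpu stats here, so pass NULL; "running" says whether the
    	 * counters may still change while being read
    	 */
    	return gnet_stats_copy_basic(d, NULL, &p->bstats, running);
    }

When per-CPU counters exist, the same call takes the __percpu pointer instead and the running flag is not consulted, matching the "Only used if @cpu is NULL" note in the kernel-doc above.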
@@ -228,13 +284,12 @@ EXPORT_SYMBOL(gnet_stats_copy_basic); * if the room in the socket buffer was not sufficient. */ int -gnet_stats_copy_basic_hw(const seqcount_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b) +gnet_stats_copy_basic_hw(struct gnet_dump *d, + struct gnet_stats_basic_sync __percpu *cpu, + struct gnet_stats_basic_sync *b, + bool running) { - return ___gnet_stats_copy_basic(running, d, cpu, b, - TCA_STATS_BASIC_HW); + return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running); } EXPORT_SYMBOL(gnet_stats_copy_basic_hw); @@ -282,16 +337,15 @@ gnet_stats_copy_rate_est(struct gnet_dump *d, } EXPORT_SYMBOL(gnet_stats_copy_rate_est); -static void -__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, - const struct gnet_stats_queue __percpu *q) +static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *q) { int i; for_each_possible_cpu(i) { const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); - qstats->qlen = 0; + qstats->qlen += qcpu->backlog; qstats->backlog += qcpu->backlog; qstats->drops += qcpu->drops; qstats->requeues += qcpu->requeues; @@ -299,24 +353,21 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, } } -void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, - const struct gnet_stats_queue __percpu *cpu, - const struct gnet_stats_queue *q, - __u32 qlen) +void gnet_stats_add_queue(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *cpu, + const struct gnet_stats_queue *q) { if (cpu) { - __gnet_stats_copy_queue_cpu(qstats, cpu); + gnet_stats_add_queue_cpu(qstats, cpu); } else { - qstats->qlen = q->qlen; - qstats->backlog = q->backlog; - qstats->drops = q->drops; - qstats->requeues = q->requeues; - qstats->overlimits = q->overlimits; + qstats->qlen += q->qlen; + qstats->backlog += q->backlog; + qstats->drops += q->drops; + qstats->requeues += q->requeues; + qstats->overlimits += q->overlimits; } - - qstats->qlen = qlen; } -EXPORT_SYMBOL(__gnet_stats_copy_queue); +EXPORT_SYMBOL(gnet_stats_add_queue); /** * gnet_stats_copy_queue - copy queue statistics into statistics TLV @@ -339,7 +390,8 @@ gnet_stats_copy_queue(struct gnet_dump *d, { struct gnet_stats_queue qstats = {0}; - __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen); + gnet_stats_add_queue(&qstats, cpu_q, q); + qstats.qlen = qlen; if (d->compat_tc_stats) { d->tc_stats.drops = qstats.drops; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 2d5bc3a75faeca..47931c8be04b80 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -122,6 +122,8 @@ static void neigh_mark_dead(struct neighbour *n) list_del_init(&n->gc_list); atomic_dec(&n->tbl->gc_entries); } + if (!list_empty(&n->managed_list)) + list_del_init(&n->managed_list); } static void neigh_update_gc_list(struct neighbour *n) @@ -130,7 +132,6 @@ static void neigh_update_gc_list(struct neighbour *n) write_lock_bh(&n->tbl->lock); write_lock(&n->lock); - if (n->dead) goto out; @@ -149,32 +150,59 @@ static void neigh_update_gc_list(struct neighbour *n) list_add_tail(&n->gc_list, &n->tbl->gc_list); atomic_inc(&n->tbl->gc_entries); } +out: + write_unlock(&n->lock); + write_unlock_bh(&n->tbl->lock); +} +static void neigh_update_managed_list(struct neighbour *n) +{ + bool on_managed_list, add_to_managed; + + write_lock_bh(&n->tbl->lock); + write_lock(&n->lock); + if (n->dead) + goto out; + + add_to_managed = n->flags & NTF_MANAGED; + on_managed_list = 
!list_empty(&n->managed_list); + + if (!add_to_managed && on_managed_list) + list_del_init(&n->managed_list); + else if (add_to_managed && !on_managed_list) + list_add_tail(&n->managed_list, &n->tbl->managed_list); out: write_unlock(&n->lock); write_unlock_bh(&n->tbl->lock); } -static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags, - int *notify) +static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify, + bool *gc_update, bool *managed_update) { - bool rc = false; - u8 ndm_flags; + u32 ndm_flags, old_flags = neigh->flags; if (!(flags & NEIGH_UPDATE_F_ADMIN)) - return rc; + return; + + ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0; + ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0; - ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0; - if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) { + if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) { if (ndm_flags & NTF_EXT_LEARNED) neigh->flags |= NTF_EXT_LEARNED; else neigh->flags &= ~NTF_EXT_LEARNED; - rc = true; *notify = 1; + *gc_update = true; + } + if ((old_flags ^ ndm_flags) & NTF_MANAGED) { + if (ndm_flags & NTF_MANAGED) + neigh->flags |= NTF_MANAGED; + else + neigh->flags &= ~NTF_MANAGED; + *notify = 1; + *managed_update = true; } - - return rc; } static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np, @@ -379,7 +407,7 @@ EXPORT_SYMBOL(neigh_ifdown); static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev, - bool exempt_from_gc) + u32 flags, bool exempt_from_gc) { struct neighbour *n = NULL; unsigned long now = jiffies; @@ -412,6 +440,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, n->updated = n->used = now; n->nud_state = NUD_NONE; n->output = neigh_blackhole; + n->flags = flags; seqlock_init(&n->hh.hh_lock); n->parms = neigh_parms_clone(&tbl->parms); timer_setup(&n->timer, neigh_timer_handler, 0); @@ -421,6 +450,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, refcount_set(&n->refcnt, 1); n->dead = 1; INIT_LIST_HEAD(&n->gc_list); + INIT_LIST_HEAD(&n->managed_list); atomic_inc(&tbl->entries); out: @@ -575,19 +605,18 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, } EXPORT_SYMBOL(neigh_lookup_nodev); -static struct neighbour *___neigh_create(struct neigh_table *tbl, - const void *pkey, - struct net_device *dev, - bool exempt_from_gc, bool want_ref) +static struct neighbour * +___neigh_create(struct neigh_table *tbl, const void *pkey, + struct net_device *dev, u32 flags, + bool exempt_from_gc, bool want_ref) { - struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc); - u32 hash_val; - unsigned int key_len = tbl->key_len; - int error; + u32 hash_val, key_len = tbl->key_len; + struct neighbour *n1, *rc, *n; struct neigh_hash_table *nht; + int error; + n = neigh_alloc(tbl, dev, flags, exempt_from_gc); trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc); - if (!n) { rc = ERR_PTR(-ENOBUFS); goto out; @@ -650,7 +679,8 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl, n->dead = 0; if (!exempt_from_gc) list_add_tail(&n->gc_list, &n->tbl->gc_list); - + if (n->flags & NTF_MANAGED) + list_add_tail(&n->managed_list, &n->tbl->managed_list); if (want_ref) neigh_hold(n); rcu_assign_pointer(n->next, @@ -674,7 +704,7 @@ static struct neighbour *___neigh_create(struct neigh_table *tbl, struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev, bool want_ref) { - return 
___neigh_create(tbl, pkey, dev, false, want_ref); + return ___neigh_create(tbl, pkey, dev, 0, false, want_ref); } EXPORT_SYMBOL(__neigh_create); @@ -1205,8 +1235,6 @@ static void neigh_update_hhs(struct neighbour *neigh) } } - - /* Generic update routine. -- lladdr is new lladdr or NULL, if it is not supplied. -- new is new state. @@ -1217,7 +1245,8 @@ static void neigh_update_hhs(struct neighbour *neigh) lladdr instead of overriding it if it is different. NEIGH_UPDATE_F_ADMIN means that the change is administrative. - + NEIGH_UPDATE_F_USE means that the entry is user triggered. + NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed. NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing NTF_ROUTER flag. NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as @@ -1225,17 +1254,15 @@ static void neigh_update_hhs(struct neighbour *neigh) Caller MUST hold reference count on the entry. */ - static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid, struct netlink_ext_ack *extack) { - bool ext_learn_change = false; - u8 old; - int err; - int notify = 0; - struct net_device *dev; + bool gc_update = false, managed_update = false; int update_isrouter = 0; + struct net_device *dev; + int err, notify = 0; + u8 old; trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid); @@ -1254,7 +1281,13 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, (old & (NUD_NOARP | NUD_PERMANENT))) goto out; - ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); + neigh_update_flags(neigh, flags, ¬ify, &gc_update, &managed_update); + if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) { + new = old & ~NUD_PERMANENT; + neigh->nud_state = new; + err = 0; + goto out; + } if (!(new & NUD_VALID)) { neigh_del_timer(neigh); @@ -1399,15 +1432,13 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, if (update_isrouter) neigh_update_is_router(neigh, flags, ¬ify); write_unlock_bh(&neigh->lock); - - if (((new ^ old) & NUD_PERMANENT) || ext_learn_change) + if (((new ^ old) & NUD_PERMANENT) || gc_update) neigh_update_gc_list(neigh); - + if (managed_update) + neigh_update_managed_list(neigh); if (notify) neigh_update_notify(neigh, nlmsg_pid); - trace_neigh_update_done(neigh, err); - return err; } @@ -1533,6 +1564,20 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb) } EXPORT_SYMBOL(neigh_direct_output); +static void neigh_managed_work(struct work_struct *work) +{ + struct neigh_table *tbl = container_of(work, struct neigh_table, + managed_work.work); + struct neighbour *neigh; + + write_lock_bh(&tbl->lock); + list_for_each_entry(neigh, &tbl->managed_list, managed_list) + neigh_event_send(neigh, NULL); + queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, + NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME)); + write_unlock_bh(&tbl->lock); +} + static void neigh_proxy_process(struct timer_list *t) { struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); @@ -1679,6 +1724,8 @@ void neigh_table_init(int index, struct neigh_table *tbl) INIT_LIST_HEAD(&tbl->parms_list); INIT_LIST_HEAD(&tbl->gc_list); + INIT_LIST_HEAD(&tbl->managed_list); + list_add(&tbl->parms.list, &tbl->parms_list); write_pnet(&tbl->parms.net, &init_net); refcount_set(&tbl->parms.refcnt, 1); @@ -1710,9 +1757,13 @@ void neigh_table_init(int index, struct neigh_table *tbl) WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN); rwlock_init(&tbl->lock); + INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); 
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, tbl->parms.reachable_time); + INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work); + queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0); + timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0); skb_queue_head_init_class(&tbl->proxy_queue, &neigh_table_proxy_queue_class); @@ -1783,6 +1834,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = { [NDA_MASTER] = { .type = NLA_U32 }, [NDA_PROTOCOL] = { .type = NLA_U8 }, [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK), [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, }; @@ -1855,7 +1907,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE | - NEIGH_UPDATE_F_OVERRIDE_ISROUTER; + NEIGH_UPDATE_F_OVERRIDE_ISROUTER; struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; @@ -1864,6 +1916,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, struct neighbour *neigh; void *dst, *lladdr; u8 protocol = 0; + u32 ndm_flags; int err; ASSERT_RTNL(); @@ -1879,6 +1932,15 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, } ndm = nlmsg_data(nlh); + ndm_flags = ndm->ndm_flags; + if (tb[NDA_FLAGS_EXT]) { + u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]); + + BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE < + (sizeof(ndm->ndm_flags) * BITS_PER_BYTE + + hweight32(NTF_EXT_MASK))); + ndm_flags |= (ext << NTF_EXT_SHIFT); + } if (ndm->ndm_ifindex) { dev = __dev_get_by_index(net, ndm->ndm_ifindex); if (dev == NULL) { @@ -1906,14 +1968,18 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[NDA_PROTOCOL]) protocol = nla_get_u8(tb[NDA_PROTOCOL]); - - if (ndm->ndm_flags & NTF_PROXY) { + if (ndm_flags & NTF_PROXY) { struct pneigh_entry *pn; + if (ndm_flags & NTF_MANAGED) { + NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination"); + goto out; + } + err = -ENOBUFS; pn = pneigh_lookup(tbl, net, dst, dev, 1); if (pn) { - pn->flags = ndm->ndm_flags; + pn->flags = ndm_flags; if (protocol) pn->protocol = protocol; err = 0; @@ -1933,16 +1999,24 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, neigh = neigh_lookup(tbl, dst, dev); if (neigh == NULL) { - bool exempt_from_gc; + bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT; + bool exempt_from_gc = ndm_permanent || + ndm_flags & NTF_EXT_LEARNED; if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { err = -ENOENT; goto out; } + if (ndm_permanent && (ndm_flags & NTF_MANAGED)) { + NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry"); + err = -EINVAL; + goto out; + } - exempt_from_gc = ndm->ndm_state & NUD_PERMANENT || - ndm->ndm_flags & NTF_EXT_LEARNED; - neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true); + neigh = ___neigh_create(tbl, dst, dev, + ndm_flags & + (NTF_EXT_LEARNED | NTF_MANAGED), + exempt_from_gc, true); if (IS_ERR(neigh)) { err = PTR_ERR(neigh); goto out; @@ -1961,22 +2035,22 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, if (protocol) neigh->protocol = protocol; - - if (ndm->ndm_flags & NTF_EXT_LEARNED) + if (ndm_flags & NTF_EXT_LEARNED) flags |= NEIGH_UPDATE_F_EXT_LEARNED; - - if (ndm->ndm_flags & NTF_ROUTER) + if (ndm_flags & NTF_ROUTER) flags |= NEIGH_UPDATE_F_ISROUTER; + if (ndm_flags & NTF_MANAGED) + flags |= NEIGH_UPDATE_F_MANAGED; + if (ndm_flags & NTF_USE) + flags |= NEIGH_UPDATE_F_USE; - if (ndm->ndm_flags & NTF_USE) { + err = __neigh_update(neigh, 
lladdr, ndm->ndm_state, flags, + NETLINK_CB(skb).portid, extack); + if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) { neigh_event_send(neigh, NULL); err = 0; - } else - err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, - NETLINK_CB(skb).portid, extack); - + } neigh_release(neigh); - out: return err; } @@ -2427,6 +2501,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, u32 pid, u32 seq, int type, unsigned int flags) { + u32 neigh_flags, neigh_flags_ext; unsigned long now = jiffies; struct nda_cacheinfo ci; struct nlmsghdr *nlh; @@ -2436,11 +2511,14 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, if (nlh == NULL) return -EMSGSIZE; + neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT; + neigh_flags = neigh->flags & NTF_OLD_MASK; + ndm = nlmsg_data(nlh); ndm->ndm_family = neigh->ops->family; ndm->ndm_pad1 = 0; ndm->ndm_pad2 = 0; - ndm->ndm_flags = neigh->flags; + ndm->ndm_flags = neigh_flags; ndm->ndm_type = neigh->type; ndm->ndm_ifindex = neigh->dev->ifindex; @@ -2471,6 +2549,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) goto nla_put_failure; + if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) + goto nla_put_failure; nlmsg_end(skb, nlh); return 0; @@ -2484,6 +2564,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, u32 pid, u32 seq, int type, unsigned int flags, struct neigh_table *tbl) { + u32 neigh_flags, neigh_flags_ext; struct nlmsghdr *nlh; struct ndmsg *ndm; @@ -2491,11 +2572,14 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, if (nlh == NULL) return -EMSGSIZE; + neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT; + neigh_flags = pn->flags & NTF_OLD_MASK; + ndm = nlmsg_data(nlh); ndm->ndm_family = tbl->family; ndm->ndm_pad1 = 0; ndm->ndm_pad2 = 0; - ndm->ndm_flags = pn->flags | NTF_PROXY; + ndm->ndm_flags = neigh_flags | NTF_PROXY; ndm->ndm_type = RTN_UNICAST; ndm->ndm_ifindex = pn->dev ? 
pn->dev->ifindex : 0; ndm->ndm_state = NUD_NONE; @@ -2505,6 +2589,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) goto nla_put_failure; + if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) + goto nla_put_failure; nlmsg_end(skb, nlh); return 0; @@ -2820,6 +2906,7 @@ static inline size_t neigh_nlmsg_size(void) + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ + nla_total_size(sizeof(struct nda_cacheinfo)) + nla_total_size(4) /* NDA_PROBES */ + + nla_total_size(4) /* NDA_FLAGS_EXT */ + nla_total_size(1); /* NDA_PROTOCOL */ } @@ -2848,6 +2935,7 @@ static inline size_t pneigh_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ + + nla_total_size(4) /* NDA_FLAGS_EXT */ + nla_total_size(1); /* NDA_PROTOCOL */ } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b2e49eb7001d6c..9c01c642cf9ef3 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -175,6 +175,14 @@ static int change_carrier(struct net_device *dev, unsigned long new_carrier) static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_carrier; this helps returning early + * without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_carrier) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_carrier); } @@ -196,6 +204,12 @@ static ssize_t speed_show(struct device *dev, struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; + /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall(); @@ -216,6 +230,12 @@ static ssize_t duplex_show(struct device *dev, struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; + /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall(); @@ -468,6 +488,14 @@ static ssize_t proto_down_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_proto_down; this helps returning + * early without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_proto_down) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); @@ -478,6 +506,12 @@ static ssize_t phys_port_id_show(struct device *dev, struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; + /* The check is also done in dev_get_phys_port_id; this helps returning + * early without hitting the trylock/restart below. + */ + if (!netdev->netdev_ops->ndo_get_phys_port_id) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall(); @@ -500,6 +534,13 @@ static ssize_t phys_port_name_show(struct device *dev, struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; + /* The checks are also done in dev_get_phys_port_name; this helps + * returning early without hitting the trylock/restart below. 
+ */ + if (!netdev->netdev_ops->ndo_get_phys_port_name && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall(); @@ -522,6 +563,14 @@ static ssize_t phys_switch_id_show(struct device *dev, struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; + /* The checks are also done in dev_get_phys_port_name; this helps + * returning early without hitting the trylock/restart below. This works + * because recurse is false when calling dev_get_port_parent_id. + */ + if (!netdev->netdev_ops->ndo_get_port_parent_id && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall(); @@ -1226,6 +1275,12 @@ static ssize_t tx_maxrate_store(struct netdev_queue *queue, if (!capable(CAP_NET_ADMIN)) return -EPERM; + /* The check is also done later; this helps returning early without + * hitting the trylock/restart below. + */ + if (!dev->netdev_ops->ndo_set_tx_maxrate) + return -EOPNOTSUPP; + err = kstrtou32(buf, 10, &rate); if (err < 0) return err; @@ -1869,7 +1924,7 @@ static struct class net_class __ro_after_init = { .get_ownership = net_get_ownership, }; -#ifdef CONFIG_OF_NET +#ifdef CONFIG_OF static int of_dev_node_match(struct device *dev, const void *data) { for (; dev; dev = dev->parent) { diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a448a9b5bb2d6c..202fa5eacd0f9b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -473,7 +473,9 @@ struct net *copy_net_ns(unsigned long flags, if (rv < 0) { put_userns: +#ifdef CONFIG_KEYS key_remove_domain(net->key_domain); +#endif put_user_ns(user_ns); net_free(net); dec_ucounts: @@ -605,7 +607,9 @@ static void cleanup_net(struct work_struct *work) list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { list_del_init(&net->exit_list); dec_net_namespaces(net->ucounts); +#ifdef CONFIG_KEYS key_remove_domain(net->key_domain); +#endif put_user_ns(net->user_ns); net_free(net); } diff --git a/drivers/of/of_net.c b/net/core/of_net.c similarity index 85% rename from drivers/of/of_net.c rename to net/core/of_net.c index dbac3a172a11eb..f1a9bf7578e7a7 100644 --- a/drivers/of/of_net.c +++ b/net/core/of_net.c @@ -143,3 +143,28 @@ int of_get_mac_address(struct device_node *np, u8 *addr) return of_get_mac_addr_nvmem(np, addr); } EXPORT_SYMBOL(of_get_mac_address); + +/** + * of_get_ethdev_address() + * @np: Caller's Device Node + * @dev: Pointer to netdevice which address will be updated + * + * Search the device tree for the best MAC address to use. + * If found set @dev->dev_addr to that address. + * + * See documentation of of_get_mac_address() for more information on how + * the best address is determined. + * + * Return: 0 on success and errno in case of error. + */ +int of_get_ethdev_address(struct device_node *np, struct net_device *dev) +{ + u8 addr[ETH_ALEN]; + int ret; + + ret = of_get_mac_address(np, addr); + if (!ret) + eth_hw_addr_set(dev, addr); + return ret; +} +EXPORT_SYMBOL(of_get_ethdev_address); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 1a6978427d6c86..9b60e4301a44fb 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -49,6 +49,12 @@ static int page_pool_init(struct page_pool *pool, * which is the XDP_TX use-case. */ if (pool->p.flags & PP_FLAG_DMA_MAP) { + /* DMA-mapping is not supported on 32-bit systems with + * 64-bit DMA mapping. 
+ */ + if (sizeof(dma_addr_t) > sizeof(unsigned long)) + return -EOPNOTSUPP; + if ((pool->p.dma_dir != DMA_FROM_DEVICE) && (pool->p.dma_dir != DMA_BIDIRECTIONAL)) return -EINVAL; @@ -69,10 +75,6 @@ static int page_pool_init(struct page_pool *pool, */ } - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT && - pool->p.flags & PP_FLAG_PAGE_FRAG) - return -EINVAL; - if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) return -ENOMEM; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8ccce85562a1da..2af8aeeadadf0a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -301,7 +301,7 @@ int rtnl_unregister(int protocol, int msgtype) } link = rtnl_dereference(tab[msgindex]); - rcu_assign_pointer(tab[msgindex], NULL); + RCU_INIT_POINTER(tab[msgindex], NULL); rtnl_unlock(); kfree_rcu(link, rcu); @@ -337,7 +337,7 @@ void rtnl_unregister_all(int protocol) if (!link) continue; - rcu_assign_pointer(tab[msgindex], NULL); + RCU_INIT_POINTER(tab[msgindex], NULL); kfree_rcu(link, rcu); } rtnl_unlock(); @@ -3204,8 +3204,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, dev->mtu = mtu; } if (tb[IFLA_ADDRESS]) { - memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), - nla_len(tb[IFLA_ADDRESS])); + __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]), + nla_len(tb[IFLA_ADDRESS])); dev->addr_assign_type = NET_ADDR_SET; } if (tb[IFLA_BROADCAST]) @@ -3804,9 +3804,8 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; - size_t if_info_size; - skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags); + skb = nlmsg_new(if_nlmsg_size(dev, 0), flags); if (skb == NULL) goto errout; @@ -4384,7 +4383,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) continue; if (br_dev != netdev_master_upper_dev_get(dev) && - !(dev->priv_flags & IFF_EBRIDGE)) + !netif_is_bridge_master(dev)) continue; cops = ops; } diff --git a/net/core/selftests.c b/net/core/selftests.c index 9077fa9698928e..acb1ee97bbd324 100644 --- a/net/core/selftests.c +++ b/net/core/selftests.c @@ -15,8 +15,8 @@ #include struct net_packet_attrs { - unsigned char *src; - unsigned char *dst; + const unsigned char *src; + const unsigned char *dst; u32 ip_src; u32 ip_dst; bool tcp; @@ -173,8 +173,8 @@ static int net_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct net_test_priv *tpriv = pt->af_packet_priv; - unsigned char *src = tpriv->packet->src; - unsigned char *dst = tpriv->packet->dst; + const unsigned char *src = tpriv->packet->src; + const unsigned char *dst = tpriv->packet->dst; struct netsfhdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index fe9358437380c8..67a9188d8a49c8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include @@ -135,34 +136,31 @@ struct napi_alloc_cache { static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); -static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask, - unsigned int align_mask) +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); - return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask); -} - -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) -{ fragsz = SKB_DATA_ALIGN(fragsz); - return 
__alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); } EXPORT_SYMBOL(__napi_alloc_frag_align); void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { - struct page_frag_cache *nc; void *data; fragsz = SKB_DATA_ALIGN(fragsz); if (in_hardirq() || irqs_disabled()) { - nc = this_cpu_ptr(&netdev_alloc_cache); + struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); } else { + struct napi_alloc_cache *nc; + local_bh_disable(); - data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + nc = this_cpu_ptr(&napi_alloc_cache); + data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); local_bh_enable(); } return data; @@ -398,8 +396,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, { struct kmem_cache *cache; struct sk_buff *skb; - u8 *data; + unsigned int osize; bool pfmemalloc; + u8 *data; cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; @@ -431,7 +430,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, * Put skb_shared_info exactly at the end of allocated zone, * to allow max possible filling before reallocation. */ - size = SKB_WITH_OVERHEAD(ksize(data)); + osize = ksize(data); + size = SKB_WITH_OVERHEAD(osize); prefetchw(data + size); /* @@ -440,7 +440,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, * the tail pointer in struct sk_buff! */ memset(skb, 0, offsetof(struct sk_buff, tail)); - __build_skb_around(skb, data, 0); + __build_skb_around(skb, data, osize); skb->pfmemalloc = pfmemalloc; if (flags & SKB_ALLOC_FCLONE) { @@ -4441,6 +4441,9 @@ static const u8 skb_ext_type_len[] = { #if IS_ENABLED(CONFIG_MPTCP) [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), #endif +#if IS_ENABLED(CONFIG_MCTP_FLOWS) + [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), +#endif }; static __always_inline unsigned int skb_ext_total_length(void) @@ -4457,6 +4460,9 @@ static __always_inline unsigned int skb_ext_total_length(void) #endif #if IS_ENABLED(CONFIG_MPTCP) skb_ext_type_len[SKB_EXT_MPTCP] + +#endif +#if IS_ENABLED(CONFIG_MCTP_FLOWS) + skb_ext_type_len[SKB_EXT_MCTP] + #endif 0; } @@ -6530,6 +6536,14 @@ static void skb_ext_put_sp(struct sec_path *sp) } #endif +#ifdef CONFIG_MCTP_FLOWS +static void skb_ext_put_mctp(struct mctp_flow *flow) +{ + if (flow->key) + mctp_key_unref(flow->key); +} +#endif + void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) { struct skb_ext *ext = skb->extensions; @@ -6565,6 +6579,10 @@ void __skb_ext_put(struct skb_ext *ext) if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); #endif +#ifdef CONFIG_MCTP_FLOWS + if (__skb_ext_exist(ext, SKB_EXT_MCTP)) + skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP)); +#endif kmem_cache_free(skbuff_ext_cache, ext); } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index a86ef7e844f8c8..1ae52ac943f626 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -508,6 +508,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk, } static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, + u32 off, u32 len, struct sk_psock *psock, struct sock *sk, struct sk_msg *msg) @@ -521,11 +522,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, */ if (skb_linearize(skb)) return -EAGAIN; - num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); + num_sge = skb_to_sgvec(skb, msg->sg.data, off, 
len); if (unlikely(num_sge < 0)) return num_sge; - copied = skb->len; + copied = len; msg->sg.start = 0; msg->sg.size = copied; msg->sg.end = num_sge; @@ -536,9 +537,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, return copied; } -static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb); +static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, + u32 off, u32 len); -static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) +static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, + u32 off, u32 len) { struct sock *sk = psock->sk; struct sk_msg *msg; @@ -549,7 +552,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) * correctly. */ if (unlikely(skb->sk == sk)) - return sk_psock_skb_ingress_self(psock, skb); + return sk_psock_skb_ingress_self(psock, skb, off, len); msg = sk_psock_create_ingress_msg(sk, skb); if (!msg) return -EAGAIN; @@ -561,7 +564,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) * into user buffers. */ skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); if (err < 0) kfree(msg); return err; @@ -571,7 +574,8 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) * skb. In this case we do not need to check memory limits or skb_set_owner_r * because the skb is already accounted for here. */ -static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb) +static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, + u32 off, u32 len) { struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); struct sock *sk = psock->sk; @@ -581,7 +585,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb return -EAGAIN; sk_msg_init(msg); skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); if (err < 0) kfree(msg); return err; @@ -595,7 +599,7 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, return -EAGAIN; return skb_send_sock(psock->sk, skb, off, len); } - return sk_psock_skb_ingress(psock, skb); + return sk_psock_skb_ingress(psock, skb, off, len); } static void sk_psock_skb_state(struct sk_psock *psock, @@ -638,6 +642,12 @@ static void sk_psock_backlog(struct work_struct *work) while ((skb = skb_dequeue(&psock->ingress_skb))) { len = skb->len; off = 0; + if (skb_bpf_strparser(skb)) { + struct strp_msg *stm = strp_msg(skb); + + off = stm->offset; + len = stm->full_len; + } start: ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); @@ -877,6 +887,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb) * return code, but then didn't set a redirect interface. 
*/ if (unlikely(!sk_other)) { + skb_bpf_redirect_clear(skb); sock_drop(from->sk, skb); return -EIO; } @@ -944,6 +955,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, { struct sock *sk_other; int err = 0; + u32 len, off; switch (verdict) { case __SK_PASS: @@ -951,6 +963,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, sk_other = psock->sk; if (sock_flag(sk_other, SOCK_DEAD) || !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { + skb_bpf_redirect_clear(skb); goto out_free; } @@ -963,7 +976,15 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, * retrying later from workqueue. */ if (skb_queue_empty(&psock->ingress_skb)) { - err = sk_psock_skb_ingress_self(psock, skb); + len = skb->len; + off = 0; + if (skb_bpf_strparser(skb)) { + struct strp_msg *stm = strp_msg(skb); + + off = stm->offset; + len = stm->full_len; + } + err = sk_psock_skb_ingress_self(psock, skb, off, len); } if (err < 0) { spin_lock_bh(&psock->ingress_lock); @@ -1029,6 +1050,8 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) skb_dst_drop(skb); skb_bpf_redirect_clear(skb); ret = bpf_prog_run_pin_on_cpu(prog, skb); + if (ret == SK_PASS) + skb_bpf_set_strparser(skb); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb)); skb->sk = NULL; } diff --git a/net/core/sock.c b/net/core/sock.c index c1601f75ec4b3e..9862eefce21ede 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -350,7 +350,7 @@ void sk_error_report(struct sock *sk) } EXPORT_SYMBOL(sk_error_report); -static int sock_get_timeout(long timeo, void *optval, bool old_timeval) +int sock_get_timeout(long timeo, void *optval, bool old_timeval) { struct __kernel_sock_timeval tv; @@ -379,12 +379,11 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval) *(struct __kernel_sock_timeval *)optval = tv; return sizeof(tv); } +EXPORT_SYMBOL(sock_get_timeout); -static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, - bool old_timeval) +int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, + sockptr_t optval, int optlen, bool old_timeval) { - struct __kernel_sock_timeval tv; - if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { struct old_timeval32 tv32; @@ -393,8 +392,8 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, if (copy_from_sockptr(&tv32, optval, sizeof(tv32))) return -EFAULT; - tv.tv_sec = tv32.tv_sec; - tv.tv_usec = tv32.tv_usec; + tv->tv_sec = tv32.tv_sec; + tv->tv_usec = tv32.tv_usec; } else if (old_timeval) { struct __kernel_old_timeval old_tv; @@ -402,14 +401,28 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, return -EINVAL; if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv))) return -EFAULT; - tv.tv_sec = old_tv.tv_sec; - tv.tv_usec = old_tv.tv_usec; + tv->tv_sec = old_tv.tv_sec; + tv->tv_usec = old_tv.tv_usec; } else { - if (optlen < sizeof(tv)) + if (optlen < sizeof(*tv)) return -EINVAL; - if (copy_from_sockptr(&tv, optval, sizeof(tv))) + if (copy_from_sockptr(tv, optval, sizeof(*tv))) return -EFAULT; } + + return 0; +} +EXPORT_SYMBOL(sock_copy_user_timeval); + +static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, + bool old_timeval) +{ + struct __kernel_sock_timeval tv; + int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval); + + if (err) + return err; + if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) return -EDOM; @@ -947,6 +960,53 @@ void sock_set_mark(struct sock *sk, u32 val) } 
EXPORT_SYMBOL(sock_set_mark); +static void sock_release_reserved_memory(struct sock *sk, int bytes) +{ + /* Round down bytes to multiple of pages */ + bytes &= ~(SK_MEM_QUANTUM - 1); + + WARN_ON(bytes > sk->sk_reserved_mem); + sk->sk_reserved_mem -= bytes; + sk_mem_reclaim(sk); +} + +static int sock_reserve_memory(struct sock *sk, int bytes) +{ + long allocated; + bool charged; + int pages; + + if (!mem_cgroup_sockets_enabled || !sk->sk_memcg) + return -EOPNOTSUPP; + + if (!bytes) + return 0; + + pages = sk_mem_pages(bytes); + + /* pre-charge to memcg */ + charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages, + GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!charged) + return -ENOMEM; + + /* pre-charge to forward_alloc */ + allocated = sk_memory_allocated_add(sk, pages); + /* If the system goes into memory pressure with this + * precharge, give up and return error. + */ + if (allocated > sk_prot_mem_limits(sk, 1)) { + sk_memory_allocated_sub(sk, pages); + mem_cgroup_uncharge_skmem(sk->sk_memcg, pages); + return -ENOMEM; + } + sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT; + + sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT; + + return 0; +} + /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. @@ -1367,6 +1427,23 @@ int sock_setsockopt(struct socket *sock, int level, int optname, ~SOCK_BUF_LOCK_MASK); break; + case SO_RESERVE_MEM: + { + int delta; + + if (val < 0) { + ret = -EINVAL; + break; + } + + delta = val - sk->sk_reserved_mem; + if (delta < 0) + sock_release_reserved_memory(sk, -delta); + else + ret = sock_reserve_memory(sk, delta); + break; + } + default: ret = -ENOPROTOOPT; break; @@ -1750,6 +1827,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK; break; + case SO_RESERVE_MEM: + v.val = sk->sk_reserved_mem; + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). @@ -2063,6 +2144,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_dst_pending_confirm = 0; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; + newsk->sk_reserved_mem = 0; atomic_set(&newsk->sk_drops, 0); newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; diff --git a/net/core/stream.c b/net/core/stream.c index 4f1d4aa5fb38d9..06b36c730ce8a2 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -195,14 +195,11 @@ void sk_stream_kill_queues(struct sock *sk) /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); - /* Next, the error queue. */ - __skb_queue_purge(&sk->sk_error_queue); - /* Next, the write queue. */ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); /* Account for returned memory. 
*/ - sk_mem_reclaim(sk); + sk_mem_reclaim_final(sk); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); diff --git a/net/core/xdp.c b/net/core/xdp.c index cc92ccb384325f..5ddc29f29bada2 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -143,8 +143,6 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) if (xdp_rxq->reg_state == REG_STATE_UNUSED) return; - WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG"); - xdp_rxq_info_unreg_mem_model(xdp_rxq); xdp_rxq->reg_state = REG_STATE_UNREGISTERED; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index c5c1d2b8045e8e..5183e627468d89 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -48,7 +48,7 @@ extern bool dccp_debug; extern struct inet_hashinfo dccp_hashinfo; -extern struct percpu_counter dccp_orphan_count; +DECLARE_PER_CPU(unsigned int, dccp_orphan_count); void dccp_time_wait(struct sock *sk, int state, int timeo); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index abb5c596a81763..fc44dadc778bbe 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -42,8 +42,8 @@ DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly; EXPORT_SYMBOL_GPL(dccp_statistics); -struct percpu_counter dccp_orphan_count; -EXPORT_SYMBOL_GPL(dccp_orphan_count); +DEFINE_PER_CPU(unsigned int, dccp_orphan_count); +EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count); struct inet_hashinfo dccp_hashinfo; EXPORT_SYMBOL_GPL(dccp_hashinfo); @@ -1055,7 +1055,7 @@ void dccp_close(struct sock *sk, long timeout) bh_lock_sock(sk); WARN_ON(sock_owned_by_user(sk)); - percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(dccp_orphan_count); /* Have we already been destroyed by a softirq or backlog? */ if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) @@ -1115,13 +1115,10 @@ static int __init dccp_init(void) BUILD_BUG_ON(sizeof(struct dccp_skb_cb) > sizeof_field(struct sk_buff, cb)); - rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL); - if (rc) - goto out_fail; inet_hashinfo_init(&dccp_hashinfo); rc = inet_hashinfo2_init_mod(&dccp_hashinfo); if (rc) - goto out_free_percpu; + goto out_fail; rc = -ENOBUFS; dccp_hashinfo.bind_bucket_cachep = kmem_cache_create("dccp_bind_bucket", @@ -1226,8 +1223,6 @@ static int __init dccp_init(void) kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); out_free_hashinfo2: inet_hashinfo2_free_mod(&dccp_hashinfo); -out_free_percpu: - percpu_counter_destroy(&dccp_orphan_count); out_fail: dccp_hashinfo.bhash = NULL; dccp_hashinfo.ehash = NULL; @@ -1250,7 +1245,6 @@ static void __exit dccp_fini(void) dccp_ackvec_exit(); dccp_sysctl_exit(); inet_hashinfo2_free_mod(&dccp_hashinfo); - percpu_counter_destroy(&dccp_orphan_count); } module_init(dccp_init); diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index d8ee15f1c7a9ff..8cb87b5067ee2c 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -92,13 +92,6 @@ config NET_DSA_TAG_KSZ Say Y if you want to enable support for tagging frames for the Microchip 8795/9477/9893 families of switches. -config NET_DSA_TAG_RTL4_A - tristate "Tag driver for Realtek 4 byte protocol A tags" - help - Say Y or M if you want to enable support for tagging frames for the - Realtek switches with 4 byte protocol A tags, sich as found in - the Realtek RTL8366RB. - config NET_DSA_TAG_OCELOT tristate "Tag driver for Ocelot family of switches, using NPI port" select PACKING @@ -126,6 +119,19 @@ config NET_DSA_TAG_QCA Say Y or M if you want to enable support for tagging frames for the Qualcomm Atheros QCA8K switches. 
+config NET_DSA_TAG_RTL4_A + tristate "Tag driver for Realtek 4 byte protocol A tags" + help + Say Y or M if you want to enable support for tagging frames for the + Realtek switches with 4 byte protocol A tags, sich as found in + the Realtek RTL8366RB. + +config NET_DSA_TAG_RTL8_4 + tristate "Tag driver for Realtek 8 byte protocol 4 tags" + help + Say Y or M if you want to enable support for tagging frames for Realtek + switches with 8 byte protocol 4 tags, such as the Realtek RTL8365MB-VC. + config NET_DSA_TAG_LAN9303 tristate "Tag driver for SMSC/Microchip LAN9303 family of switches" help diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 67ea009f242cbc..9f75820e7c9802 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -10,12 +10,13 @@ obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o -obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o +obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o +obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 41f36ad8b0ec67..ea5169e671aeae 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -280,23 +280,22 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, } #ifdef CONFIG_PM_SLEEP -static bool dsa_is_port_initialized(struct dsa_switch *ds, int p) +static bool dsa_port_is_initialized(const struct dsa_port *dp) { - const struct dsa_port *dp = dsa_to_port(ds, p); - return dp->type == DSA_PORT_TYPE_USER && dp->slave; } int dsa_switch_suspend(struct dsa_switch *ds) { - int i, ret = 0; + struct dsa_port *dp; + int ret = 0; /* Suspend slave network devices */ - for (i = 0; i < ds->num_ports; i++) { - if (!dsa_is_port_initialized(ds, i)) + dsa_switch_for_each_port(dp, ds) { + if (!dsa_port_is_initialized(dp)) continue; - ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave); + ret = dsa_slave_suspend(dp->slave); if (ret) return ret; } @@ -310,7 +309,8 @@ EXPORT_SYMBOL_GPL(dsa_switch_suspend); int dsa_switch_resume(struct dsa_switch *ds) { - int i, ret = 0; + struct dsa_port *dp; + int ret = 0; if (ds->ops->resume) ret = ds->ops->resume(ds); @@ -319,11 +319,11 @@ int dsa_switch_resume(struct dsa_switch *ds) return ret; /* Resume slave network devices */ - for (i = 0; i < ds->num_ports; i++) { - if (!dsa_is_port_initialized(ds, i)) + dsa_switch_for_each_port(dp, ds) { + if (!dsa_port_is_initialized(dp)) continue; - ret = dsa_slave_resume(dsa_to_port(ds, i)->slave); + ret = dsa_slave_resume(dp->slave); if (ret) return ret; } diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index e9911b18bdbfa3..826957b6442b06 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -399,11 +399,8 @@ static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst) if (!dsa_port_is_cpu(cpu_dp)) continue; - list_for_each_entry(dp, &dst->ports, list) { - /* Prefer a local CPU port */ - if (dp->ds != cpu_dp->ds) - continue; - + /* Prefer a local CPU port */ + dsa_switch_for_each_port(dp, cpu_dp->ds) { /* Prefer the first local CPU port found */ if (dp->cpu_dp) continue; @@ -436,6 +433,7 @@ static 
int dsa_port_setup(struct dsa_port *dp) if (dp->setup) return 0; + mutex_init(&dp->addr_lists_lock); INIT_LIST_HEAD(&dp->fdbs); INIT_LIST_HEAD(&dp->mdbs); @@ -802,17 +800,16 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) { const struct dsa_device_ops *tag_ops = ds->dst->tag_ops; struct dsa_switch_tree *dst = ds->dst; - int port, err; + struct dsa_port *cpu_dp; + int err; if (tag_ops->proto == dst->default_proto) return 0; - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - + dsa_switch_for_each_cpu_port(cpu_dp, ds) { rtnl_lock(); - err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto); + err = ds->ops->change_tag_protocol(ds, cpu_dp->index, + tag_ops->proto); rtnl_unlock(); if (err) { dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n", @@ -850,19 +847,13 @@ static int dsa_switch_setup(struct dsa_switch *ds) dl_priv = devlink_priv(ds->devlink); dl_priv->ds = ds; - err = devlink_register(ds->devlink); - if (err) - goto free_devlink; - /* Setup devlink port instances now, so that the switch * setup() can register regions etc, against the ports */ - list_for_each_entry(dp, &ds->dst->ports, list) { - if (dp->ds == ds) { - err = dsa_port_devlink_setup(dp); - if (err) - goto unregister_devlink_ports; - } + dsa_switch_for_each_port(dp, ds) { + err = dsa_port_devlink_setup(dp); + if (err) + goto unregister_devlink_ports; } err = dsa_switch_register_notifier(ds); @@ -879,8 +870,6 @@ static int dsa_switch_setup(struct dsa_switch *ds) if (err) goto teardown; - devlink_params_publish(ds->devlink); - if (!ds->slave_mii_bus && ds->ops->phy_read) { ds->slave_mii_bus = mdiobus_alloc(); if (!ds->slave_mii_bus) { @@ -896,7 +885,7 @@ static int dsa_switch_setup(struct dsa_switch *ds) } ds->setup = true; - + devlink_register(ds->devlink); return 0; free_slave_mii_bus: @@ -908,14 +897,10 @@ static int dsa_switch_setup(struct dsa_switch *ds) unregister_notifier: dsa_switch_unregister_notifier(ds); unregister_devlink_ports: - list_for_each_entry(dp, &ds->dst->ports, list) - if (dp->ds == ds) - dsa_port_devlink_teardown(dp); - devlink_unregister(ds->devlink); -free_devlink: + dsa_switch_for_each_port(dp, ds) + dsa_port_devlink_teardown(dp); devlink_free(ds->devlink); ds->devlink = NULL; - return err; } @@ -926,22 +911,23 @@ static void dsa_switch_teardown(struct dsa_switch *ds) if (!ds->setup) return; + if (ds->devlink) + devlink_unregister(ds->devlink); + if (ds->slave_mii_bus && ds->ops->phy_read) { mdiobus_unregister(ds->slave_mii_bus); mdiobus_free(ds->slave_mii_bus); ds->slave_mii_bus = NULL; } - dsa_switch_unregister_notifier(ds); - if (ds->ops->teardown) ds->ops->teardown(ds); + dsa_switch_unregister_notifier(ds); + if (ds->devlink) { - list_for_each_entry(dp, &ds->dst->ports, list) - if (dp->ds == ds) - dsa_port_devlink_teardown(dp); - devlink_unregister(ds->devlink); + dsa_switch_for_each_port(dp, ds) + dsa_port_devlink_teardown(dp); devlink_free(ds->devlink); ds->devlink = NULL; } @@ -1157,7 +1143,7 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, goto out_unlock; list_for_each_entry(dp, &dst->ports, list) { - if (!dsa_is_user_port(dp->ds, dp->index)) + if (!dsa_port_is_user(dp)) continue; if (dp->slave->flags & IFF_UP) @@ -1188,8 +1174,8 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) struct dsa_switch_tree *dst = ds->dst; struct dsa_port *dp; - list_for_each_entry(dp, &dst->ports, list) - if (dp->ds == ds && dp->index == index) + dsa_switch_for_each_port(dp, ds) + if (dp->index == 
index) return dp; dp = kzalloc(sizeof(*dp), GFP_KERNEL); @@ -1535,12 +1521,9 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd) static void dsa_switch_release_ports(struct dsa_switch *ds) { - struct dsa_switch_tree *dst = ds->dst; struct dsa_port *dp, *next; - list_for_each_entry_safe(dp, next, &dst->ports, list) { - if (dp->ds != ds) - continue; + dsa_switch_for_each_port_safe(dp, next, ds) { list_del(&dp->list); kfree(dp); } @@ -1632,13 +1615,7 @@ void dsa_switch_shutdown(struct dsa_switch *ds) mutex_lock(&dsa2_mutex); rtnl_lock(); - list_for_each_entry(dp, &ds->dst->ports, list) { - if (dp->ds != ds) - continue; - - if (!dsa_port_is_user(dp)) - continue; - + dsa_switch_for_each_user_port(dp, ds) { master = dp->cpu_dp->master; slave_dev = dp->slave; diff --git a/net/dsa/port.c b/net/dsa/port.c index 616330a16d3191..f6f12ad2b52514 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -380,6 +380,8 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br) switchdev_bridge_port_unoffload(brport_dev, dp, &dsa_slave_switchdev_notifier, &dsa_slave_switchdev_blocking_notifier); + + dsa_flush_workqueue(); } void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) @@ -515,14 +517,15 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, struct netlink_ext_ack *extack) { struct dsa_switch *ds = dp->ds; - int err, i; + struct dsa_port *other_dp; + int err; /* VLAN awareness was off, so the question is "can we turn it on". * We may have had 8021q uppers, those need to go. Make sure we don't * enter an inconsistent state: deny changing the VLAN awareness state * as long as we have 8021q uppers. */ - if (vlan_filtering && dsa_is_user_port(ds, dp->index)) { + if (vlan_filtering && dsa_port_is_user(dp)) { struct net_device *upper_dev, *slave = dp->slave; struct net_device *br = dp->bridge_dev; struct list_head *iter; @@ -557,10 +560,10 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, * different ports of the same switch device and one of them has a * different setting than what is being requested. */ - for (i = 0; i < ds->num_ports; i++) { + dsa_switch_for_each_port(other_dp, ds) { struct net_device *other_bridge; - other_bridge = dsa_to_port(ds, i)->bridge_dev; + other_bridge = other_dp->bridge_dev; if (!other_bridge) continue; /* If it's the same bridge, it also has same @@ -607,20 +610,16 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, return err; if (ds->vlan_filtering_is_global) { - int port; + struct dsa_port *other_dp; ds->vlan_filtering = vlan_filtering; - for (port = 0; port < ds->num_ports; port++) { - struct net_device *slave; - - if (!dsa_is_user_port(ds, port)) - continue; + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *slave = dp->slave; /* We might be called in the unbind path, so not * all slave devices might still be registered. 
*/ - slave = dsa_to_port(ds, port)->slave; if (!slave) continue; @@ -1041,7 +1040,7 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config, struct phy_device *phydev = NULL; struct dsa_switch *ds = dp->ds; - if (dsa_is_user_port(ds, dp->index)) + if (dsa_port_is_user(dp)) phydev = dp->slave->phydev; if (!ds->ops->phylink_mac_link_down) { @@ -1169,6 +1168,10 @@ static int dsa_port_phylink_register(struct dsa_port *dp) dp->pl_config.type = PHYLINK_DEV; dp->pl_config.pcs_poll = ds->pcs_poll; + if (ds->ops->phylink_get_interfaces) + ds->ops->phylink_get_interfaces(ds, dp->index, + dp->pl_config.supported_interfaces); + dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, &dsa_port_phylink_mac_ops); if (IS_ERR(dp->pl)) { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a2bf2d8ac65b7d..ad61f6bc88868a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -174,7 +174,7 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) dev_uc_del(master, dev->dev_addr); out: - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -789,6 +789,37 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } +static void dsa_slave_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_phy_stats) + ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats); +} + +static void dsa_slave_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_mac_stats) + ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats); +} + +static void +dsa_slave_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_ctrl_stats) + ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats); +} + static void dsa_slave_net_selftest(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) { @@ -1695,6 +1726,9 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_strings = dsa_slave_get_strings, .get_ethtool_stats = dsa_slave_get_ethtool_stats, .get_sset_count = dsa_slave_get_sset_count, + .get_eth_phy_stats = dsa_slave_get_eth_phy_stats, + .get_eth_mac_stats = dsa_slave_get_eth_mac_stats, + .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats, .set_wol = dsa_slave_set_wol, .get_wol = dsa_slave_get_wol, .set_eee = dsa_slave_set_eee, @@ -1837,6 +1871,10 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) dp->pl_config.poll_fixed_state = true; } + if (ds->ops->phylink_get_interfaces) + ds->ops->phylink_get_interfaces(ds, dp->index, + dp->pl_config.supported_interfaces); + dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, &dsa_port_phylink_mac_ops); if (IS_ERR(dp->pl)) { @@ -1954,7 +1992,7 @@ int dsa_slave_create(struct dsa_port *port) slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; if (!is_zero_ether_addr(port->mac)) - ether_addr_copy(slave_dev->dev_addr, port->mac); + eth_hw_addr_set(slave_dev, port->mac); else eth_hw_addr_inherit(slave_dev, master); slave_dev->priv_flags |= IFF_NO_QUEUE; @@ -2334,7 +2372,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, dst = cpu_dp->ds->dst; list_for_each_entry(dp, &dst->ports, list) { - if 
(!dsa_is_user_port(dp->ds, dp->index)) + if (!dsa_port_is_user(dp)) continue; list_add(&dp->slave->close_list, &close_list); @@ -2379,7 +2417,6 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work) dp = dsa_to_port(ds, switchdev_work->port); - rtnl_lock(); switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: if (switchdev_work->host_addr) @@ -2414,9 +2451,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work) break; } - rtnl_unlock(); - dev_put(switchdev_work->dev); kfree(switchdev_work); } @@ -2437,10 +2472,9 @@ static bool dsa_foreign_dev_check(const struct net_device *dev, } static int dsa_slave_fdb_event(struct net_device *dev, - const struct net_device *orig_dev, - const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info, - unsigned long event) + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info) { struct dsa_switchdev_event_work *switchdev_work; struct dsa_port *dp = dsa_slave_to_port(dev); @@ -2489,31 +2523,11 @@ static int dsa_slave_fdb_event(struct net_device *dev, switchdev_work->vid = fdb_info->vid; switchdev_work->host_addr = host_addr; - /* Hold a reference for dsa_fdb_offload_notify */ - dev_hold(dev); dsa_schedule_work(&switchdev_work->work); return 0; } -static int -dsa_slave_fdb_add_to_device(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info) -{ - return dsa_slave_fdb_event(dev, orig_dev, ctx, fdb_info, - SWITCHDEV_FDB_ADD_TO_DEVICE); -} - -static int -dsa_slave_fdb_del_to_device(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info) -{ - return dsa_slave_fdb_event(dev, orig_dev, ctx, fdb_info, - SWITCHDEV_FDB_DEL_TO_DEVICE); -} - /* Called under rcu_read_lock() */ static int dsa_slave_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) @@ -2528,18 +2542,12 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused, dsa_slave_port_attr_set); return notifier_from_errno(err); case SWITCHDEV_FDB_ADD_TO_DEVICE: - err = switchdev_handle_fdb_add_to_device(dev, ptr, - dsa_slave_dev_check, - dsa_foreign_dev_check, - dsa_slave_fdb_add_to_device, - NULL); - return notifier_from_errno(err); case SWITCHDEV_FDB_DEL_TO_DEVICE: - err = switchdev_handle_fdb_del_to_device(dev, ptr, - dsa_slave_dev_check, - dsa_foreign_dev_check, - dsa_slave_fdb_del_to_device, - NULL); + err = switchdev_handle_fdb_event_to_device(dev, event, ptr, + dsa_slave_dev_check, + dsa_foreign_dev_check, + dsa_slave_fdb_event, + NULL); return notifier_from_errno(err); default: return NOTIFY_DONE; diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 6466d0539af9ff..bb155a16d45409 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -17,14 +17,11 @@ static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { - int i; - - for (i = 0; i < ds->num_ports; ++i) { - struct dsa_port *dp = dsa_to_port(ds, i); + struct dsa_port *dp; + dsa_switch_for_each_port(dp, ds) if (dp->ageing_time && dp->ageing_time < ageing_time) ageing_time = dp->ageing_time; - } return ageing_time; } @@ -49,10 +46,10 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds, return 0; } -static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port, - struct dsa_notifier_mtu_info *info) +static bool dsa_port_mtu_match(struct dsa_port *dp, + struct dsa_notifier_mtu_info *info) { 
- if (ds->index == info->sw_index && port == info->port) + if (dp->ds->index == info->sw_index && dp->index == info->port) return true; /* Do not propagate to other switches in the tree if the notifier was @@ -61,7 +58,7 @@ static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port, if (info->targeted_match) return false; - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) return true; return false; @@ -70,14 +67,16 @@ static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port, static int dsa_switch_mtu(struct dsa_switch *ds, struct dsa_notifier_mtu_info *info) { - int port, ret; + struct dsa_port *dp; + int ret; if (!ds->ops->port_change_mtu) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_mtu_match(ds, port, info)) { - ret = ds->ops->port_change_mtu(ds, port, info->mtu); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_mtu_match(dp, info)) { + ret = ds->ops->port_change_mtu(ds, dp->index, + info->mtu); if (ret) return ret; } @@ -120,7 +119,8 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, struct netlink_ext_ack extack = {0}; bool change_vlan_filtering = false; bool vlan_filtering; - int err, port; + struct dsa_port *dp; + int err; if (dst->index == info->tree_index && ds->index == info->sw_index && ds->ops->port_bridge_leave) @@ -150,10 +150,10 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, * VLAN-aware bridge. */ if (change_vlan_filtering && ds->vlan_filtering_is_global) { - for (port = 0; port < ds->num_ports; port++) { + dsa_switch_for_each_port(dp, ds) { struct net_device *bridge_dev; - bridge_dev = dsa_to_port(ds, port)->bridge_dev; + bridge_dev = dp->bridge_dev; if (bridge_dev && br_vlan_enabled(bridge_dev)) { change_vlan_filtering = false; @@ -179,19 +179,19 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, * DSA links) that sit between the targeted port on which the notifier was * emitted and its dedicated CPU port. 
*/ -static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port, - int info_sw_index, int info_port) +static bool dsa_port_host_address_match(struct dsa_port *dp, + int info_sw_index, int info_port) { struct dsa_port *targeted_dp, *cpu_dp; struct dsa_switch *targeted_ds; - targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index); + targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index); targeted_dp = dsa_to_port(targeted_ds, info_port); cpu_dp = targeted_dp->cpu_dp; - if (dsa_switch_is_upstream_of(ds, targeted_ds)) - return port == dsa_towards_port(ds, cpu_dp->ds->index, - cpu_dp->index); + if (dsa_switch_is_upstream_of(dp->ds, targeted_ds)) + return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index, + cpu_dp->index); return false; } @@ -209,31 +209,36 @@ static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list, return NULL; } -static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) +static int dsa_port_do_mdb_add(struct dsa_port *dp, + const struct switchdev_obj_port_mdb *mdb) { - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_switch *ds = dp->ds; struct dsa_mac_addr *a; - int err; + int port = dp->index; + int err = 0; /* No need to bother with refcounting for user ports */ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) return ds->ops->port_mdb_add(ds, port, mdb); + mutex_lock(&dp->addr_lists_lock); + a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); if (a) { refcount_inc(&a->refcount); - return 0; + goto out; } a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; + if (!a) { + err = -ENOMEM; + goto out; + } err = ds->ops->port_mdb_add(ds, port, mdb); if (err) { kfree(a); - return err; + goto out; } ether_addr_copy(a->addr, mdb->addr); @@ -241,64 +246,80 @@ static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port, refcount_set(&a->refcount, 1); list_add_tail(&a->list, &dp->mdbs); - return 0; +out: + mutex_unlock(&dp->addr_lists_lock); + + return err; } -static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) +static int dsa_port_do_mdb_del(struct dsa_port *dp, + const struct switchdev_obj_port_mdb *mdb) { - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_switch *ds = dp->ds; struct dsa_mac_addr *a; - int err; + int port = dp->index; + int err = 0; /* No need to bother with refcounting for user ports */ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) return ds->ops->port_mdb_del(ds, port, mdb); + mutex_lock(&dp->addr_lists_lock); + a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); - if (!a) - return -ENOENT; + if (!a) { + err = -ENOENT; + goto out; + } if (!refcount_dec_and_test(&a->refcount)) - return 0; + goto out; err = ds->ops->port_mdb_del(ds, port, mdb); if (err) { - refcount_inc(&a->refcount); - return err; + refcount_set(&a->refcount, 1); + goto out; } list_del(&a->list); kfree(a); - return 0; +out: + mutex_unlock(&dp->addr_lists_lock); + + return err; } -static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr, + u16 vid) { - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_switch *ds = dp->ds; struct dsa_mac_addr *a; - int err; + int port = dp->index; + int err = 0; /* No need to bother with refcounting for user ports */ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) return ds->ops->port_fdb_add(ds, port, addr, vid); + 
mutex_lock(&dp->addr_lists_lock); + a = dsa_mac_addr_find(&dp->fdbs, addr, vid); if (a) { refcount_inc(&a->refcount); - return 0; + goto out; } a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; + if (!a) { + err = -ENOMEM; + goto out; + } err = ds->ops->port_fdb_add(ds, port, addr, vid); if (err) { kfree(a); - return err; + goto out; } ether_addr_copy(a->addr, addr); @@ -306,53 +327,63 @@ static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port, refcount_set(&a->refcount, 1); list_add_tail(&a->list, &dp->fdbs); - return 0; +out: + mutex_unlock(&dp->addr_lists_lock); + + return err; } -static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr, + u16 vid) { - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_switch *ds = dp->ds; struct dsa_mac_addr *a; - int err; + int port = dp->index; + int err = 0; /* No need to bother with refcounting for user ports */ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) return ds->ops->port_fdb_del(ds, port, addr, vid); + mutex_lock(&dp->addr_lists_lock); + a = dsa_mac_addr_find(&dp->fdbs, addr, vid); - if (!a) - return -ENOENT; + if (!a) { + err = -ENOENT; + goto out; + } if (!refcount_dec_and_test(&a->refcount)) - return 0; + goto out; err = ds->ops->port_fdb_del(ds, port, addr, vid); if (err) { - refcount_inc(&a->refcount); - return err; + refcount_set(&a->refcount, 1); + goto out; } list_del(&a->list); kfree(a); - return 0; +out: + mutex_unlock(&dp->addr_lists_lock); + + return err; } static int dsa_switch_host_fdb_add(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { + struct dsa_port *dp; int err = 0; - int port; if (!ds->ops->port_fdb_add) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_host_address_match(ds, port, info->sw_index, - info->port)) { - err = dsa_switch_do_fdb_add(ds, port, info->addr, - info->vid); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_host_address_match(dp, info->sw_index, + info->port)) { + err = dsa_port_do_fdb_add(dp, info->addr, info->vid); if (err) break; } @@ -364,17 +395,16 @@ static int dsa_switch_host_fdb_add(struct dsa_switch *ds, static int dsa_switch_host_fdb_del(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { + struct dsa_port *dp; int err = 0; - int port; if (!ds->ops->port_fdb_del) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_host_address_match(ds, port, info->sw_index, - info->port)) { - err = dsa_switch_do_fdb_del(ds, port, info->addr, - info->vid); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_host_address_match(dp, info->sw_index, + info->port)) { + err = dsa_port_do_fdb_del(dp, info->addr, info->vid); if (err) break; } @@ -387,22 +417,24 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { int port = dsa_towards_port(ds, info->sw_index, info->port); + struct dsa_port *dp = dsa_to_port(ds, port); if (!ds->ops->port_fdb_add) return -EOPNOTSUPP; - return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid); + return dsa_port_do_fdb_add(dp, info->addr, info->vid); } static int dsa_switch_fdb_del(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { int port = dsa_towards_port(ds, info->sw_index, info->port); + struct dsa_port *dp = dsa_to_port(ds, port); if (!ds->ops->port_fdb_del) return -EOPNOTSUPP; - return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid); + return 
dsa_port_do_fdb_del(dp, info->addr, info->vid); } static int dsa_switch_hsr_join(struct dsa_switch *ds, @@ -468,37 +500,39 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds, struct dsa_notifier_mdb_info *info) { int port = dsa_towards_port(ds, info->sw_index, info->port); + struct dsa_port *dp = dsa_to_port(ds, port); if (!ds->ops->port_mdb_add) return -EOPNOTSUPP; - return dsa_switch_do_mdb_add(ds, port, info->mdb); + return dsa_port_do_mdb_add(dp, info->mdb); } static int dsa_switch_mdb_del(struct dsa_switch *ds, struct dsa_notifier_mdb_info *info) { int port = dsa_towards_port(ds, info->sw_index, info->port); + struct dsa_port *dp = dsa_to_port(ds, port); if (!ds->ops->port_mdb_del) return -EOPNOTSUPP; - return dsa_switch_do_mdb_del(ds, port, info->mdb); + return dsa_port_do_mdb_del(dp, info->mdb); } static int dsa_switch_host_mdb_add(struct dsa_switch *ds, struct dsa_notifier_mdb_info *info) { + struct dsa_port *dp; int err = 0; - int port; if (!ds->ops->port_mdb_add) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_host_address_match(ds, port, info->sw_index, - info->port)) { - err = dsa_switch_do_mdb_add(ds, port, info->mdb); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_host_address_match(dp, info->sw_index, + info->port)) { + err = dsa_port_do_mdb_add(dp, info->mdb); if (err) break; } @@ -510,16 +544,16 @@ static int dsa_switch_host_mdb_add(struct dsa_switch *ds, static int dsa_switch_host_mdb_del(struct dsa_switch *ds, struct dsa_notifier_mdb_info *info) { + struct dsa_port *dp; int err = 0; - int port; if (!ds->ops->port_mdb_del) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_host_address_match(ds, port, info->sw_index, - info->port)) { - err = dsa_switch_do_mdb_del(ds, port, info->mdb); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_host_address_match(dp, info->sw_index, + info->port)) { + err = dsa_port_do_mdb_del(dp, info->mdb); if (err) break; } @@ -528,13 +562,13 @@ static int dsa_switch_host_mdb_del(struct dsa_switch *ds, return err; } -static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port, - struct dsa_notifier_vlan_info *info) +static bool dsa_port_vlan_match(struct dsa_port *dp, + struct dsa_notifier_vlan_info *info) { - if (ds->index == info->sw_index && port == info->port) + if (dp->ds->index == info->sw_index && dp->index == info->port) return true; - if (dsa_is_dsa_port(ds, port)) + if (dsa_port_is_dsa(dp)) return true; return false; @@ -543,14 +577,15 @@ static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port, static int dsa_switch_vlan_add(struct dsa_switch *ds, struct dsa_notifier_vlan_info *info) { - int port, err; + struct dsa_port *dp; + int err; if (!ds->ops->port_vlan_add) return -EOPNOTSUPP; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_vlan_match(ds, port, info)) { - err = ds->ops->port_vlan_add(ds, port, info->vlan, + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_vlan_match(dp, info)) { + err = ds->ops->port_vlan_add(ds, dp->index, info->vlan, info->extack); if (err) return err; @@ -579,38 +614,34 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, struct dsa_notifier_tag_proto_info *info) { const struct dsa_device_ops *tag_ops = info->tag_ops; - int port, err; + struct dsa_port *dp, *cpu_dp; + int err; if (!ds->ops->change_tag_protocol) return -EOPNOTSUPP; ASSERT_RTNL(); - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - - err = ds->ops->change_tag_protocol(ds, 
port, tag_ops->proto); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = ds->ops->change_tag_protocol(ds, cpu_dp->index, + tag_ops->proto); if (err) return err; - dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops); + dsa_port_set_tag_protocol(cpu_dp, tag_ops); } /* Now that changing the tag protocol can no longer fail, let's update * the remaining bits which are "duplicated for faster access", and the * bits that depend on the tagger, such as the MTU. */ - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_user_port(ds, port)) { - struct net_device *slave; + dsa_switch_for_each_user_port(dp, ds) { + struct net_device *slave = dp->slave; - slave = dsa_to_port(ds, port)->slave; - dsa_slave_setup_tagger(slave); + dsa_slave_setup_tagger(slave); - /* rtnl_mutex is held in dsa_tree_change_tag_proto */ - dsa_slave_change_mtu(slave, slave->mtu); - } + /* rtnl_mutex is held in dsa_tree_change_tag_proto */ + dsa_slave_change_mtu(slave, slave->mtu); } return 0; diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index f8f7b7c34e7da3..72cac2c0af7b5a 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -6,7 +6,6 @@ * dsa_8021q_netdev_ops is registered for API compliance and not used * directly by callers. */ -#include #include #include @@ -78,22 +77,22 @@ EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid); /* Returns the VID to be inserted into the frame from xmit for switch steering * instructions on egress. Encodes switch ID and port ID. */ -u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port) +u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp) { - return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(ds->index) | - DSA_8021Q_PORT(port); + return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(dp->ds->index) | + DSA_8021Q_PORT(dp->index); } -EXPORT_SYMBOL_GPL(dsa_8021q_tx_vid); +EXPORT_SYMBOL_GPL(dsa_tag_8021q_tx_vid); /* Returns the VID that will be installed as pvid for this switch port, sent as * tagged egress towards the CPU port and decoded by the rcv function. */ -u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port) +u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp) { - return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) | - DSA_8021Q_PORT(port); + return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(dp->ds->index) | + DSA_8021Q_PORT(dp->index); } -EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid); +EXPORT_SYMBOL_GPL(dsa_tag_8021q_rx_vid); /* Returns the decoded switch ID from the RX VID. 
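The TX and RX helpers above pack a direction field, a switch ID and a port number into the 12 bits of VLAN ID space, and the receive path later undoes that packing with dsa_8021q_rx_switch_id() and dsa_8021q_rx_source_port(). A minimal standalone sketch of the idea follows; the EX_* bit positions are illustrative assumptions for this example only, not the kernel's DSA_8021Q_* field layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; the real masks are the DSA_8021Q_*
 * macros in include/linux/dsa/8021q.h and may sit on different bits.
 */
#define EX_DIR_RX	(1u << 10)		/* frame travels switch -> CPU */
#define EX_SWITCH(x)	(((x) & 0x7u) << 4)	/* switch index in the tree */
#define EX_PORT(x)	((x) & 0xfu)		/* port index on that switch */

/* Same shape as dsa_tag_8021q_rx_vid(): direction | switch ID | port */
static uint16_t ex_rx_vid(unsigned int sw, unsigned int port)
{
	return (uint16_t)(EX_DIR_RX | EX_SWITCH(sw) | EX_PORT(port));
}

/* Mirror of dsa_8021q_rx_source_port(): recover the port bits */
static unsigned int ex_rx_source_port(uint16_t vid)
{
	return vid & 0xfu;
}

int main(void)
{
	uint16_t vid = ex_rx_vid(1, 3);

	printf("rx vid 0x%03x decodes to source port %u\n",
	       (unsigned int)vid, ex_rx_source_port(vid));
	return 0;
}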
*/ int dsa_8021q_rx_switch_id(u16 vid) @@ -139,12 +138,13 @@ dsa_tag_8021q_vlan_find(struct dsa_8021q_context *ctx, int port, u16 vid) return NULL; } -static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port, - u16 vid, u16 flags) +static int dsa_port_do_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, + u16 flags) { - struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx; + struct dsa_switch *ds = dp->ds; struct dsa_tag_8021q_vlan *v; + int port = dp->index; int err; /* No need to bother with refcounting for user ports */ @@ -175,12 +175,12 @@ static int dsa_switch_do_tag_8021q_vlan_add(struct dsa_switch *ds, int port, return 0; } -static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port, - u16 vid) +static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid) { - struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; - struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_8021q_context *ctx = dp->ds->tag_8021q_ctx; + struct dsa_switch *ds = dp->ds; struct dsa_tag_8021q_vlan *v; + int port = dp->index; int err; /* No need to bother with refcounting for user ports */ @@ -207,14 +207,16 @@ static int dsa_switch_do_tag_8021q_vlan_del(struct dsa_switch *ds, int port, } static bool -dsa_switch_tag_8021q_vlan_match(struct dsa_switch *ds, int port, - struct dsa_notifier_tag_8021q_vlan_info *info) +dsa_port_tag_8021q_vlan_match(struct dsa_port *dp, + struct dsa_notifier_tag_8021q_vlan_info *info) { - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + struct dsa_switch *ds = dp->ds; + + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) return true; if (ds->dst->index == info->tree_index && ds->index == info->sw_index) - return port == info->port; + return dp->index == info->port; return false; } @@ -222,7 +224,8 @@ dsa_switch_tag_8021q_vlan_match(struct dsa_switch *ds, int port, int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, struct dsa_notifier_tag_8021q_vlan_info *info) { - int port, err; + struct dsa_port *dp; + int err; /* Since we use dsa_broadcast(), there might be other switches in other * trees which don't support tag_8021q, so don't return an error. 
@@ -232,21 +235,20 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, if (!ds->ops->tag_8021q_vlan_add || !ds->tag_8021q_ctx) return 0; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) { + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_tag_8021q_vlan_match(dp, info)) { u16 flags = 0; - if (dsa_is_user_port(ds, port)) + if (dsa_port_is_user(dp)) flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (vid_is_dsa_8021q_rxvlan(info->vid) && dsa_8021q_rx_switch_id(info->vid) == ds->index && - dsa_8021q_rx_source_port(info->vid) == port) + dsa_8021q_rx_source_port(info->vid) == dp->index) flags |= BRIDGE_VLAN_INFO_PVID; - err = dsa_switch_do_tag_8021q_vlan_add(ds, port, - info->vid, - flags); + err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid, + flags); if (err) return err; } @@ -258,15 +260,15 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, struct dsa_notifier_tag_8021q_vlan_info *info) { - int port, err; + struct dsa_port *dp; + int err; if (!ds->ops->tag_8021q_vlan_del || !ds->tag_8021q_ctx) return 0; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_switch_tag_8021q_vlan_match(ds, port, info)) { - err = dsa_switch_do_tag_8021q_vlan_del(ds, port, - info->vid); + dsa_switch_for_each_port(dp, ds) { + if (dsa_port_tag_8021q_vlan_match(dp, info)) { + err = dsa_port_do_tag_8021q_vlan_del(dp, info->vid); if (err) return err; } @@ -322,15 +324,14 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, * +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+ * swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3 */ -static bool dsa_tag_8021q_bridge_match(struct dsa_switch *ds, int port, - struct dsa_notifier_bridge_info *info) +static bool +dsa_port_tag_8021q_bridge_match(struct dsa_port *dp, + struct dsa_notifier_bridge_info *info) { - struct dsa_port *dp = dsa_to_port(ds, port); - /* Don't match on self */ - if (ds->dst->index == info->tree_index && - ds->index == info->sw_index && - port == info->port) + if (dp->ds->dst->index == info->tree_index && + dp->ds->index == info->sw_index && + dp->index == info->port) return false; if (dsa_port_is_user(dp)) @@ -344,21 +345,21 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, { struct dsa_switch *targeted_ds; struct dsa_port *targeted_dp; + struct dsa_port *dp; u16 targeted_rx_vid; - int err, port; + int err; if (!ds->tag_8021q_ctx) return 0; targeted_ds = dsa_switch_find(info->tree_index, info->sw_index); targeted_dp = dsa_to_port(targeted_ds, info->port); - targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port); + targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp); - for (port = 0; port < ds->num_ports; port++) { - struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_8021q_rx_vid(ds, port); + dsa_switch_for_each_port(dp, ds) { + u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - if (!dsa_tag_8021q_bridge_match(ds, port, info)) + if (!dsa_port_tag_8021q_bridge_match(dp, info)) continue; /* Install the RX VID of the targeted port in our VLAN table */ @@ -380,21 +381,20 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, { struct dsa_switch *targeted_ds; struct dsa_port *targeted_dp; + struct dsa_port *dp; u16 targeted_rx_vid; - int port; if (!ds->tag_8021q_ctx) return 0; targeted_ds = dsa_switch_find(info->tree_index, info->sw_index); targeted_dp = dsa_to_port(targeted_ds, info->port); - targeted_rx_vid = dsa_8021q_rx_vid(targeted_ds, info->port); + targeted_rx_vid = 
dsa_tag_8021q_rx_vid(targeted_dp); - for (port = 0; port < ds->num_ports; port++) { - struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_8021q_rx_vid(ds, port); + dsa_switch_for_each_port(dp, ds) { + u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - if (!dsa_tag_8021q_bridge_match(ds, port, info)) + if (!dsa_port_tag_8021q_bridge_match(dp, info)) continue; /* Remove the RX VID of the targeted port from our VLAN table */ @@ -433,8 +433,8 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) { struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_8021q_rx_vid(ds, port); - u16 tx_vid = dsa_8021q_tx_vid(ds, port); + u16 rx_vid = dsa_tag_8021q_rx_vid(dp); + u16 tx_vid = dsa_tag_8021q_tx_vid(dp); struct net_device *master; int err; @@ -478,8 +478,8 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) { struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_8021q_rx_vid(ds, port); - u16 tx_vid = dsa_8021q_tx_vid(ds, port); + u16 rx_vid = dsa_tag_8021q_rx_vid(dp); + u16 tx_vid = dsa_tag_8021q_tx_vid(dp); struct net_device *master; /* The CPU port is implicitly configured by diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index fa1d60d13ad903..3509fc967ca9d6 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -6,7 +6,6 @@ #include #include -#include #include #include "dsa_priv.h" diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 605b51ca692105..cd60b94fc175f7 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -4,15 +4,52 @@ #include #include "dsa_priv.h" +/* If the port is under a VLAN-aware bridge, remove the VLAN header from the + * payload and move it into the DSA tag, which will make the switch classify + * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero, + * which is the pvid of standalone and VLAN-unaware bridge ports. + */ +static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, + u64 *vlan_tci, u64 *tag_type) +{ + struct net_device *br = READ_ONCE(dp->bridge_dev); + struct vlan_ethhdr *hdr; + u16 proto, tci; + + if (!br || !br_vlan_enabled(br)) { + *vlan_tci = 0; + *tag_type = IFH_TAG_TYPE_C; + return; + } + + hdr = (struct vlan_ethhdr *)skb_mac_header(skb); + br_vlan_get_proto(br, &proto); + + if (ntohs(hdr->h_vlan_proto) == proto) { + __skb_vlan_pop(skb, &tci); + *vlan_tci = tci; + } else { + rcu_read_lock(); + br_vlan_get_pvid_rcu(br, &tci); + rcu_read_unlock(); + *vlan_tci = tci; + } + + *tag_type = (proto != ETH_P_8021Q) ? 
IFH_TAG_TYPE_S : IFH_TAG_TYPE_C; +} + static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, __be32 ifh_prefix, void **ifh) { struct dsa_port *dp = dsa_slave_to_port(netdev); struct dsa_switch *ds = dp->ds; + u64 vlan_tci, tag_type; void *injection; __be32 *prefix; u32 rew_op = 0; + ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type); + injection = skb_push(skb, OCELOT_TAG_LEN); prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN); @@ -21,6 +58,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, ocelot_ifh_set_bypass(injection, 1); ocelot_ifh_set_src(injection, ds->num_ports); ocelot_ifh_set_qos_class(injection, skb->priority); + ocelot_ifh_set_vlan_tci(injection, vlan_tci); + ocelot_ifh_set_tag_type(injection, tag_type); rew_op = ocelot_ptp_rew_op(skb); if (rew_op) diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index 3412051981d7bd..a1919ea5e828d5 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -39,9 +39,9 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); + u16 tx_vid = dsa_tag_8021q_tx_vid(dp); struct ethhdr *hdr = eth_hdr(skb); if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest)) diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c index f920487ae1458e..6d928ee3ef7a2f 100644 --- a/net/dsa/tag_rtl4_a.c +++ b/net/dsa/tag_rtl4_a.c @@ -54,7 +54,7 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb, p = (__be16 *)tag; *p = htons(RTL4_A_ETHERTYPE); - out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8); + out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT); /* The lower bits indicate the port number */ out |= BIT(dp->index); diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c new file mode 100644 index 00000000000000..02686ad4045da9 --- /dev/null +++ b/net/dsa/tag_rtl8_4.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handler for Realtek 8 byte switch tags + * + * Copyright (C) 2021 Alvin Šipraga + * + * NOTE: Currently only supports protocol "4" found in the RTL8365MB, hence + * named tag_rtl8_4. + * + * This tag header has the following format: + * + * ------------------------------------------- + * | MAC DA | MAC SA | 8 byte tag | Type | ...
+ * ------------------------------------------- + * _______________/ \______________________________________ + * / \ + * 0 7|8 15 + * |-----------------------------------+-----------------------------------|--- + * | (16-bit) | ^ + * | Realtek EtherType [0x8899] | | + * |-----------------------------------+-----------------------------------| 8 + * | (8-bit) | (8-bit) | + * | Protocol [0x04] | REASON | b + * |-----------------------------------+-----------------------------------| y + * | (1) | (1) | (2) | (1) | (3) | (1) | (1) | (1) | (5) | t + * | FID_EN | X | FID | PRI_EN | PRI | KEEP | X | LEARN_DIS | X | e + * |-----------------------------------+-----------------------------------| s + * | (1) | (15-bit) | | + * | ALLOW | TX/RX | v + * |-----------------------------------+-----------------------------------|--- + * + * With the following field descriptions: + * + * field | description + * ------------+------------- + * Realtek | 0x8899: indicates that this is a proprietary Realtek tag; + * EtherType | note that Realtek uses the same EtherType for + * | other incompatible tag formats (e.g. tag_rtl4_a.c) + * Protocol | 0x04: indicates that this tag conforms to this format + * X | reserved + * ------------+------------- + * REASON | reason for forwarding packet to CPU + * | 0: packet was forwarded or flooded to CPU + * | 80: packet was trapped to CPU + * FID_EN | 1: packet has an FID + * | 0: no FID + * FID | FID of packet (if FID_EN=1) + * PRI_EN | 1: force priority of packet + * | 0: don't force priority + * PRI | priority of packet (if PRI_EN=1) + * KEEP | preserve packet VLAN tag format + * LEARN_DIS | don't learn the source MAC address of the packet + * ALLOW | 1: treat TX/RX field as an allowance port mask, meaning the + * | packet may only be forwarded to ports specified in the + * | mask + * | 0: no allowance port mask, TX/RX field is the forwarding + * | port mask + * TX/RX | TX (switch->CPU): port number the packet was received on + * | RX (CPU->switch): forwarding port mask (if ALLOW=0) + * | allowance port mask (if ALLOW=1) + */ + +#include +#include +#include + +#include "dsa_priv.h" + +/* Protocols supported: + * + * 0x04 = RTL8365MB DSA protocol + */ + +#define RTL8_4_TAG_LEN 8 + +#define RTL8_4_PROTOCOL GENMASK(15, 8) +#define RTL8_4_PROTOCOL_RTL8365MB 0x04 +#define RTL8_4_REASON GENMASK(7, 0) +#define RTL8_4_REASON_FORWARD 0 +#define RTL8_4_REASON_TRAP 80 + +#define RTL8_4_LEARN_DIS BIT(5) + +#define RTL8_4_TX GENMASK(3, 0) +#define RTL8_4_RX GENMASK(10, 0) + +static struct sk_buff *rtl8_4_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + __be16 *tag; + + skb_push(skb, RTL8_4_TAG_LEN); + + dsa_alloc_etype_header(skb, RTL8_4_TAG_LEN); + tag = dsa_etype_header_pos_tx(skb); + + /* Set Realtek EtherType */ + tag[0] = htons(ETH_P_REALTEK); + + /* Set Protocol; zero REASON */ + tag[1] = htons(FIELD_PREP(RTL8_4_PROTOCOL, RTL8_4_PROTOCOL_RTL8365MB)); + + /* Zero FID_EN, FID, PRI_EN, PRI, KEEP; set LEARN_DIS */ + tag[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1)); + + /* Zero ALLOW; set RX (CPU->switch) forwarding port mask */ + tag[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index))); + + return skb; +} + +static struct sk_buff *rtl8_4_tag_rcv(struct sk_buff *skb, + struct net_device *dev) +{ + __be16 *tag; + u16 etype; + u8 reason; + u8 proto; + u8 port; + + if (unlikely(!pskb_may_pull(skb, RTL8_4_TAG_LEN))) + return NULL; + + tag = dsa_etype_header_pos_rx(skb); + + /* Parse Realtek EtherType */ + etype = ntohs(tag[0]); + if 
(unlikely(etype != ETH_P_REALTEK)) { + dev_warn_ratelimited(&dev->dev, + "non-realtek ethertype 0x%04x\n", etype); + return NULL; + } + + /* Parse Protocol */ + proto = FIELD_GET(RTL8_4_PROTOCOL, ntohs(tag[1])); + if (unlikely(proto != RTL8_4_PROTOCOL_RTL8365MB)) { + dev_warn_ratelimited(&dev->dev, + "unknown realtek protocol 0x%02x\n", + proto); + return NULL; + } + + /* Parse REASON */ + reason = FIELD_GET(RTL8_4_REASON, ntohs(tag[1])); + + /* Parse TX (switch->CPU) */ + port = FIELD_GET(RTL8_4_TX, ntohs(tag[3])); + skb->dev = dsa_master_find_slave(dev, 0, port); + if (!skb->dev) { + dev_warn_ratelimited(&dev->dev, + "could not find slave for port %d\n", + port); + return NULL; + } + + /* Remove tag and recalculate checksum */ + skb_pull_rcsum(skb, RTL8_4_TAG_LEN); + + dsa_strip_etype_header(skb, RTL8_4_TAG_LEN); + + if (reason != RTL8_4_REASON_TRAP) + dsa_default_offload_fwd_mark(skb); + + return skb; +} + +static const struct dsa_device_ops rtl8_4_netdev_ops = { + .name = "rtl8_4", + .proto = DSA_TAG_PROTO_RTL8_4, + .xmit = rtl8_4_tag_xmit, + .rcv = rtl8_4_tag_rcv, + .needed_headroom = RTL8_4_TAG_LEN, +}; +module_dsa_tag_driver(rtl8_4_netdev_ops); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL8_4); diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 2edede9ddac93b..262c8833a91096 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -158,10 +158,7 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp) * we're sure about that). It may not be on this port though, so we * need to find it. */ - list_for_each_entry(other_dp, &ds->dst->ports, list) { - if (other_dp->ds != ds) - continue; - + dsa_switch_for_each_port(other_dp, ds) { if (!other_dp->bridge_dev) continue; @@ -238,9 +235,9 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); + u16 tx_vid = dsa_tag_8021q_tx_vid(dp); if (skb->offload_fwd_mark) return sja1105_imprecise_xmit(skb, netdev); @@ -266,9 +263,9 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb, { struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; struct dsa_port *dp = dsa_slave_to_port(netdev); - u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); + u16 tx_vid = dsa_tag_8021q_tx_vid(dp); __be32 *tx_trailer; __be16 *tx_header; int trailer_pos; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 73fce9467467e1..c7d9e08107cb43 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -304,7 +305,7 @@ void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); } EXPORT_SYMBOL(eth_commit_mac_addr_change); @@ -522,6 +523,26 @@ int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr) } EXPORT_SYMBOL(eth_platform_get_mac_address); +/** + * platform_get_ethdev_address - Set netdev's MAC address from a given device + * @dev: Pointer to the device + * @netdev: Pointer to netdev to write the address to + * + * Wrapper around eth_platform_get_mac_address() which writes the address + * directly to netdev->dev_addr. 
+ */ +int platform_get_ethdev_address(struct device *dev, struct net_device *netdev) +{ + u8 addr[ETH_ALEN] __aligned(2); + int ret; + + ret = eth_platform_get_mac_address(dev, addr); + if (!ret) + eth_hw_addr_set(netdev, addr); + return ret; +} +EXPORT_SYMBOL(platform_get_ethdev_address); + /** * nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named * 'mac-address' associated with given device. @@ -557,4 +578,81 @@ int nvmem_get_mac_address(struct device *dev, void *addrbuf) return 0; } -EXPORT_SYMBOL(nvmem_get_mac_address); + +static int fwnode_get_mac_addr(struct fwnode_handle *fwnode, + const char *name, char *addr) +{ + int ret; + + ret = fwnode_property_read_u8_array(fwnode, name, addr, ETH_ALEN); + if (ret) + return ret; + + if (!is_valid_ether_addr(addr)) + return -EINVAL; + return 0; +} + +/** + * fwnode_get_mac_address - Get the MAC from the firmware node + * @fwnode: Pointer to the firmware node + * @addr: Address of buffer to store the MAC in + * + * Search the firmware node for the best MAC address to use. 'mac-address' is + * checked first, because that is supposed to contain the "most recent" MAC + * address. If that isn't set, then 'local-mac-address' is checked next, + * because that is the default address. If that isn't set, then the obsolete + * 'address' is checked, just in case we're using an old device tree. + * + * Note that the 'address' property is supposed to contain a virtual address of + * the register set, but some DTS files have redefined that property to be the + * MAC address. + * + * All-zero MAC addresses are rejected, because those could be properties that + * exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'mac-address' and 'local-mac-address', with + * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. + * In this case, the real MAC is in 'local-mac-address', and 'mac-address' + * exists but is all zeros. + */ +int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr) +{ + if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) || + !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) || + !fwnode_get_mac_addr(fwnode, "address", addr)) + return 0; + + return -ENOENT; +} +EXPORT_SYMBOL(fwnode_get_mac_address); + +/** + * device_get_mac_address - Get the MAC for a given device + * @dev: Pointer to the device + * @addr: Address of buffer to store the MAC in + */ +int device_get_mac_address(struct device *dev, char *addr) +{ + return fwnode_get_mac_address(dev_fwnode(dev), addr); +} +EXPORT_SYMBOL(device_get_mac_address); + +/** + * device_get_ethdev_address - Set netdev's MAC address from a given device + * @dev: Pointer to the device + * @netdev: Pointer to netdev to write the address to + * + * Wrapper around device_get_mac_address() which writes the address + * directly to netdev->dev_addr.
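Taken together, the helpers above let a driver fetch its MAC address from firmware and write it through the dev_addr helpers in one step, instead of open-coding device_get_mac_address() plus a memcpy() into netdev->dev_addr. A hedged sketch of how a probe path might use the new wrapper is below; my_drv_set_mac() and the random-address fallback are illustrative assumptions, not part of this series.

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical probe helper: take the MAC from 'mac-address' or
 * 'local-mac-address' when the firmware provides one, otherwise fall
 * back to a random locally administered address. Both paths go through
 * the dev_addr helpers rather than writing netdev->dev_addr directly.
 */
static void my_drv_set_mac(struct device *dev, struct net_device *netdev)
{
	if (device_get_ethdev_address(dev, netdev))
		eth_hw_addr_random(netdev);
}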
+ */ +int device_get_ethdev_address(struct device *dev, struct net_device *netdev) +{ + u8 addr[ETH_ALEN]; + int ret; + + ret = device_get_mac_address(dev, addr); + if (!ret) + eth_hw_addr_set(netdev, addr); + return ret; +} +EXPORT_SYMBOL(device_get_ethdev_address); diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile index 0a19470efbfb9c..b76432e70e6baa 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ - tunnels.o fec.o eeprom.o stats.o phc_vclocks.o + tunnels.o fec.o eeprom.o stats.o phc_vclocks.o module.o diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index f2abc315288839..65e9bc1058b57c 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -32,6 +32,29 @@ #include #include "common.h" +/* State held across locks and calls for commands which have devlink fallback */ +struct ethtool_devlink_compat { + struct devlink *devlink; + union { + struct ethtool_flash efl; + struct ethtool_drvinfo info; + }; +}; + +static struct devlink *netdev_to_devlink_get(struct net_device *dev) +{ + struct devlink_port *devlink_port; + + if (!dev->netdev_ops->ndo_get_devlink_port) + return NULL; + + devlink_port = dev->netdev_ops->ndo_get_devlink_port(dev); + if (!devlink_port) + return NULL; + + return devlink_try_get(devlink_port->devlink); +} + /* * Some useful ethtool_ops methods that're device independent. * If we find that all drivers want to do the same thing here, @@ -89,7 +112,8 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; useraddr += sizeof(cmd); - if (copy_to_user(useraddr, features, copy_size * sizeof(*features))) + if (copy_to_user(useraddr, features, + array_size(copy_size, sizeof(*features)))) return -EFAULT; return 0; @@ -335,7 +359,7 @@ EXPORT_SYMBOL(ethtool_intersect_link_masks); void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32) { - bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(dst); dst[0] = legacy_u32; } EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); @@ -350,11 +374,10 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) { __ETHTOOL_DECLARE_LINK_MODE_MASK(ext); - bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_zero(ext); bitmap_fill(ext, 32); bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS); - if (bitmap_intersects(ext, src, - __ETHTOOL_LINK_MODE_MASK_NBITS)) { + if (linkmode_intersects(ext, src)) { /* src mask goes beyond bit 31 */ retval = false; } @@ -697,22 +720,20 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) return ret; } -static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, - void __user *useraddr) +static int +ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) { - struct ethtool_drvinfo info; const struct ethtool_ops *ops = dev->ethtool_ops; - memset(&info, 0, sizeof(info)); - info.cmd = ETHTOOL_GDRVINFO; - strlcpy(info.version, UTS_RELEASE, sizeof(info.version)); + rsp->info.cmd = ETHTOOL_GDRVINFO; + strlcpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version)); if (ops->get_drvinfo) { - ops->get_drvinfo(dev, &info); + ops->get_drvinfo(dev, &rsp->info); } else if (dev->dev.parent && 
dev->dev.parent->driver) { - strlcpy(info.bus_info, dev_name(dev->dev.parent), - sizeof(info.bus_info)); - strlcpy(info.driver, dev->dev.parent->driver->name, - sizeof(info.driver)); + strlcpy(rsp->info.bus_info, dev_name(dev->dev.parent), + sizeof(rsp->info.bus_info)); + strlcpy(rsp->info.driver, dev->dev.parent->driver->name, + sizeof(rsp->info.driver)); } else { return -EOPNOTSUPP; } @@ -726,30 +747,27 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, rc = ops->get_sset_count(dev, ETH_SS_TEST); if (rc >= 0) - info.testinfo_len = rc; + rsp->info.testinfo_len = rc; rc = ops->get_sset_count(dev, ETH_SS_STATS); if (rc >= 0) - info.n_stats = rc; + rsp->info.n_stats = rc; rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); if (rc >= 0) - info.n_priv_flags = rc; + rsp->info.n_priv_flags = rc; } if (ops->get_regs_len) { int ret = ops->get_regs_len(dev); if (ret > 0) - info.regdump_len = ret; + rsp->info.regdump_len = ret; } if (ops->get_eeprom_len) - info.eedump_len = ops->get_eeprom_len(dev); + rsp->info.eedump_len = ops->get_eeprom_len(dev); - if (!info.fw_version[0]) - devlink_compat_running_version(dev, info.fw_version, - sizeof(info.fw_version)); + if (!rsp->info.fw_version[0]) + rsp->devlink = netdev_to_devlink_get(dev); - if (copy_to_user(useraddr, &info, sizeof(info))) - return -EFAULT; return 0; } @@ -799,7 +817,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, goto out; useraddr += offsetof(struct ethtool_sset_info, data); - if (copy_to_user(useraddr, info_buf, idx * sizeof(u32))) + if (copy_to_user(useraddr, info_buf, array_size(idx, sizeof(u32)))) goto out; ret = 0; @@ -1022,7 +1040,7 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, { int i; - if (copy_from_user(indir, useraddr, size * sizeof(indir[0]))) + if (copy_from_user(indir, useraddr, array_size(size, sizeof(indir[0])))) return -EFAULT; /* Validate ring indices */ @@ -1537,6 +1555,10 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, ret = getter(dev, &eeprom, data); if (ret) break; + if (!eeprom.len) { + ret = -EIO; + break; + } if (copy_to_user(userbuf, data, eeprom.len)) { ret = -EFAULT; break; @@ -1891,7 +1913,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr) if (copy_to_user(useraddr, &test, sizeof(test))) goto out; useraddr += sizeof(test); - if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + if (copy_to_user(useraddr, data, array_size(test.len, sizeof(u64)))) goto out; ret = 0; @@ -1933,7 +1955,8 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) goto out; useraddr += sizeof(gstrings); if (gstrings.len && - copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + copy_to_user(useraddr, data, + array_size(gstrings.len, ETH_GSTRING_LEN))) goto out; ret = 0; @@ -2173,19 +2196,15 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, return actor(dev, edata.data); } -static noinline_for_stack int ethtool_flash_device(struct net_device *dev, - char __user *useraddr) +static int +ethtool_flash_device(struct net_device *dev, struct ethtool_devlink_compat *req) { - struct ethtool_flash efl; - - if (copy_from_user(&efl, useraddr, sizeof(efl))) - return -EFAULT; - efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; - - if (!dev->ethtool_ops->flash_device) - return devlink_compat_flash_update(dev, efl.data); + if (!dev->ethtool_ops->flash_device) { + req->devlink = netdev_to_devlink_get(dev); + return 0; + } - return 
dev->ethtool_ops->flash_device(dev, &efl); + return dev->ethtool_ops->flash_device(dev, &req->efl); } static int ethtool_set_dump(struct net_device *dev, @@ -2695,19 +2714,19 @@ static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) /* The main entry point in this file. Called from net/core/dev_ioctl.c */ -int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) +static int +__dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, + u32 ethcmd, struct ethtool_devlink_compat *devlink_state) { - struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); - u32 ethcmd, sub_cmd; + struct net_device *dev; + u32 sub_cmd; int rc; netdev_features_t old_features; + dev = __dev_get_by_name(net, ifr->ifr_name); if (!dev) return -ENODEV; - if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) - return -EFAULT; - if (ethcmd == ETHTOOL_PERQUEUE) { if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd))) return -EFAULT; @@ -2781,7 +2800,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) rc = ethtool_set_settings(dev, useraddr); break; case ETHTOOL_GDRVINFO: - rc = ethtool_get_drvinfo(dev, useraddr); + rc = ethtool_get_drvinfo(dev, devlink_state); break; case ETHTOOL_GREGS: rc = ethtool_get_regs(dev, useraddr); @@ -2883,7 +2902,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) rc = ethtool_set_rxnfc(dev, ethcmd, useraddr); break; case ETHTOOL_FLASHDEV: - rc = ethtool_flash_device(dev, useraddr); + rc = ethtool_flash_device(dev, devlink_state); break; case ETHTOOL_RESET: rc = ethtool_reset(dev, useraddr); @@ -2995,6 +3014,60 @@ int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) return rc; } +int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr) +{ + struct ethtool_devlink_compat *state; + u32 ethcmd; + int rc; + + if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) + return -EFAULT; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + switch (ethcmd) { + case ETHTOOL_FLASHDEV: + if (copy_from_user(&state->efl, useraddr, sizeof(state->efl))) { + rc = -EFAULT; + goto exit_free; + } + state->efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; + break; + } + + rtnl_lock(); + rc = __dev_ethtool(net, ifr, useraddr, ethcmd, state); + rtnl_unlock(); + if (rc) + goto exit_free; + + switch (ethcmd) { + case ETHTOOL_FLASHDEV: + if (state->devlink) + rc = devlink_compat_flash_update(state->devlink, + state->efl.data); + break; + case ETHTOOL_GDRVINFO: + if (state->devlink) + devlink_compat_running_version(state->devlink, + state->info.fw_version, + sizeof(state->info.fw_version)); + if (copy_to_user(useraddr, &state->info, sizeof(state->info))) { + rc = -EFAULT; + goto exit_free; + } + break; + } + +exit_free: + if (state->devlink) + devlink_put(state->devlink); + kfree(state); + return rc; +} + struct ethtool_rx_flow_key { struct flow_dissector_key_basic basic; union { diff --git a/net/ethtool/module.c b/net/ethtool/module.c new file mode 100644 index 00000000000000..bc2cef11bbdad6 --- /dev/null +++ b/net/ethtool/module.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include + +#include "netlink.h" +#include "common.h" +#include "bitset.h" + +struct module_req_info { + struct ethnl_req_info base; +}; + +struct module_reply_data { + struct ethnl_reply_data base; + struct ethtool_module_power_mode_params power; +}; + +#define MODULE_REPDATA(__reply_base) \ + container_of(__reply_base, struct 
module_reply_data, base) + +/* MODULE_GET */ + +const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1] = { + [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int module_get_power_mode(struct net_device *dev, + struct module_reply_data *data, + struct netlink_ext_ack *extack) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_power_mode) + return 0; + + return ops->get_module_power_mode(dev, &data->power, extack); +} + +static int module_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + struct genl_info *info) +{ + struct module_reply_data *data = MODULE_REPDATA(reply_base); + struct netlink_ext_ack *extack = info ? info->extack : NULL; + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + + ret = module_get_power_mode(dev, data, extack); + if (ret < 0) + goto out_complete; + +out_complete: + ethnl_ops_complete(dev); + return ret; +} + +static int module_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + struct module_reply_data *data = MODULE_REPDATA(reply_base); + int len = 0; + + if (data->power.policy) + len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE_POLICY */ + + if (data->power.mode) + len += nla_total_size(sizeof(u8)); /* _MODULE_POWER_MODE */ + + return len; +} + +static int module_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct module_reply_data *data = MODULE_REPDATA(reply_base); + + if (data->power.policy && + nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE_POLICY, + data->power.policy)) + return -EMSGSIZE; + + if (data->power.mode && + nla_put_u8(skb, ETHTOOL_A_MODULE_POWER_MODE, data->power.mode)) + return -EMSGSIZE; + + return 0; +} + +const struct ethnl_request_ops ethnl_module_request_ops = { + .request_cmd = ETHTOOL_MSG_MODULE_GET, + .reply_cmd = ETHTOOL_MSG_MODULE_GET_REPLY, + .hdr_attr = ETHTOOL_A_MODULE_HEADER, + .req_info_size = sizeof(struct module_req_info), + .reply_data_size = sizeof(struct module_reply_data), + + .prepare_data = module_prepare_data, + .reply_size = module_reply_size, + .fill_reply = module_fill_reply, +}; + +/* MODULE_SET */ + +const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1] = { + [ETHTOOL_A_MODULE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MODULE_POWER_MODE_POLICY] = + NLA_POLICY_RANGE(NLA_U8, ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH, + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO), +}; + +static int module_set_power_mode(struct net_device *dev, struct nlattr **tb, + bool *p_mod, struct netlink_ext_ack *extack) +{ + struct ethtool_module_power_mode_params power = {}; + struct ethtool_module_power_mode_params power_new; + const struct ethtool_ops *ops = dev->ethtool_ops; + int ret; + + if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]) + return 0; + + if (!ops->get_module_power_mode || !ops->set_module_power_mode) { + NL_SET_ERR_MSG_ATTR(extack, + tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY], + "Setting power mode policy is not supported by this device"); + return -EOPNOTSUPP; + } + + power_new.policy = nla_get_u8(tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]); + ret = ops->get_module_power_mode(dev, &power, extack); + if (ret < 0) + return ret; + + if (power_new.policy == power.policy) + return 0; + *p_mod = true; + + return ops->set_module_power_mode(dev, &power_new, extack); +} + +int 
ethnl_set_module(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct nlattr **tb = info->attrs; + struct net_device *dev; + bool mod = false; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_MODULE_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + dev = req_info.dev; + + rtnl_lock(); + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_rtnl; + + ret = module_set_power_mode(dev, tb, &mod, info->extack); + if (ret < 0) + goto out_ops; + + if (!mod) + goto out_ops; + + ethtool_notify(dev, ETHTOOL_MSG_MODULE_NTF, NULL); + +out_ops: + ethnl_ops_complete(dev); +out_rtnl: + rtnl_unlock(); + dev_put(dev); + return ret; +} diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 1797a0a9001955..38b44c0291b11a 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -282,6 +282,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_MODULE_EEPROM_GET] = ðnl_module_eeprom_request_ops, [ETHTOOL_MSG_STATS_GET] = ðnl_stats_request_ops, [ETHTOOL_MSG_PHC_VCLOCKS_GET] = ðnl_phc_vclocks_request_ops, + [ETHTOOL_MSG_MODULE_GET] = ðnl_module_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) @@ -593,6 +594,7 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = { [ETHTOOL_MSG_PAUSE_NTF] = ðnl_pause_request_ops, [ETHTOOL_MSG_EEE_NTF] = ðnl_eee_request_ops, [ETHTOOL_MSG_FEC_NTF] = ðnl_fec_request_ops, + [ETHTOOL_MSG_MODULE_NTF] = ðnl_module_request_ops, }; /* default notification handler */ @@ -686,6 +688,7 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = { [ETHTOOL_MSG_PAUSE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify, [ETHTOOL_MSG_FEC_NTF] = ethnl_default_notify, + [ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify, }; void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data) @@ -999,6 +1002,22 @@ static const struct genl_ops ethtool_genl_ops[] = { .policy = ethnl_phc_vclocks_get_policy, .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1, }, + { + .cmd = ETHTOOL_MSG_MODULE_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_module_get_policy, + .maxattr = ARRAY_SIZE(ethnl_module_get_policy) - 1, + }, + { + .cmd = ETHTOOL_MSG_MODULE_SET, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_set_module, + .policy = ethnl_module_set_policy, + .maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index e8987e28036fe9..836ee71578487c 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -337,6 +337,7 @@ extern const struct ethnl_request_ops ethnl_fec_request_ops; extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops; extern const struct ethnl_request_ops ethnl_stats_request_ops; extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops; +extern const struct ethnl_request_ops ethnl_module_request_ops; extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1]; extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1]; @@ -373,6 +374,8 @@ extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1]; extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1]; extern const struct nla_policy 
ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1]; extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1]; +extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1]; +extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1]; int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info); int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info); @@ -391,6 +394,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info); int ethnl_tunnel_info_start(struct netlink_callback *cb); int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info); +int ethnl_set_module(struct sk_buff *skb, struct genl_info *info); extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN]; extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN]; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 26c32407f02909..737e4f17e1c6dd 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -309,9 +309,9 @@ static void send_hsr_supervision_frame(struct hsr_port *master, } spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); - hsr_stag->HSR_TLV_type = type; + hsr_stag->tlv.HSR_TLV_type = type; /* TODO: Why 12 in HSRv0? */ - hsr_stag->HSR_TLV_length = hsr->prot_version ? + hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ? sizeof(struct hsr_sup_payload) : 12; /* Payload: MacAddressA */ @@ -350,8 +350,8 @@ static void send_prp_supervision_frame(struct hsr_port *master, spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); hsr->sup_sequence_nr++; - hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; - hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload); + hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; + hsr_stag->tlv.HSR_TLV_length = sizeof(struct hsr_sup_payload); /* Payload: MacAddressA */ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); @@ -493,7 +493,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], INIT_LIST_HEAD(&hsr->self_node_db); spin_lock_init(&hsr->list_lock); - ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); + eth_hw_addr_set(hsr_dev, slave[0]->dev_addr); /* initialize protocol specific functions */ if (protocol_version == PRP_V1) { diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ceb8afb2a62f4b..e59cbb4f0cd15e 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -37,6 +37,8 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) struct ethhdr *eth_hdr; struct hsr_sup_tag *hsr_sup_tag; struct hsrv1_ethhdr_sp *hsr_V1_hdr; + struct hsr_sup_tlv *hsr_sup_tlv; + u16 total_length = 0; WARN_ON_ONCE(!skb_mac_header_was_set(skb)); eth_hdr = (struct ethhdr *)skb_mac_header(skb); @@ -53,23 +55,63 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) /* Get the supervision header from correct location. */ if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. 
*/ + total_length = sizeof(struct hsrv1_ethhdr_sp); + if (!pskb_may_pull(skb, total_length)) + return false; + hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb); if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP)) return false; hsr_sup_tag = &hsr_V1_hdr->hsr_sup; } else { + total_length = sizeof(struct hsrv0_ethhdr_sp); + if (!pskb_may_pull(skb, total_length)) + return false; + hsr_sup_tag = &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup; } - if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE && - hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK && - hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD && - hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA) + if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE && + hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK && + hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD && + hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA) return false; - if (hsr_sup_tag->HSR_TLV_length != 12 && - hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload)) + if (hsr_sup_tag->tlv.HSR_TLV_length != 12 && + hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload)) + return false; + + /* Get next tlv */ + total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length; + if (!pskb_may_pull(skb, total_length)) + return false; + skb_pull(skb, total_length); + hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data; + skb_push(skb, total_length); + + /* if this is a redbox supervision frame we need to verify + * that more data is available + */ + if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) { + /* tlv length must be a length of a mac address */ + if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload)) + return false; + + /* make sure another tlv follows */ + total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length; + if (!pskb_may_pull(skb, total_length)) + return false; + + /* get next tlv */ + skb_pull(skb, total_length); + hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data; + skb_push(skb, total_length); + } + + /* end of tlvs must follow at the end */ + if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT && + hsr_sup_tlv->HSR_TLV_length != 0) return false; return true; diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index e31949479305e4..0775f0f95dbf3b 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -76,8 +76,8 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db, * frames from self that's been looped over the HSR ring. */ int hsr_create_self_node(struct hsr_priv *hsr, - unsigned char addr_a[ETH_ALEN], - unsigned char addr_b[ETH_ALEN]) + const unsigned char addr_a[ETH_ALEN], + const unsigned char addr_b[ETH_ALEN]) { struct list_head *self_node_db = &hsr->self_node_db; struct hsr_node *node, *oldnode; @@ -265,11 +265,14 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) struct hsr_port *port_rcv = frame->port_rcv; struct hsr_priv *hsr = port_rcv->hsr; struct hsr_sup_payload *hsr_sp; + struct hsr_sup_tlv *hsr_sup_tlv; struct hsr_node *node_real; struct sk_buff *skb = NULL; struct list_head *node_db; struct ethhdr *ethhdr; int i; + unsigned int pull_size = 0; + unsigned int total_pull_size = 0; /* Here either frame->skb_hsr or frame->skb_prp should be * valid as supervision frame always will have protocol @@ -284,18 +287,26 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) if (!skb) return; - ethhdr = (struct ethhdr *)skb_mac_header(skb); - /* Leave the ethernet header. 
*/ - skb_pull(skb, sizeof(struct ethhdr)); + pull_size = sizeof(struct ethhdr); + skb_pull(skb, pull_size); + total_pull_size += pull_size; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); /* And leave the HSR tag. */ - if (ethhdr->h_proto == htons(ETH_P_HSR)) - skb_pull(skb, sizeof(struct hsr_tag)); + if (ethhdr->h_proto == htons(ETH_P_HSR)) { + pull_size = sizeof(struct ethhdr); + skb_pull(skb, pull_size); + total_pull_size += pull_size; + } /* And leave the HSR sup tag. */ - skb_pull(skb, sizeof(struct hsr_sup_tag)); + pull_size = sizeof(struct hsr_tag); + skb_pull(skb, pull_size); + total_pull_size += pull_size; + /* get HSR sup payload */ hsr_sp = (struct hsr_sup_payload *)skb->data; /* Merge node_curr (registered on macaddress_B) into node_real */ @@ -312,6 +323,37 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) /* Node has already been merged */ goto done; + /* Leave the first HSR sup payload. */ + pull_size = sizeof(struct hsr_sup_payload); + skb_pull(skb, pull_size); + total_pull_size += pull_size; + + /* Get second supervision tlv */ + hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data; + /* And check if it is a redbox mac TLV */ + if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) { + /* We could stop here after pushing hsr_sup_payload, + * or proceed and allow macaddress_B and for redboxes. + */ + /* Sanity check length */ + if (hsr_sup_tlv->HSR_TLV_length != 6) + goto done; + + /* Leave the second HSR sup tlv. */ + pull_size = sizeof(struct hsr_sup_tlv); + skb_pull(skb, pull_size); + total_pull_size += pull_size; + + /* Get redbox mac address. */ + hsr_sp = (struct hsr_sup_payload *)skb->data; + + /* Check if redbox mac and node mac are equal. */ + if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) { + /* This is a redbox supervision frame for a VDAN! */ + goto done; + } + } + ether_addr_copy(node_real->macaddress_B, ethhdr->h_source); for (i = 0; i < HSR_PT_PORTS; i++) { if (!node_curr->time_in_stale[i] && @@ -331,11 +373,8 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) kfree_rcu(node_curr, rcu_head); done: - /* PRP uses v0 header */ - if (ethhdr->h_proto == htons(ETH_P_HSR)) - skb_push(skb, sizeof(struct hsrv1_ethhdr_sp)); - else - skb_push(skb, sizeof(struct hsrv0_ethhdr_sp)); + /* Push back here */ + skb_push(skb, total_pull_size); } /* 'skb' is a frame meant for this host, that is to be passed to upper layers. 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index d9628e7a5f051a..bdbb8c822ba1ab 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -48,8 +48,8 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, void hsr_prune_nodes(struct timer_list *t); int hsr_create_self_node(struct hsr_priv *hsr, - unsigned char addr_a[ETH_ALEN], - unsigned char addr_b[ETH_ALEN]); + const unsigned char addr_a[ETH_ALEN], + const unsigned char addr_b[ETH_ALEN]); void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos, unsigned char addr[ETH_ALEN]); diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index f7e284f23b1f3e..b099c315015096 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -75,7 +75,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); if (port->type == HSR_PT_SLAVE_A) { - ether_addr_copy(master->dev->dev_addr, dev->dev_addr); + eth_hw_addr_set(master->dev, dev->dev_addr); call_netdevice_notifiers(NETDEV_CHANGEADDR, master->dev); } diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 53d1f7a8246308..043e4e9a169454 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -35,13 +35,15 @@ * HSR_NODE_FORGET_TIME? */ #define PRUNE_PERIOD 3000 /* ms */ - +#define HSR_TLV_EOT 0 /* End of TLVs */ #define HSR_TLV_ANNOUNCE 22 #define HSR_TLV_LIFE_CHECK 23 /* PRP V1 life check for Duplicate discard */ #define PRP_TLV_LIFE_CHECK_DD 20 /* PRP V1 life check for Duplicate Accept */ #define PRP_TLV_LIFE_CHECK_DA 21 +/* PRP V1 life redundancy box MAC address */ +#define PRP_TLV_REDBOX_MAC 30 /* HSR Tag. * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, @@ -94,14 +96,18 @@ struct hsr_vlan_ethhdr { struct hsr_tag hsr_tag; } __packed; +struct hsr_sup_tlv { + u8 HSR_TLV_type; + u8 HSR_TLV_length; +}; + /* HSR/PRP Supervision Frame data types. * Field names as defined in the IEC:2010 standard for HSR. */ struct hsr_sup_tag { - __be16 path_and_HSR_ver; - __be16 sequence_nr; - __u8 HSR_TLV_type; - __u8 HSR_TLV_length; + __be16 path_and_HSR_ver; + __be16 sequence_nr; + struct hsr_sup_tlv tlv; } __packed; struct hsr_sup_payload { diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 3297e7fa99458b..2cf62718a282a0 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -157,7 +157,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev, lowpan_802154_dev(ldev)->wdev = wdev; /* Set the lowpan hardware address to the wpan hardware address. */ - memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN); + __dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN); /* We need headroom for possible wpan_dev_hard_header call and tailroom * for encryption/fcs handling. The lowpan interface will replace * the IPv6 header with 6LoWPAN header. 
At worst case the 6LoWPAN diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1d816a5fd3eb91..0189e3cd4a7df2 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -133,13 +133,9 @@ void inet_sock_destruct(struct sock *sk) struct inet_sock *inet = inet_sk(sk); __skb_queue_purge(&sk->sk_receive_queue); - if (sk->sk_rx_skb_cache) { - __kfree_skb(sk->sk_rx_skb_cache); - sk->sk_rx_skb_cache = NULL; - } __skb_queue_purge(&sk->sk_error_queue); - sk_mem_reclaim(sk); + sk_mem_reclaim_final(sk); if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { pr_err("Attempt to release TCP socket in state %d %p\n", @@ -154,7 +150,7 @@ void inet_sock_destruct(struct sock *sk) WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(refcount_read(&sk->sk_wmem_alloc)); WARN_ON(sk->sk_wmem_queued); - WARN_ON(sk->sk_forward_alloc); + WARN_ON(sk_forward_alloc_get(sk)); kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); @@ -773,26 +769,28 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr); sin->sin_family = AF_INET; + lock_sock(sk); if (peer) { if (!inet->inet_dport || (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && - peer == 1)) + peer == 1)) { + release_sock(sk); return -ENOTCONN; + } sin->sin_port = inet->inet_dport; sin->sin_addr.s_addr = inet->inet_daddr; - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, - CGROUP_INET4_GETPEERNAME, - NULL); + BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, + CGROUP_INET4_GETPEERNAME); } else { __be32 addr = inet->inet_rcv_saddr; if (!addr) addr = inet->inet_saddr; sin->sin_port = inet->inet_sport; sin->sin_addr.s_addr = addr; - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, - CGROUP_INET4_GETSOCKNAME, - NULL); + BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, + CGROUP_INET4_GETSOCKNAME); } + release_sock(sk); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); return sizeof(*sin); } @@ -1666,12 +1664,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, } EXPORT_SYMBOL_GPL(inet_ctl_sock_create); -u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt) -{ - return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); -} -EXPORT_SYMBOL_GPL(snmp_get_cpu_field); - unsigned long snmp_fold_field(void __percpu *mib, int offt) { unsigned long res = 0; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 922dd73e57406e..857a144b1ea96c 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1247,6 +1247,8 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_change_info *change_info; + struct in_device *in_dev; + bool evict_nocarrier; switch (event) { case NETDEV_CHANGEADDR: @@ -1257,7 +1259,14 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, change_info = ptr; if (change_info->flags_changed & IFF_NOARP) neigh_changeaddr(&arp_tbl, dev); - if (!netif_carrier_ok(dev)) + + in_dev = __in_dev_get_rtnl(dev); + if (!in_dev) + evict_nocarrier = true; + else + evict_nocarrier = IN_DEV_ARP_EVICT_NOCARRIER(in_dev); + + if (evict_nocarrier && !netif_carrier_ok(dev)) neigh_carrier_down(&arp_tbl, dev); break; default: diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 0dcee9df132689..2cf02b4d77fbf0 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -81,14 +81,7 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size, const struct bpf_prog *prog, 
struct bpf_insn_access_aux *info) { - if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) - return false; - if (type != BPF_READ) - return false; - if (off % size != 0) - return false; - - if (!btf_ctx_access(off, size, type, prog, info)) + if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info)) return false; if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id) @@ -223,41 +216,13 @@ BTF_ID(func, tcp_reno_cong_avoid) BTF_ID(func, tcp_reno_undo_cwnd) BTF_ID(func, tcp_slow_start) BTF_ID(func, tcp_cong_avoid_ai) -#ifdef CONFIG_X86 -#ifdef CONFIG_DYNAMIC_FTRACE -#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC) -BTF_ID(func, cubictcp_init) -BTF_ID(func, cubictcp_recalc_ssthresh) -BTF_ID(func, cubictcp_cong_avoid) -BTF_ID(func, cubictcp_state) -BTF_ID(func, cubictcp_cwnd_event) -BTF_ID(func, cubictcp_acked) -#endif -#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP) -BTF_ID(func, dctcp_init) -BTF_ID(func, dctcp_update_alpha) -BTF_ID(func, dctcp_cwnd_event) -BTF_ID(func, dctcp_ssthresh) -BTF_ID(func, dctcp_cwnd_undo) -BTF_ID(func, dctcp_state) -#endif -#if IS_BUILTIN(CONFIG_TCP_CONG_BBR) -BTF_ID(func, bbr_init) -BTF_ID(func, bbr_main) -BTF_ID(func, bbr_sndbuf_expand) -BTF_ID(func, bbr_undo_cwnd) -BTF_ID(func, bbr_cwnd_event) -BTF_ID(func, bbr_ssthresh) -BTF_ID(func, bbr_min_tso_segs) -BTF_ID(func, bbr_set_state) -#endif -#endif /* CONFIG_DYNAMIC_FTRACE */ -#endif /* CONFIG_X86 */ BTF_SET_END(bpf_tcp_ca_kfunc_ids) -static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id) +static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner) { - return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id); + if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id)) + return true; + return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner); } static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = { diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 099259fc826aa2..62d5f99760aacd 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -73,7 +73,7 @@ struct cipso_v4_map_cache_entry { static struct cipso_v4_map_cache_bkt *cipso_v4_cache; /* Restricted bitmap (tag #1) flags */ -int cipso_v4_rbm_optfmt = 0; +int cipso_v4_rbm_optfmt; int cipso_v4_rbm_strictvalid = 1; /* diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 4a8550c49202db..48f337ccf94951 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -9,7 +9,6 @@ #include #include -#include #include #include #include diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f4468980b6757d..ec73a0d52d3e35 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -75,6 +75,7 @@ static struct ipv4_devconf ipv4_devconf = { [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1, [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, + [IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1, }, }; @@ -87,6 +88,7 @@ static struct ipv4_devconf ipv4_devconf_dflt = { [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1, [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, + [IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1, }, }; @@ -2532,6 +2534,8 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), + DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER, + "arp_evict_nocarrier"), DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, 
"proxy_arp_pvlan"), DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION, "force_igmp_version"), diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c index 0c28bd469a6810..0e23ade74493ce 100644 --- a/net/ipv4/fib_notifier.c +++ b/net/ipv4/fib_notifier.c @@ -6,7 +6,6 @@ #include #include #include -#include #include int call_fib4_notifier(struct notifier_block *nb, diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f25d02ad4a8af4..f7fea3a7c5e64b 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1015,7 +1015,7 @@ void inet_csk_destroy_sock(struct sock *sk) sk_refcnt_debug_release(sk); - percpu_counter_dec(sk->sk_prot->orphan_count); + this_cpu_dec(*sk->sk_prot->orphan_count); sock_put(sk); } @@ -1074,7 +1074,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req, sock_orphan(child); - percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(*sk->sk_prot->orphan_count); if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ef7897226f08e5..c8fa6e7f7d1241 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -271,7 +271,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct inet_diag_meminfo minfo = { .idiag_rmem = sk_rmem_alloc_get(sk), .idiag_wmem = READ_ONCE(sk->sk_wmem_queued), - .idiag_fmem = sk->sk_forward_alloc, + .idiag_fmem = sk_forward_alloc_get(sk), .idiag_tmem = sk_wmem_alloc_get(sk), }; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index bfb522e513461a..75737267746f85 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -598,7 +598,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk) if (ok) { sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } else { - percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(*sk->sk_prot->orphan_count); inet_sk_set_state(sk, TCP_CLOSE); sock_set_flag(sk, SOCK_DEAD); inet_csk_destroy_sock(sk); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0fe6c936dc54ac..2ac2b95c569436 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -986,7 +986,7 @@ static int ipgre_tunnel_init(struct net_device *dev) __gre_tunnel_init(dev); - memcpy(dev->dev_addr, &iph->saddr, 4); + __dev_addr_set(dev, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); dev->flags = IFF_NOARP; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index b297bb28556ec5..38d29b175ca664 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -886,6 +886,8 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, return ip_mc_leave_group(sk, &mreq); } +DEFINE_STATIC_KEY_FALSE(ip4_min_ttl); + static int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -1352,7 +1354,14 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, goto e_inval; if (val < 0 || val > 255) goto e_inval; - inet->min_ttl = val; + + if (val) + static_branch_enable(&ip4_min_ttl); + + /* tcp_v4_err() and tcp_v4_rcv() might read min_ttl + * while we are changint it. 
+ */ + WRITE_ONCE(inet->min_ttl, val); break; default: diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index fe9101d3d69e01..5a473319d3a5c8 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -834,7 +834,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, t->parms.i_key = p->i_key; t->parms.o_key = p->o_key; if (dev->type != ARPHRD_ETHER) { - memcpy(dev->dev_addr, &p->iph.saddr, 4); + __dev_addr_set(dev, &p->iph.saddr, 4); memcpy(dev->broadcast, &p->iph.daddr, 4); } ip_tunnel_add(itn, t); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index efe25a0172e6ff..8c2bd1d9ddce38 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -425,7 +425,7 @@ static int vti_tunnel_init(struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; - memcpy(dev->dev_addr, &iph->saddr, 4); + __dev_addr_set(dev, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); dev->flags = IFF_NOARP; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 816d8aad5a6843..9d41d5d5cd1e50 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -262,6 +262,11 @@ static int __init ic_open_devs(void) dev->name, able, d->xid); } } + /* Devices with a complex topology like SFP ethernet interfaces need + * the rtnl_lock at init. The carrier wait-loop must therefore run + * without holding it. + */ + rtnl_unlock(); /* no point in waiting if we could not bring up at least one device */ if (!ic_first_dev) @@ -274,9 +279,13 @@ static int __init ic_open_devs(void) msecs_to_jiffies(carrier_timeout * 1000))) { int wait, elapsed; + rtnl_lock(); for_each_netdev(&init_net, dev) - if (ic_is_init_dev(dev) && netif_carrier_ok(dev)) + if (ic_is_init_dev(dev) && netif_carrier_ok(dev)) { + rtnl_unlock(); goto have_carrier; + } + rtnl_unlock(); msleep(1); @@ -289,7 +298,6 @@ static int __init ic_open_devs(void) next_msg = jiffies + msecs_to_jiffies(20000); } have_carrier: - rtnl_unlock(); *last = NULL; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 3aa78ccbec3ed8..123ea63a04cbf0 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -380,7 +380,7 @@ static int ipip_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); - memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); + __dev_addr_set(dev, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); tunnel->tun_hlen = 0; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index c53f14b9435607..ffc0cab7cf189f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -179,10 +179,11 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) return (void *)entry + entry->next_offset; } -unsigned int arpt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table) +unsigned int arpt_do_table(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) { + const struct xt_table *table = priv; unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); unsigned int verdict = NF_DROP; diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 3de78416ec7629..78cd5ee24448f6 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c @@ -26,14 +26,6 @@ static const struct xt_table packet_filter = { .priority = NF_IP_PRI_FILTER, }; -/* The work comes in here from netfilter.c */ -static unsigned int -arptable_filter_hook(void
*priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return arpt_do_table(skb, state, priv); -} - static struct nf_hook_ops *arpfilter_ops __read_mostly; static int arptable_filter_table_init(struct net *net) @@ -72,7 +64,7 @@ static int __init arptable_filter_init(void) if (ret < 0) return ret; - arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arptable_filter_hook); + arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arpt_do_table); if (IS_ERR(arpfilter_ops)) { xt_unregister_template(&packet_filter); return PTR_ERR(arpfilter_ops); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 13acb687c19ab7..2ed7c58b471aca 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -222,10 +222,11 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int -ipt_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table) +ipt_do_table(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) { + const struct xt_table *table = priv; unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); const struct iphdr *ip; diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 0eb0e2ab9bfc4f..b9062f4552ace5 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -28,13 +28,6 @@ static const struct xt_table packet_filter = { .priority = NF_IP_PRI_FILTER, }; -static unsigned int -iptable_filter_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ipt_do_table(skb, state, priv); -} - static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. */ @@ -90,7 +83,7 @@ static int __init iptable_filter_init(void) if (ret < 0) return ret; - filter_ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook); + filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table); if (IS_ERR(filter_ops)) { xt_unregister_template(&packet_filter); return PTR_ERR(filter_ops); diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 40417a3f930b2f..3abb430af9e6f1 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -34,7 +34,7 @@ static const struct xt_table packet_mangler = { }; static unsigned int -ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *priv) +ipt_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { unsigned int ret; const struct iphdr *iph; @@ -50,7 +50,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *pri daddr = iph->daddr; tos = iph->tos; - ret = ipt_do_table(skb, state, priv); + ret = ipt_do_table(priv, skb, state); /* Reroute for ANY change. 
*/ if (ret != NF_DROP && ret != NF_STOLEN) { iph = ip_hdr(skb); @@ -75,8 +75,8 @@ iptable_mangle_hook(void *priv, const struct nf_hook_state *state) { if (state->hook == NF_INET_LOCAL_OUT) - return ipt_mangle_out(skb, state, priv); - return ipt_do_table(skb, state, priv); + return ipt_mangle_out(priv, skb, state); + return ipt_do_table(priv, skb, state); } static struct nf_hook_ops *mangle_ops __read_mostly; diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index 45d7e072e6a549..56f6ecc43451ec 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -29,34 +29,27 @@ static const struct xt_table nf_nat_ipv4_table = { .af = NFPROTO_IPV4, }; -static unsigned int iptable_nat_do_chain(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ipt_do_table(skb, state, priv); -} - static const struct nf_hook_ops nf_nat_ipv4_ops[] = { { - .hook = iptable_nat_do_chain, + .hook = ipt_do_table, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_NAT_DST, }, { - .hook = iptable_nat_do_chain, + .hook = ipt_do_table, .pf = NFPROTO_IPV4, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP_PRI_NAT_SRC, }, { - .hook = iptable_nat_do_chain, + .hook = ipt_do_table, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_NAT_DST, }, { - .hook = iptable_nat_do_chain, + .hook = ipt_do_table, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC, diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 8265c676570533..ca5e5b21587cda 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -32,14 +32,6 @@ static const struct xt_table packet_raw_before_defrag = { .priority = NF_IP_PRI_RAW_BEFORE_DEFRAG, }; -/* The work comes in here from netfilter.c. 
*/ -static unsigned int -iptable_raw_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ipt_do_table(skb, state, priv); -} - static struct nf_hook_ops *rawtable_ops __read_mostly; static int iptable_raw_table_init(struct net *net) @@ -90,7 +82,7 @@ static int __init iptable_raw_init(void) if (ret < 0) return ret; - rawtable_ops = xt_hook_ops_alloc(table, iptable_raw_hook); + rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table); if (IS_ERR(rawtable_ops)) { xt_unregister_template(table); return PTR_ERR(rawtable_ops); diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index f519162a2fa512..d885443cb26798 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c @@ -33,13 +33,6 @@ static const struct xt_table security_table = { .priority = NF_IP_PRI_SECURITY, }; -static unsigned int -iptable_security_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ipt_do_table(skb, state, priv); -} - static struct nf_hook_ops *sectbl_ops __read_mostly; static int iptable_security_table_init(struct net *net) @@ -78,7 +71,7 @@ static int __init iptable_security_init(void) if (ret < 0) return ret; - sectbl_ops = xt_hook_ops_alloc(&security_table, iptable_security_hook); + sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table); if (IS_ERR(sectbl_ops)) { xt_unregister_template(&security_table); return PTR_ERR(sectbl_ops); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b0d3a09dc84e7a..f30273afb5399d 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -53,7 +53,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) struct net *net = seq->private; int orphans, sockets; - orphans = percpu_counter_sum_positive(&tcp_orphan_count); + orphans = tcp_orphan_count_sum(); sockets = proto_sockets_allocated_sum_positive(&tcp_prot); socket_seq_show(seq); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d6899ab5fb39b3..0b4103b1e6220f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -61,15 +61,11 @@ #define pr_fmt(fmt) "IPv4: " fmt #include -#include #include -#include #include #include #include -#include #include -#include #include #include #include @@ -84,20 +80,17 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -109,7 +102,6 @@ #endif #include #include -#include #include "fib_lookup.h" diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 33792cf55a7934..8696dc343ad2cd 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -7,8 +7,6 @@ */ #include -#include -#include #include #include #include diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 6f1e64d4923287..97eb5477492447 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -6,25 +6,16 @@ * Added /proc/sys/net/ipv4 directory entry (empty =) ). 
[MS] */ -#include -#include #include -#include -#include #include #include #include -#include -#include -#include #include #include #include -#include #include #include #include -#include #include #include #include @@ -594,18 +585,6 @@ static struct ctl_table ipv4_table[] = { .extra1 = &sysctl_fib_sync_mem_min, .extra2 = &sysctl_fib_sync_mem_max, }, - { - .procname = "tcp_rx_skb_cache", - .data = &tcp_rx_skb_cache_key.key, - .mode = 0644, - .proc_handler = proc_do_static_key, - }, - { - .procname = "tcp_tx_skb_cache", - .data = &tcp_tx_skb_cache_key.key, - .mode = 0644, - .proc_handler = proc_do_static_key, - }, { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f5c336f8b0c8e9..bc7f419184aa50 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -287,8 +287,8 @@ enum { TCP_CMSG_TS = 2 }; -struct percpu_counter tcp_orphan_count; -EXPORT_SYMBOL_GPL(tcp_orphan_count); +DEFINE_PER_CPU(unsigned int, tcp_orphan_count); +EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count); long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); @@ -325,11 +325,6 @@ struct tcp_splice_state { unsigned long tcp_memory_pressure __read_mostly; EXPORT_SYMBOL_GPL(tcp_memory_pressure); -DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); -EXPORT_SYMBOL(tcp_rx_skb_cache_key); - -DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); - void tcp_enter_memory_pressure(struct sock *sk) { unsigned long val; @@ -644,7 +639,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) } EXPORT_SYMBOL(tcp_ioctl); -static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) +void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; tp->pushed_seq = tp->write_seq; @@ -655,15 +650,13 @@ static inline bool forced_push(const struct tcp_sock *tp) return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } -static void skb_entail(struct sock *sk, struct sk_buff *skb) +void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - skb->csum = 0; tcb->seq = tcb->end_seq = tp->write_seq; tcb->tcp_flags = TCPHDR_ACK; - tcb->sacked = 0; __skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk_wmem_queued_add(sk, skb->truesize); @@ -858,30 +851,15 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, } EXPORT_SYMBOL(tcp_splice_read); -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule) +struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule) { struct sk_buff *skb; - if (likely(!size)) { - skb = sk->sk_tx_skb_cache; - if (skb) { - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); - sk->sk_tx_skb_cache = NULL; - pskb_trim(skb, 0); - INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); - skb_shinfo(skb)->tx_flags = 0; - memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb)); - return skb; - } - } - /* The TCP header must be at least 32-bit aligned. 
*/ - size = ALIGN(size, 4); - if (unlikely(tcp_under_memory_pressure(sk))) sk_mem_reclaim_partial(sk); - skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); + skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp); if (likely(skb)) { bool mem_scheduled; @@ -892,12 +870,8 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, mem_scheduled = sk_wmem_schedule(sk, skb->truesize); } if (likely(mem_scheduled)) { - skb_reserve(skb, sk->sk_prot->max_header); - /* - * Make sure that we have exactly size bytes - * available to the caller, no more, no less. - */ - skb->reserved_tailroom = skb->end - skb->tail - size; + skb_reserve(skb, MAX_TCP_HEADER); + skb->ip_summed = CHECKSUM_PARTIAL; INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); return skb; } @@ -950,18 +924,20 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags) * importantly be able to generate EPOLLOUT for Edge Trigger epoll() * users. */ -void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) +void tcp_remove_empty_skb(struct sock *sk) { - if (skb && !skb->len) { + struct sk_buff *skb = tcp_write_queue_tail(sk); + + if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { tcp_unlink_write_queue(skb, sk); if (tcp_write_queue_empty(sk)) tcp_chrono_stop(sk, TCP_CHRONO_BUSY); - sk_wmem_free_skb(sk, skb); + tcp_wmem_free_skb(sk, skb); } } -struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, - struct page *page, int offset, size_t *size) +static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, + struct page *page, int offset, size_t *size) { struct sk_buff *skb = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -974,15 +950,15 @@ struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, if (!sk_stream_memory_free(sk)) return NULL; - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - tcp_rtx_and_write_queues_empty(sk)); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + tcp_rtx_and_write_queues_empty(sk)); if (!skb) return NULL; #ifdef CONFIG_TLS_DEVICE skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); #endif - skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal; } @@ -1013,7 +989,6 @@ struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, skb->truesize += copy; sk_wmem_queued_add(sk, copy); sk_mem_charge(sk, copy); - skb->ip_summed = CHECKSUM_PARTIAL; WRITE_ONCE(tp->write_seq, tp->write_seq + copy); TCP_SKB_CB(skb)->end_seq += copy; tcp_skb_pcount_set(skb, 0); @@ -1104,7 +1079,7 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, return copied; do_error: - tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); + tcp_remove_empty_skb(sk); if (copied) goto out; out_err: @@ -1303,15 +1278,14 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - first_skb); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + first_skb); if (!skb) goto wait_for_space; process_backlog++; - skb->ip_summed = CHECKSUM_PARTIAL; - skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal; /* All packets are restored as if they have @@ -1326,14 +1300,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) if (copy > msg_data_left(msg)) copy = msg_data_left(msg); - /* Where to copy to? */ - if (skb_availroom(skb) > 0 && !zc) { - /* We have some space in skb head. Superb! 
*/ - copy = min_t(int, copy, skb_availroom(skb)); - err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); - if (err) - goto do_fault; - } else if (!zc) { + if (!zc) { bool merge = true; int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); @@ -1432,9 +1399,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) return copied + copied_syn; do_error: - skb = tcp_write_queue_tail(sk); -do_fault: - tcp_remove_empty_skb(sk, skb); + tcp_remove_empty_skb(sk); if (copied + copied_syn) goto out; @@ -2687,11 +2652,36 @@ void tcp_shutdown(struct sock *sk, int how) } EXPORT_SYMBOL(tcp_shutdown); +int tcp_orphan_count_sum(void) +{ + int i, total = 0; + + for_each_possible_cpu(i) + total += per_cpu(tcp_orphan_count, i); + + return max(total, 0); +} + +static int tcp_orphan_cache; +static struct timer_list tcp_orphan_timer; +#define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) + +static void tcp_orphan_update(struct timer_list *unused) +{ + WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); +} + +static bool tcp_too_many_orphans(int shift) +{ + return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans; +} + bool tcp_check_oom(struct sock *sk, int shift) { bool too_many_orphans, out_of_socket_memory; - too_many_orphans = tcp_too_many_orphans(sk, shift); + too_many_orphans = tcp_too_many_orphans(shift); out_of_socket_memory = tcp_out_of_memory(sk); if (too_many_orphans) @@ -2800,7 +2790,7 @@ void __tcp_close(struct sock *sk, long timeout) /* remove backlog if any, without releasing ownership. */ __release_sock(sk); - percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(tcp_orphan_count); /* Have we already been destroyed by a softirq or backlog? 
*/ if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) @@ -2903,7 +2893,7 @@ static void tcp_rtx_queue_purge(struct sock *sk) * list_del(&skb->tcp_tsorted_anchor) */ tcp_rtx_queue_unlink(skb, sk); - sk_wmem_free_skb(sk, skb); + tcp_wmem_free_skb(sk, skb); } } @@ -2914,14 +2904,9 @@ void tcp_write_queue_purge(struct sock *sk) tcp_chrono_stop(sk, TCP_CHRONO_BUSY); while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { tcp_skb_tsorted_anchor_cleanup(skb); - sk_wmem_free_skb(sk, skb); + tcp_wmem_free_skb(sk, skb); } tcp_rtx_queue_purge(sk); - skb = sk->sk_tx_skb_cache; - if (skb) { - __kfree_skb(skb); - sk->sk_tx_skb_cache = NULL; - } INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); @@ -2958,10 +2943,6 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); - if (sk->sk_rx_skb_cache) { - __kfree_skb(sk->sk_rx_skb_cache); - sk->sk_rx_skb_cache = NULL; - } WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); tp->urg_data = 0; tcp_write_queue_purge(sk); @@ -4502,7 +4483,10 @@ void __init tcp_init(void) sizeof_field(struct sk_buff, cb)); percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); - percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); + + timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); + inet_hashinfo_init(&tcp_hashinfo); inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", thash_entries, 21, /* one slot per 2 MB*/ diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 6274462b86b4b9..ec5550089b4d29 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -56,6 +56,8 @@ * otherwise TCP stack falls back to an internal pacing using one high * resolution timer per TCP socket and may use more resources. 
*/ +#include +#include #include #include #include @@ -1152,14 +1154,38 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { .set_state = bbr_set_state, }; +BTF_SET_START(tcp_bbr_kfunc_ids) +#ifdef CONFIG_X86 +#ifdef CONFIG_DYNAMIC_FTRACE +BTF_ID(func, bbr_init) +BTF_ID(func, bbr_main) +BTF_ID(func, bbr_sndbuf_expand) +BTF_ID(func, bbr_undo_cwnd) +BTF_ID(func, bbr_cwnd_event) +BTF_ID(func, bbr_ssthresh) +BTF_ID(func, bbr_min_tso_segs) +BTF_ID(func, bbr_set_state) +#endif +#endif +BTF_SET_END(tcp_bbr_kfunc_ids) + +static DEFINE_KFUNC_BTF_ID_SET(&tcp_bbr_kfunc_ids, tcp_bbr_kfunc_btf_set); + static int __init bbr_register(void) { + int ret; + BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE); - return tcp_register_congestion_control(&tcp_bbr_cong_ops); + ret = tcp_register_congestion_control(&tcp_bbr_cong_ops); + if (ret) + return ret; + register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set); + return 0; } static void __exit bbr_unregister(void) { + unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set); tcp_unregister_congestion_control(&tcp_bbr_cong_ops); } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4a30deaa9a37f4..5e9d9c51164c4d 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -25,6 +25,8 @@ */ #include +#include +#include #include #include #include @@ -482,8 +484,25 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { .name = "cubic", }; +BTF_SET_START(tcp_cubic_kfunc_ids) +#ifdef CONFIG_X86 +#ifdef CONFIG_DYNAMIC_FTRACE +BTF_ID(func, cubictcp_init) +BTF_ID(func, cubictcp_recalc_ssthresh) +BTF_ID(func, cubictcp_cong_avoid) +BTF_ID(func, cubictcp_state) +BTF_ID(func, cubictcp_cwnd_event) +BTF_ID(func, cubictcp_acked) +#endif +#endif +BTF_SET_END(tcp_cubic_kfunc_ids) + +static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set); + static int __init cubictcp_register(void) { + int ret; + BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE); /* Precompute a bunch of the scaling factors that are used per-packet @@ -514,11 +533,16 @@ static int __init cubictcp_register(void) /* divide by bic_scale and by constant Srtt (100ms) */ do_div(cube_factor, bic_scale * 10); - return tcp_register_congestion_control(&cubictcp); + ret = tcp_register_congestion_control(&cubictcp); + if (ret) + return ret; + register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set); + return 0; } static void __exit cubictcp_unregister(void) { + unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set); tcp_unregister_congestion_control(&cubictcp); } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 79f705450c1628..0d7ab3cc7b614c 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -36,6 +36,8 @@ * Glenn Judd */ +#include +#include #include #include #include @@ -236,14 +238,36 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = { .name = "dctcp-reno", }; +BTF_SET_START(tcp_dctcp_kfunc_ids) +#ifdef CONFIG_X86 +#ifdef CONFIG_DYNAMIC_FTRACE +BTF_ID(func, dctcp_init) +BTF_ID(func, dctcp_update_alpha) +BTF_ID(func, dctcp_cwnd_event) +BTF_ID(func, dctcp_ssthresh) +BTF_ID(func, dctcp_cwnd_undo) +BTF_ID(func, dctcp_state) +#endif +#endif +BTF_SET_END(tcp_dctcp_kfunc_ids) + +static DEFINE_KFUNC_BTF_ID_SET(&tcp_dctcp_kfunc_ids, tcp_dctcp_kfunc_btf_set); + static int __init dctcp_register(void) { + int ret; + BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE); - return tcp_register_congestion_control(&dctcp); + ret = 
tcp_register_congestion_control(&dctcp); + if (ret) + return ret; + register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set); + return 0; } static void __exit dctcp_unregister(void) { + unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set); tcp_unregister_congestion_control(&dctcp); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 59412d6354a01a..fdbcf2a6d08ef4 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -1,13 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include #include -#include #include #include -#include -#include #include void tcp_fastopen_init_key_once(struct net *net) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 141e85e6422b1f..246ab7b5e857eb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb, room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; + if (room <= 0) + return; + /* Check #1 */ - if (room > 0 && !tcp_under_memory_pressure(sk)) { + if (!tcp_under_memory_pressure(sk)) { unsigned int truesize = truesize_adjust(adjust, skb); int incr; @@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb, tp->rcv_ssthresh += min(room, incr); inet_csk(sk)->icsk_ack.quick |= 1; } + } else { + /* Under pressure: + * Adjust rcv_ssthresh according to reserved mem + */ + tcp_adjust_rcv_ssthresh(sk); } } @@ -3221,7 +3229,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, long seq_rtt_us = -1L; long ca_rtt_us = -1L; u32 pkts_acked = 0; - u32 last_in_flight = 0; bool rtt_update; int flag = 0; @@ -3257,7 +3264,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, if (!first_ackt) first_ackt = last_ackt; - last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; if (before(start_seq, reord)) reord = start_seq; if (!after(scb->end_seq, tp->high_seq)) @@ -3323,8 +3329,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); - if (pkts_acked == 1 && last_in_flight < tp->mss_cache && - last_in_flight && !prior_sacked && fully_acked && + if (pkts_acked == 1 && fully_acked && !prior_sacked && + (tp->snd_una - prior_snd_una) < tp->mss_cache && sack->rate->prior_delivered + 1 == tp->delivered && !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) { /* Conservatively mark a delayed ACK. 
It's typically @@ -3381,9 +3387,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, if (icsk->icsk_ca_ops->pkts_acked) { struct ack_sample sample = { .pkts_acked = pkts_acked, - .rtt_us = sack->rate->rtt_us, - .in_flight = last_in_flight }; + .rtt_us = sack->rate->rtt_us }; + sample.in_flight = tp->mss_cache * + (tp->delivered - sack->rate->prior_delivered); icsk->icsk_ca_ops->pkts_acked(sk, &sample); } @@ -5346,7 +5353,7 @@ static int tcp_prune_queue(struct sock *sk) if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); else if (tcp_under_memory_pressure(sk)) - tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + tcp_adjust_rcv_ssthresh(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; @@ -5381,7 +5388,7 @@ static int tcp_prune_queue(struct sock *sk) return -1; } -static bool tcp_should_expand_sndbuf(const struct sock *sk) +static bool tcp_should_expand_sndbuf(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); @@ -5392,8 +5399,18 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk) return false; /* If we are under global TCP memory pressure, do not expand. */ - if (tcp_under_memory_pressure(sk)) + if (tcp_under_memory_pressure(sk)) { + int unused_mem = sk_unused_reserved_mem(sk); + + /* Adjust sndbuf according to reserved mem. But make sure + * it never goes below SOCK_MIN_SNDBUF. + * See sk_stream_moderate_sndbuf() for more details. + */ + if (unused_mem > SOCK_MIN_SNDBUF) + WRITE_ONCE(sk->sk_sndbuf, unused_mem); + return false; + } /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5b8ce65dfc0678..13d868c4328458 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -508,9 +508,12 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) if (sk->sk_state == TCP_CLOSE) goto out; - if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); - goto out; + if (static_branch_unlikely(&ip4_min_ttl)) { + /* min_ttl can be changed concurrently from do_ip_setsockopt() */ + if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) { + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); + goto out; + } } tp = tcp_sk(sk); @@ -1703,7 +1706,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { - if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || + if (sk->sk_rx_dst_ifindex != skb->skb_iif || !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check, dst, 0)) { dst_release(dst); @@ -1788,7 +1791,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) if (dst) dst = dst_check(dst, 0); if (dst && - inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) + sk->sk_rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } @@ -1960,7 +1963,6 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); - struct sk_buff *skb_to_free; int sdif = inet_sdif(skb); int dif = inet_iif(skb); const struct iphdr *iph; @@ -2069,9 +2071,13 @@ int tcp_v4_rcv(struct sk_buff *skb) return 0; } } - if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); - goto discard_and_relse; + + if (static_branch_unlikely(&ip4_min_ttl)) { + /* min_ttl can be changed concurrently from do_ip_setsockopt() */ + if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) { + __NET_INC_STATS(net, 
LINUX_MIB_TCPMINTTLDROP); + goto discard_and_relse; + } } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) @@ -2101,17 +2107,12 @@ int tcp_v4_rcv(struct sk_buff *skb) tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { - skb_to_free = sk->sk_rx_skb_cache; - sk->sk_rx_skb_cache = NULL; ret = tcp_v4_do_rcv(sk, skb); } else { if (tcp_add_backlog(sk, skb)) goto discard_and_relse; - skb_to_free = NULL; } bh_unlock_sock(sk); - if (skb_to_free) - __kfree_skb(skb_to_free); put_and_return: if (refcounted) @@ -2201,7 +2202,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) if (dst && dst_hold_safe(dst)) { sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + sk->sk_rx_dst_ifindex = skb->skb_iif; } } EXPORT_SYMBOL(inet_sk_rx_dst_set); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0a4f3f16140ad8..cf913a66df1702 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -19,14 +19,7 @@ * Jorge Cwik, */ -#include -#include -#include -#include -#include -#include #include -#include #include #include diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c index 95db7a11ba2ada..ab552356bdba8f 100644 --- a/net/ipv4/tcp_nv.c +++ b/net/ipv4/tcp_nv.c @@ -25,7 +25,6 @@ * 1) Add mechanism to deal with reverse congestion. */ -#include #include #include #include diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6d72f3ea48c4ef..6fbbf155803372 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -394,7 +394,6 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) skb->ip_summed = CHECKSUM_PARTIAL; TCP_SKB_CB(skb)->tcp_flags = flags; - TCP_SKB_CB(skb)->sacked = 0; tcp_skb_pcount_set(skb, 1); @@ -1256,8 +1255,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); skb->skb_mstamp_ns = tp->tcp_wstamp_ns; if (clone_it) { - TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq - - tp->snd_una; oskb = skb; tcp_skb_tsorted_save(oskb) { @@ -1566,7 +1563,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, return -ENOMEM; /* Get a new skb... force flag on. */ - buff = sk_stream_alloc_skb(sk, nsize, gfp, true); + buff = tcp_stream_alloc_skb(sk, nsize, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. */ skb_copy_decrypted(buff, skb); @@ -1592,8 +1589,6 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, skb_split(skb, buff, len); - buff->ip_summed = CHECKSUM_PARTIAL; - buff->tstamp = skb->tstamp; tcp_fragment_tstamp(skb, buff); @@ -1678,7 +1673,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) delta_truesize = __pskb_trim_head(skb, len); TCP_SKB_CB(skb)->seq += len; - skb->ip_summed = CHECKSUM_PARTIAL; if (delta_truesize) { skb->truesize -= delta_truesize; @@ -2123,7 +2117,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, skb, len, mss_now, gfp); - buff = sk_stream_alloc_skb(sk, 0, gfp, true); + buff = tcp_stream_alloc_skb(sk, 0, gfp, true); if (unlikely(!buff)) return -ENOMEM; skb_copy_decrypted(buff, skb); @@ -2144,12 +2138,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; - /* This packet was never sent out yet, so no SACK bits. 
*/ - TCP_SKB_CB(buff)->sacked = 0; - tcp_skb_fragment_eor(skb, buff); - buff->ip_summed = CHECKSUM_PARTIAL; skb_split(skb, buff, len); tcp_fragment_tstamp(skb, buff); @@ -2390,7 +2380,7 @@ static int tcp_mtu_probe(struct sock *sk) return -1; /* We're allowed to probe. Build it now. */ - nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); + nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); if (!nskb) return -1; sk_wmem_queued_add(sk, nskb->truesize); @@ -2403,9 +2393,6 @@ static int tcp_mtu_probe(struct sock *sk) TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; - TCP_SKB_CB(nskb)->sacked = 0; - nskb->csum = 0; - nskb->ip_summed = CHECKSUM_PARTIAL; tcp_insert_write_queue_before(nskb, skb, sk); tcp_highest_sack_replace(sk, skb, nskb); @@ -2425,7 +2412,7 @@ static int tcp_mtu_probe(struct sock *sk) TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; tcp_skb_collapse_tstamp(nskb, skb); tcp_unlink_write_queue(skb, sk); - sk_wmem_free_skb(sk, skb); + tcp_wmem_free_skb(sk, skb); } else { TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & ~(TCPHDR_FIN|TCPHDR_PSH); @@ -2969,8 +2956,7 @@ u32 __tcp_select_window(struct sock *sk) icsk->icsk_ack.quick = 0; if (tcp_under_memory_pressure(sk)) - tp->rcv_ssthresh = min(tp->rcv_ssthresh, - 4U * tp->advmss); + tcp_adjust_rcv_ssthresh(sk); /* free_space might become our new window, make sure we don't * increase it due to wscale. @@ -3048,13 +3034,9 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); - if (next_skb_size) { - if (next_skb_size <= skb_availroom(skb)) - skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), - next_skb_size); - else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) - return false; - } + if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size)) + return false; + tcp_highest_sack_replace(sk, next_skb, skb); /* Update sequence range on original skb. */ @@ -3757,10 +3739,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) /* limit to order-0 allocations */ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); - syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); + syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false); if (!syn_data) goto fallback; - syn_data->ip_summed = CHECKSUM_PARTIAL; memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); if (space) { int copied = copy_from_iter(skb_put(syn_data, space), space, @@ -3838,7 +3819,7 @@ int tcp_connect(struct sock *sk) return 0; } - buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); + buff = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, true); if (unlikely(!buff)) return -ENOBUFS; diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index 0de6935659635f..fbab921670cc91 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -65,6 +65,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp; TCP_SKB_CB(skb)->tx.delivered = tp->delivered; + TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce; TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 
1 : 0; } @@ -86,6 +87,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, if (!rs->prior_delivered || after(scb->tx.delivered, rs->prior_delivered)) { + rs->prior_delivered_ce = scb->tx.delivered_ce; rs->prior_delivered = scb->tx.delivered; rs->prior_mstamp = scb->tx.delivered_mstamp; rs->is_app_limited = scb->tx.is_app_limited; @@ -138,6 +140,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, } rs->delivered = tp->delivered - rs->prior_delivered; + rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce; + /* delivered_ce occupies less than 32 bits in the skb control block */ + rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK; + /* Model sending data and receiving ACKs as separate pipeline phases * for a window. Usually the ACK phase is longer, but with ACK * compression the send phase can be longer. To be safe we use the diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index b97e3635acf50b..8efaf8c3fe2a9a 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -2,11 +2,8 @@ #include #include #include -#include -#include #include #include -#include #include #include diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index f4555a88f86bcf..9d4f418f1bf816 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -8,9 +8,7 @@ #include #include -#include #include -#include #include static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index e504204bca924b..bf2e5e5fe14273 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -332,10 +332,10 @@ config IPV6_IOAM6_LWTUNNEL bool "IPv6: IOAM Pre-allocated Trace insertion support" depends on IPV6 select LWTUNNEL + select DST_CACHE help - Support for the inline insertion of IOAM Pre-allocated - Trace Header (only on locally generated packets), using - the lightweight tunnels mechanism. + Support for the insertion of IOAM Pre-allocated Trace + Header using the lightweight tunnels mechanism. If unsure, say N. 
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 1bc7e143217b4d..3036a45e8a1ecd 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -5,16 +5,14 @@ obj-$(CONFIG_IPV6) += ipv6.o -ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ +ipv6-y := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ addrlabel.o \ route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \ exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \ udp_offload.o seg6.o fib6_notifier.o rpl.o ioam6.o -ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o - -ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o +ipv6-$(CONFIG_SYSCTL) += sysctl_net_ipv6.o ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ @@ -29,8 +27,6 @@ ipv6-$(CONFIG_IPV6_SEG6_HMAC) += seg6_hmac.o ipv6-$(CONFIG_IPV6_RPL_LWTUNNEL) += rpl_iptunnel.o ipv6-$(CONFIG_IPV6_IOAM6_LWTUNNEL) += ioam6_iptunnel.o -ipv6-objs += $(ipv6-y) - obj-$(CONFIG_INET6_AH) += ah6.o obj-$(CONFIG_INET6_ESP) += esp6.o obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o @@ -48,7 +44,8 @@ obj-$(CONFIG_IPV6_GRE) += ip6_gre.o obj-$(CONFIG_IPV6_FOU) += fou6.o obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o -obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) +obj-$(CONFIG_INET) += output_core.o protocol.o \ + ip6_offload.o tcpv6_offload.o exthdrs_offload.o obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c6a90b7bbb70e9..3445f8017430f1 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -241,6 +241,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .ioam6_enabled = 0, .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, + .ndisc_evict_nocarrier = 1, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -300,6 +301,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .ioam6_enabled = 0, .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, + .ndisc_evict_nocarrier = 1, }; /* Check if link is ready: is it up and is a valid qdisc available */ @@ -2237,12 +2239,12 @@ static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev) static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev) { - union fwnet_hwaddr *ha; + const union fwnet_hwaddr *ha; if (dev->addr_len != FWNET_ALEN) return -1; - ha = (union fwnet_hwaddr *)dev->dev_addr; + ha = (const union fwnet_hwaddr *)dev->dev_addr; memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id)); eui[0] ^= 2; @@ -3110,6 +3112,9 @@ static void add_v4_addrs(struct inet6_dev *idev) memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); if (idev->dev->flags&IFF_POINTOPOINT) { + if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE) + return; + addr.s6_addr32[0] = htonl(0xfe800000); scope = IFA_LINK; plen = 64; @@ -5542,6 +5547,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled; array[DEVCONF_IOAM6_ID] = cnf->ioam6_id; array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide; + array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier; } static inline size_t inet6_ifla6_size(void) @@ -6983,6 +6989,15 @@ static const struct ctl_table addrconf_sysctl[] = { .mode = 0644, .proc_handler = proc_douintvec, }, + { + .procname = "ndisc_evict_nocarrier", + .data = &ipv6_devconf.ndisc_evict_nocarrier, + .maxlen = sizeof(u8), + .mode = 0644, + 
.proc_handler = proc_dou8vec_minmax, + .extra1 = (void *)SYSCTL_ZERO, + .extra2 = (void *)SYSCTL_ONE, + }, { /* sentinel */ } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b5878bb8e419d6..0c4da163535ad9 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -521,31 +521,32 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; + lock_sock(sk); if (peer) { - if (!inet->inet_dport) - return -ENOTCONN; - if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && - peer == 1) + if (!inet->inet_dport || + (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && + peer == 1)) { + release_sock(sk); return -ENOTCONN; + } sin->sin6_port = inet->inet_dport; sin->sin6_addr = sk->sk_v6_daddr; if (np->sndflow) sin->sin6_flowinfo = np->flow_label; - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, - CGROUP_INET6_GETPEERNAME, - NULL); + BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, + CGROUP_INET6_GETPEERNAME); } else { if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) sin->sin6_addr = np->saddr; else sin->sin6_addr = sk->sk_v6_rcv_saddr; sin->sin6_port = inet->inet_sport; - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, - CGROUP_INET6_GETSOCKNAME, - NULL); + BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, + CGROUP_INET6_GETSOCKNAME); } sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr, sk->sk_bound_dev_if); + release_sock(sk); return sizeof(*sin); } EXPORT_SYMBOL(inet6_getname); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 3a871a09f96254..38ece3b7b8395d 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -979,7 +979,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff) if (!skb_valid_dst(skb)) ip6_route_input(skb); - ioam6_fill_trace_data(skb, ns, trace); + ioam6_fill_trace_data(skb, ns, trace, true); break; default: break; diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index a1ac0e3d8c60c5..47447f0241df60 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -610,7 +610,11 @@ int ila_xlat_init_net(struct net *net) if (err) return err; - rhashtable_init(&ilan->xlat.rhash_table, &rht_params); + err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params); + if (err) { + free_bucket_spinlocks(ilan->xlat.locks); + return err; + } return 0; } diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c index d128172bb54976..122a3d47424c7f 100644 --- a/net/ipv6/ioam6.c +++ b/net/ipv6/ioam6.c @@ -631,7 +631,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, struct ioam6_namespace *ns, struct ioam6_trace_hdr *trace, struct ioam6_schema *sc, - u8 sclen) + u8 sclen, bool is_input) { struct __kernel_sock_timeval ts; u64 raw64; @@ -645,7 +645,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, /* hop_lim and node_id */ if (trace->type.bit0) { byte = ipv6_hdr(skb)->hop_limit; - if (skb->dev) + if (is_input) byte--; raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id; @@ -730,7 +730,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, /* hop_lim and node_id (wide) */ if (trace->type.bit8) { byte = ipv6_hdr(skb)->hop_limit; - if (skb->dev) + if (is_input) byte--; raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide; @@ -846,7 +846,8 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, /* called with rcu_read_lock() */ void ioam6_fill_trace_data(struct sk_buff *skb, struct ioam6_namespace *ns, - struct ioam6_trace_hdr *trace) + struct ioam6_trace_hdr *trace, + bool is_input) { struct 
ioam6_schema *sc; u8 sclen = 0; @@ -876,7 +877,7 @@ void ioam6_fill_trace_data(struct sk_buff *skb, return; } - __ioam6_fill_trace_data(skb, ns, trace, sc, sclen); + __ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input); trace->remlen -= trace->nodelen + sclen; } diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c index 9b7b726f8f45f2..f90a87389fcc5c 100644 --- a/net/ipv6/ioam6_iptunnel.c +++ b/net/ipv6/ioam6_iptunnel.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -17,18 +16,26 @@ #include #include #include +#include +#include +#include +#include +#include #define IOAM6_MASK_SHORT_FIELDS 0xff100000 #define IOAM6_MASK_WIDE_FIELDS 0xe00000 struct ioam6_lwt_encap { - struct ipv6_hopopt_hdr eh; - u8 pad[2]; /* 2-octet padding for 4n-alignment */ - struct ioam6_hdr ioamh; - struct ioam6_trace_hdr traceh; + struct ipv6_hopopt_hdr eh; + u8 pad[2]; /* 2-octet padding for 4n-alignment */ + struct ioam6_hdr ioamh; + struct ioam6_trace_hdr traceh; } __packed; struct ioam6_lwt { + struct dst_cache cache; + u8 mode; + struct in6_addr tundst; struct ioam6_lwt_encap tuninfo; }; @@ -42,34 +49,19 @@ static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt) return &ioam6_lwt_state(lwt)->tuninfo; } -static struct ioam6_trace_hdr *ioam6_trace(struct lwtunnel_state *lwt) +static struct ioam6_trace_hdr *ioam6_lwt_trace(struct lwtunnel_state *lwt) { return &(ioam6_lwt_state(lwt)->tuninfo.traceh); } static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = { + [IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8, + IOAM6_IPTUNNEL_MODE_MIN, + IOAM6_IPTUNNEL_MODE_MAX), + [IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), [IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)), }; -static int nla_put_ioam6_trace(struct sk_buff *skb, int attrtype, - struct ioam6_trace_hdr *trace) -{ - struct ioam6_trace_hdr *data; - struct nlattr *nla; - int len; - - len = sizeof(*trace); - - nla = nla_reserve(skb, attrtype, len); - if (!nla) - return -EMSGSIZE; - - data = nla_data(nla); - memcpy(data, trace, len); - - return 0; -} - static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace) { u32 fields; @@ -101,9 +93,10 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla, struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1]; struct ioam6_lwt_encap *tuninfo; struct ioam6_trace_hdr *trace; - struct lwtunnel_state *s; - int len_aligned; - int len, err; + struct lwtunnel_state *lwt; + struct ioam6_lwt *ilwt; + int len_aligned, err; + u8 mode; if (family != AF_INET6) return -EINVAL; @@ -113,6 +106,16 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla, if (err < 0) return err; + if (!tb[IOAM6_IPTUNNEL_MODE]) + mode = IOAM6_IPTUNNEL_MODE_INLINE; + else + mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]); + + if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) { + NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination"); + return -EINVAL; + } + if (!tb[IOAM6_IPTUNNEL_TRACE]) { NL_SET_ERR_MSG(extack, "missing trace"); return -EINVAL; @@ -125,15 +128,24 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla, return -EINVAL; } - len = sizeof(*tuninfo) + trace->remlen * 4; - len_aligned = ALIGN(len, 8); - - s = lwtunnel_state_alloc(len_aligned); - if (!s) + len_aligned = ALIGN(trace->remlen * 4, 8); + lwt = lwtunnel_state_alloc(sizeof(*ilwt) + len_aligned); + if (!lwt) return -ENOMEM; - tuninfo = ioam6_lwt_info(s); - tuninfo->eh.hdrlen = (len_aligned >> 3) 
- 1; + ilwt = ioam6_lwt_state(lwt); + err = dst_cache_init(&ilwt->cache, GFP_ATOMIC); + if (err) { + kfree(lwt); + return err; + } + + ilwt->mode = mode; + if (tb[IOAM6_IPTUNNEL_DST]) + ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]); + + tuninfo = ioam6_lwt_info(lwt); + tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1; tuninfo->pad[0] = IPV6_TLV_PADN; tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC; tuninfo->ioamh.opt_type = IPV6_TLV_IOAM; @@ -142,27 +154,39 @@ static int ioam6_build_state(struct net *net, struct nlattr *nla, memcpy(&tuninfo->traceh, trace, sizeof(*trace)); - len = len_aligned - len; - if (len == 1) { - tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PAD1; - } else if (len > 0) { + if (len_aligned - trace->remlen * 4) { tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN; - tuninfo->traceh.data[trace->remlen * 4 + 1] = len - 2; + tuninfo->traceh.data[trace->remlen * 4 + 1] = 2; } - s->type = LWTUNNEL_ENCAP_IOAM6; - s->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT; + lwt->type = LWTUNNEL_ENCAP_IOAM6; + lwt->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT; - *ts = s; + *ts = lwt; return 0; } -static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo) +static int ioam6_do_fill(struct net *net, struct sk_buff *skb) { struct ioam6_trace_hdr *trace; - struct ipv6hdr *oldhdr, *hdr; struct ioam6_namespace *ns; + + trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb) + + sizeof(struct ipv6_hopopt_hdr) + 2 + + sizeof(struct ioam6_hdr)); + + ns = ioam6_namespace(net, trace->namespace_id); + if (ns) + ioam6_fill_trace_data(skb, ns, trace, false); + + return 0; +} + +static int ioam6_do_inline(struct net *net, struct sk_buff *skb, + struct ioam6_lwt_encap *tuninfo) +{ + struct ipv6hdr *oldhdr, *hdr; int hdrlen, err; hdrlen = (tuninfo->eh.hdrlen + 1) << 3; @@ -191,80 +215,200 @@ static int ioam6_do_inline(struct sk_buff *skb, struct ioam6_lwt_encap *tuninfo) hdr->nexthdr = NEXTHDR_HOP; hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr)); - trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb) - + sizeof(struct ipv6_hopopt_hdr) + 2 - + sizeof(struct ioam6_hdr)); + return ioam6_do_fill(net, skb); +} - ns = ioam6_namespace(dev_net(skb_dst(skb)->dev), trace->namespace_id); - if (ns) - ioam6_fill_trace_data(skb, ns, trace); +static int ioam6_do_encap(struct net *net, struct sk_buff *skb, + struct ioam6_lwt_encap *tuninfo, + struct in6_addr *tundst) +{ + struct dst_entry *dst = skb_dst(skb); + struct ipv6hdr *hdr, *inner_hdr; + int hdrlen, len, err; - return 0; + hdrlen = (tuninfo->eh.hdrlen + 1) << 3; + len = sizeof(*hdr) + hdrlen; + + err = skb_cow_head(skb, len + skb->mac_len); + if (unlikely(err)) + return err; + + inner_hdr = ipv6_hdr(skb); + + skb_push(skb, len); + skb_reset_network_header(skb); + skb_mac_header_rebuild(skb); + skb_set_transport_header(skb, sizeof(*hdr)); + + tuninfo->eh.nexthdr = NEXTHDR_IPV6; + memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen); + + hdr = ipv6_hdr(skb); + memcpy(hdr, inner_hdr, sizeof(*hdr)); + + hdr->nexthdr = NEXTHDR_HOP; + hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr)); + hdr->daddr = *tundst; + ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr, + IPV6_PREFER_SRC_PUBLIC, &hdr->saddr); + + skb_postpush_rcsum(skb, hdr, len); + + return ioam6_do_fill(net, skb); } static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct lwtunnel_state *lwt = skb_dst(skb)->lwtstate; + struct dst_entry *dst = skb_dst(skb); + struct in6_addr orig_daddr; + struct ioam6_lwt 
*ilwt; int err = -EINVAL; if (skb->protocol != htons(ETH_P_IPV6)) goto drop; - /* Only for packets we send and - * that do not contain a Hop-by-Hop yet - */ - if (skb->dev || ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP) - goto out; - - err = ioam6_do_inline(skb, ioam6_lwt_info(lwt)); - if (unlikely(err)) + ilwt = ioam6_lwt_state(dst->lwtstate); + orig_daddr = ipv6_hdr(skb)->daddr; + + switch (ilwt->mode) { + case IOAM6_IPTUNNEL_MODE_INLINE: +do_inline: + /* Direct insertion - if there is no Hop-by-Hop yet */ + if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP) + goto out; + + err = ioam6_do_inline(net, skb, &ilwt->tuninfo); + if (unlikely(err)) + goto drop; + + break; + case IOAM6_IPTUNNEL_MODE_ENCAP: +do_encap: + /* Encapsulation (ip6ip6) */ + err = ioam6_do_encap(net, skb, &ilwt->tuninfo, &ilwt->tundst); + if (unlikely(err)) + goto drop; + + break; + case IOAM6_IPTUNNEL_MODE_AUTO: + /* Automatic (RFC8200 compliant): + * - local packets -> INLINE mode + * - in-transit packets -> ENCAP mode + */ + if (!skb->dev) + goto do_inline; + + goto do_encap; + default: goto drop; + } - err = skb_cow_head(skb, LL_RESERVED_SPACE(skb_dst(skb)->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) goto drop; + if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) { + preempt_disable(); + dst = dst_cache_get(&ilwt->cache); + preempt_enable(); + + if (unlikely(!dst)) { + struct ipv6hdr *hdr = ipv6_hdr(skb); + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + fl6.daddr = hdr->daddr; + fl6.saddr = hdr->saddr; + fl6.flowlabel = ip6_flowinfo(hdr); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = hdr->nexthdr; + + dst = ip6_route_output(net, NULL, &fl6); + if (dst->error) { + err = dst->error; + dst_release(dst); + goto drop; + } + + preempt_disable(); + dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr); + preempt_enable(); + } + + skb_dst_drop(skb); + skb_dst_set(skb, dst); + + return dst_output(net, sk, skb); + } out: - return lwt->orig_output(net, sk, skb); - + return dst->lwtstate->orig_output(net, sk, skb); drop: kfree_skb(skb); return err; } +static void ioam6_destroy_state(struct lwtunnel_state *lwt) +{ + dst_cache_destroy(&ioam6_lwt_state(lwt)->cache); +} + static int ioam6_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { - struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate); + struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate); + int err; - if (nla_put_ioam6_trace(skb, IOAM6_IPTUNNEL_TRACE, trace)) - return -EMSGSIZE; + err = nla_put_u8(skb, IOAM6_IPTUNNEL_MODE, ilwt->mode); + if (err) + goto ret; - return 0; + if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) { + err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst); + if (err) + goto ret; + } + + err = nla_put(skb, IOAM6_IPTUNNEL_TRACE, sizeof(ilwt->tuninfo.traceh), + &ilwt->tuninfo.traceh); +ret: + return err; } static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate) { - struct ioam6_trace_hdr *trace = ioam6_trace(lwtstate); + struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate); + int nlsize; + + nlsize = nla_total_size(sizeof(ilwt->mode)) + + nla_total_size(sizeof(ilwt->tuninfo.traceh)); - return nla_total_size(sizeof(*trace)); + if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) + nlsize += nla_total_size(sizeof(ilwt->tundst)); + + return nlsize; } static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) { - struct ioam6_trace_hdr *a_hdr = ioam6_trace(a); - struct ioam6_trace_hdr *b_hdr = ioam6_trace(b); - - return (a_hdr->namespace_id != b_hdr->namespace_id); + struct 
ioam6_trace_hdr *trace_a = ioam6_lwt_trace(a); + struct ioam6_trace_hdr *trace_b = ioam6_lwt_trace(b); + struct ioam6_lwt *ilwt_a = ioam6_lwt_state(a); + struct ioam6_lwt *ilwt_b = ioam6_lwt_state(b); + + return (ilwt_a->mode != ilwt_b->mode || + (ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE && + !ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) || + trace_a->namespace_id != trace_b->namespace_id); } static const struct lwtunnel_encap_ops ioam6_iptun_ops = { - .build_state = ioam6_build_state, + .build_state = ioam6_build_state, + .destroy_state = ioam6_destroy_state, .output = ioam6_output, - .fill_encap = ioam6_fill_encap_info, + .fill_encap = ioam6_fill_encap_info, .get_encap_size = ioam6_encap_nlsize, - .cmp_encap = ioam6_encap_cmp, - .owner = THIS_MODULE, + .cmp_encap = ioam6_encap_cmp, + .owner = THIS_MODULE, }; int __init ioam6_iptunnel_init(void) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3ad201d372d886..d831d243969327 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1088,7 +1088,7 @@ static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) struct flowi6 *fl6 = &t->fl.u.ip6; if (dev->type != ARPHRD_ETHER) { - memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); + __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); } @@ -1521,7 +1521,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) if (tunnel->parms.collect_md) return 0; - memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); + __dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); if (ipv6_addr_any(&tunnel->parms.raddr)) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 20a67efda47f52..484aca492cc068 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1449,7 +1449,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) unsigned int mtu; int t_hlen; - memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); + __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 1d8e3ffa225d83..527e9ead7449e5 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -660,7 +660,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu) struct net_device *tdev = NULL; int mtu; - memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); + __dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV | diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index e4bdb09c558670..41efca817db422 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -55,6 +55,8 @@ struct ip6_ra_chain *ip6_ra_chain; DEFINE_RWLOCK(ip6_ra_lock); +DEFINE_STATIC_KEY_FALSE(ip6_min_hopcount); + int ip6_ra_control(struct sock *sk, int sel) { struct ip6_ra_chain *ra, *new_ra, **rap; @@ -950,7 +952,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, goto e_inval; if (val < 0 || val > 255) goto e_inval; - np->min_hopcount = val; + + if (val) + static_branch_enable(&ip6_min_hopcount); + + /* tcp_v6_err() and tcp_v6_rcv() might read min_hopcount + * while we are changing it. 
+ */ + WRITE_ONCE(np->min_hopcount, val); retv = 0; break; case IPV6_DONTFRAG: diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 4b098521a44cd7..f03b597e4121b5 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -142,7 +142,7 @@ struct neigh_table nd_tbl = { }; EXPORT_SYMBOL_GPL(nd_tbl); -void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data, +void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data, int data_len, int pad) { int space = __ndisc_opt_addr_space(data_len, pad); @@ -165,7 +165,7 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data, EXPORT_SYMBOL_GPL(__ndisc_fill_addr_option); static inline void ndisc_fill_addr_option(struct sk_buff *skb, int type, - void *data, u8 icmp6_type) + const void *data, u8 icmp6_type) { __ndisc_fill_addr_option(skb, type, data, skb->dev->addr_len, ndisc_addr_option_pad(skb->dev->type)); @@ -1794,6 +1794,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, struct netdev_notifier_change_info *change_info; struct net *net = dev_net(dev); struct inet6_dev *idev; + bool evict_nocarrier; switch (event) { case NETDEV_CHANGEADDR: @@ -1810,10 +1811,19 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, in6_dev_put(idev); break; case NETDEV_CHANGE: + idev = in6_dev_get(dev); + if (!idev) + evict_nocarrier = true; + else { + evict_nocarrier = idev->cnf.ndisc_evict_nocarrier && + net->ipv6.devconf_all->ndisc_evict_nocarrier; + in6_dev_put(idev); + } + change_info = ptr; if (change_info->flags_changed & IFF_NOARP) neigh_changeaddr(&nd_tbl, dev); - if (!netif_carrier_ok(dev)) + if (evict_nocarrier && !netif_carrier_ok(dev)) neigh_carrier_down(&nd_tbl, dev); break; case NETDEV_DOWN: diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index a579ea14a69b67..2d816277f2c5a8 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -247,10 +247,10 @@ ip6t_next_entry(const struct ip6t_entry *entry) /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int -ip6t_do_table(struct sk_buff *skb, - const struct nf_hook_state *state, - struct xt_table *table) +ip6t_do_table(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) { + const struct xt_table *table = priv; unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 727ee80970124d..df785ebda0ca43 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c @@ -27,14 +27,6 @@ static const struct xt_table packet_filter = { .priority = NF_IP6_PRI_FILTER, }; -/* The work comes in here from netfilter.c. */ -static unsigned int -ip6table_filter_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip6t_do_table(skb, state, priv); -} - static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. 
*/ @@ -90,7 +82,7 @@ static int __init ip6table_filter_init(void) if (ret < 0) return ret; - filter_ops = xt_hook_ops_alloc(&packet_filter, ip6table_filter_hook); + filter_ops = xt_hook_ops_alloc(&packet_filter, ip6t_do_table); if (IS_ERR(filter_ops)) { xt_unregister_template(&packet_filter); return PTR_ERR(filter_ops); diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 9b518ce37d6ae6..a88b2ce4a3cb8d 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -29,7 +29,7 @@ static const struct xt_table packet_mangler = { }; static unsigned int -ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *priv) +ip6t_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { unsigned int ret; struct in6_addr saddr, daddr; @@ -46,7 +46,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state, void *pr /* flowlabel and prio (includes version, which shouldn't change either */ flowlabel = *((u_int32_t *)ipv6_hdr(skb)); - ret = ip6t_do_table(skb, state, priv); + ret = ip6t_do_table(priv, skb, state); if (ret != NF_DROP && ret != NF_STOLEN && (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) || @@ -68,8 +68,8 @@ ip6table_mangle_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { if (state->hook == NF_INET_LOCAL_OUT) - return ip6t_mangle_out(skb, state, priv); - return ip6t_do_table(skb, state, priv); + return ip6t_mangle_out(priv, skb, state); + return ip6t_do_table(priv, skb, state); } static struct nf_hook_ops *mangle_ops __read_mostly; diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 921c1723a01e4a..bf3cb3a13600cd 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -31,34 +31,27 @@ static const struct xt_table nf_nat_ipv6_table = { .af = NFPROTO_IPV6, }; -static unsigned int ip6table_nat_do_chain(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip6t_do_table(skb, state, priv); -} - static const struct nf_hook_ops nf_nat_ipv6_ops[] = { { - .hook = ip6table_nat_do_chain, + .hook = ip6t_do_table, .pf = NFPROTO_IPV6, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_NAT_DST, }, { - .hook = ip6table_nat_do_chain, + .hook = ip6t_do_table, .pf = NFPROTO_IPV6, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP6_PRI_NAT_SRC, }, { - .hook = ip6table_nat_do_chain, + .hook = ip6t_do_table, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP6_PRI_NAT_DST, }, { - .hook = ip6table_nat_do_chain, + .hook = ip6t_do_table, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP6_PRI_NAT_SRC, diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index 4f2a04af71d320..08861d5d1f4db3 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c @@ -31,14 +31,6 @@ static const struct xt_table packet_raw_before_defrag = { .priority = NF_IP6_PRI_RAW_BEFORE_DEFRAG, }; -/* The work comes in here from netfilter.c. 
*/ -static unsigned int -ip6table_raw_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip6t_do_table(skb, state, priv); -} - static struct nf_hook_ops *rawtable_ops __read_mostly; static int ip6table_raw_table_init(struct net *net) @@ -88,7 +80,7 @@ static int __init ip6table_raw_init(void) return ret; /* Register hooks */ - rawtable_ops = xt_hook_ops_alloc(table, ip6table_raw_hook); + rawtable_ops = xt_hook_ops_alloc(table, ip6t_do_table); if (IS_ERR(rawtable_ops)) { xt_unregister_template(table); return PTR_ERR(rawtable_ops); diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index 931674034d8be5..4df14a9bae782d 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c @@ -32,13 +32,6 @@ static const struct xt_table security_table = { .priority = NF_IP6_PRI_SECURITY, }; -static unsigned int -ip6table_security_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip6t_do_table(skb, state, priv); -} - static struct nf_hook_ops *sectbl_ops __read_mostly; static int ip6table_security_table_init(struct net *net) @@ -77,7 +70,7 @@ static int __init ip6table_security_init(void) if (ret < 0) return ret; - sectbl_ops = xt_hook_ops_alloc(&security_table, ip6table_security_hook); + sectbl_ops = xt_hook_ops_alloc(&security_table, ip6t_do_table); if (IS_ERR(sectbl_ops)) { xt_unregister_template(&security_table); return PTR_ERR(sectbl_ops); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9b9ef09382ab91..3ae25b8ffbd6fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6306,11 +6306,11 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, static struct ctl_table ipv6_route_table_template[] = { { - .procname = "flush", - .data = &init_net.ipv6.sysctl.flush_delay, + .procname = "max_size", + .data = &init_net.ipv6.sysctl.ip6_rt_max_size, .maxlen = sizeof(int), - .mode = 0200, - .proc_handler = ipv6_sysctl_rtcache_flush + .mode = 0644, + .proc_handler = proc_dointvec, }, { .procname = "gc_thresh", @@ -6320,11 +6320,11 @@ static struct ctl_table ipv6_route_table_template[] = { .proc_handler = proc_dointvec, }, { - .procname = "max_size", - .data = &init_net.ipv6.sysctl.ip6_rt_max_size, + .procname = "flush", + .data = &init_net.ipv6.sysctl.flush_delay, .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, + .mode = 0200, + .proc_handler = ipv6_sysctl_rtcache_flush }, { .procname = "gc_min_interval", @@ -6396,10 +6396,10 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) GFP_KERNEL); if (table) { - table[0].data = &net->ipv6.sysctl.flush_delay; - table[0].extra1 = net; + table[0].data = &net->ipv6.sysctl.ip6_rt_max_size; table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; - table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; + table[2].data = &net->ipv6.sysctl.flush_delay; + table[2].extra1 = net; table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; @@ -6411,7 +6411,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) - table[0].procname = NULL; + table[1].procname = NULL; } return table; diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index e412817fba2f32..5daa1c3ed83ba8 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -374,7 +374,11 @@ static int __net_init 
seg6_net_init(struct net *net) net->ipv6.seg6_data = sdata; #ifdef CONFIG_IPV6_SEG6_HMAC - seg6_hmac_net_init(net); + if (seg6_hmac_net_init(net)) { + kfree(rcu_dereference_raw(sdata->tun_src)); + kfree(sdata); + return -ENOMEM; + }; #endif return 0; @@ -388,7 +392,7 @@ static void __net_exit seg6_net_exit(struct net *net) seg6_hmac_net_exit(net); #endif - kfree(sdata->tun_src); + kfree(rcu_dereference_raw(sdata->tun_src)); kfree(sdata); } diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 687d95dce08522..29bc4e7c3046e2 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -405,9 +405,7 @@ int __net_init seg6_hmac_net_init(struct net *net) { struct seg6_pernet_data *sdata = seg6_pernet(net); - rhashtable_init(&sdata->hmac_infos, &rht_params); - - return 0; + return rhashtable_init(&sdata->hmac_infos, &rht_params); } EXPORT_SYMBOL(seg6_hmac_net_init); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index ef0c7a7c18e237..1b57ee36d6682e 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -204,7 +204,7 @@ static int ipip6_tunnel_create(struct net_device *dev) struct sit_net *sitn = net_generic(net, sit_net_id); int err; - memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); + __dev_addr_set(dev, &t->parms.iph.saddr, 4); memcpy(dev->broadcast, &t->parms.iph.daddr, 4); if ((__force u16)t->parms.i_flags & SIT_ISATAP) @@ -1149,7 +1149,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, synchronize_net(); t->parms.iph.saddr = p->iph.saddr; t->parms.iph.daddr = p->iph.daddr; - memcpy(t->dev->dev_addr, &p->iph.saddr, 4); + __dev_addr_set(t->dev, &p->iph.saddr, 4); memcpy(t->dev->broadcast, &p->iph.daddr, 4); ipip6_tunnel_link(sitn, t); t->parms.iph.ttl = p->iph.ttl; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b03dd02c9f13c5..2cc9b0e53ad1c8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -108,8 +108,8 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) const struct rt6_info *rt = (const struct rt6_info *)dst; sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; - tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); + sk->sk_rx_dst_ifindex = skb->skb_iif; + sk->sk_rx_dst_cookie = rt6_get_cookie(rt); } } @@ -414,9 +414,12 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (sk->sk_state == TCP_CLOSE) goto out; - if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); - goto out; + if (static_branch_unlikely(&ip6_min_hopcount)) { + /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ + if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) { + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); + goto out; + } } tp = tcp_sk(sk); @@ -569,7 +572,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, static void tcp_v6_reqsk_destructor(struct request_sock *req) { kfree(inet_rsk(req)->ipv6_opt); - kfree_skb(inet_rsk(req)->pktopts); + consume_skb(inet_rsk(req)->pktopts); } #ifdef CONFIG_TCP_MD5SIG @@ -966,7 +969,6 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 fl6.flowlabel = label; buff->ip_summed = CHECKSUM_PARTIAL; - buff->csum = 0; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); @@ -1509,9 +1511,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { - if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || + if (sk->sk_rx_dst_ifindex 
!= skb->skb_iif || INDIRECT_CALL_1(dst->ops->check, ip6_dst_check, - dst, np->rx_dst_cookie) == NULL) { + dst, sk->sk_rx_dst_cookie) == NULL) { dst_release(dst); sk->sk_rx_dst = NULL; } @@ -1591,7 +1593,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) } } - kfree_skb(opt_skb); + consume_skb(opt_skb); return 0; } @@ -1621,7 +1623,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) { - struct sk_buff *skb_to_free; int sdif = inet6_sdif(skb); int dif = inet6_iif(skb); const struct tcphdr *th; @@ -1727,9 +1728,13 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) return 0; } } - if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) { - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); - goto discard_and_relse; + + if (static_branch_unlikely(&ip6_min_hopcount)) { + /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ + if (hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) { + __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); + goto discard_and_relse; + } } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) @@ -1757,17 +1762,12 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { - skb_to_free = sk->sk_rx_skb_cache; - sk->sk_rx_skb_cache = NULL; ret = tcp_v6_do_rcv(sk, skb); } else { if (tcp_add_backlog(sk, skb)) goto discard_and_relse; - skb_to_free = NULL; } bh_unlock_sock(sk); - if (skb_to_free) - __kfree_skb(skb_to_free); put_and_return: if (refcounted) sock_put(sk); @@ -1878,9 +1878,9 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); if (dst) - dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); + dst = dst_check(dst, sk->sk_rx_dst_cookie); if (dst && - inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) + sk->sk_rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8d785232b4796b..12c12619ee357d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -884,7 +884,7 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) if (udp_sk_rx_dst_set(sk, dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); + sk->sk_rx_dst_cookie = rt6_get_cookie(rt); } } @@ -1073,7 +1073,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) dst = READ_ONCE(sk->sk_rx_dst); if (dst) - dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); + dst = dst_check(dst, sk->sk_rx_dst_cookie); if (dst) { /* set noref for now. 
* any place which wants to hold dst has to call @@ -1435,7 +1435,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (!fl6.flowi6_oif) fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; - fl6.flowi6_mark = ipc6.sockc.mark; fl6.flowi6_uid = sk->sk_uid; if (msg->msg_controllen) { @@ -1471,6 +1470,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc6.opt = opt; fl6.flowi6_proto = sk->sk_protocol; + fl6.flowi6_mark = ipc6.sockc.mark; fl6.daddr = *daddr; if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) fl6.saddr = np->saddr; diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 647c0554d04cd8..40ca3c1e42a2e0 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -781,7 +781,7 @@ int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) if (nskb) { struct llc_sap *sap = llc->sap; - u8 *dmac = llc->daddr.mac; + const u8 *dmac = llc->daddr.mac; if (llc->dev->flags & IFF_LOOPBACK) dmac = llc->dev->dev_addr; diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index ad6547736c219d..dde9bf08a593ef 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c @@ -80,7 +80,7 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb) * establishment will inform to upper layer via calling it's confirm * function and passing proper information. */ -int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap) +int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac, u8 dsap) { int rc = -EISCONN; struct llc_addr laddr, daddr; diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index b9ad087bcbd746..5a6466fc626a72 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -56,7 +56,7 @@ int llc_mac_hdr_init(struct sk_buff *skb, * package primitive as an event and send to SAP event handler */ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, - unsigned char *dmac, unsigned char dsap) + const unsigned char *dmac, unsigned char dsap) { int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap, diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index a4eccb98220a9d..0ff490a73fae89 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -26,7 +26,7 @@ #include #include -static void llc_ui_format_mac(struct seq_file *seq, u8 *addr) +static void llc_ui_format_mac(struct seq_file *seq, const u8 *addr) { seq_printf(seq, "%pM", addr); } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index cce28e3b223231..470ff0ce3dc763 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -477,7 +477,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, size_t len) { u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; - struct ieee802_11_elems elems = { }; + struct ieee802_11_elems *elems = NULL; u8 dialog_token; int ies_len; @@ -495,16 +495,18 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, ies_len = len - offsetof(struct ieee80211_mgmt, u.action.u.addba_req.variable); if (ies_len) { - ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable, - ies_len, true, &elems, mgmt->bssid, NULL); - if (elems.parse_error) - return; + elems = ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable, + ies_len, true, mgmt->bssid, NULL); + if (!elems || elems->parse_error) + goto free; } __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, buf_size, true, false, - elems.addba_ext_ie); + elems ? 
elems->addba_ext_ie : NULL); +free: + kfree(elems); } void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d69b31c20fe283..e2b791c37591f3 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -111,6 +111,36 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, return 0; } +static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, + struct cfg80211_mbssid_config params) +{ + struct ieee80211_sub_if_data *tx_sdata; + + sdata->vif.mbssid_tx_vif = NULL; + sdata->vif.bss_conf.bssid_index = 0; + sdata->vif.bss_conf.nontransmitted = false; + sdata->vif.bss_conf.ema_ap = false; + + if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev) + return -EINVAL; + + tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params.tx_wdev); + if (!tx_sdata) + return -EINVAL; + + if (tx_sdata == sdata) { + sdata->vif.mbssid_tx_vif = &sdata->vif; + } else { + sdata->vif.mbssid_tx_vif = &tx_sdata->vif; + sdata->vif.bss_conf.nontransmitted = true; + sdata->vif.bss_conf.bssid_index = params.index; + } + if (params.ema) + sdata->vif.bss_conf.ema_ap = true; + + return 0; +} + static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, @@ -1105,6 +1135,14 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, changed |= BSS_CHANGED_HE_BSS_COLOR; } + if (sdata->vif.type == NL80211_IFTYPE_AP && + params->mbssid_config.tx_wdev) { + err = ieee80211_set_ap_mbssid_options(sdata, + params->mbssid_config); + if (err) + return err; + } + mutex_lock(&local->mtx); err = ieee80211_vif_use_channel(sdata, &params->chandef, IEEE80211_CHANCTX_SHARED); diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 8be28cfd6f648a..481f01b0f65cca 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -5,7 +5,7 @@ * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation + * Copyright (C) 2018 - 2021 Intel Corporation */ #include @@ -153,20 +153,20 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf, rcu_read_lock(); p += scnprintf(p, - bufsz+buf-p, + bufsz + buf - p, "target %uus interval %uus ecn %s\n", codel_time_to_us(sta->cparams.target), codel_time_to_us(sta->cparams.interval), sta->cparams.ecn ? 
"yes" : "no"); p += scnprintf(p, - bufsz+buf-p, + bufsz + buf - p, "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n"); for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { if (!sta->sta.txq[i]) continue; txqi = to_txq_info(sta->sta.txq[i]); - p += scnprintf(p, bufsz+buf-p, + p += scnprintf(p, bufsz + buf - p, "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n", txqi->txq.tid, txqi->txq.ac, @@ -314,17 +314,24 @@ STA_OPS_RW(aql); static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf; + char *buf, *p; + ssize_t bufsz = 71 + IEEE80211_NUM_TIDS * 40; int i; struct sta_info *sta = file->private_data; struct tid_ampdu_rx *tid_rx; struct tid_ampdu_tx *tid_tx; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; rcu_read_lock(); - p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", + p += scnprintf(p, bufsz + buf - p, "next dialog_token: %#02x\n", sta->ampdu_mlme.dialog_token_allocator + 1); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); for (i = 0; i < IEEE80211_NUM_TIDS; i++) { @@ -334,25 +341,27 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid); - p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); - p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", + p += scnprintf(p, bufsz + buf - p, "%02d", i); + p += scnprintf(p, bufsz + buf - p, "\t\t%x", tid_rx_valid); - p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", + p += scnprintf(p, bufsz + buf - p, "\t%#.2x", tid_rx_valid ? sta->ampdu_mlme.tid_rx_token[i] : 0); - p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", + p += scnprintf(p, bufsz + buf - p, "\t%#.3x", tid_rx ? tid_rx->ssn : 0); - p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx); - p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", + p += scnprintf(p, bufsz + buf - p, "\t\t%x", !!tid_tx); + p += scnprintf(p, bufsz + buf - p, "\t%#.2x", tid_tx ? tid_tx->dialog_token : 0); - p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", + p += scnprintf(p, bufsz + buf - p, "\t%03d", tid_tx ? skb_queue_len(&tid_tx->pending) : 0); - p += scnprintf(p, sizeof(buf) + buf - p, "\n"); + p += scnprintf(p, bufsz + buf - p, "\n"); } rcu_read_unlock(); - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; } static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, @@ -434,15 +443,22 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, if (_cond) \ p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ } while (0) - char buf[512], *p = buf; + char *buf, *p; int i; + ssize_t bufsz = 512; struct sta_info *sta = file->private_data; struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; + ssize_t ret; - p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, bufsz + buf - p, "ht %ssupported\n", htc->ht_supported ? 
"" : "not "); if (htc->ht_supported) { - p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); + p += scnprintf(p, bufsz + buf - p, "cap: %#.4x\n", htc->cap); PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); @@ -484,81 +500,90 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); - p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", + p += scnprintf(p, bufsz + buf - p, "ampdu factor/density: %d/%d\n", htc->ampdu_factor, htc->ampdu_density); - p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); + p += scnprintf(p, bufsz + buf - p, "MCS mask:"); for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) - p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", + p += scnprintf(p, bufsz + buf - p, " %.2x", htc->mcs.rx_mask[i]); - p += scnprintf(p, sizeof(buf)+buf-p, "\n"); + p += scnprintf(p, bufsz + buf - p, "\n"); /* If not set this is meaningless */ if (le16_to_cpu(htc->mcs.rx_highest)) { - p += scnprintf(p, sizeof(buf)+buf-p, + p += scnprintf(p, bufsz + buf - p, "MCS rx highest: %d Mbps\n", le16_to_cpu(htc->mcs.rx_highest)); } - p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", + p += scnprintf(p, bufsz + buf - p, "MCS tx params: %x\n", htc->mcs.tx_params); } - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; } STA_OPS(ht_capa); static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - char buf[512], *p = buf; + char *buf, *p; struct sta_info *sta = file->private_data; struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap; + ssize_t ret; + ssize_t bufsz = 512; - p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n", + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, bufsz + buf - p, "VHT %ssupported\n", vhtc->vht_supported ? 
"" : "not "); if (vhtc->vht_supported) { - p += scnprintf(p, sizeof(buf) + buf - p, "cap: %#.8x\n", + p += scnprintf(p, bufsz + buf - p, "cap: %#.8x\n", vhtc->cap); #define PFLAG(a, b) \ do { \ if (vhtc->cap & IEEE80211_VHT_CAP_ ## a) \ - p += scnprintf(p, sizeof(buf) + buf - p, \ + p += scnprintf(p, bufsz + buf - p, \ "\t\t%s\n", b); \ } while (0) switch (vhtc->cap & 0x3) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tMAX-MPDU-3895\n"); break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tMAX-MPDU-7991\n"); break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tMAX-MPDU-11454\n"); break; default: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tMAX-MPDU-UNKNOWN\n"); } switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case 0: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\t80Mhz\n"); break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\t160Mhz\n"); break; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\t80+80Mhz\n"); break; default: - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tUNKNOWN-MHZ: 0x%x\n", (vhtc->cap >> 2) & 0x3); } @@ -566,15 +591,15 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, PFLAG(SHORT_GI_80, "SHORT-GI-80"); PFLAG(SHORT_GI_160, "SHORT-GI-160"); PFLAG(TXSTBC, "TXSTBC"); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tRXSTBC_%d\n", (vhtc->cap >> 8) & 0x7); PFLAG(SU_BEAMFORMER_CAPABLE, "SU-BEAMFORMER-CAPABLE"); PFLAG(SU_BEAMFORMEE_CAPABLE, "SU-BEAMFORMEE-CAPABLE"); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tBEAMFORMEE-STS: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK) >> IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tSOUNDING-DIMENSIONS: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK) >> IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT); @@ -582,34 +607,36 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, PFLAG(MU_BEAMFORMEE_CAPABLE, "MU-BEAMFORMEE-CAPABLE"); PFLAG(VHT_TXOP_PS, "TXOP-PS"); PFLAG(HTC_VHT, "HTC-VHT"); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tMPDU-LENGTH-EXPONENT: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); PFLAG(VHT_LINK_ADAPTATION_VHT_UNSOL_MFB, "LINK-ADAPTATION-VHT-UNSOL-MFB"); - p += scnprintf(p, sizeof(buf) + buf - p, + p += scnprintf(p, bufsz + buf - p, "\t\tLINK-ADAPTATION-VHT-MRQ-MFB: 0x%x\n", (vhtc->cap & IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB) >> 26); PFLAG(RX_ANTENNA_PATTERN, "RX-ANTENNA-PATTERN"); PFLAG(TX_ANTENNA_PATTERN, "TX-ANTENNA-PATTERN"); - p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n", + p += scnprintf(p, bufsz + buf - p, "RX MCS: %.4x\n", le16_to_cpu(vhtc->vht_mcs.rx_mcs_map)); if (vhtc->vht_mcs.rx_highest) - p += scnprintf(p, sizeof(buf)+buf-p, + p += scnprintf(p, bufsz + buf - p, "MCS RX highest: %d Mbps\n", 
le16_to_cpu(vhtc->vht_mcs.rx_highest)); - p += scnprintf(p, sizeof(buf)+buf-p, "TX MCS: %.4x\n", + p += scnprintf(p, bufsz + buf - p, "TX MCS: %.4x\n", le16_to_cpu(vhtc->vht_mcs.tx_mcs_map)); if (vhtc->vht_mcs.tx_highest) - p += scnprintf(p, sizeof(buf)+buf-p, + p += scnprintf(p, bufsz + buf - p, "MCS TX highest: %d Mbps\n", le16_to_cpu(vhtc->vht_mcs.tx_highest)); #undef PFLAG } - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; } STA_OPS(vht_capa); diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c index a13ae148937ec6..e1d4cfd9912854 100644 --- a/net/mac80211/fils_aead.c +++ b/net/mac80211/fils_aead.c @@ -219,7 +219,8 @@ int fils_encrypt_assoc_req(struct sk_buff *skb, { struct ieee80211_mgmt *mgmt = (void *)skb->data; u8 *capab, *ies, *encr; - const u8 *addr[5 + 1], *session; + const u8 *addr[5 + 1]; + const struct element *session; size_t len[5 + 1]; size_t crypt_len; @@ -231,12 +232,12 @@ int fils_encrypt_assoc_req(struct sk_buff *skb, ies = mgmt->u.assoc_req.variable; } - session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION, - ies, skb->data + skb->len - ies); - if (!session || session[1] != 1 + 8) + session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION, + ies, skb->data + skb->len - ies); + if (!session || session->datalen != 1 + 8) return -EINVAL; /* encrypt after FILS Session element */ - encr = (u8 *)session + 2 + 1 + 8; + encr = (u8 *)session->data + 1 + 8; /* AES-SIV AAD vectors */ @@ -270,7 +271,8 @@ int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, { struct ieee80211_mgmt *mgmt = (void *)frame; u8 *capab, *ies, *encr; - const u8 *addr[5 + 1], *session; + const u8 *addr[5 + 1]; + const struct element *session; size_t len[5 + 1]; int res; size_t crypt_len; @@ -280,16 +282,16 @@ int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, capab = (u8 *)&mgmt->u.assoc_resp.capab_info; ies = mgmt->u.assoc_resp.variable; - session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION, - ies, frame + *frame_len - ies); - if (!session || session[1] != 1 + 8) { + session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION, + ies, frame + *frame_len - ies); + if (!session || session->datalen != 1 + 8) { mlme_dbg(sdata, "No (valid) FILS Session element in (Re)Association Response frame from %pM", mgmt->sa); return -EINVAL; } /* decrypt after FILS Session element */ - encr = (u8 *)session + 2 + 1 + 8; + encr = (u8 *)session->data + 1 + 8; /* AES-SIV AAD vectors */ diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 5d6ca4c3e69819..0416c4d222929b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -9,7 +9,7 @@ * Copyright 2009, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018-2020 Intel Corporation + * Copyright(c) 2018-2021 Intel Corporation */ #include @@ -1589,7 +1589,7 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status) { size_t baselen; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != offsetof(typeof(mgmt->u.beacon), variable)); @@ -1602,10 +1602,14 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; - ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - false, &elems, mgmt->bssid, NULL); + elems = 
ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, false, + mgmt->bssid, NULL); - ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); + if (elems) { + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, elems); + kfree(elems); + } } void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, @@ -1614,7 +1618,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; int ies_len; rx_status = IEEE80211_SKB_RXCB(skb); @@ -1651,15 +1655,16 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, if (ies_len < 0) break; - ieee802_11_parse_elems( + elems = ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, - ies_len, true, &elems, mgmt->bssid, NULL); - - if (elems.parse_error) - break; - - ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len, - rx_status, &elems); + ies_len, true, mgmt->bssid, NULL); + + if (elems && !elems->parse_error) + ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, + skb->len, + rx_status, + elems); + kfree(elems); break; } } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 159af6c3ffb05b..5666bbb8860bba 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -631,10 +631,9 @@ struct ieee80211_if_ocb { */ struct ieee802_11_elems; struct ieee80211_mesh_sync_ops { - void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, - u16 stype, - struct ieee80211_mgmt *mgmt, - struct ieee802_11_elems *elems, + void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, u16 stype, + struct ieee80211_mgmt *mgmt, unsigned int len, + const struct ieee80211_meshconf_ie *mesh_cfg, struct ieee80211_rx_status *rx_status); /* should be called with beacon_data under RCU read lock */ @@ -1242,6 +1241,9 @@ struct ieee80211_local { */ bool suspended; + /* suspending is true during the whole suspend process */ + bool suspending; + /* * Resuming is true while suspended, but when we're reprogramming the * hardware -- at that time it's allowed to use ieee80211_queue_work() @@ -1508,6 +1510,7 @@ struct ieee80211_csa_ie { struct ieee802_11_elems { const u8 *ie_start; size_t total_len; + u32 crc; /* pointers to IEs */ const struct ieee80211_tdls_lnkie *lnk_id; @@ -1517,7 +1520,6 @@ struct ieee802_11_elems { const u8 *supp_rates; const u8 *ds_params; const struct ieee80211_tim_ie *tim; - const u8 *challenge; const u8 *rsn; const u8 *rsnx; const u8 *erp_info; @@ -1571,7 +1573,6 @@ struct ieee802_11_elems { u8 ssid_len; u8 supp_rates_len; u8 tim_len; - u8 challenge_len; u8 rsn_len; u8 rsnx_len; u8 ext_supp_rates_len; @@ -2194,18 +2195,18 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, ieee80211_tx_skb_tid(sdata, skb, 7); } -u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, - struct ieee802_11_elems *elems, - u64 filter, u32 crc, u8 *transmitter_bssid, - u8 *bss_bssid); -static inline void ieee802_11_parse_elems(const u8 *start, size_t len, - bool action, - struct ieee802_11_elems *elems, - u8 *transmitter_bssid, - u8 *bss_bssid) +struct ieee802_11_elems *ieee802_11_parse_elems_crc(const u8 *start, size_t len, + bool action, + u64 filter, u32 crc, + const u8 *transmitter_bssid, + const u8 *bss_bssid); +static inline struct ieee802_11_elems * +ieee802_11_parse_elems(const u8 *start, size_t len, bool action, + const u8 *transmitter_bssid, + const u8 *bss_bssid) { - 
ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0, - transmitter_bssid, bss_bssid); + return ieee802_11_parse_elems_crc(start, len, action, 0, 0, + transmitter_bssid, bss_bssid); } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 62c95597704b40..9a2145c8192b6a 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -632,17 +632,46 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do ieee80211_add_virtual_monitor(local); } +static void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *tx_sdata, *non_tx_sdata, *tmp_sdata; + struct ieee80211_vif *tx_vif = sdata->vif.mbssid_tx_vif; + + if (!tx_vif) + return; + + tx_sdata = vif_to_sdata(tx_vif); + sdata->vif.mbssid_tx_vif = NULL; + + list_for_each_entry_safe(non_tx_sdata, tmp_sdata, + &tx_sdata->local->interfaces, list) { + if (non_tx_sdata != sdata && non_tx_sdata != tx_sdata && + non_tx_sdata->vif.mbssid_tx_vif == tx_vif && + ieee80211_sdata_running(non_tx_sdata)) { + non_tx_sdata->vif.mbssid_tx_vif = NULL; + dev_close(non_tx_sdata->wdev.netdev); + } + } + + if (sdata != tx_sdata && ieee80211_sdata_running(tx_sdata)) { + tx_sdata->vif.mbssid_tx_vif = NULL; + dev_close(tx_sdata->wdev.netdev); + } +} + static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - /* close all dependent VLAN interfaces before locking wiphy */ + /* close dependent VLAN and MBSSID interfaces before locking wiphy */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); + + ieee80211_stop_mbssid(sdata); } wiphy_lock(sdata->local->hw.wiphy); @@ -1108,9 +1137,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) * this interface, if it has the special null one. 
*/ if (dev && is_zero_ether_addr(dev->dev_addr)) { - memcpy(dev->dev_addr, - local->hw.wiphy->perm_addr, - ETH_ALEN); + eth_hw_addr_set(dev, local->hw.wiphy->perm_addr); memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { @@ -1964,9 +1991,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ieee80211_assign_perm_addr(local, ndev->perm_addr, type); if (is_valid_ether_addr(params->macaddr)) - memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN); + eth_hw_addr_set(ndev, params->macaddr); else - memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); + eth_hw_addr_set(ndev, ndev->perm_addr); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */ diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5dcfd53a4ab6c5..15ac08d111ea11 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1247,7 +1247,7 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *presp; struct beacon_data *bcn; struct ieee80211_mgmt *hdr; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u8 *pos; @@ -1256,22 +1256,24 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; - ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid, - NULL); - - if (!elems.mesh_id) + elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid, + NULL); + if (!elems) return; + if (!elems->mesh_id) + goto free; + /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && !is_broadcast_ether_addr(mgmt->da)) || - elems.ssid_len != 0) - return; + elems->ssid_len != 0) + goto free; - if (elems.mesh_id_len != 0 && - (elems.mesh_id_len != ifmsh->mesh_id_len || - memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) - return; + if (elems->mesh_id_len != 0 && + (elems->mesh_id_len != ifmsh->mesh_id_len || + memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) + goto free; rcu_read_lock(); bcn = rcu_dereference(ifmsh->beacon); @@ -1295,6 +1297,8 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, ieee80211_tx_skb(sdata, presp); out: rcu_read_unlock(); +free: + kfree(elems); } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, @@ -1305,7 +1309,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; struct ieee80211_channel *channel; size_t baselen; int freq; @@ -1320,42 +1324,47 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; - ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - false, &elems, mgmt->bssid, NULL); + elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, + false, mgmt->bssid, NULL); + if (!elems) + return; /* ignore non-mesh or secure / unsecure mismatch */ - if ((!elems.mesh_id || !elems.mesh_config) || - (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || - (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) - return; + if ((!elems->mesh_id || !elems->mesh_config) || + (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || + (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) + goto free; - if (elems.ds_params) - freq = ieee80211_channel_to_frequency(elems.ds_params[0], 
band); + if (elems->ds_params) + freq = ieee80211_channel_to_frequency(elems->ds_params[0], band); else freq = rx_status->freq; channel = ieee80211_get_channel(local->hw.wiphy, freq); if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) - return; + goto free; - if (mesh_matches_local(sdata, &elems)) { + if (mesh_matches_local(sdata, elems)) { mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); if (!sdata->u.mesh.user_mpm || sdata->u.mesh.mshcfg.rssi_threshold == 0 || sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) - mesh_neighbour_update(sdata, mgmt->sa, &elems, + mesh_neighbour_update(sdata, mgmt->sa, elems, rx_status); if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && !sdata->vif.csa_active) - ieee80211_mesh_process_chnswitch(sdata, &elems, true); + ieee80211_mesh_process_chnswitch(sdata, elems, true); } if (ifmsh->sync_ops) - ifmsh->sync_ops->rx_bcn_presp(sdata, - stype, mgmt, &elems, rx_status); + ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len, + elems->mesh_config, rx_status); +free: + kfree(elems); } int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) @@ -1447,7 +1456,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; u16 pre_value; bool fwd_csa = true; size_t baselen; @@ -1460,33 +1469,37 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, pos = mgmt->u.action.u.chan_switch.variable; baselen = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); - ieee802_11_parse_elems(pos, len - baselen, true, &elems, - mgmt->bssid, NULL); - - if (!mesh_matches_local(sdata, &elems)) + elems = ieee802_11_parse_elems(pos, len - baselen, true, + mgmt->bssid, NULL); + if (!elems) return; - ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; + if (!mesh_matches_local(sdata, elems)) + goto free; + + ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false; - pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); + pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value); if (ifmsh->pre_value >= pre_value) - return; + goto free; ifmsh->pre_value = pre_value; if (!sdata->vif.csa_active && - !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) { + !ieee80211_mesh_process_chnswitch(sdata, elems, false)) { mcsa_dbg(sdata, "Failed to process CSA action frame"); - return; + goto free; } /* forward or re-broadcast the CSA frame */ if (fwd_csa) { - if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) + if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0) mcsa_dbg(sdata, "Failed to forward the CSA frame"); } +free: + kfree(elems); } static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index a05b615deb5177..44a6fdb6efbd44 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. 
- * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2021 Intel Corporation * Author: Luis Carlos Cobo */ @@ -908,7 +908,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u32 path_metric; struct sta_info *sta; @@ -926,37 +926,41 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; - ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, - len - baselen, false, &elems, mgmt->bssid, NULL); + elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, + len - baselen, false, mgmt->bssid, NULL); + if (!elems) + return; - if (elems.preq) { - if (elems.preq_len != 37) + if (elems->preq) { + if (elems->preq_len != 37) /* Right now we support just 1 destination and no AE */ - return; - path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq, + goto free; + path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq, MPATH_PREQ); if (path_metric) - hwmp_preq_frame_process(sdata, mgmt, elems.preq, + hwmp_preq_frame_process(sdata, mgmt, elems->preq, path_metric); } - if (elems.prep) { - if (elems.prep_len != 31) + if (elems->prep) { + if (elems->prep_len != 31) /* Right now we support no AE */ - return; - path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep, + goto free; + path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep, MPATH_PREP); if (path_metric) - hwmp_prep_frame_process(sdata, mgmt, elems.prep, + hwmp_prep_frame_process(sdata, mgmt, elems->prep, path_metric); } - if (elems.perr) { - if (elems.perr_len != 15) + if (elems->perr) { + if (elems->perr_len != 15) /* Right now we support only one destination per PERR */ - return; - hwmp_perr_frame_process(sdata, mgmt, elems.perr); + goto free; + hwmp_perr_frame_process(sdata, mgmt, elems->perr); } - if (elems.rann) - hwmp_rann_frame_process(sdata, mgmt, elems.rann); + if (elems->rann) + hwmp_rann_frame_process(sdata, mgmt, elems->rann); +free: + kfree(elems); } /** diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index a6915847d78aed..a829470dd59ede 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. 
- * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2021 Intel Corporation * Author: Luis Carlos Cobo */ #include @@ -1200,7 +1200,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u8 *baseaddr; @@ -1228,7 +1228,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; } - ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems, - mgmt->bssid, NULL); - mesh_process_plink_frame(sdata, mgmt, &elems, rx_status); + elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, + mgmt->bssid, NULL); + mesh_process_plink_frame(sdata, mgmt, elems, rx_status); + kfree(elems); } diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index fde93de2b80ace..9e342cc2504c06 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -3,6 +3,7 @@ * Copyright 2011-2012, Pavel Zubarev * Copyright 2011-2012, Marco Porsch * Copyright 2011-2012, cozybit Inc. + * Copyright (C) 2021 Intel Corporation */ #include "ieee80211_i.h" @@ -35,12 +36,12 @@ struct sync_method { /** * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT * - * @ie: information elements of a management frame from the mesh peer + * @cfg: mesh config element from the mesh peer (or %NULL) */ -static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) +static bool mesh_peer_tbtt_adjusting(const struct ieee80211_meshconf_ie *cfg) { - return (ie->mesh_config->meshconf_cap & - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; + return cfg && + (cfg->meshconf_cap & IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING); } void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata) @@ -76,11 +77,11 @@ void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata) } } -static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, - u16 stype, - struct ieee80211_mgmt *mgmt, - struct ieee802_11_elems *elems, - struct ieee80211_rx_status *rx_status) +static void +mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, u16 stype, + struct ieee80211_mgmt *mgmt, unsigned int len, + const struct ieee80211_meshconf_ie *mesh_cfg, + struct ieee80211_rx_status *rx_status) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; @@ -101,10 +102,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, */ if (ieee80211_have_rx_timestamp(rx_status)) t_r = ieee80211_calculate_rx_timestamp(local, rx_status, - 24 + 12 + - elems->total_len + - FCS_LEN, - 24); + len + FCS_LEN, 24); else t_r = drv_get_tsf(local, sdata); @@ -119,7 +117,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors */ - if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { + if (mesh_peer_tbtt_adjusting(mesh_cfg)) { msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr); goto no_sync; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c0ea3b1aa9e1cd..54ab0e1ef6ca58 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1490,6 +1490,7 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, fallthrough; case NL80211_BAND_2GHZ: case NL80211_BAND_60GHZ: + case NL80211_BAND_LC: chan_increment = 1; break; case NL80211_BAND_5GHZ: @@ -2258,6 +2259,7 @@ static void ieee80211_set_disassoc(struct 
ieee80211_sub_if_data *sdata, { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; u32 changed = 0; struct ieee80211_prep_tx_info info = { .subtype = stype, @@ -2407,6 +2409,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, cancel_delayed_work_sync(&ifmgd->tx_tspec_wk); sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; + + bss_conf->pwr_reduction = 0; + bss_conf->tx_pwr_env_num = 0; + memset(bss_conf->tx_pwr_env, 0, sizeof(bss_conf->tx_pwr_env)); } static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) @@ -2509,7 +2515,7 @@ static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - const u8 *ssid; + const struct element *ssid; u8 *dst = ifmgd->associated->bssid; u8 unicast_limit = max(1, max_probe_tries - 3); struct sta_info *sta; @@ -2546,14 +2552,14 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) int ssid_len; rcu_read_lock(); - ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); + ssid = ieee80211_bss_get_elem(ifmgd->associated, WLAN_EID_SSID); if (WARN_ON_ONCE(ssid == NULL)) ssid_len = 0; else - ssid_len = ssid[1]; + ssid_len = ssid->datalen; ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, - ssid + 2, ssid_len, + ssid->data, ssid_len, ifmgd->associated->channel); rcu_read_unlock(); } @@ -2583,6 +2589,13 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, goto out; } + if (sdata->local->suspending) { + /* reschedule after resume */ + mutex_unlock(&sdata->local->mtx); + ieee80211_reset_ap_probe(sdata); + goto out; + } + if (beacon) { mlme_dbg_ratelimited(sdata, "detected beacon loss from AP (missed %d beacons) - probing\n", @@ -2629,7 +2642,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct cfg80211_bss *cbss; struct sk_buff *skb; - const u8 *ssid; + const struct element *ssid; int ssid_len; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) @@ -2647,16 +2660,17 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, return NULL; rcu_read_lock(); - ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID); - if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN, - "invalid SSID element (len=%d)", ssid ? ssid[1] : -1)) + ssid = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID); + if (WARN_ONCE(!ssid || ssid->datalen > IEEE80211_MAX_SSID_LEN, + "invalid SSID element (len=%d)", + ssid ? 
ssid->datalen : -1)) ssid_len = 0; else - ssid_len = ssid[1]; + ssid_len = ssid->datalen; skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, (u32) -1, cbss->channel, - ssid + 2, ssid_len, + ssid->data, ssid_len, NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); rcu_read_unlock(); @@ -2870,17 +2884,17 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; + const struct element *challenge; u8 *pos; - struct ieee802_11_elems elems; u32 tx_flags = 0; struct ieee80211_prep_tx_info info = { .subtype = IEEE80211_STYPE_AUTH, }; pos = mgmt->u.auth.variable; - ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, - mgmt->bssid, auth_data->bss->bssid); - if (!elems.challenge) + challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos, + len - (pos - (u8 *)mgmt)); + if (!challenge) return; auth_data->expected_transaction = 4; drv_mgd_prepare_tx(sdata->local, sdata, &info); @@ -2888,7 +2902,8 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, - elems.challenge - 2, elems.challenge_len + 2, + (void *)challenge, + challenge->datalen + sizeof(*challenge), auth_data->bss->bssid, auth_data->bss->bssid, auth_data->key, auth_data->key_len, auth_data->key_idx, tx_flags); @@ -3290,8 +3305,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, aid = 0; /* TODO */ } capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); - ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, elems, - mgmt->bssid, assoc_data->bss->bssid); + elems = ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, + mgmt->bssid, assoc_data->bss->bssid); + + if (!elems) + return false; if (elems->aid_resp) aid = le16_to_cpu(elems->aid_resp->aid); @@ -3313,7 +3331,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (!is_s1g && !elems->supp_rates) { sdata_info(sdata, "no SuppRates element in AssocResp\n"); - return false; + ret = false; + goto out; } sdata->vif.bss_conf.aid = aid; @@ -3335,7 +3354,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems->vht_cap_elem || !elems->vht_operation)))) { const struct cfg80211_bss_ies *ies; - struct ieee802_11_elems bss_elems; + struct ieee802_11_elems *bss_elems; rcu_read_lock(); ies = rcu_dereference(cbss->ies); @@ -3343,16 +3362,22 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC); rcu_read_unlock(); - if (!bss_ies) - return false; + if (!bss_ies) { + ret = false; + goto out; + } + + bss_elems = ieee802_11_parse_elems(bss_ies->data, bss_ies->len, + false, mgmt->bssid, + assoc_data->bss->bssid); + if (!bss_elems) { + ret = false; + goto out; + } - ieee802_11_parse_elems(bss_ies->data, bss_ies->len, - false, &bss_elems, - mgmt->bssid, - assoc_data->bss->bssid); if (assoc_data->wmm && - !elems->wmm_param && bss_elems.wmm_param) { - elems->wmm_param = bss_elems.wmm_param; + !elems->wmm_param && bss_elems->wmm_param) { + elems->wmm_param = bss_elems->wmm_param; sdata_info(sdata, "AP bug: WMM param missing from AssocResp\n"); } @@ -3361,30 +3386,32 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, * Also check if we requested HT/VHT, 
otherwise the AP doesn't * have to include the IEs in the (re)association response. */ - if (!elems->ht_cap_elem && bss_elems.ht_cap_elem && + if (!elems->ht_cap_elem && bss_elems->ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { - elems->ht_cap_elem = bss_elems.ht_cap_elem; + elems->ht_cap_elem = bss_elems->ht_cap_elem; sdata_info(sdata, "AP bug: HT capability missing from AssocResp\n"); } - if (!elems->ht_operation && bss_elems.ht_operation && + if (!elems->ht_operation && bss_elems->ht_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { - elems->ht_operation = bss_elems.ht_operation; + elems->ht_operation = bss_elems->ht_operation; sdata_info(sdata, "AP bug: HT operation missing from AssocResp\n"); } - if (!elems->vht_cap_elem && bss_elems.vht_cap_elem && + if (!elems->vht_cap_elem && bss_elems->vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { - elems->vht_cap_elem = bss_elems.vht_cap_elem; + elems->vht_cap_elem = bss_elems->vht_cap_elem; sdata_info(sdata, "AP bug: VHT capa missing from AssocResp\n"); } - if (!elems->vht_operation && bss_elems.vht_operation && + if (!elems->vht_operation && bss_elems->vht_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { - elems->vht_operation = bss_elems.vht_operation; + elems->vht_operation = bss_elems->vht_operation; sdata_info(sdata, "AP bug: VHT operation missing from AssocResp\n"); } + + kfree(bss_elems); } /* @@ -3629,6 +3656,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ret = true; out: + kfree(elems); kfree(bss_ies); return ret; } @@ -3640,7 +3668,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; int ac, uapsd_queues = -1; u8 *pos; bool reassoc; @@ -3697,14 +3725,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0) return; - ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems, - mgmt->bssid, assoc_data->bss->bssid); + elems = ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, + mgmt->bssid, assoc_data->bss->bssid); + if (!elems) + goto notify_driver; if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && - elems.timeout_int && - elems.timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { + elems->timeout_int && + elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; - tu = le32_to_cpu(elems.timeout_int->value); + tu = le32_to_cpu(elems->timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", @@ -3724,7 +3754,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, event.u.mlme.reason = status_code; drv_event_callback(sdata->local, sdata, &event); } else { - if (!ieee80211_assoc_success(sdata, cbss, mgmt, len, &elems)) { + if (!ieee80211_assoc_success(sdata, cbss, mgmt, len, elems)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false, false); cfg80211_assoc_timeout(sdata->dev, cbss); @@ -3754,6 +3784,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len); notify_driver: drv_mgd_complete_tx(sdata->local, sdata, &info); + kfree(elems); } static void 
ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@ -3958,7 +3989,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; struct ieee80211_mgmt *mgmt = (void *) hdr; size_t baselen; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; @@ -4004,15 +4035,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->bss)) { - ieee802_11_parse_elems(variable, - len - baselen, false, &elems, - bssid, - ifmgd->assoc_data->bss->bssid); + elems = ieee802_11_parse_elems(variable, len - baselen, false, + bssid, + ifmgd->assoc_data->bss->bssid); + if (!elems) + return; ieee80211_rx_bss_info(sdata, mgmt, len, rx_status); - if (elems.dtim_period) - ifmgd->dtim_period = elems.dtim_period; + if (elems->dtim_period) + ifmgd->dtim_period = elems->dtim_period; ifmgd->have_beacon = true; ifmgd->assoc_data->need_beacon = false; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { @@ -4020,17 +4052,17 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, le64_to_cpu(mgmt->u.beacon.timestamp); sdata->vif.bss_conf.sync_device_ts = rx_status->device_timestamp; - sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count; + sdata->vif.bss_conf.sync_dtim_count = elems->dtim_count; } - if (elems.mbssid_config_ie) + if (elems->mbssid_config_ie) bss_conf->profile_periodicity = - elems.mbssid_config_ie->profile_periodicity; + elems->mbssid_config_ie->profile_periodicity; else bss_conf->profile_periodicity = 0; - if (elems.ext_capab_len >= 11 && - (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) + if (elems->ext_capab_len >= 11 && + (elems->ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT)) bss_conf->ema_ap = true; else bss_conf->ema_ap = false; @@ -4039,6 +4071,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; run_again(sdata, ifmgd->assoc_data->timeout); + kfree(elems); return; } @@ -4070,13 +4103,15 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); - ncrc = ieee802_11_parse_elems_crc(variable, - len - baselen, false, &elems, - care_about_ies, ncrc, - mgmt->bssid, bssid); + elems = ieee802_11_parse_elems_crc(variable, len - baselen, + false, care_about_ies, ncrc, + mgmt->bssid, bssid); + if (!elems) + return; + ncrc = elems->crc; if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && - ieee80211_check_tim(elems.tim, elems.tim_len, bss_conf->aid)) { + ieee80211_check_tim(elems->tim, elems->tim_len, bss_conf->aid)) { if (local->hw.conf.dynamic_ps_timeout > 0) { if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; @@ -4146,12 +4181,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, le64_to_cpu(mgmt->u.beacon.timestamp); sdata->vif.bss_conf.sync_device_ts = rx_status->device_timestamp; - sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count; + sdata->vif.bss_conf.sync_dtim_count = elems->dtim_count; } if ((ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) || ieee80211_is_s1g_short_beacon(mgmt->frame_control)) - return; + goto free; 
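/*
 * Hedged illustration, not part of the patch above: the series converts
 * ieee802_11_parse_elems() from filling a caller-provided stack struct to
 * returning a heap-allocated struct ieee802_11_elems that the caller owns
 * and must kfree().  A minimal kernel-context sketch of that calling
 * convention follows; demo_rx_beacon() is a hypothetical caller, while
 * ieee802_11_parse_elems() and kfree() are the APIs the patch defines/uses.
 */
static void demo_rx_beacon(struct ieee80211_mgmt *mgmt, size_t len,
			   size_t baselen)
{
	struct ieee802_11_elems *elems;

	elems = ieee802_11_parse_elems(mgmt->u.beacon.variable,
				       len - baselen, false,
				       mgmt->bssid, NULL);
	if (!elems)		/* allocation failed inside the parser */
		return;

	if (elems->parse_error)	/* malformed IEs: still need to free */
		goto free;

	/* ... use elems->tim, elems->wmm_param, elems->dtim_period, ... */

free:
	kfree(elems);		/* caller owns the parsed elements */
}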
ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true; @@ -4159,12 +4194,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, rx_status->device_timestamp, - &elems, true); + elems, true); if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && - ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, - elems.wmm_param_len, - elems.mu_edca_param_set)) + ieee80211_sta_wmm_params(local, sdata, elems->wmm_param, + elems->wmm_param_len, + elems->mu_edca_param_set)) changed |= BSS_CHANGED_QOS; /* @@ -4173,7 +4208,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, */ if (!ifmgd->have_beacon) { /* a few bogus AP send dtim_period = 0 or no TIM IE */ - bss_conf->dtim_period = elems.dtim_period ?: 1; + bss_conf->dtim_period = elems->dtim_period ?: 1; changed |= BSS_CHANGED_BEACON_INFO; ifmgd->have_beacon = true; @@ -4185,9 +4220,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_recalc_ps_vif(sdata); } - if (elems.erp_info) { + if (elems->erp_info) { erp_valid = true; - erp_value = elems.erp_info[0]; + erp_value = elems->erp_info[0]; } else { erp_valid = false; } @@ -4200,12 +4235,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, bssid); - changed |= ieee80211_recalc_twt_req(sdata, sta, &elems); + changed |= ieee80211_recalc_twt_req(sdata, sta, elems); - if (ieee80211_config_bw(sdata, sta, elems.ht_cap_elem, - elems.vht_cap_elem, elems.ht_operation, - elems.vht_operation, elems.he_operation, - elems.s1g_oper, bssid, &changed)) { + if (ieee80211_config_bw(sdata, sta, elems->ht_cap_elem, + elems->vht_cap_elem, elems->ht_operation, + elems->vht_operation, elems->he_operation, + elems->s1g_oper, bssid, &changed)) { mutex_unlock(&local->sta_mtx); sdata_info(sdata, "failed to follow AP %pM bandwidth change, disconnect\n", @@ -4217,21 +4252,23 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, sizeof(deauth_buf), true, WLAN_REASON_DEAUTH_LEAVING, false); - return; + goto free; } - if (sta && elems.opmode_notif) - ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, + if (sta && elems->opmode_notif) + ieee80211_vht_handle_opmode(sdata, sta, *elems->opmode_notif, rx_status->band); mutex_unlock(&local->sta_mtx); changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, - elems.country_elem, - elems.country_elem_len, - elems.pwr_constr_elem, - elems.cisco_dtpc_elem); + elems->country_elem, + elems->country_elem_len, + elems->pwr_constr_elem, + elems->cisco_dtpc_elem); ieee80211_bss_info_change_notify(sdata, changed); +free: + kfree(elems); } void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, @@ -4260,7 +4297,6 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; - struct ieee802_11_elems elems; int ies_len; rx_status = (struct ieee80211_rx_status *) skb->cb; @@ -4292,6 +4328,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { + struct ieee802_11_elems *elems; + ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); @@ -4300,18 +4338,19 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; /* CSA IE cannot be overridden, no need for BSSID */ - 
ieee802_11_parse_elems( - mgmt->u.action.u.chan_switch.variable, - ies_len, true, &elems, mgmt->bssid, NULL); - - if (elems.parse_error) - break; - - ieee80211_sta_process_chanswitch(sdata, - rx_status->mactime, - rx_status->device_timestamp, - &elems, false); + elems = ieee802_11_parse_elems( + mgmt->u.action.u.chan_switch.variable, + ies_len, true, mgmt->bssid, NULL); + + if (elems && !elems->parse_error) + ieee80211_sta_process_chanswitch(sdata, + rx_status->mactime, + rx_status->device_timestamp, + elems, false); + kfree(elems); } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { + struct ieee802_11_elems *elems; + ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.ext_chan_switch.variable); @@ -4323,21 +4362,22 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, * extended CSA IE can't be overridden, no need for * BSSID */ - ieee802_11_parse_elems( - mgmt->u.action.u.ext_chan_switch.variable, - ies_len, true, &elems, mgmt->bssid, NULL); - - if (elems.parse_error) - break; - - /* for the handling code pretend this was also an IE */ - elems.ext_chansw_ie = - &mgmt->u.action.u.ext_chan_switch.data; + elems = ieee802_11_parse_elems( + mgmt->u.action.u.ext_chan_switch.variable, + ies_len, true, mgmt->bssid, NULL); + + if (elems && !elems->parse_error) { + /* for the handling code pretend it was an IE */ + elems->ext_chansw_ie = + &mgmt->u.action.u.ext_chan_switch.data; + + ieee80211_sta_process_chanswitch(sdata, + rx_status->mactime, + rx_status->device_timestamp, + elems, false); + } - ieee80211_sta_process_chanswitch(sdata, - rx_status->mactime, - rx_status->device_timestamp, - &elems, false); + kfree(elems); } break; } @@ -4972,10 +5012,22 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee802_11_elems *elems; + const struct cfg80211_bss_ies *ies; int ret; u32 i; bool have_80mhz; + rcu_read_lock(); + + ies = rcu_dereference(cbss->ies); + elems = ieee802_11_parse_elems(ies->data, ies->len, false, + NULL, NULL); + if (!elems) { + rcu_read_unlock(); + return -ENOMEM; + } + sband = local->hw.wiphy->bands[cbss->channel->band]; ifmgd->flags &= ~(IEEE80211_STA_DISABLE_40MHZ | @@ -4998,18 +5050,9 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ieee80211_vif_type_p2p(&sdata->vif))) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; - rcu_read_lock(); - if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !is_6ghz) { - const u8 *ht_oper_ie, *ht_cap_ie; - - ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); - if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) - ht_oper = (void *)(ht_oper_ie + 2); - - ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); - if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) - ht_cap = (void *)(ht_cap_ie + 2); + ht_oper = elems->ht_operation; + ht_cap = elems->ht_cap_elem; if (!ht_cap) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; @@ -5018,12 +5061,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && !is_6ghz) { - const u8 *vht_oper_ie, *vht_cap; - - vht_oper_ie = ieee80211_bss_get_ie(cbss, - WLAN_EID_VHT_OPERATION); - if (vht_oper_ie && vht_oper_ie[1] >= sizeof(*vht_oper)) - vht_oper = (void *)(vht_oper_ie + 2); + vht_oper = elems->vht_operation; if (vht_oper && !ht_oper) { vht_oper = NULL; sdata_info(sdata, @@ 
-5033,25 +5071,38 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } - vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY); - if (!vht_cap || vht_cap[1] < sizeof(struct ieee80211_vht_cap)) { + if (!elems->vht_cap_elem) { ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; vht_oper = NULL; } } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) { - const struct cfg80211_bss_ies *ies; - const u8 *he_oper_ie; + he_oper = elems->he_operation; - ies = rcu_dereference(cbss->ies); - he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, - ies->data, ies->len); - if (he_oper_ie && - he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3])) - he_oper = (void *)(he_oper_ie + 3); - else - he_oper = NULL; + if (is_6ghz) { + struct ieee80211_bss_conf *bss_conf; + u8 i, j = 0; + + bss_conf = &sdata->vif.bss_conf; + + if (elems->pwr_constr_elem) + bss_conf->pwr_reduction = *elems->pwr_constr_elem; + + BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) != + ARRAY_SIZE(elems->tx_pwr_env)); + + for (i = 0; i < elems->tx_pwr_env_num; i++) { + if (elems->tx_pwr_env_len[i] > + sizeof(bss_conf->tx_pwr_env[j])) + continue; + + bss_conf->tx_pwr_env_num++; + memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i], + elems->tx_pwr_env_len[i]); + j++; + } + } if (!ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper)) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; @@ -5072,13 +5123,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; if (sband->band == NL80211_BAND_S1GHZ) { - const u8 *s1g_oper_ie; - - s1g_oper_ie = ieee80211_bss_get_ie(cbss, - WLAN_EID_S1G_OPERATION); - if (s1g_oper_ie && s1g_oper_ie[1] >= sizeof(*s1g_oper)) - s1g_oper = (void *)(s1g_oper_ie + 2); - else + s1g_oper = elems->s1g_oper; + if (!s1g_oper) sdata_info(sdata, "AP missing S1G operation element?\n"); } @@ -5094,6 +5140,9 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, local->rx_chains); rcu_read_unlock(); + /* the element data was RCU protected so no longer valid anyway */ + kfree(elems); + elems = NULL; if (ifmgd->flags & IEEE80211_STA_DISABLE_HE && is_6ghz) { sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection"); @@ -5498,7 +5547,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, const struct cfg80211_bss_ies *beacon_ies; struct ieee80211_supported_band *sband; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; - const u8 *ssidie, *ht_ie, *vht_ie; + const struct element *ssid_elem, *ht_elem, *vht_elem; int i, err; bool override = false; @@ -5507,14 +5556,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, return -ENOMEM; rcu_read_lock(); - ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); - if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) { + ssid_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_SSID); + if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) { rcu_read_unlock(); kfree(assoc_data); return -EINVAL; } - memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]); - assoc_data->ssid_len = ssidie[1]; + memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen); + assoc_data->ssid_len = ssid_elem->datalen; memcpy(bss_conf->ssid, assoc_data->ssid, assoc_data->ssid_len); bss_conf->ssid_len = assoc_data->ssid_len; rcu_read_unlock(); @@ -5628,15 +5677,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, assoc_data->supp_rates_len = bss->supp_rates_len; rcu_read_lock(); - ht_ie = ieee80211_bss_get_ie(req->bss, 
WLAN_EID_HT_OPERATION); - if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation)) + ht_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_HT_OPERATION); + if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation)) assoc_data->ap_ht_param = - ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; + ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param; else if (!is_6ghz) ifmgd->flags |= IEEE80211_STA_DISABLE_HT; - vht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_VHT_CAPABILITY); - if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap)) - memcpy(&assoc_data->ap_vht_cap, vht_ie + 2, + vht_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_VHT_CAPABILITY); + if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) + memcpy(&assoc_data->ap_vht_cap, vht_elem->data, sizeof(struct ieee80211_vht_cap)); else if (is_5ghz) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT | diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 7809a906d7fe99..0ccb5701c7f398 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -27,6 +27,9 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) if (!local->open_count) goto suspend; + local->suspending = true; + mb(); /* make suspending visible before any cancellation */ + ieee80211_scan_cancel(local); ieee80211_dfs_cac_cancel(local); @@ -176,6 +179,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) /* need suspended to be visible before quiescing is false */ barrier(); local->quiescing = false; + local->suspending = false; return 0; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c4071b015c1884..fc5c608d02e216 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3216,10 +3216,7 @@ static bool ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); struct ieee80211_sub_if_data *sdata = rx->sdata; - const struct ieee80211_sta_he_cap *hecap; - struct ieee80211_supported_band *sband; /* TWT actions are only supported in AP for the moment */ if (sdata->vif.type != NL80211_IFTYPE_AP) @@ -3228,14 +3225,7 @@ ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) if (!rx->local->ops->add_twt_setup) return false; - sband = rx->local->hw.wiphy->bands[status->band]; - hecap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); - if (!hecap) - return false; - - if (!(hecap->he_cap_elem.mac_cap_info[0] & - IEEE80211_HE_MAC_CAP0_TWT_RES)) + if (!sdata->vif.bss_conf.twt_responder) return false; if (!rx->sta) diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c index 7e35ab5b61664c..4141bc80cdfd64 100644 --- a/net/mac80211/s1g.c +++ b/net/mac80211/s1g.c @@ -104,9 +104,11 @@ ieee80211_s1g_rx_twt_setup(struct ieee80211_sub_if_data *sdata, /* broadcast TWT not supported yet */ if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) { - le16p_replace_bits(&twt_agrt->req_type, - TWT_SETUP_CMD_REJECT, - IEEE80211_TWT_REQTYPE_SETUP_CMD); + twt_agrt->req_type &= + ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); + twt_agrt->req_type |= + le16_encode_bits(TWT_SETUP_CMD_REJECT, + IEEE80211_TWT_REQTYPE_SETUP_CMD); goto out; } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 6b50cb5e0e3ccc..5e6b275afc9e62 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright 2016-2017 
Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include @@ -155,7 +155,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local, }; bool signal_valid; struct ieee80211_sub_if_data *scan_sdata; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u8 *elements; @@ -209,8 +209,10 @@ ieee80211_bss_info_update(struct ieee80211_local *local, if (baselen > len) return NULL; - ieee802_11_parse_elems(elements, len - baselen, false, &elems, - mgmt->bssid, cbss->bssid); + elems = ieee802_11_parse_elems(elements, len - baselen, false, + mgmt->bssid, cbss->bssid); + if (!elems) + return NULL; /* In case the signal is invalid update the status */ signal_valid = channel == cbss->channel; @@ -218,15 +220,17 @@ ieee80211_bss_info_update(struct ieee80211_local *local, rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; bss = (void *)cbss->priv; - ieee80211_update_bss_from_elems(local, bss, &elems, rx_status, beacon); + ieee80211_update_bss_from_elems(local, bss, elems, rx_status, beacon); list_for_each_entry(non_tx_cbss, &cbss->nontrans_list, nontrans_list) { non_tx_bss = (void *)non_tx_cbss->priv; - ieee80211_update_bss_from_elems(local, non_tx_bss, &elems, + ieee80211_update_bss_from_elems(local, non_tx_bss, elems, rx_status, beacon); } + kfree(elems); + return bss; } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 2b5acb37587f70..51b49f0d3ad48a 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -444,6 +444,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, switch (i) { case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: /* * We use both here, even if we cannot really know for * sure the station will support both, but the only use @@ -513,6 +514,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->cparams.target = MS2TIME(20); sta->cparams.interval = MS2TIME(100); sta->cparams.ecn = true; + sta->cparams.ce_threshold_selector = 0; + sta->cparams.ce_threshold_mask = 0; sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 45e532ad1215ba..137be9ec94af14 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -6,7 +6,7 @@ * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH * Copyright 2015 - 2016 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2021 Intel Corporation */ #include @@ -1684,7 +1684,7 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems = NULL; struct sta_info *sta; struct ieee80211_tdls_data *tf = (void *)skb->data; bool local_initiator; @@ -1718,16 +1718,20 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, goto call_drv; } - ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, - skb->len - baselen, false, &elems, - NULL, NULL); - if (elems.parse_error) { + elems = ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, + skb->len - baselen, false, NULL, NULL); + if (!elems) { + ret = -ENOMEM; + goto out; + } + + if (elems->parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); ret = -EINVAL; goto out; } - if (!elems.ch_sw_timing || !elems.lnk_id) { + if (!elems->ch_sw_timing || !elems->lnk_id) { tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); ret = 
-EINVAL; goto out; @@ -1735,15 +1739,15 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, /* validate the initiator is set correctly */ local_initiator = - !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); + !memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } - params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); - params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); + params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time); + params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, ¶ms.ch_sw_tm_ie); @@ -1763,6 +1767,7 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); + kfree(elems); return ret; } @@ -1771,7 +1776,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; enum nl80211_channel_type chan_type; @@ -1831,22 +1836,27 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, return -EINVAL; } - ieee802_11_parse_elems(tf->u.chan_switch_req.variable, - skb->len - baselen, false, &elems, NULL, NULL); - if (elems.parse_error) { + elems = ieee802_11_parse_elems(tf->u.chan_switch_req.variable, + skb->len - baselen, false, NULL, NULL); + if (!elems) + return -ENOMEM; + + if (elems->parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n"); - return -EINVAL; + ret = -EINVAL; + goto free; } - if (!elems.ch_sw_timing || !elems.lnk_id) { + if (!elems->ch_sw_timing || !elems->lnk_id) { tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n"); - return -EINVAL; + ret = -EINVAL; + goto free; } - if (!elems.sec_chan_offs) { + if (!elems->sec_chan_offs) { chan_type = NL80211_CHAN_HT20; } else { - switch (elems.sec_chan_offs->sec_chan_offs) { + switch (elems->sec_chan_offs->sec_chan_offs) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: chan_type = NL80211_CHAN_HT40PLUS; break; @@ -1865,7 +1875,8 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef, sdata->wdev.iftype)) { tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n"); - return -EINVAL; + ret = -EINVAL; + goto free; } mutex_lock(&local->sta_mtx); @@ -1881,7 +1892,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, /* validate the initiator is set correctly */ local_initiator = - !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); + !memcmp(elems->lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; @@ -1889,16 +1900,16 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, } /* peer should have known better */ - if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs && - elems.sec_chan_offs->sec_chan_offs) { + if (!sta->sta.ht_cap.ht_supported && elems->sec_chan_offs && + elems->sec_chan_offs->sec_chan_offs) { tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n"); ret = 
-ENOTSUPP; goto out; } params.chandef = &chandef; - params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); - params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); + params.switch_time = le16_to_cpu(elems->ch_sw_timing->switch_time); + params.switch_timeout = le16_to_cpu(elems->ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, @@ -1917,6 +1928,8 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); +free: + kfree(elems); return ret; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8921088a5df65f..a756a197c770fe 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -146,7 +146,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, rate = DIV_ROUND_UP(r->bitrate, 1 << shift); switch (sband->band) { - case NL80211_BAND_2GHZ: { + case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: { u32 flag; if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) flag = IEEE80211_RATE_MANDATORY_G; @@ -4991,6 +4992,115 @@ static int ieee80211_beacon_protect(struct sk_buff *skb, return 0; } +static void +ieee80211_beacon_get_finish(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs, + struct beacon_data *beacon, + struct sk_buff *skb, + struct ieee80211_chanctx_conf *chanctx_conf, + u16 csa_off_base) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_tx_info *info; + enum nl80211_band band; + struct ieee80211_tx_rate_control txrc; + + /* CSA offsets */ + if (offs && beacon) { + u16 i; + + for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) { + u16 csa_off = beacon->cntdwn_counter_offsets[i]; + + if (!csa_off) + continue; + + offs->cntdwn_counter_offs[i] = csa_off_base + csa_off; + } + } + + band = chanctx_conf->def.chan->band; + info = IEEE80211_SKB_CB(skb); + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + info->band = band; + + memset(&txrc, 0, sizeof(txrc)); + txrc.hw = hw; + txrc.sband = local->hw.wiphy->bands[band]; + txrc.bss_conf = &sdata->vif.bss_conf; + txrc.skb = skb; + txrc.reported_rate.idx = -1; + if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band]) + txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band]; + else + txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; + txrc.bss = true; + rate_control_get_rate(sdata, NULL, &txrc); + + info->control.vif = vif; + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_ASSIGN_SEQ | + IEEE80211_TX_CTL_FIRST_FRAGMENT; +} + +static struct sk_buff * +ieee80211_beacon_get_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_mutable_offsets *offs, + bool is_template, + struct beacon_data *beacon, + struct ieee80211_chanctx_conf *chanctx_conf) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_ap *ap = &sdata->u.ap; + struct sk_buff *skb = NULL; + u16 csa_off_base = 0; + + if (beacon->cntdwn_counter_offsets[0]) { + if (!is_template) + ieee80211_beacon_update_cntdwn(vif); + + ieee80211_set_beacon_cntdwn(sdata, beacon); + } + + /* headroom, head length, + * tail length and maximum TIM length + */ + skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + + beacon->tail_len + 256 + + local->hw.extra_beacon_tailroom); + if (!skb) + return NULL; + + skb_reserve(skb, 
local->tx_headroom); + skb_put_data(skb, beacon->head, beacon->head_len); + + ieee80211_beacon_add_tim(sdata, &ap->ps, skb, is_template); + + if (offs) { + offs->tim_offset = beacon->head_len; + offs->tim_length = skb->len - beacon->head_len; + offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0]; + + /* for AP the csa offsets are from tail */ + csa_off_base = skb->len; + } + + if (beacon->tail) + skb_put_data(skb, beacon->tail, beacon->tail_len); + + if (ieee80211_beacon_protect(skb, local, sdata) < 0) + return NULL; + + ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb, chanctx_conf, + csa_off_base); + return skb; +} + static struct sk_buff * __ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -5000,12 +5110,8 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_local *local = hw_to_local(hw); struct beacon_data *beacon = NULL; struct sk_buff *skb = NULL; - struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata = NULL; - enum nl80211_band band; - struct ieee80211_tx_rate_control txrc; struct ieee80211_chanctx_conf *chanctx_conf; - int csa_off_base = 0; rcu_read_lock(); @@ -5022,48 +5128,11 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_if_ap *ap = &sdata->u.ap; beacon = rcu_dereference(ap->beacon); - if (beacon) { - if (beacon->cntdwn_counter_offsets[0]) { - if (!is_template) - ieee80211_beacon_update_cntdwn(vif); - - ieee80211_set_beacon_cntdwn(sdata, beacon); - } - - /* - * headroom, head length, - * tail length and maximum TIM length - */ - skb = dev_alloc_skb(local->tx_headroom + - beacon->head_len + - beacon->tail_len + 256 + - local->hw.extra_beacon_tailroom); - if (!skb) - goto out; - - skb_reserve(skb, local->tx_headroom); - skb_put_data(skb, beacon->head, beacon->head_len); - - ieee80211_beacon_add_tim(sdata, &ap->ps, skb, - is_template); - - if (offs) { - offs->tim_offset = beacon->head_len; - offs->tim_length = skb->len - beacon->head_len; - offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0]; - - /* for AP the csa offsets are from tail */ - csa_off_base = skb->len; - } - - if (beacon->tail) - skb_put_data(skb, beacon->tail, - beacon->tail_len); - - if (ieee80211_beacon_protect(skb, local, sdata) < 0) - goto out; - } else + if (!beacon) goto out; + + skb = ieee80211_beacon_get_ap(hw, vif, offs, is_template, + beacon, chanctx_conf); } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_hdr *hdr; @@ -5089,6 +5158,9 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); + + ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb, + chanctx_conf, 0); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; @@ -5128,51 +5200,13 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, } skb_put_data(skb, beacon->tail, beacon->tail_len); + ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb, + chanctx_conf, 0); } else { WARN_ON(1); goto out; } - /* CSA offsets */ - if (offs && beacon) { - int i; - - for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) { - u16 csa_off = beacon->cntdwn_counter_offsets[i]; - - if (!csa_off) - continue; - - offs->cntdwn_counter_offs[i] = csa_off_base + csa_off; - } - } - - band = chanctx_conf->def.chan->band; - - info = IEEE80211_SKB_CB(skb); - - info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; - info->flags |= 
IEEE80211_TX_CTL_NO_ACK; - info->band = band; - - memset(&txrc, 0, sizeof(txrc)); - txrc.hw = hw; - txrc.sband = local->hw.wiphy->bands[band]; - txrc.bss_conf = &sdata->vif.bss_conf; - txrc.skb = skb; - txrc.reported_rate.idx = -1; - if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band]) - txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band]; - else - txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; - txrc.bss = true; - rate_control_get_rate(sdata, NULL, &txrc); - - info->control.vif = vif; - - info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | - IEEE80211_TX_CTL_ASSIGN_SEQ | - IEEE80211_TX_CTL_FIRST_FRAGMENT; out: rcu_read_unlock(); return skb; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 49cb96d251695a..39fa2a50385d89 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1112,10 +1112,6 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, } else elem_parse_failed = true; break; - case WLAN_EID_CHALLENGE: - elems->challenge = pos; - elems->challenge_len = elen; - break; case WLAN_EID_VENDOR_SPECIFIC: if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { @@ -1395,8 +1391,8 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, struct ieee802_11_elems *elems, - u8 *transmitter_bssid, - u8 *bss_bssid, + const u8 *transmitter_bssid, + const u8 *bss_bssid, u8 *nontransmitted_profile) { const struct element *elem, *sub; @@ -1461,16 +1457,20 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, return found ? profile_len : 0; } -u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, - struct ieee802_11_elems *elems, - u64 filter, u32 crc, u8 *transmitter_bssid, - u8 *bss_bssid) +struct ieee802_11_elems *ieee802_11_parse_elems_crc(const u8 *start, size_t len, + bool action, u64 filter, + u32 crc, + const u8 *transmitter_bssid, + const u8 *bss_bssid) { + struct ieee802_11_elems *elems; const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; - memset(elems, 0, sizeof(*elems)); + elems = kzalloc(sizeof(*elems), GFP_ATOMIC); + if (!elems) + return NULL; elems->ie_start = start; elems->total_len = len; @@ -1516,7 +1516,9 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, kfree(nontransmitted_profile); - return crc; + elems->crc = crc; + + return elems; } void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, @@ -3383,6 +3385,7 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, const struct ieee80211_sta_he_cap *he_cap; struct cfg80211_chan_def he_chandef = *chandef; const struct ieee80211_he_6ghz_oper *he_6ghz_oper; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; bool support_80_80, support_160; u8 he_phy_cap; u32 freq; @@ -3426,6 +3429,19 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, NL80211_BAND_6GHZ); he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq); + switch (u8_get_bits(he_6ghz_oper->control, + IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { + case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + bss_conf->power_type = IEEE80211_REG_LPI_AP; + break; + case IEEE80211_6GHZ_CTRL_REG_SP_AP: + bss_conf->power_type = IEEE80211_REG_SP_AP; + break; + default: + bss_conf->power_type = IEEE80211_REG_UNSET_AP; + break; + } + switch (u8_get_bits(he_6ghz_oper->control, IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH)) { case 
IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ: diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 323d3d2d986f8d..500ed1b8125037 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -129,15 +129,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev_addr_set(dev, addr->sa_data); sdata->wpan_dev.extended_addr = extended_addr; /* update lowpan interface mac address when * wpan mac has been changed */ if (sdata->wpan_dev.lowpan_dev) - memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr, - dev->addr_len); + dev_addr_set(sdata->wpan_dev.lowpan_dev, dev->dev_addr); return mac802154_wpan_update_llsec(dev); } @@ -615,6 +614,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name, unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr) { + u8 addr[IEEE802154_EXTENDED_ADDR_LEN]; struct net_device *ndev = NULL; struct ieee802154_sub_if_data *sdata = NULL; int ret; @@ -638,11 +638,12 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name, switch (type) { case NL802154_IFTYPE_NODE: ndev->type = ARPHRD_IEEE802154; - if (ieee802154_is_valid_extended_unicast_addr(extended_addr)) - ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr); - else - memcpy(ndev->dev_addr, ndev->perm_addr, - IEEE802154_EXTENDED_ADDR_LEN); + if (ieee802154_is_valid_extended_unicast_addr(extended_addr)) { + ieee802154_le64_to_be64(addr, &extended_addr); + dev_addr_set(ndev, addr); + } else { + dev_addr_set(ndev, ndev->perm_addr); + } break; case NL802154_IFTYPE_MONITOR: ndev->type = ARPHRD_IEEE802154_MONITOR; diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig index 2cdf3d0a28c950..3a5c0e70da776c 100644 --- a/net/mctp/Kconfig +++ b/net/mctp/Kconfig @@ -1,7 +1,7 @@ menuconfig MCTP depends on NET - tristate "MCTP core protocol support" + bool "MCTP core protocol support" help Management Component Transport Protocol (MCTP) is an in-system protocol for communicating between management controllers and @@ -11,3 +11,13 @@ menuconfig MCTP This option enables core MCTP support. For communicating with other devices, you'll want to enable a driver for a specific hardware channel. 
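As a rough illustration of what the core support described above provides, the sketch below shows a minimal userspace sender over an AF_MCTP datagram socket. It is not part of this patch: the sockaddr_mctp layout and the MCTP_NET_ANY / MCTP_TAG_OWNER constants are the ones used elsewhere in this series (af_mctp.c and the route tests), while the destination EID, message type and payload are arbitrary example values, and an installed <linux/mctp.h> uapi header plus a libc new enough to know AF_MCTP are assumed.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/mctp.h>

#ifndef AF_MCTP
#define AF_MCTP 45		/* fallback for older libc headers */
#endif

int main(void)
{
	struct sockaddr_mctp addr = { 0 };
	unsigned char buf[2] = { 0x00, 0x01 };	/* message body; the type byte is carried in smctp_type */
	ssize_t rc;
	int sd;

	sd = socket(AF_MCTP, SOCK_DGRAM, 0);
	if (sd < 0) {
		perror("socket(AF_MCTP)");
		return 1;
	}

	addr.smctp_family = AF_MCTP;
	addr.smctp_network = MCTP_NET_ANY;	/* use the default MCTP net */
	addr.smctp_addr.s_addr = 9;		/* example destination EID */
	addr.smctp_type = 1;			/* example message type */
	addr.smctp_tag = MCTP_TAG_OWNER;	/* ask the kernel to allocate an owned tag */

	rc = sendto(sd, buf, sizeof(buf), 0,
		    (struct sockaddr *)&addr, sizeof(addr));
	if (rc < 0)
		perror("sendto");

	close(sd);
	return rc < 0 ? 1 : 0;
}

With the extended-addressing option added later in this series (MCTP_OPT_ADDR_EXT at SOL_MCTP), a socket can instead pass a struct sockaddr_mctp_ext carrying an interface index and hardware address, bypassing the routing table; the plain sockaddr_mctp path shown here relies on an installed route for the destination EID.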
+ +config MCTP_TEST + bool "MCTP core tests" if !KUNIT_ALL_TESTS + depends on MCTP=y && KUNIT=y + default KUNIT_ALL_TESTS + +config MCTP_FLOWS + bool + depends on MCTP + select SKB_EXTENSIONS diff --git a/net/mctp/Makefile b/net/mctp/Makefile index 0171333384d786..6cd55233e68517 100644 --- a/net/mctp/Makefile +++ b/net/mctp/Makefile @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MCTP) += mctp.o mctp-objs := af_mctp.o device.o route.o neigh.o + +# tests +obj-$(CONFIG_MCTP_TEST) += test/utils.o diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index a9526ac29dffec..d344b02a1cde68 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -16,6 +16,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + /* socket implementation */ static int mctp_release(struct socket *sock) @@ -74,6 +77,7 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) const int hlen = MCTP_HEADER_MAXLEN + sizeof(struct mctp_hdr); int rc, addrlen = msg->msg_namelen; struct sock *sk = sock->sk; + struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); struct mctp_skb_cb *cb; struct mctp_route *rt; struct sk_buff *skb; @@ -97,11 +101,6 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (addr->smctp_network == MCTP_NET_ANY) addr->smctp_network = mctp_default_net(sock_net(sk)); - rt = mctp_route_lookup(sock_net(sk), addr->smctp_network, - addr->smctp_addr.s_addr); - if (!rt) - return -EHOSTUNREACH; - skb = sock_alloc_send_skb(sk, hlen + 1 + len, msg->msg_flags & MSG_DONTWAIT, &rc); if (!skb) @@ -113,19 +112,45 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) *(u8 *)skb_put(skb, 1) = addr->smctp_type; rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len); - if (rc < 0) { - kfree_skb(skb); - return rc; - } + if (rc < 0) + goto err_free; /* set up cb */ cb = __mctp_cb(skb); cb->net = addr->smctp_network; + /* direct addressing */ + if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) { + DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, + extaddr, msg->msg_name); + + if (extaddr->smctp_halen > sizeof(cb->haddr)) { + rc = -EINVAL; + goto err_free; + } + + cb->ifindex = extaddr->smctp_ifindex; + cb->halen = extaddr->smctp_halen; + memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen); + + rt = NULL; + } else { + rt = mctp_route_lookup(sock_net(sk), addr->smctp_network, + addr->smctp_addr.s_addr); + if (!rt) { + rc = -EHOSTUNREACH; + goto err_free; + } + } + rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr, addr->smctp_tag); return rc ? 
: len; + +err_free: + kfree_skb(skb); + return rc; } static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, @@ -133,6 +158,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, { DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name); struct sock *sk = sock->sk; + struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); struct sk_buff *skb; size_t msglen; u8 type; @@ -178,6 +204,16 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, addr->smctp_tag = hdr->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); msg->msg_namelen = sizeof(*addr); + + if (msk->addr_ext) { + DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae, + msg->msg_name); + msg->msg_namelen = sizeof(*ae); + ae->smctp_ifindex = cb->ifindex; + ae->smctp_halen = cb->halen; + memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr)); + memcpy(ae->smctp_haddr, cb->haddr, cb->halen); + } } rc = len; @@ -193,12 +229,45 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, static int mctp_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { - return -EINVAL; + struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk); + int val; + + if (level != SOL_MCTP) + return -EINVAL; + + if (optname == MCTP_OPT_ADDR_EXT) { + if (optlen != sizeof(int)) + return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(int))) + return -EFAULT; + msk->addr_ext = val; + return 0; + } + + return -ENOPROTOOPT; } static int mctp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { + struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk); + int len, val; + + if (level != SOL_MCTP) + return -EINVAL; + + if (get_user(len, optlen)) + return -EFAULT; + + if (optname == MCTP_OPT_ADDR_EXT) { + if (len != sizeof(int)) + return -EINVAL; + val = !!msk->addr_ext; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + return 0; + } + return -EINVAL; } @@ -223,16 +292,61 @@ static const struct proto_ops mctp_dgram_ops = { .sendpage = sock_no_sendpage, }; +static void mctp_sk_expire_keys(struct timer_list *timer) +{ + struct mctp_sock *msk = container_of(timer, struct mctp_sock, + key_expiry); + struct net *net = sock_net(&msk->sk); + unsigned long next_expiry, flags; + struct mctp_sk_key *key; + struct hlist_node *tmp; + bool next_expiry_valid = false; + + spin_lock_irqsave(&net->mctp.keys_lock, flags); + + hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { + spin_lock(&key->lock); + + if (!time_after_eq(key->expiry, jiffies)) { + trace_mctp_key_release(key, MCTP_TRACE_KEY_TIMEOUT); + key->valid = false; + hlist_del_rcu(&key->hlist); + hlist_del_rcu(&key->sklist); + spin_unlock(&key->lock); + mctp_key_unref(key); + continue; + } + + if (next_expiry_valid) { + if (time_before(key->expiry, next_expiry)) + next_expiry = key->expiry; + } else { + next_expiry = key->expiry; + next_expiry_valid = true; + } + spin_unlock(&key->lock); + } + + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); + + if (next_expiry_valid) + mod_timer(timer, next_expiry); +} + static int mctp_sk_init(struct sock *sk) { struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); INIT_HLIST_HEAD(&msk->keys); + timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0); return 0; } static void mctp_sk_close(struct sock *sk, long timeout) { + struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); + + del_timer_sync(&msk->key_expiry); 
sk_common_release(sk); } @@ -263,21 +377,23 @@ static void mctp_sk_unhash(struct sock *sk) /* remove tag allocations */ spin_lock_irqsave(&net->mctp.keys_lock, flags); hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { - hlist_del_rcu(&key->sklist); - hlist_del_rcu(&key->hlist); + hlist_del(&key->sklist); + hlist_del(&key->hlist); - spin_lock(&key->reasm_lock); + trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED); + + spin_lock(&key->lock); if (key->reasm_head) kfree_skb(key->reasm_head); key->reasm_head = NULL; key->reasm_dead = true; - spin_unlock(&key->reasm_lock); + key->valid = false; + spin_unlock(&key->lock); - kfree_rcu(key, rcu); + /* key is no longer on the lookup lists, unref */ + mctp_key_unref(key); } spin_unlock_irqrestore(&net->mctp.keys_lock, flags); - - synchronize_rcu(); } static struct proto mctp_proto = { @@ -385,7 +501,7 @@ static __exit void mctp_exit(void) sock_unregister(PF_MCTP); } -module_init(mctp_init); +subsys_initcall(mctp_init); module_exit(mctp_exit); MODULE_DESCRIPTION("MCTP core"); diff --git a/net/mctp/device.c b/net/mctp/device.c index b9f38e765f6193..8799ee77e7b7fa 100644 --- a/net/mctp/device.c +++ b/net/mctp/device.c @@ -35,14 +35,6 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->mctp_ptr); } -static void mctp_dev_destroy(struct mctp_dev *mdev) -{ - struct net_device *dev = mdev->dev; - - dev_put(dev); - kfree_rcu(mdev, rcu); -} - static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb, struct mctp_dev *mdev, mctp_eid_t eid) { @@ -255,6 +247,37 @@ static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; } +void mctp_dev_hold(struct mctp_dev *mdev) +{ + refcount_inc(&mdev->refs); +} + +void mctp_dev_put(struct mctp_dev *mdev) +{ + if (refcount_dec_and_test(&mdev->refs)) { + dev_put(mdev->dev); + kfree_rcu(mdev, rcu); + } +} + +void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key) + __must_hold(&key->lock) +{ + if (!dev) + return; + if (dev->ops && dev->ops->release_flow) + dev->ops->release_flow(dev, key); + key->dev = NULL; + mctp_dev_put(dev); +} + +void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key) + __must_hold(&key->lock) +{ + mctp_dev_hold(dev); + key->dev = dev; +} + static struct mctp_dev *mctp_add_dev(struct net_device *dev) { struct mctp_dev *mdev; @@ -270,7 +293,9 @@ static struct mctp_dev *mctp_add_dev(struct net_device *dev) mdev->net = mctp_default_net(dev_net(dev)); /* associate to net_device */ + refcount_set(&mdev->refs, 1); rcu_assign_pointer(dev->mctp_ptr, mdev); + dev_hold(dev); mdev->dev = dev; @@ -330,12 +355,26 @@ static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr, return 0; } +/* Matches netdev types that should have MCTP handling */ +static bool mctp_known(struct net_device *dev) +{ + /* only register specific types (inc. 
NONE for TUN devices) */ + return dev->type == ARPHRD_MCTP || + dev->type == ARPHRD_LOOPBACK || + dev->type == ARPHRD_NONE; +} + static void mctp_unregister(struct net_device *dev) { struct mctp_dev *mdev; mdev = mctp_dev_get_rtnl(dev); - + if (mctp_known(dev) != (bool)mdev) { + // Sanity check, should match what was set in mctp_register + netdev_warn(dev, "%s: mdev pointer %d but type (%d) match is %d", + __func__, (bool)mdev, mctp_known(dev), dev->type); + return; + } if (!mdev) return; @@ -345,7 +384,7 @@ static void mctp_unregister(struct net_device *dev) mctp_neigh_remove_dev(mdev); kfree(mdev->addrs); - mctp_dev_destroy(mdev); + mctp_dev_put(mdev); } static int mctp_register(struct net_device *dev) @@ -353,11 +392,17 @@ static int mctp_register(struct net_device *dev) struct mctp_dev *mdev; /* Already registered? */ - if (rtnl_dereference(dev->mctp_ptr)) + mdev = rtnl_dereference(dev->mctp_ptr); + + if (mdev) { + if (!mctp_known(dev)) + netdev_warn(dev, "%s: mctp_dev set for unknown type %d", + __func__, dev->type); return 0; + } - /* only register specific types; MCTP-specific and loopback for now */ - if (dev->type != ARPHRD_MCTP && dev->type != ARPHRD_LOOPBACK) + /* only register specific types */ + if (!mctp_known(dev)) return 0; mdev = mctp_add_dev(dev); @@ -387,6 +432,39 @@ static int mctp_dev_notify(struct notifier_block *this, unsigned long event, return NOTIFY_OK; } +static int mctp_register_netdevice(struct net_device *dev, + const struct mctp_netdev_ops *ops) +{ + struct mctp_dev *mdev; + + mdev = mctp_add_dev(dev); + if (IS_ERR(mdev)) + return PTR_ERR(mdev); + + mdev->ops = ops; + + return register_netdevice(dev); +} + +int mctp_register_netdev(struct net_device *dev, + const struct mctp_netdev_ops *ops) +{ + int rc; + + rtnl_lock(); + rc = mctp_register_netdevice(dev, ops); + rtnl_unlock(); + + return rc; +} +EXPORT_SYMBOL_GPL(mctp_register_netdev); + +void mctp_unregister_netdev(struct net_device *dev) +{ + unregister_netdev(dev); +} +EXPORT_SYMBOL_GPL(mctp_unregister_netdev); + static struct rtnl_af_ops mctp_af_ops = { .family = AF_MCTP, .fill_link_af = mctp_fill_link_af, diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c index 90ed2f02d1fb02..5cc04212149383 100644 --- a/net/mctp/neigh.c +++ b/net/mctp/neigh.c @@ -47,7 +47,7 @@ static int mctp_neigh_add(struct mctp_dev *mdev, mctp_eid_t eid, } INIT_LIST_HEAD(&neigh->list); neigh->dev = mdev; - dev_hold(neigh->dev->dev); + mctp_dev_hold(neigh->dev); neigh->eid = eid; neigh->source = source; memcpy(neigh->ha, lladdr, lladdr_len); @@ -63,7 +63,7 @@ static void __mctp_neigh_free(struct rcu_head *rcu) { struct mctp_neigh *neigh = container_of(rcu, struct mctp_neigh, rcu); - dev_put(neigh->dev->dev); + mctp_dev_put(neigh->dev); kfree(neigh); } diff --git a/net/mctp/route.c b/net/mctp/route.c index 5ca186d53cb0f1..46c44823edb7dd 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -23,7 +24,12 @@ #include #include +#include + static const unsigned int mctp_message_maxlen = 64 * 1024; +static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ; + +static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev); /* route output callbacks */ static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb) @@ -83,25 +89,43 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local, return true; } +/* returns a key (with key->lock held, and refcounted), or NULL if no such + * key exists. 
+ */ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb, - mctp_eid_t peer) + mctp_eid_t peer, + unsigned long *irqflags) + __acquires(&key->lock) { struct mctp_sk_key *key, *ret; + unsigned long flags; struct mctp_hdr *mh; u8 tag; - WARN_ON(!rcu_read_lock_held()); - mh = mctp_hdr(skb); tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); ret = NULL; + spin_lock_irqsave(&net->mctp.keys_lock, flags); - hlist_for_each_entry_rcu(key, &net->mctp.keys, hlist) { - if (mctp_key_match(key, mh->dest, peer, tag)) { + hlist_for_each_entry(key, &net->mctp.keys, hlist) { + if (!mctp_key_match(key, mh->dest, peer, tag)) + continue; + + spin_lock(&key->lock); + if (key->valid) { + refcount_inc(&key->refs); ret = key; break; } + spin_unlock(&key->lock); + } + + if (ret) { + spin_unlock(&net->mctp.keys_lock); + *irqflags = flags; + } else { + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); } return ret; @@ -121,11 +145,30 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk, key->local_addr = local; key->tag = tag; key->sk = &msk->sk; - spin_lock_init(&key->reasm_lock); + key->valid = true; + spin_lock_init(&key->lock); + refcount_set(&key->refs, 1); return key; } +void mctp_key_unref(struct mctp_sk_key *key) +{ + unsigned long flags; + + if (!refcount_dec_and_test(&key->refs)) + return; + + /* even though no refs exist here, the lock allows us to stay + * consistent with the locking requirement of mctp_dev_release_key + */ + spin_lock_irqsave(&key->lock, flags); + mctp_dev_release_key(key->dev, key); + spin_unlock_irqrestore(&key->lock, flags); + + kfree(key); +} + static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) { struct net *net = sock_net(&msk->sk); @@ -138,12 +181,20 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) hlist_for_each_entry(tmp, &net->mctp.keys, hlist) { if (mctp_key_match(tmp, key->local_addr, key->peer_addr, key->tag)) { - rc = -EEXIST; - break; + spin_lock(&tmp->lock); + if (tmp->valid) + rc = -EEXIST; + spin_unlock(&tmp->lock); + if (rc) + break; } } if (!rc) { + refcount_inc(&key->refs); + key->expiry = jiffies + mctp_key_lifetime; + timer_reduce(&msk->key_expiry, key->expiry); + hlist_add_head(&key->hlist, &net->mctp.keys); hlist_add_head(&key->sklist, &msk->keys); } @@ -153,30 +204,72 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) return rc; } -/* Must be called with key->reasm_lock, which it will release. Will schedule - * the key for an RCU free. +/* We're done with the key; unset valid and remove from lists. There may still + * be outstanding refs on the key though... 
*/ static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net, unsigned long flags) - __releases(&key->reasm_lock) + __releases(&key->lock) { struct sk_buff *skb; skb = key->reasm_head; key->reasm_head = NULL; key->reasm_dead = true; - spin_unlock_irqrestore(&key->reasm_lock, flags); + key->valid = false; + mctp_dev_release_key(key->dev, key); + spin_unlock_irqrestore(&key->lock, flags); spin_lock_irqsave(&net->mctp.keys_lock, flags); - hlist_del_rcu(&key->hlist); - hlist_del_rcu(&key->sklist); + hlist_del(&key->hlist); + hlist_del(&key->sklist); spin_unlock_irqrestore(&net->mctp.keys_lock, flags); - kfree_rcu(key, rcu); + + /* one unref for the lists */ + mctp_key_unref(key); + + /* and one for the local reference */ + mctp_key_unref(key); if (skb) kfree_skb(skb); + } +#ifdef CONFIG_MCTP_FLOWS +static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) +{ + struct mctp_flow *flow; + + flow = skb_ext_add(skb, SKB_EXT_MCTP); + if (!flow) + return; + + refcount_inc(&key->refs); + flow->key = key; +} + +static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) +{ + struct mctp_sk_key *key; + struct mctp_flow *flow; + + flow = skb_ext_find(skb, SKB_EXT_MCTP); + if (!flow) + return; + + key = flow->key; + + if (WARN_ON(key->dev && key->dev != dev)) + return; + + mctp_dev_set_key(dev, key); +} +#else +static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {} +static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {} +#endif + static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb) { struct mctp_hdr *hdr = mctp_hdr(skb); @@ -248,8 +341,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) rcu_read_lock(); - /* lookup socket / reasm context, exactly matching (src,dest,tag) */ - key = mctp_lookup_key(net, skb, mh->src); + /* lookup socket / reasm context, exactly matching (src,dest,tag). + * we hold a ref on the key, and key->lock held. + */ + key = mctp_lookup_key(net, skb, mh->src, &f); if (flags & MCTP_HDR_FLAG_SOM) { if (key) { @@ -260,10 +355,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * key for reassembly - we'll create a more specific * one for future packets if required (ie, !EOM). 
*/ - key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY); + key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f); if (key) { msk = container_of(key->sk, struct mctp_sock, sk); + spin_unlock_irqrestore(&key->lock, f); + mctp_key_unref(key); key = NULL; } } @@ -282,11 +379,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) if (flags & MCTP_HDR_FLAG_EOM) { sock_queue_rcv_skb(&msk->sk, skb); if (key) { - spin_lock_irqsave(&key->reasm_lock, f); /* we've hit a pending reassembly; not much we * can do but drop it */ + trace_mctp_key_release(key, + MCTP_TRACE_KEY_REPLIED); __mctp_key_unlock_drop(key, net, f); + key = NULL; } rc = 0; goto out_unlock; @@ -303,7 +402,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) goto out_unlock; } - /* we can queue without the reasm lock here, as the + /* we can queue without the key lock here, as the * key isn't observable yet */ mctp_frag_queue(key, skb); @@ -318,17 +417,22 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) if (rc) kfree(key); - } else { - /* existing key: start reassembly */ - spin_lock_irqsave(&key->reasm_lock, f); + trace_mctp_key_acquire(key); + + /* we don't need to release key->lock on exit */ + mctp_key_unref(key); + key = NULL; + } else { if (key->reasm_head || key->reasm_dead) { /* duplicate start? drop everything */ + trace_mctp_key_release(key, + MCTP_TRACE_KEY_INVALIDATED); __mctp_key_unlock_drop(key, net, f); rc = -EEXIST; + key = NULL; } else { rc = mctp_frag_queue(key, skb); - spin_unlock_irqrestore(&key->reasm_lock, f); } } @@ -337,8 +441,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * using the message-specific key */ - spin_lock_irqsave(&key->reasm_lock, f); - /* we need to be continuing an existing reassembly... 
*/ if (!key->reasm_head) rc = -EINVAL; @@ -351,9 +453,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) if (!rc && flags & MCTP_HDR_FLAG_EOM) { sock_queue_rcv_skb(key->sk, key->reasm_head); key->reasm_head = NULL; + trace_mctp_key_release(key, MCTP_TRACE_KEY_REPLIED); __mctp_key_unlock_drop(key, net, f); - } else { - spin_unlock_irqrestore(&key->reasm_lock, f); + key = NULL; } } else { @@ -363,6 +465,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) out_unlock: rcu_read_unlock(); + if (key) { + spin_unlock_irqrestore(&key->lock, f); + mctp_key_unref(key); + } out: if (rc) kfree_skb(skb); @@ -376,6 +482,7 @@ static unsigned int mctp_route_mtu(struct mctp_route *rt) static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb) { + struct mctp_skb_cb *cb = mctp_cb(skb); struct mctp_hdr *hdr = mctp_hdr(skb); char daddr_buf[MAX_ADDR_LEN]; char *daddr = NULL; @@ -390,9 +497,14 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb) return -EMSGSIZE; } - /* If lookup fails let the device handle daddr==NULL */ - if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0) - daddr = daddr_buf; + if (cb->ifindex) { + /* direct route; use the hwaddr we stashed in sendmsg */ + daddr = cb->haddr; + } else { + /* If lookup fails let the device handle daddr==NULL */ + if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0) + daddr = daddr_buf; + } rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol), daddr, skb->dev->dev_addr, skb->len); @@ -401,6 +513,8 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb) return -EHOSTUNREACH; } + mctp_flow_prepare_output(skb, route->dev); + rc = dev_queue_xmit(skb); if (rc) rc = net_xmit_errno(rc); @@ -412,7 +526,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb) static void mctp_route_release(struct mctp_route *rt) { if (refcount_dec_and_test(&rt->refs)) { - dev_put(rt->dev->dev); + mctp_dev_put(rt->dev); kfree_rcu(rt, rcu); } } @@ -454,30 +568,38 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key, lockdep_assert_held(&mns->keys_lock); + key->expiry = jiffies + mctp_key_lifetime; + timer_reduce(&msk->key_expiry, key->expiry); + /* we hold the net->key_lock here, allowing updates to both * then net and sk */ hlist_add_head_rcu(&key->hlist, &mns->keys); hlist_add_head_rcu(&key->sklist, &msk->keys); + refcount_inc(&key->refs); } /* Allocate a locally-owned tag value for (saddr, daddr), and reserve * it for the socket msk */ -static int mctp_alloc_local_tag(struct mctp_sock *msk, - mctp_eid_t saddr, mctp_eid_t daddr, u8 *tagp) +static struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, + mctp_eid_t saddr, + mctp_eid_t daddr, u8 *tagp) { struct net *net = sock_net(&msk->sk); struct netns_mctp *mns = &net->mctp; struct mctp_sk_key *key, *tmp; unsigned long flags; - int rc = -EAGAIN; u8 tagbits; + /* for NULL destination EIDs, we may get a response from any peer */ + if (daddr == MCTP_ADDR_NULL) + daddr = MCTP_ADDR_ANY; + /* be optimistic, alloc now */ key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL); if (!key) - return -ENOMEM; + return ERR_PTR(-ENOMEM); /* 8 possible tag values */ tagbits = 0xff; @@ -488,14 +610,26 @@ static int mctp_alloc_local_tag(struct mctp_sock *msk, * tags. 
If we find a conflict, clear that bit from tagbits */ hlist_for_each_entry(tmp, &mns->keys, hlist) { + /* We can check the lookup fields (*_addr, tag) without the + * lock held, they don't change over the lifetime of the key. + */ + /* if we don't own the tag, it can't conflict */ if (tmp->tag & MCTP_HDR_FLAG_TO) continue; - if ((tmp->peer_addr == daddr || - tmp->peer_addr == MCTP_ADDR_ANY) && - tmp->local_addr == saddr) + if (!((tmp->peer_addr == daddr || + tmp->peer_addr == MCTP_ADDR_ANY) && + tmp->local_addr == saddr)) + continue; + + spin_lock(&tmp->lock); + /* key must still be valid. If we find a match, clear the + * potential tag value + */ + if (tmp->valid) tagbits &= ~(1 << tmp->tag); + spin_unlock(&tmp->lock); if (!tagbits) break; @@ -504,16 +638,19 @@ static int mctp_alloc_local_tag(struct mctp_sock *msk, if (tagbits) { key->tag = __ffs(tagbits); mctp_reserve_tag(net, key, msk); + trace_mctp_key_acquire(key); + *tagp = key->tag; - rc = 0; } spin_unlock_irqrestore(&mns->keys_lock, flags); - if (!tagbits) + if (!tagbits) { kfree(key); + return ERR_PTR(-EBUSY); + } - return rc; + return key; } /* routing lookups */ @@ -552,14 +689,18 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet, return rt; } -/* sends a skb to rt and releases the route. */ -int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb) +static struct mctp_route *mctp_route_lookup_null(struct net *net, + struct net_device *dev) { - int rc; + struct mctp_route *rt; - rc = rt->output(rt, skb); - mctp_route_release(rt); - return rc; + list_for_each_entry_rcu(rt, &net->mctp.routes, list) { + if (rt->dev->dev == dev && rt->type == RTN_LOCAL && + refcount_inc_not_zero(&rt->refs)) + return rt; + } + + return NULL; } static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb, @@ -628,7 +769,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb, /* copy message payload */ skb_copy_bits(skb, pos, skb_transport_header(skb2), size); - /* do route, but don't drop the rt reference */ + /* do route */ rc = rt->output(rt, skb2); if (rc) break; @@ -637,7 +778,6 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb, pos += size; } - mctp_route_release(rt); consume_skb(skb); return rc; } @@ -647,15 +787,52 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, { struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); struct mctp_skb_cb *cb = mctp_cb(skb); + struct mctp_route tmp_rt; + struct mctp_sk_key *key; + struct net_device *dev; struct mctp_hdr *hdr; unsigned long flags; unsigned int mtu; mctp_eid_t saddr; + bool ext_rt; int rc; u8 tag; - if (WARN_ON(!rt->dev)) + rc = -ENODEV; + + if (rt) { + ext_rt = false; + dev = NULL; + + if (WARN_ON(!rt->dev)) + goto out_release; + + } else if (cb->ifindex) { + ext_rt = true; + rt = &tmp_rt; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex); + if (!dev) { + rcu_read_unlock(); + return rc; + } + + rt->dev = __mctp_dev_get(dev); + rcu_read_unlock(); + + if (!rt->dev) + goto out_release; + + /* establish temporary route - we set up enough to keep + * mctp_route_output happy + */ + rt->output = mctp_route_output; + rt->mtu = 0; + + } else { return -EINVAL; + } spin_lock_irqsave(&rt->dev->addrs_lock, flags); if (rt->dev->num_addrs == 0) { @@ -668,18 +845,23 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, spin_unlock_irqrestore(&rt->dev->addrs_lock, flags); if (rc) - return rc; + goto out_release; if (req_tag & MCTP_HDR_FLAG_TO) { - rc 
= mctp_alloc_local_tag(msk, saddr, daddr, &tag); - if (rc) - return rc; + key = mctp_alloc_local_tag(msk, saddr, daddr, &tag); + if (IS_ERR(key)) { + rc = PTR_ERR(key); + goto out_release; + } + mctp_skb_set_flow(skb, key); + /* done with the key in this scope */ + mctp_key_unref(key); tag |= MCTP_HDR_FLAG_TO; } else { + key = NULL; tag = req_tag; } - skb->protocol = htons(ETH_P_MCTP); skb->priority = 0; skb_reset_transport_header(skb); @@ -699,12 +881,22 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, mtu = mctp_route_mtu(rt); if (skb->len + sizeof(struct mctp_hdr) <= mtu) { - hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM | - tag; - return mctp_do_route(rt, skb); + hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | + MCTP_HDR_FLAG_EOM | tag; + rc = rt->output(rt, skb); } else { - return mctp_do_fragment_route(rt, skb, mtu, tag); + rc = mctp_do_fragment_route(rt, skb, mtu, tag); } + +out_release: + if (!ext_rt) + mctp_route_release(rt); + + if (dev) + dev_put(dev); + + return rc; + } /* route management */ @@ -741,7 +933,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, rt->max = daddr_start + daddr_extent; rt->mtu = mtu; rt->dev = mdev; - dev_hold(rt->dev->dev); + mctp_dev_hold(rt->dev); rt->type = type; rt->output = rtfn; @@ -821,13 +1013,18 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev, struct net_device *orig_dev) { struct net *net = dev_net(dev); + struct mctp_dev *mdev; struct mctp_skb_cb *cb; struct mctp_route *rt; struct mctp_hdr *mh; - /* basic non-data sanity checks */ - if (dev->type != ARPHRD_MCTP) + rcu_read_lock(); + mdev = __mctp_dev_get(dev); + rcu_read_unlock(); + if (!mdev) { + /* basic non-data sanity checks */ goto err_drop; + } if (!pskb_may_pull(skb, sizeof(struct mctp_hdr))) goto err_drop; @@ -840,16 +1037,27 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev, if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX) goto err_drop; - cb = __mctp_cb(skb); - rcu_read_lock(); - cb->net = READ_ONCE(__mctp_dev_get(dev)->net); - rcu_read_unlock(); + /* MCTP drivers must populate halen/haddr */ + if (dev->type == ARPHRD_MCTP) { + cb = mctp_cb(skb); + } else { + cb = __mctp_cb(skb); + cb->halen = 0; + } + cb->net = READ_ONCE(mdev->net); + cb->ifindex = dev->ifindex; rt = mctp_route_lookup(net, cb->net, mh->dest); + + /* NULL EID, but addressed to our physical address */ + if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST) + rt = mctp_route_lookup_null(net, dev); + if (!rt) goto err_drop; - mctp_do_route(rt, skb); + rt->output(rt, skb); + mctp_route_release(rt); return NET_RX_SUCCESS; @@ -926,10 +1134,15 @@ static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; } +static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = { + [RTAX_MTU] = { .type = NLA_U32 }, +}; + static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RTA_MAX + 1]; + struct nlattr *tbx[RTAX_MAX + 1]; mctp_eid_t daddr_start; struct mctp_dev *mdev; struct rtmsg *rtm; @@ -946,8 +1159,15 @@ static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; } - /* TODO: parse mtu from nlparse */ mtu = 0; + if (tb[RTA_METRICS]) { + rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS], + rta_metrics_policy, NULL); + if (rc < 0) + return rc; + if (tbx[RTAX_MTU]) + mtu = nla_get_u32(tbx[RTAX_MTU]); + } if (rtm->rtm_type != RTN_UNICAST) return -EINVAL; @@ -1116,3 +1336,7 @@ void 
__exit mctp_routes_exit(void) rtnl_unregister(PF_MCTP, RTM_GETROUTE); dev_remove_pack(&mctp_packet_type); } + +#if IS_ENABLED(CONFIG_MCTP_TEST) +#include "test/route-test.c" +#endif diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c new file mode 100644 index 00000000000000..36fac3daf86a4f --- /dev/null +++ b/net/mctp/test/route-test.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include "utils.h" + +struct mctp_test_route { + struct mctp_route rt; + struct sk_buff_head pkts; +}; + +static int mctp_test_route_output(struct mctp_route *rt, struct sk_buff *skb) +{ + struct mctp_test_route *test_rt = container_of(rt, struct mctp_test_route, rt); + + skb_queue_tail(&test_rt->pkts, skb); + + return 0; +} + +/* local version of mctp_route_alloc() */ +static struct mctp_test_route *mctp_route_test_alloc(void) +{ + struct mctp_test_route *rt; + + rt = kzalloc(sizeof(*rt), GFP_KERNEL); + if (!rt) + return NULL; + + INIT_LIST_HEAD(&rt->rt.list); + refcount_set(&rt->rt.refs, 1); + rt->rt.output = mctp_test_route_output; + + skb_queue_head_init(&rt->pkts); + + return rt; +} + +static struct mctp_test_route *mctp_test_create_route(struct net *net, + struct mctp_dev *dev, + mctp_eid_t eid, + unsigned int mtu) +{ + struct mctp_test_route *rt; + + rt = mctp_route_test_alloc(); + if (!rt) + return NULL; + + rt->rt.min = eid; + rt->rt.max = eid; + rt->rt.mtu = mtu; + rt->rt.type = RTN_UNSPEC; + if (dev) + mctp_dev_hold(dev); + rt->rt.dev = dev; + + list_add_rcu(&rt->rt.list, &net->mctp.routes); + + return rt; +} + +static void mctp_test_route_destroy(struct kunit *test, + struct mctp_test_route *rt) +{ + unsigned int refs; + + rtnl_lock(); + list_del_rcu(&rt->rt.list); + rtnl_unlock(); + + skb_queue_purge(&rt->pkts); + if (rt->rt.dev) + mctp_dev_put(rt->rt.dev); + + refs = refcount_read(&rt->rt.refs); + KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance"); + + kfree_rcu(&rt->rt, rcu); +} + +static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr, + unsigned int data_len) +{ + size_t hdr_len = sizeof(*hdr); + struct sk_buff *skb; + unsigned int i; + u8 *buf; + + skb = alloc_skb(hdr_len + data_len, GFP_KERNEL); + if (!skb) + return NULL; + + memcpy(skb_put(skb, hdr_len), hdr, hdr_len); + + buf = skb_put(skb, data_len); + for (i = 0; i < data_len; i++) + buf[i] = i & 0xff; + + return skb; +} + +static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr, + const void *data, + size_t data_len) +{ + size_t hdr_len = sizeof(*hdr); + struct sk_buff *skb; + + skb = alloc_skb(hdr_len + data_len, GFP_KERNEL); + if (!skb) + return NULL; + + memcpy(skb_put(skb, hdr_len), hdr, hdr_len); + memcpy(skb_put(skb, data_len), data, data_len); + + return skb; +} + +#define mctp_test_create_skb_data(h, d) \ + __mctp_test_create_skb_data(h, d, sizeof(*d)) + +struct mctp_frag_test { + unsigned int mtu; + unsigned int msgsize; + unsigned int n_frags; +}; + +static void mctp_test_fragment(struct kunit *test) +{ + const struct mctp_frag_test *params; + int rc, i, n, mtu, msgsize; + struct mctp_test_route *rt; + struct sk_buff *skb; + struct mctp_hdr hdr; + u8 seq; + + params = test->param_value; + mtu = params->mtu; + msgsize = params->msgsize; + + hdr.ver = 1; + hdr.src = 8; + hdr.dest = 10; + hdr.flags_seq_tag = MCTP_HDR_FLAG_TO; + + skb = mctp_test_create_skb(&hdr, msgsize); + KUNIT_ASSERT_TRUE(test, skb); + + rt = mctp_test_create_route(&init_net, NULL, 10, mtu); + KUNIT_ASSERT_TRUE(test, rt); + + /* The refcount would usually be incremented as part 
of a route lookup, + * but we're setting the route directly here. + */ + refcount_inc(&rt->rt.refs); + + rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER); + KUNIT_EXPECT_FALSE(test, rc); + + n = rt->pkts.qlen; + + KUNIT_EXPECT_EQ(test, n, params->n_frags); + + for (i = 0;; i++) { + struct mctp_hdr *hdr2; + struct sk_buff *skb2; + u8 tag_mask, seq2; + bool first, last; + + first = i == 0; + last = i == (n - 1); + + skb2 = skb_dequeue(&rt->pkts); + + if (!skb2) + break; + + hdr2 = mctp_hdr(skb2); + + tag_mask = MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO; + + KUNIT_EXPECT_EQ(test, hdr2->ver, hdr.ver); + KUNIT_EXPECT_EQ(test, hdr2->src, hdr.src); + KUNIT_EXPECT_EQ(test, hdr2->dest, hdr.dest); + KUNIT_EXPECT_EQ(test, hdr2->flags_seq_tag & tag_mask, + hdr.flags_seq_tag & tag_mask); + + KUNIT_EXPECT_EQ(test, + !!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_SOM), first); + KUNIT_EXPECT_EQ(test, + !!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_EOM), last); + + seq2 = (hdr2->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & + MCTP_HDR_SEQ_MASK; + + if (first) { + seq = seq2; + } else { + seq++; + KUNIT_EXPECT_EQ(test, seq2, seq & MCTP_HDR_SEQ_MASK); + } + + if (!last) + KUNIT_EXPECT_EQ(test, skb2->len, mtu); + else + KUNIT_EXPECT_LE(test, skb2->len, mtu); + + kfree_skb(skb2); + } + + mctp_test_route_destroy(test, rt); +} + +static const struct mctp_frag_test mctp_frag_tests[] = { + {.mtu = 68, .msgsize = 63, .n_frags = 1}, + {.mtu = 68, .msgsize = 64, .n_frags = 1}, + {.mtu = 68, .msgsize = 65, .n_frags = 2}, + {.mtu = 68, .msgsize = 66, .n_frags = 2}, + {.mtu = 68, .msgsize = 127, .n_frags = 2}, + {.mtu = 68, .msgsize = 128, .n_frags = 2}, + {.mtu = 68, .msgsize = 129, .n_frags = 3}, + {.mtu = 68, .msgsize = 130, .n_frags = 3}, +}; + +static void mctp_frag_test_to_desc(const struct mctp_frag_test *t, char *desc) +{ + sprintf(desc, "mtu %d len %d -> %d frags", + t->msgsize, t->mtu, t->n_frags); +} + +KUNIT_ARRAY_PARAM(mctp_frag, mctp_frag_tests, mctp_frag_test_to_desc); + +struct mctp_rx_input_test { + struct mctp_hdr hdr; + bool input; +}; + +static void mctp_test_rx_input(struct kunit *test) +{ + const struct mctp_rx_input_test *params; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct sk_buff *skb; + + params = test->param_value; + + dev = mctp_test_create_dev(); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt); + + skb = mctp_test_create_skb(¶ms->hdr, 1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); + + __mctp_cb(skb); + + mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL); + + KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input); + + mctp_test_route_destroy(test, rt); + mctp_test_destroy_dev(dev); +} + +#define RX_HDR(_ver, _src, _dest, _fst) \ + { .ver = _ver, .src = _src, .dest = _dest, .flags_seq_tag = _fst } + +/* we have a route for EID 8 only */ +static const struct mctp_rx_input_test mctp_rx_input_tests[] = { + { .hdr = RX_HDR(1, 10, 8, 0), .input = true }, + { .hdr = RX_HDR(1, 10, 9, 0), .input = false }, /* no input route */ + { .hdr = RX_HDR(2, 10, 8, 0), .input = false }, /* invalid version */ +}; + +static void mctp_rx_input_test_to_desc(const struct mctp_rx_input_test *t, + char *desc) +{ + sprintf(desc, "{%x,%x,%x,%x}", t->hdr.ver, t->hdr.src, t->hdr.dest, + t->hdr.flags_seq_tag); +} + +KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests, + mctp_rx_input_test_to_desc); + +/* set up a local dev, route on EID 8, and a socket listening on type 0 */ +static void 
__mctp_route_test_init(struct kunit *test, + struct mctp_test_dev **devp, + struct mctp_test_route **rtp, + struct socket **sockp) +{ + struct sockaddr_mctp addr; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct socket *sock; + int rc; + + dev = mctp_test_create_dev(); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt); + + rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock); + KUNIT_ASSERT_EQ(test, rc, 0); + + addr.smctp_family = AF_MCTP; + addr.smctp_network = MCTP_NET_ANY; + addr.smctp_addr.s_addr = 8; + addr.smctp_type = 0; + rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr)); + KUNIT_ASSERT_EQ(test, rc, 0); + + *rtp = rt; + *devp = dev; + *sockp = sock; +} + +static void __mctp_route_test_fini(struct kunit *test, + struct mctp_test_dev *dev, + struct mctp_test_route *rt, + struct socket *sock) +{ + sock_release(sock); + mctp_test_route_destroy(test, rt); + mctp_test_destroy_dev(dev); +} + +struct mctp_route_input_sk_test { + struct mctp_hdr hdr; + u8 type; + bool deliver; +}; + +static void mctp_test_route_input_sk(struct kunit *test) +{ + const struct mctp_route_input_sk_test *params; + struct sk_buff *skb, *skb2; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct socket *sock; + int rc; + + params = test->param_value; + + __mctp_route_test_init(test, &dev, &rt, &sock); + + skb = mctp_test_create_skb_data(¶ms->hdr, ¶ms->type); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); + + skb->dev = dev->ndev; + __mctp_cb(skb); + + rc = mctp_route_input(&rt->rt, skb); + + if (params->deliver) { + KUNIT_EXPECT_EQ(test, rc, 0); + + skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); + KUNIT_EXPECT_EQ(test, skb->len, 1); + + skb_free_datagram(sock->sk, skb2); + + } else { + KUNIT_EXPECT_NE(test, rc, 0); + skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + KUNIT_EXPECT_PTR_EQ(test, skb2, NULL); + } + + __mctp_route_test_fini(test, dev, rt, sock); +} + +#define FL_S (MCTP_HDR_FLAG_SOM) +#define FL_E (MCTP_HDR_FLAG_EOM) +#define FL_T (MCTP_HDR_FLAG_TO) + +static const struct mctp_route_input_sk_test mctp_route_input_sk_tests[] = { + { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T), .type = 0, .deliver = true }, + { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T), .type = 1, .deliver = false }, + { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E), .type = 0, .deliver = false }, + { .hdr = RX_HDR(1, 10, 8, FL_E | FL_T), .type = 0, .deliver = false }, + { .hdr = RX_HDR(1, 10, 8, FL_T), .type = 0, .deliver = false }, + { .hdr = RX_HDR(1, 10, 8, 0), .type = 0, .deliver = false }, +}; + +static void mctp_route_input_sk_to_desc(const struct mctp_route_input_sk_test *t, + char *desc) +{ + sprintf(desc, "{%x,%x,%x,%x} type %d", t->hdr.ver, t->hdr.src, + t->hdr.dest, t->hdr.flags_seq_tag, t->type); +} + +KUNIT_ARRAY_PARAM(mctp_route_input_sk, mctp_route_input_sk_tests, + mctp_route_input_sk_to_desc); + +struct mctp_route_input_sk_reasm_test { + const char *name; + struct mctp_hdr hdrs[4]; + int n_hdrs; + int rx_len; +}; + +static void mctp_test_route_input_sk_reasm(struct kunit *test) +{ + const struct mctp_route_input_sk_reasm_test *params; + struct sk_buff *skb, *skb2; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct socket *sock; + int i, rc; + u8 c; + + params = test->param_value; + + __mctp_route_test_init(test, &dev, &rt, &sock); + + for (i = 0; i < params->n_hdrs; i++) { + c = i; + skb = 
mctp_test_create_skb_data(¶ms->hdrs[i], &c); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); + + skb->dev = dev->ndev; + __mctp_cb(skb); + + rc = mctp_route_input(&rt->rt, skb); + } + + skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + + if (params->rx_len) { + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); + KUNIT_EXPECT_EQ(test, skb2->len, params->rx_len); + skb_free_datagram(sock->sk, skb2); + + } else { + KUNIT_EXPECT_PTR_EQ(test, skb2, NULL); + } + + __mctp_route_test_fini(test, dev, rt, sock); +} + +#define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_T | (f) | ((s) << MCTP_HDR_SEQ_SHIFT)) + +static const struct mctp_route_input_sk_reasm_test mctp_route_input_sk_reasm_tests[] = { + { + .name = "single packet", + .hdrs = { + RX_FRAG(FL_S | FL_E, 0), + }, + .n_hdrs = 1, + .rx_len = 1, + }, + { + .name = "single packet, offset seq", + .hdrs = { + RX_FRAG(FL_S | FL_E, 1), + }, + .n_hdrs = 1, + .rx_len = 1, + }, + { + .name = "start & end packets", + .hdrs = { + RX_FRAG(FL_S, 0), + RX_FRAG(FL_E, 1), + }, + .n_hdrs = 2, + .rx_len = 2, + }, + { + .name = "start & end packets, offset seq", + .hdrs = { + RX_FRAG(FL_S, 1), + RX_FRAG(FL_E, 2), + }, + .n_hdrs = 2, + .rx_len = 2, + }, + { + .name = "start & end packets, out of order", + .hdrs = { + RX_FRAG(FL_E, 1), + RX_FRAG(FL_S, 0), + }, + .n_hdrs = 2, + .rx_len = 0, + }, + { + .name = "start, middle & end packets", + .hdrs = { + RX_FRAG(FL_S, 0), + RX_FRAG(0, 1), + RX_FRAG(FL_E, 2), + }, + .n_hdrs = 3, + .rx_len = 3, + }, + { + .name = "missing seq", + .hdrs = { + RX_FRAG(FL_S, 0), + RX_FRAG(FL_E, 2), + }, + .n_hdrs = 2, + .rx_len = 0, + }, + { + .name = "seq wrap", + .hdrs = { + RX_FRAG(FL_S, 3), + RX_FRAG(FL_E, 0), + }, + .n_hdrs = 2, + .rx_len = 2, + }, +}; + +static void mctp_route_input_sk_reasm_to_desc( + const struct mctp_route_input_sk_reasm_test *t, + char *desc) +{ + sprintf(desc, "%s", t->name); +} + +KUNIT_ARRAY_PARAM(mctp_route_input_sk_reasm, mctp_route_input_sk_reasm_tests, + mctp_route_input_sk_reasm_to_desc); + +static struct kunit_case mctp_test_cases[] = { + KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params), + KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params), + KUNIT_CASE_PARAM(mctp_test_route_input_sk, mctp_route_input_sk_gen_params), + KUNIT_CASE_PARAM(mctp_test_route_input_sk_reasm, + mctp_route_input_sk_reasm_gen_params), + {} +}; + +static struct kunit_suite mctp_test_suite = { + .name = "mctp", + .test_cases = mctp_test_cases, +}; + +kunit_test_suite(mctp_test_suite); diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c new file mode 100644 index 00000000000000..cc6b8803aa9d0a --- /dev/null +++ b/net/mctp/test/utils.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include + +#include "utils.h" + +static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb, + struct net_device *ndev) +{ + kfree(skb); + return NETDEV_TX_OK; +} + +static const struct net_device_ops mctp_test_netdev_ops = { + .ndo_start_xmit = mctp_test_dev_tx, +}; + +static void mctp_test_dev_setup(struct net_device *ndev) +{ + ndev->type = ARPHRD_MCTP; + ndev->mtu = MCTP_DEV_TEST_MTU; + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + ndev->flags = IFF_NOARP; + ndev->netdev_ops = &mctp_test_netdev_ops; + ndev->needs_free_netdev = true; +} + +struct mctp_test_dev *mctp_test_create_dev(void) +{ + struct mctp_test_dev *dev; + struct net_device *ndev; + int rc; + + ndev = alloc_netdev(sizeof(*dev), "mctptest%d", NET_NAME_ENUM, + mctp_test_dev_setup); + if 
(!ndev) + return NULL; + + dev = netdev_priv(ndev); + dev->ndev = ndev; + + rc = register_netdev(ndev); + if (rc) { + free_netdev(ndev); + return NULL; + } + + rcu_read_lock(); + dev->mdev = __mctp_dev_get(ndev); + mctp_dev_hold(dev->mdev); + rcu_read_unlock(); + + return dev; +} + +void mctp_test_destroy_dev(struct mctp_test_dev *dev) +{ + mctp_dev_put(dev->mdev); + unregister_netdev(dev->ndev); +} diff --git a/net/mctp/test/utils.h b/net/mctp/test/utils.h new file mode 100644 index 00000000000000..df6aa1c0344092 --- /dev/null +++ b/net/mctp/test/utils.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __NET_MCTP_TEST_UTILS_H +#define __NET_MCTP_TEST_UTILS_H + +#include + +#define MCTP_DEV_TEST_MTU 68 + +struct mctp_test_dev { + struct net_device *ndev; + struct mctp_dev *mdev; +}; + +struct mctp_test_dev; + +struct mctp_test_dev *mctp_test_create_dev(void); +void mctp_test_destroy_dev(struct mctp_test_dev *dev); + +#endif /* __NET_MCTP_TEST_UTILS_H */ diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index b21ff9be04c617..3240b72271a7f3 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -72,6 +72,7 @@ bool mptcp_mib_alloc(struct net *net) void mptcp_seq_show(struct seq_file *seq) { + unsigned long sum[ARRAY_SIZE(mptcp_snmp_list) - 1]; struct net *net = seq->private; int i; @@ -81,17 +82,13 @@ void mptcp_seq_show(struct seq_file *seq) seq_puts(seq, "\nMPTcpExt:"); - if (!net->mib.mptcp_statistics) { - for (i = 0; mptcp_snmp_list[i].name; i++) - seq_puts(seq, " 0"); - - seq_putc(seq, '\n'); - return; - } + memset(sum, 0, sizeof(sum)); + if (net->mib.mptcp_statistics) + snmp_get_cpu_field_batch(sum, mptcp_snmp_list, + net->mib.mptcp_statistics); for (i = 0; mptcp_snmp_list[i].name; i++) - seq_printf(seq, " %lu", - snmp_fold_field(net->mib.mptcp_statistics, - mptcp_snmp_list[i].entry)); + seq_printf(seq, " %lu", sum[i]); + seq_putc(seq, '\n'); } diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c index 292374fb077928..f44125dd669712 100644 --- a/net/mptcp/mptcp_diag.c +++ b/net/mptcp/mptcp_diag.c @@ -113,37 +113,13 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_info *info = _info; - u32 flags = 0; - bool slow; - u8 val; r->idiag_rqueue = sk_rmem_alloc_get(sk); r->idiag_wqueue = sk_wmem_alloc_get(sk); if (!info) return; - slow = lock_sock_fast(sk); - info->mptcpi_subflows = READ_ONCE(msk->pm.subflows); - info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled); - info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); - info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used); - info->mptcpi_subflows_max = mptcp_pm_get_subflows_max(msk); - val = mptcp_pm_get_add_addr_signal_max(msk); - info->mptcpi_add_addr_signal_max = val; - val = mptcp_pm_get_add_addr_accept_max(msk); - info->mptcpi_add_addr_accepted_max = val; - info->mptcpi_local_addr_max = mptcp_pm_get_local_addr_max(msk); - if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) - flags |= MPTCP_INFO_FLAG_FALLBACK; - if (READ_ONCE(msk->can_ack)) - flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; - info->mptcpi_flags = flags; - info->mptcpi_token = READ_ONCE(msk->token); - info->mptcpi_write_seq = READ_ONCE(msk->write_seq); - info->mptcpi_snd_una = READ_ONCE(msk->snd_una); - info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq); - info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled); - unlock_sock_fast(sk, slow); + mptcp_diag_fill_info(msk, info); } static const struct inet_diag_handler mptcp_diag_handler 
= { diff --git a/net/mptcp/options.c b/net/mptcp/options.c index f0f22eb4fd5f72..7c3420afb1a013 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -748,9 +748,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk, /* can't send MP_PRIO with MPC, as they share the same option space: * 'backup'. Also it makes no sense at all */ - if (!subflow->send_mp_prio || - ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | - OPTION_MPTCP_MPC_ACK) & opts->suboptions)) + if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC)) return false; /* account for the trailing 'nop' option */ @@ -1019,11 +1017,9 @@ static void ack_update_msk(struct mptcp_sock *msk, old_snd_una = msk->snd_una; new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64); - /* ACK for data not even sent yet and even above recovery bound? Ignore.*/ - if (unlikely(after64(new_snd_una, snd_nxt))) { - if (!msk->recovery || after64(new_snd_una, msk->recovery_snd_nxt)) - new_snd_una = old_snd_una; - } + /* ACK for data not even sent yet? Ignore.*/ + if (unlikely(after64(new_snd_una, snd_nxt))) + new_snd_una = old_snd_una; new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd; @@ -1335,8 +1331,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); } } - } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | - OPTION_MPTCP_MPC_ACK) & opts->suboptions) { + } else if (OPTIONS_MPTCP_MPC & opts->suboptions) { u8 len, flag = MPTCP_CAP_HMAC_SHA256; if (OPTION_MPTCP_MPC_SYN & opts->suboptions) { diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 050eea231528bc..7b96be1e9f14a1 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -654,9 +654,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) } } -int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, - struct mptcp_addr_info *addr, - u8 bkup) +static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, + struct mptcp_addr_info *addr, + u8 bkup) { struct mptcp_subflow_context *subflow; @@ -2052,6 +2052,9 @@ static int __net_init pm_nl_init_net(struct net *net) struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id); INIT_LIST_HEAD_RCU(&pernet->local_addr_list); + + /* Cit. 2 subflows ought to be enough for anybody. 
*/ + pernet->subflows_max = 2; pernet->next_id = 1; pernet->stale_loss_cnt = 4; spin_lock_init(&pernet->lock); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index d073b211138287..b7e32e316738b8 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -126,6 +126,11 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); } +static void mptcp_rmem_charge(struct sock *sk, int size) +{ + mptcp_sk(sk)->rmem_fwd_alloc -= size; +} + static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from) { @@ -142,7 +147,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; kfree_skb_partial(from, fragstolen); atomic_add(delta, &sk->sk_rmem_alloc); - sk_mem_charge(sk, delta); + mptcp_rmem_charge(sk, delta); return true; } @@ -155,6 +160,44 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, return mptcp_try_coalesce((struct sock *)msk, to, from); } +static void __mptcp_rmem_reclaim(struct sock *sk, int amount) +{ + amount >>= SK_MEM_QUANTUM_SHIFT; + mptcp_sk(sk)->rmem_fwd_alloc -= amount << SK_MEM_QUANTUM_SHIFT; + __sk_mem_reduce_allocated(sk, amount); +} + +static void mptcp_rmem_uncharge(struct sock *sk, int size) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + int reclaimable; + + msk->rmem_fwd_alloc += size; + reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); + + /* see sk_mem_uncharge() for the rationale behind the following schema */ + if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD)) + __mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK); +} + +static void mptcp_rfree(struct sk_buff *skb) +{ + unsigned int len = skb->truesize; + struct sock *sk = skb->sk; + + atomic_sub(len, &sk->sk_rmem_alloc); + mptcp_rmem_uncharge(sk, len); +} + +static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = mptcp_rfree; + atomic_add(skb->truesize, &sk->sk_rmem_alloc); + mptcp_rmem_charge(sk, skb->truesize); +} + /* "inspired" by tcp_data_queue_ofo(), main differences: * - use mptcp seqs * - don't cope with sacks @@ -267,7 +310,29 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) end: skb_condense(skb); - skb_set_owner_r(skb, sk); + mptcp_set_owner_r(skb, sk); +} + +static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + int amt, amount; + + if (size < msk->rmem_fwd_alloc) + return true; + + amt = sk_mem_pages(size); + amount = amt << SK_MEM_QUANTUM_SHIFT; + msk->rmem_fwd_alloc += amount; + if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) { + if (ssk->sk_forward_alloc < amount) { + msk->rmem_fwd_alloc -= amount; + return false; + } + + ssk->sk_forward_alloc -= amount; + } + return true; } static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, @@ -285,15 +350,8 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, skb_orphan(skb); /* try to fetch required memory from subflow */ - if (!sk_rmem_schedule(sk, skb, skb->truesize)) { - int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT; - - if (ssk->sk_forward_alloc < amount) - goto drop; - - ssk->sk_forward_alloc -= amount; - sk->sk_forward_alloc += amount; - } + if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) + goto drop; has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; @@ -313,7 +371,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, if (tail && 
mptcp_try_coalesce(sk, tail, skb)) return true; - skb_set_owner_r(skb, sk); + mptcp_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); return true; } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { @@ -908,124 +966,20 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, df->data_seq + df->data_len == msk->write_seq; } -static int mptcp_wmem_with_overhead(int size) -{ - return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT); -} - -static void __mptcp_wmem_reserve(struct sock *sk, int size) -{ - int amount = mptcp_wmem_with_overhead(size); - struct mptcp_sock *msk = mptcp_sk(sk); - - WARN_ON_ONCE(msk->wmem_reserved); - if (WARN_ON_ONCE(amount < 0)) - amount = 0; - - if (amount <= sk->sk_forward_alloc) - goto reserve; - - /* under memory pressure try to reserve at most a single page - * otherwise try to reserve the full estimate and fallback - * to a single page before entering the error path - */ - if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) || - !sk_wmem_schedule(sk, amount)) { - if (amount <= PAGE_SIZE) - goto nomem; - - amount = PAGE_SIZE; - if (!sk_wmem_schedule(sk, amount)) - goto nomem; - } - -reserve: - msk->wmem_reserved = amount; - sk->sk_forward_alloc -= amount; - return; - -nomem: - /* we will wait for memory on next allocation */ - msk->wmem_reserved = -1; -} - -static void __mptcp_update_wmem(struct sock *sk) -{ - struct mptcp_sock *msk = mptcp_sk(sk); - -#ifdef CONFIG_LOCKDEP - WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); -#endif - - if (!msk->wmem_reserved) - return; - - if (msk->wmem_reserved < 0) - msk->wmem_reserved = 0; - if (msk->wmem_reserved > 0) { - sk->sk_forward_alloc += msk->wmem_reserved; - msk->wmem_reserved = 0; - } -} - -static bool mptcp_wmem_alloc(struct sock *sk, int size) -{ - struct mptcp_sock *msk = mptcp_sk(sk); - - /* check for pre-existing error condition */ - if (msk->wmem_reserved < 0) - return false; - - if (msk->wmem_reserved >= size) - goto account; - - mptcp_data_lock(sk); - if (!sk_wmem_schedule(sk, size)) { - mptcp_data_unlock(sk); - return false; - } - - sk->sk_forward_alloc -= size; - msk->wmem_reserved += size; - mptcp_data_unlock(sk); - -account: - msk->wmem_reserved -= size; - return true; -} - -static void mptcp_wmem_uncharge(struct sock *sk, int size) -{ - struct mptcp_sock *msk = mptcp_sk(sk); - - if (msk->wmem_reserved < 0) - msk->wmem_reserved = 0; - msk->wmem_reserved += size; -} - static void __mptcp_mem_reclaim_partial(struct sock *sk) { + int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk); + lockdep_assert_held_once(&sk->sk_lock.slock); - __mptcp_update_wmem(sk); + + __mptcp_rmem_reclaim(sk, reclaimable - 1); sk_mem_reclaim_partial(sk); } static void mptcp_mem_reclaim_partial(struct sock *sk) { - struct mptcp_sock *msk = mptcp_sk(sk); - - /* if we are experiencing a transint allocation error, - * the forward allocation memory has been already - * released - */ - if (msk->wmem_reserved < 0) - return; - mptcp_data_lock(sk); - sk->sk_forward_alloc += msk->wmem_reserved; - sk_mem_reclaim_partial(sk); - msk->wmem_reserved = sk->sk_forward_alloc; - sk->sk_forward_alloc = 0; + __mptcp_mem_reclaim_partial(sk); mptcp_data_unlock(sk); } @@ -1104,7 +1058,8 @@ static void __mptcp_clean_una(struct sock *sk) if (cleaned && tcp_under_memory_pressure(sk)) __mptcp_mem_reclaim_partial(sk); - if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) { + if (snd_una == READ_ONCE(msk->snd_nxt) && + snd_una == READ_ONCE(msk->write_seq)) { if 
(mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) mptcp_stop_timer(sk); } else { @@ -1114,9 +1069,8 @@ static void __mptcp_clean_una(struct sock *sk) static void __mptcp_clean_una_wakeup(struct sock *sk) { -#ifdef CONFIG_LOCKDEP - WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); -#endif + lockdep_assert_held_once(&sk->sk_lock.slock); + __mptcp_clean_una(sk); mptcp_write_space(sk); } @@ -1220,7 +1174,8 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) if (likely(skb)) { if (likely(__mptcp_add_ext(skb, gfp))) { skb_reserve(skb, MAX_TCP_HEADER); - skb->reserved_tailroom = skb->end - skb->tail; + skb->ip_summed = CHECKSUM_PARTIAL; + INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); return skb; } __kfree_skb(skb); @@ -1230,31 +1185,23 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) return NULL; } -static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) +static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) { struct sk_buff *skb; - if (ssk->sk_tx_skb_cache) { - skb = ssk->sk_tx_skb_cache; - if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) && - !__mptcp_add_ext(skb, gfp))) - return false; - return true; - } - skb = __mptcp_do_alloc_tx_skb(sk, gfp); if (!skb) - return false; + return NULL; if (likely(sk_wmem_schedule(ssk, skb->truesize))) { - ssk->sk_tx_skb_cache = skb; - return true; + tcp_skb_entail(ssk, skb); + return skb; } kfree_skb(skb); - return false; + return NULL; } -static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) +static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) { gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; @@ -1284,23 +1231,29 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, struct mptcp_sendmsg_info *info) { u64 data_seq = dfrag->data_seq + info->sent; + int offset = dfrag->offset + info->sent; struct mptcp_sock *msk = mptcp_sk(sk); bool zero_window_probe = false; struct mptcp_ext *mpext = NULL; - struct sk_buff *skb, *tail; - bool must_collapse = false; - int size_bias = 0; - int avail_size; - size_t ret = 0; + bool can_coalesce = false; + bool reuse_skb = true; + struct sk_buff *skb; + size_t copy; + int i; pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u", msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); + if (WARN_ON_ONCE(info->sent > info->limit || + info->limit > dfrag->data_len)) + return 0; + /* compute send limit */ info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); - avail_size = info->size_goal; + copy = info->size_goal; + skb = tcp_write_queue_tail(ssk); - if (skb) { + if (skb && copy > skb->len) { /* Limit the write to the size available in the * current skb, if any, so that we create at most a new skb. 
* Explicitly tells TCP internals to avoid collapsing on later @@ -1313,62 +1266,79 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, goto alloc_skb; } - must_collapse = (info->size_goal > skb->len) && - (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags); - if (must_collapse) { - size_bias = skb->len; - avail_size = info->size_goal - skb->len; + i = skb_shinfo(skb)->nr_frags; + can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); + if (!can_coalesce && i >= sysctl_max_skb_frags) { + tcp_mark_push(tcp_sk(ssk), skb); + goto alloc_skb; } - } + copy -= skb->len; + } else { alloc_skb: - if (!must_collapse && - !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held)) - return 0; + skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); + if (!skb) + return -ENOMEM; + + i = skb_shinfo(skb)->nr_frags; + reuse_skb = false; + mpext = skb_ext_find(skb, SKB_EXT_MPTCP); + } /* Zero window and all data acked? Probe. */ - avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size); - if (avail_size == 0) { + copy = mptcp_check_allowed_size(msk, data_seq, copy); + if (copy == 0) { u64 snd_una = READ_ONCE(msk->snd_una); - if (skb || snd_una != msk->snd_nxt) + if (snd_una != msk->snd_nxt) { + tcp_remove_empty_skb(ssk); return 0; + } + zero_window_probe = true; data_seq = snd_una - 1; - avail_size = 1; - } + copy = 1; - if (WARN_ON_ONCE(info->sent > info->limit || - info->limit > dfrag->data_len)) - return 0; + /* all mptcp-level data is acked, no skbs should be present into the + * ssk write queue + */ + WARN_ON_ONCE(reuse_skb); + } - ret = info->limit - info->sent; - tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags, - dfrag->page, dfrag->offset + info->sent, &ret); - if (!tail) { - tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk)); + copy = min_t(size_t, copy, info->limit - info->sent); + if (!sk_wmem_schedule(ssk, copy)) { + tcp_remove_empty_skb(ssk); return -ENOMEM; } - /* if the tail skb is still the cached one, collapsing really happened. 
- */ - if (skb == tail) { - TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH; - mpext->data_len += ret; + if (can_coalesce) { + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); + } else { + get_page(dfrag->page); + skb_fill_page_desc(skb, i, dfrag->page, offset, copy); + } + + skb->len += copy; + skb->data_len += copy; + skb->truesize += copy; + sk_wmem_queued_add(ssk, copy); + sk_mem_charge(ssk, copy); + WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); + TCP_SKB_CB(skb)->end_seq += copy; + tcp_skb_pcount_set(skb, 0); + + /* on skb reuse we just need to update the DSS len */ + if (reuse_skb) { + TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; + mpext->data_len += copy; WARN_ON_ONCE(zero_window_probe); goto out; } - mpext = skb_ext_find(tail, SKB_EXT_MPTCP); - if (WARN_ON_ONCE(!mpext)) { - /* should never reach here, stream corrupted */ - return -EINVAL; - } - memset(mpext, 0, sizeof(*mpext)); mpext->data_seq = data_seq; mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; - mpext->data_len = ret; + mpext->data_len = copy; mpext->use_map = 1; mpext->dsn64 = 1; @@ -1377,18 +1347,18 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, mpext->dsn64); if (zero_window_probe) { - mptcp_subflow_ctx(ssk)->rel_write_seq += ret; + mptcp_subflow_ctx(ssk)->rel_write_seq += copy; mpext->frozen = 1; if (READ_ONCE(msk->csum_enabled)) - mptcp_update_data_checksum(tail, ret); + mptcp_update_data_checksum(skb, copy); tcp_push_pending_frames(ssk); return 0; } out: if (READ_ONCE(msk->csum_enabled)) - mptcp_update_data_checksum(tail, ret); - mptcp_subflow_ctx(ssk)->rel_write_seq += ret; - return ret; + mptcp_update_data_checksum(skb, copy); + mptcp_subflow_ctx(ssk)->rel_write_seq += copy; + return copy; } #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ @@ -1498,13 +1468,44 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) return NULL; } -static void mptcp_push_release(struct sock *sk, struct sock *ssk, - struct mptcp_sendmsg_info *info) +static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) { tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); release_sock(ssk); } +static void mptcp_update_post_push(struct mptcp_sock *msk, + struct mptcp_data_frag *dfrag, + u32 sent) +{ + u64 snd_nxt_new = dfrag->data_seq; + + dfrag->already_sent += sent; + + msk->snd_burst -= sent; + + snd_nxt_new += dfrag->already_sent; + + /* snd_nxt_new can be smaller than snd_nxt in case mptcp + * is recovering after a failover. In that event, this re-sends + * old segments. + * + * Thus compute snd_nxt_new candidate based on + * the dfrag->data_seq that was sent and the data + * that has been handed to the subflow for transmission + * and skip update in case it was old dfrag. 
+ */ + if (likely(after64(snd_nxt_new, msk->snd_nxt))) + msk->snd_nxt = snd_nxt_new; +} + +static void mptcp_check_and_set_pending(struct sock *sk) +{ + if (mptcp_send_head(sk) && + !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) + set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); +} + void __mptcp_push_pending(struct sock *sk, unsigned int flags) { struct sock *prev_ssk = NULL, *ssk = NULL; @@ -1530,7 +1531,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) * the last round, release prev_ssk */ if (ssk != prev_ssk && prev_ssk) - mptcp_push_release(sk, prev_ssk, &info); + mptcp_push_release(prev_ssk, &info); if (!ssk) goto out; @@ -1543,24 +1544,22 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); if (ret <= 0) { - mptcp_push_release(sk, ssk, &info); + mptcp_push_release(ssk, &info); goto out; } info.sent += ret; - dfrag->already_sent += ret; - msk->snd_nxt += ret; - msk->snd_burst -= ret; - msk->tx_pending_data -= ret; copied += ret; len -= ret; + + mptcp_update_post_push(msk, dfrag, ret); } WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); } /* at this point we held the socket lock for the last subflow we used */ if (ssk) - mptcp_push_release(sk, ssk, &info); + mptcp_push_release(ssk, &info); out: /* ensure the rtx timer is running */ @@ -1606,13 +1605,11 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) goto out; info.sent += ret; - dfrag->already_sent += ret; - msk->snd_nxt += ret; - msk->snd_burst -= ret; - msk->tx_pending_data -= ret; copied += ret; len -= ret; first = false; + + mptcp_update_post_push(msk, dfrag, ret); } WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); } @@ -1621,7 +1618,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) /* __mptcp_alloc_tx_skb could have released some wmem and we are * not going to flush it via release_sock() */ - __mptcp_update_wmem(sk); if (copied) { tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, info.size_goal); @@ -1658,7 +1654,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* silently ignore everything else */ msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL; - mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len))); + lock_sock(sk); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); @@ -1706,23 +1702,22 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) psize = min_t(size_t, psize, msg_data_left(msg)); total_ts = psize + frag_truesize; - if (!mptcp_wmem_alloc(sk, total_ts)) + if (!sk_wmem_schedule(sk, total_ts)) goto wait_for_memory; if (copy_page_from_iter(dfrag->page, offset, psize, &msg->msg_iter) != psize) { - mptcp_wmem_uncharge(sk, psize + frag_truesize); ret = -EFAULT; goto out; } /* data successfully copied into the write queue */ + sk->sk_forward_alloc -= total_ts; copied += psize; dfrag->data_len += psize; frag_truesize += psize; pfrag->offset += frag_truesize; WRITE_ONCE(msk->write_seq, msk->write_seq + psize); - msk->tx_pending_data += psize; /* charge data on mptcp pending queue to the msk socket * Note: we charge such data both to sk and ssk @@ -1914,7 +1909,7 @@ static void __mptcp_update_rmem(struct sock *sk) return; atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); - sk_mem_uncharge(sk, msk->rmem_released); + mptcp_rmem_uncharge(sk, msk->rmem_released); WRITE_ONCE(msk->rmem_released, 0); } @@ -1982,7 +1977,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, 
size_t len, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); - mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk)); + lock_sock(sk); if (unlikely(sk->sk_state == TCP_LISTEN)) { copied = -ENOTCONN; goto out_err; @@ -2183,15 +2178,11 @@ bool __mptcp_retransmit_pending_data(struct sock *sk) return false; } - /* will accept ack for reijected data before re-sending them */ - if (!msk->recovery || after64(msk->snd_nxt, msk->recovery_snd_nxt)) - msk->recovery_snd_nxt = msk->snd_nxt; + msk->recovery_snd_nxt = msk->snd_nxt; msk->recovery = true; mptcp_data_unlock(sk); msk->first_pending = rtx_head; - msk->tx_pending_data += msk->snd_nxt - rtx_head->data_seq; - msk->snd_nxt = rtx_head->data_seq; msk->snd_burst = 0; /* be sure to clear the "sent status" on all re-injected fragments */ @@ -2353,6 +2344,9 @@ static void __mptcp_retrans(struct sock *sk) int ret; mptcp_clean_una_wakeup(sk); + + /* first check ssk: need to kick "stale" logic */ + ssk = mptcp_subflow_get_retrans(msk); dfrag = mptcp_rtx_head(sk); if (!dfrag) { if (mptcp_data_fin_enabled(msk)) { @@ -2365,10 +2359,12 @@ static void __mptcp_retrans(struct sock *sk) goto reset_timer; } - return; + if (!mptcp_send_head(sk)) + return; + + goto reset_timer; } - ssk = mptcp_subflow_get_retrans(msk); if (!ssk) goto reset_timer; @@ -2395,6 +2391,8 @@ static void __mptcp_retrans(struct sock *sk) release_sock(ssk); reset_timer: + mptcp_check_and_set_pending(sk); + if (!mptcp_timer_pending(sk)) mptcp_reset_timer(sk); } @@ -2459,9 +2457,8 @@ static int __mptcp_init_sock(struct sock *sk) __skb_queue_head_init(&msk->receive_queue); msk->out_of_order_queue = RB_ROOT; msk->first_pending = NULL; - msk->wmem_reserved = 0; + msk->rmem_fwd_alloc = 0; WRITE_ONCE(msk->rmem_released, 0); - msk->tx_pending_data = 0; msk->timer_ival = TCP_RTO_MIN; msk->first = NULL; @@ -2671,7 +2668,7 @@ static void __mptcp_destroy_sock(struct sock *sk) sk->sk_prot->destroy(sk); - WARN_ON_ONCE(msk->wmem_reserved); + WARN_ON_ONCE(msk->rmem_fwd_alloc); WARN_ON_ONCE(msk->rmem_released); sk_stream_kill_queues(sk); xfrm_sk_free_policy(sk); @@ -2904,8 +2901,14 @@ void mptcp_destroy_common(struct mptcp_sock *msk) /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); - + __skb_queue_purge(&sk->sk_receive_queue); skb_rbtree_purge(&msk->out_of_order_queue); + + /* move all the rx fwd alloc into the sk_mem_reclaim_final in + * inet_sock_destruct() will dispose it + */ + sk->sk_forward_alloc += msk->rmem_fwd_alloc; + msk->rmem_fwd_alloc = 0; mptcp_token_destroy(msk); mptcp_pm_free_anno_list(msk); } @@ -2987,10 +2990,6 @@ static void mptcp_release_cb(struct sock *sk) if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags)) __mptcp_error_report(sk); - /* push_pending may touch wmem_reserved, ensure we do the cleanup - * later - */ - __mptcp_update_wmem(sk); __mptcp_update_rmem(sk); } @@ -3140,6 +3139,11 @@ static void mptcp_shutdown(struct sock *sk, int how) __mptcp_wr_shutdown(sk); } +static int mptcp_forward_alloc_get(const struct sock *sk) +{ + return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; +} + static struct proto mptcp_prot = { .name = "MPTCP", .owner = THIS_MODULE, @@ -3157,6 +3161,7 @@ static struct proto mptcp_prot = { .hash = mptcp_hash, .unhash = mptcp_unhash, .get_port = mptcp_get_port, + .forward_alloc_get = mptcp_forward_alloc_get, .sockets_allocated = &mptcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .memory_pressure = 
&tcp_memory_pressure, diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index dc984676c5eb15..67a61ac48b20a9 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -227,7 +227,7 @@ struct mptcp_sock { u64 ack_seq; u64 rcv_wnd_sent; u64 rcv_data_fin_seq; - int wmem_reserved; + int rmem_fwd_alloc; struct sock *last_snd; int snd_burst; int old_wspace; @@ -254,7 +254,6 @@ struct mptcp_sock { struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; struct sk_buff_head receive_queue; - int tx_pending_data; struct list_head conn_list; struct list_head rtx_queue; struct mptcp_data_frag *first_pending; @@ -273,19 +272,6 @@ struct mptcp_sock { char ca_name[TCP_CA_NAME_MAX]; }; -#define mptcp_lock_sock(___sk, cb) do { \ - struct sock *__sk = (___sk); /* silence macro reuse warning */ \ - might_sleep(); \ - spin_lock_bh(&__sk->sk_lock.slock); \ - if (__sk->sk_lock.owned) \ - __lock_sock(__sk); \ - cb; \ - __sk->sk_lock.owned = 1; \ - spin_unlock(&__sk->sk_lock.slock); \ - mutex_acquire(&__sk->sk_lock.dep_map, 0, 0, _RET_IP_); \ - local_bh_enable(); \ -} while (0) - #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock) #define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock) @@ -737,9 +723,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk); void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup); -int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, - struct mptcp_addr_info *addr, - u8 bkup); void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq); void mptcp_pm_free_anno_list(struct mptcp_sock *msk); bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 8c03afac5ca034..0f1e661c2032b1 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -14,6 +14,8 @@ #include #include "protocol.h" +#define MIN_INFO_OPTLEN_SIZE 16 + static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) { sock_owned_by_me((const struct sock *)msk); @@ -670,6 +672,266 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int return ret; } +void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) +{ + struct sock *sk = &msk->sk.icsk_inet.sk; + u32 flags = 0; + bool slow; + u8 val; + + memset(info, 0, sizeof(*info)); + + slow = lock_sock_fast(sk); + + info->mptcpi_subflows = READ_ONCE(msk->pm.subflows); + info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled); + info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); + info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used); + info->mptcpi_subflows_max = mptcp_pm_get_subflows_max(msk); + val = mptcp_pm_get_add_addr_signal_max(msk); + info->mptcpi_add_addr_signal_max = val; + val = mptcp_pm_get_add_addr_accept_max(msk); + info->mptcpi_add_addr_accepted_max = val; + info->mptcpi_local_addr_max = mptcp_pm_get_local_addr_max(msk); + if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) + flags |= MPTCP_INFO_FLAG_FALLBACK; + if (READ_ONCE(msk->can_ack)) + flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; + info->mptcpi_flags = flags; + info->mptcpi_token = READ_ONCE(msk->token); + info->mptcpi_write_seq = READ_ONCE(msk->write_seq); + info->mptcpi_snd_una = READ_ONCE(msk->snd_una); + info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq); + info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled); + + unlock_sock_fast(sk, slow); +} +EXPORT_SYMBOL_GPL(mptcp_diag_fill_info); 
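Note: mptcp_diag_fill_info() above gathers the MPTCP-level counters that the new SOL_MPTCP getsockopt handlers in the following hunks copy out to user space. The short user-space sketch below is included for orientation only; it is not part of the patch. It assumes the uapi definitions from <linux/mptcp.h> (struct mptcp_info, MPTCP_INFO), with SOL_MPTCP defined as a fallback in case the libc headers do not carry it yet.

    /* Illustrative user-space sketch (not part of this patch): read the
     * MPTCP connection counters via getsockopt(SOL_MPTCP, MPTCP_INFO).
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/mptcp.h>

    #ifndef SOL_MPTCP
    #define SOL_MPTCP 284   /* value from include/linux/socket.h */
    #endif

    static int dump_mptcp_info(int fd)
    {
            struct mptcp_info info;
            socklen_t len = sizeof(info);

            memset(&info, 0, sizeof(info));
            /* the kernel copies at most sizeof(struct mptcp_info) bytes and
             * writes the copied length back, so header/kernel version skew
             * stays usable
             */
            if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0)
                    return -1;

            printf("subflows=%u write_seq=%llu snd_una=%llu\n",
                   info.mptcpi_subflows,
                   (unsigned long long)info.mptcpi_write_seq,
                   (unsigned long long)info.mptcpi_snd_una);
            return 0;
    }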
+ +static int mptcp_getsockopt_info(struct mptcp_sock *msk, char __user *optval, int __user *optlen) +{ + struct mptcp_info m_info; + int len; + + if (get_user(len, optlen)) + return -EFAULT; + + len = min_t(unsigned int, len, sizeof(struct mptcp_info)); + + mptcp_diag_fill_info(msk, &m_info); + + if (put_user(len, optlen)) + return -EFAULT; + + if (copy_to_user(optval, &m_info, len)) + return -EFAULT; + + return 0; +} + +static int mptcp_put_subflow_data(struct mptcp_subflow_data *sfd, + char __user *optval, + u32 copied, + int __user *optlen) +{ + u32 copylen = min_t(u32, sfd->size_subflow_data, sizeof(*sfd)); + + if (copied) + copied += sfd->size_subflow_data; + else + copied = copylen; + + if (put_user(copied, optlen)) + return -EFAULT; + + if (copy_to_user(optval, sfd, copylen)) + return -EFAULT; + + return 0; +} + +static int mptcp_get_subflow_data(struct mptcp_subflow_data *sfd, + char __user *optval, int __user *optlen) +{ + int len, copylen; + + if (get_user(len, optlen)) + return -EFAULT; + + /* if mptcp_subflow_data size is changed, need to adjust + * this function to deal with programs using old version. + */ + BUILD_BUG_ON(sizeof(*sfd) != MIN_INFO_OPTLEN_SIZE); + + if (len < MIN_INFO_OPTLEN_SIZE) + return -EINVAL; + + memset(sfd, 0, sizeof(*sfd)); + + copylen = min_t(unsigned int, len, sizeof(*sfd)); + if (copy_from_user(sfd, optval, copylen)) + return -EFAULT; + + /* size_subflow_data is u32, but len is signed */ + if (sfd->size_subflow_data > INT_MAX || + sfd->size_user > INT_MAX) + return -EINVAL; + + if (sfd->size_subflow_data < MIN_INFO_OPTLEN_SIZE || + sfd->size_subflow_data > len) + return -EINVAL; + + if (sfd->num_subflows || sfd->size_kernel) + return -EINVAL; + + return len - sfd->size_subflow_data; +} + +static int mptcp_getsockopt_tcpinfo(struct mptcp_sock *msk, char __user *optval, + int __user *optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = &msk->sk.icsk_inet.sk; + unsigned int sfcount = 0, copied = 0; + struct mptcp_subflow_data sfd; + char __user *infoptr; + int len; + + len = mptcp_get_subflow_data(&sfd, optval, optlen); + if (len < 0) + return len; + + sfd.size_kernel = sizeof(struct tcp_info); + sfd.size_user = min_t(unsigned int, sfd.size_user, + sizeof(struct tcp_info)); + + infoptr = optval + sfd.size_subflow_data; + + lock_sock(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + ++sfcount; + + if (len && len >= sfd.size_user) { + struct tcp_info info; + + tcp_get_info(ssk, &info); + + if (copy_to_user(infoptr, &info, sfd.size_user)) { + release_sock(sk); + return -EFAULT; + } + + infoptr += sfd.size_user; + copied += sfd.size_user; + len -= sfd.size_user; + } + } + + release_sock(sk); + + sfd.num_subflows = sfcount; + + if (mptcp_put_subflow_data(&sfd, optval, copied, optlen)) + return -EFAULT; + + return 0; +} + +static void mptcp_get_sub_addrs(const struct sock *sk, struct mptcp_subflow_addrs *a) +{ + struct inet_sock *inet = inet_sk(sk); + + memset(a, 0, sizeof(*a)); + + if (sk->sk_family == AF_INET) { + a->sin_local.sin_family = AF_INET; + a->sin_local.sin_port = inet->inet_sport; + a->sin_local.sin_addr.s_addr = inet->inet_rcv_saddr; + + if (!a->sin_local.sin_addr.s_addr) + a->sin_local.sin_addr.s_addr = inet->inet_saddr; + + a->sin_remote.sin_family = AF_INET; + a->sin_remote.sin_port = inet->inet_dport; + a->sin_remote.sin_addr.s_addr = inet->inet_daddr; +#if IS_ENABLED(CONFIG_IPV6) + } else if (sk->sk_family == AF_INET6) { + const struct ipv6_pinfo *np = inet6_sk(sk); 
+ + if (WARN_ON_ONCE(!np)) + return; + + a->sin6_local.sin6_family = AF_INET6; + a->sin6_local.sin6_port = inet->inet_sport; + + if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) + a->sin6_local.sin6_addr = np->saddr; + else + a->sin6_local.sin6_addr = sk->sk_v6_rcv_saddr; + + a->sin6_remote.sin6_family = AF_INET6; + a->sin6_remote.sin6_port = inet->inet_dport; + a->sin6_remote.sin6_addr = sk->sk_v6_daddr; +#endif + } +} + +static int mptcp_getsockopt_subflow_addrs(struct mptcp_sock *msk, char __user *optval, + int __user *optlen) +{ + struct sock *sk = &msk->sk.icsk_inet.sk; + struct mptcp_subflow_context *subflow; + unsigned int sfcount = 0, copied = 0; + struct mptcp_subflow_data sfd; + char __user *addrptr; + int len; + + len = mptcp_get_subflow_data(&sfd, optval, optlen); + if (len < 0) + return len; + + sfd.size_kernel = sizeof(struct mptcp_subflow_addrs); + sfd.size_user = min_t(unsigned int, sfd.size_user, + sizeof(struct mptcp_subflow_addrs)); + + addrptr = optval + sfd.size_subflow_data; + + lock_sock(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + ++sfcount; + + if (len && len >= sfd.size_user) { + struct mptcp_subflow_addrs a; + + mptcp_get_sub_addrs(ssk, &a); + + if (copy_to_user(addrptr, &a, sfd.size_user)) { + release_sock(sk); + return -EFAULT; + } + + addrptr += sfd.size_user; + copied += sfd.size_user; + len -= sfd.size_user; + } + } + + release_sock(sk); + + sfd.num_subflows = sfcount; + + if (mptcp_put_subflow_data(&sfd, optval, copied, optlen)) + return -EFAULT; + + return 0; +} + static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -684,6 +946,21 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, return -EOPNOTSUPP; } +static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + switch (optname) { + case MPTCP_INFO: + return mptcp_getsockopt_info(msk, optval, optlen); + case MPTCP_TCPINFO: + return mptcp_getsockopt_tcpinfo(msk, optval, optlen); + case MPTCP_SUBFLOW_ADDRS: + return mptcp_getsockopt_subflow_addrs(msk, optval, optlen); + } + + return -EOPNOTSUPP; +} + int mptcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *option) { @@ -706,6 +983,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); + if (level == SOL_MPTCP) + return mptcp_getsockopt_sol_mptcp(msk, optname, optval, option); return -EOPNOTSUPP; } diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 92a747896f8086..3646fc195e7de6 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -10,6 +10,17 @@ config NETFILTER_INGRESS This allows you to classify packets from ingress using the Netfilter infrastructure. +config NETFILTER_EGRESS + bool "Netfilter egress support" + default y + select NET_EGRESS + help + This allows you to classify packets before transmission using the + Netfilter infrastructure. 
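Note: the NETFILTER_EGRESS option added here, together with the nf_hook_entry_head() and __nf_register_net_hook() changes to net/netfilter/core.c later in this section, lets netdev-family hooks attach at the new NF_NETDEV_EGRESS point. The in-kernel sketch below illustrates such a registration; it is not part of the patch, the identifiers sample_* are invented for the example, and it assumes the caller already holds a valid struct net_device in the right netns (the registration path requires ops->dev to be set).

    /* Illustrative sketch (not part of this patch): attach a hook at the
     * new egress point. Requires CONFIG_NETFILTER_EGRESS.
     */
    #include <linux/netdevice.h>
    #include <linux/netfilter.h>

    static unsigned int sample_egress_hook(void *priv, struct sk_buff *skb,
                                           const struct nf_hook_state *state)
    {
            /* packet is seen just before it reaches the qdisc/driver */
            return NF_ACCEPT;
    }

    static struct nf_hook_ops sample_egress_ops = {
            .hook           = sample_egress_hook,
            .pf             = NFPROTO_NETDEV,
            .hooknum        = NF_NETDEV_EGRESS,
            .priority       = 0,
    };

    static int sample_attach(struct net *net, struct net_device *dev)
    {
            sample_egress_ops.dev = dev;    /* netdev hooks are per device */
            return nf_register_net_hook(net, &sample_egress_ops);
    }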
+ +config NETFILTER_SKIP_EGRESS + def_bool NETFILTER_EGRESS && (NET_CLS_ACT || IFB) + config NETFILTER_NETLINK tristate diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 63d032191e6269..6dec9cd395f157 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -316,6 +316,12 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum, if (dev && dev_net(dev) == net) return &dev->nf_hooks_ingress; } +#endif +#ifdef CONFIG_NETFILTER_EGRESS + if (hooknum == NF_NETDEV_EGRESS) { + if (dev && dev_net(dev) == net) + return &dev->nf_hooks_egress; + } #endif WARN_ON_ONCE(1); return NULL; @@ -335,7 +341,8 @@ static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg, return 0; } -static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf) +static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg, + int pf) { if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) || (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS)) @@ -344,6 +351,12 @@ static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf) return false; } +static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg, + int pf) +{ + return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS; +} + static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf) { #ifdef CONFIG_JUMP_LABEL @@ -383,9 +396,18 @@ static int __nf_register_net_hook(struct net *net, int pf, switch (pf) { case NFPROTO_NETDEV: - err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS); - if (err < 0) - return err; +#ifndef CONFIG_NETFILTER_INGRESS + if (reg->hooknum == NF_NETDEV_INGRESS) + return -EOPNOTSUPP; +#endif +#ifndef CONFIG_NETFILTER_EGRESS + if (reg->hooknum == NF_NETDEV_EGRESS) + return -EOPNOTSUPP; +#endif + if ((reg->hooknum != NF_NETDEV_INGRESS && + reg->hooknum != NF_NETDEV_EGRESS) || + !reg->dev || dev_net(reg->dev) != net) + return -EINVAL; break; case NFPROTO_INET: if (reg->hooknum != NF_INET_INGRESS) @@ -417,6 +439,10 @@ static int __nf_register_net_hook(struct net *net, int pf, #ifdef CONFIG_NETFILTER_INGRESS if (nf_ingress_hook(reg, pf)) net_inc_ingress_queue(); +#endif +#ifdef CONFIG_NETFILTER_EGRESS + if (nf_egress_hook(reg, pf)) + net_inc_egress_queue(); #endif nf_static_key_inc(reg, pf); @@ -474,6 +500,10 @@ static void __nf_unregister_net_hook(struct net *net, int pf, #ifdef CONFIG_NETFILTER_INGRESS if (nf_ingress_hook(reg, pf)) net_dec_ingress_queue(); +#endif +#ifdef CONFIG_NETFILTER_EGRESS + if (nf_egress_hook(reg, pf)) + net_dec_egress_queue(); #endif nf_static_key_dec(reg, pf); } else { diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 128690c512dff1..e93c937a8bf026 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1330,12 +1330,15 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * Check if outgoing packet belongs to the established ip_vs_conn. 
*/ static unsigned int -ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af) +ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { + struct netns_ipvs *ipvs = net_ipvs(state->net); + unsigned int hooknum = state->hook; struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; + int af = state->pf; struct sock *sk; EnterFunction(11); @@ -1468,56 +1471,6 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in return NF_ACCEPT; } -/* - * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain, - * used only for VS/NAT. - * Check if packet is reply for established ip_vs_conn. - */ -static unsigned int -ip_vs_reply4(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET); -} - -/* - * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT. - * Check if packet is reply for established ip_vs_conn. - */ -static unsigned int -ip_vs_local_reply4(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET); -} - -#ifdef CONFIG_IP_VS_IPV6 - -/* - * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain, - * used only for VS/NAT. - * Check if packet is reply for established ip_vs_conn. - */ -static unsigned int -ip_vs_reply6(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6); -} - -/* - * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT. - * Check if packet is reply for established ip_vs_conn. - */ -static unsigned int -ip_vs_local_reply6(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6); -} - -#endif - static unsigned int ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, @@ -1957,8 +1910,10 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb, * and send it on its way... */ static unsigned int -ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af) +ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { + struct netns_ipvs *ipvs = net_ipvs(state->net); + unsigned int hooknum = state->hook; struct ip_vs_iphdr iph; struct ip_vs_protocol *pp; struct ip_vs_proto_data *pd; @@ -1966,6 +1921,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int int ret, pkts; int conn_reuse_mode; struct sock *sk; + int af = state->pf; /* Already marked as IPVS request or reply? 
*/ if (skb->ipvs_property) @@ -2137,55 +2093,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int return ret; } -/* - * AF_INET handler in NF_INET_LOCAL_IN chain - * Schedule and forward packets from remote clients - */ -static unsigned int -ip_vs_remote_request4(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET); -} - -/* - * AF_INET handler in NF_INET_LOCAL_OUT chain - * Schedule and forward packets from local clients - */ -static unsigned int -ip_vs_local_request4(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET); -} - -#ifdef CONFIG_IP_VS_IPV6 - -/* - * AF_INET6 handler in NF_INET_LOCAL_IN chain - * Schedule and forward packets from remote clients - */ -static unsigned int -ip_vs_remote_request6(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6); -} - -/* - * AF_INET6 handler in NF_INET_LOCAL_OUT chain - * Schedule and forward packets from local clients - */ -static unsigned int -ip_vs_local_request6(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6); -} - -#endif - - /* * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP * related packets destined for 0.0.0.0/0. @@ -2199,45 +2106,36 @@ static unsigned int ip_vs_forward_icmp(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - int r; struct netns_ipvs *ipvs = net_ipvs(state->net); - - if (ip_hdr(skb)->protocol != IPPROTO_ICMP) - return NF_ACCEPT; + int r; /* ipvs enabled in this netns ? */ if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) return NF_ACCEPT; - return ip_vs_in_icmp(ipvs, skb, &r, state->hook); -} - + if (state->pf == NFPROTO_IPV4) { + if (ip_hdr(skb)->protocol != IPPROTO_ICMP) + return NF_ACCEPT; #ifdef CONFIG_IP_VS_IPV6 -static unsigned int -ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - int r; - struct netns_ipvs *ipvs = net_ipvs(state->net); - struct ip_vs_iphdr iphdr; + } else { + struct ip_vs_iphdr iphdr; - ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr); - if (iphdr.protocol != IPPROTO_ICMPV6) - return NF_ACCEPT; + ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr); - /* ipvs enabled in this netns ? */ - if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) - return NF_ACCEPT; + if (iphdr.protocol != IPPROTO_ICMPV6) + return NF_ACCEPT; - return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr); -} + return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr); #endif + } + return ip_vs_in_icmp(ipvs, skb, &r, state->hook); +} static const struct nf_hook_ops ip_vs_ops4[] = { /* After packet filtering, change source only for VS/NAT */ { - .hook = ip_vs_reply4, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC - 2, @@ -2246,21 +2144,21 @@ static const struct nf_hook_ops ip_vs_ops4[] = { * or VS/NAT(change destination), so that filtering rules can be * applied to IPVS. 
*/ { - .hook = ip_vs_remote_request4, + .hook = ip_vs_in_hook, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC - 1, }, /* Before ip_vs_in, change source only for VS/NAT */ { - .hook = ip_vs_local_reply4, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_NAT_DST + 1, }, /* After mangle, schedule and forward local requests */ { - .hook = ip_vs_local_request4, + .hook = ip_vs_in_hook, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_NAT_DST + 2, @@ -2275,7 +2173,7 @@ static const struct nf_hook_ops ip_vs_ops4[] = { }, /* After packet filtering, change source only for VS/NAT */ { - .hook = ip_vs_reply4, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV4, .hooknum = NF_INET_FORWARD, .priority = 100, @@ -2286,7 +2184,7 @@ static const struct nf_hook_ops ip_vs_ops4[] = { static const struct nf_hook_ops ip_vs_ops6[] = { /* After packet filtering, change source only for VS/NAT */ { - .hook = ip_vs_reply6, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP6_PRI_NAT_SRC - 2, @@ -2295,21 +2193,21 @@ static const struct nf_hook_ops ip_vs_ops6[] = { * or VS/NAT(change destination), so that filtering rules can be * applied to IPVS. */ { - .hook = ip_vs_remote_request6, + .hook = ip_vs_in_hook, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP6_PRI_NAT_SRC - 1, }, /* Before ip_vs_in, change source only for VS/NAT */ { - .hook = ip_vs_local_reply6, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP6_PRI_NAT_DST + 1, }, /* After mangle, schedule and forward local requests */ { - .hook = ip_vs_local_request6, + .hook = ip_vs_in_hook, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP6_PRI_NAT_DST + 2, @@ -2317,14 +2215,14 @@ static const struct nf_hook_ops ip_vs_ops6[] = { /* After packet filtering (but before ip_vs_out_icmp), catch icmp * destined for 0.0.0.0/0, which is for incoming IPVS connections */ { - .hook = ip_vs_forward_icmp_v6, + .hook = ip_vs_forward_icmp, .pf = NFPROTO_IPV6, .hooknum = NF_INET_FORWARD, .priority = 99, }, /* After packet filtering, change source only for VS/NAT */ { - .hook = ip_vs_reply6, + .hook = ip_vs_out_hook, .pf = NFPROTO_IPV6, .hooknum = NF_INET_FORWARD, .priority = 100, diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 29ec3ef63edc7a..e62b40bd349e22 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2017,6 +2017,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "run_estimation", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", @@ -4090,6 +4096,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; + ipvs->sysctl_run_estimation = 1; + tbl[idx++].data = &ipvs->sysctl_run_estimation; #ifdef CONFIG_IP_VS_DEBUG /* Global sysctls must be ro in non-init netns */ if (!net_eq(net, &init_net)) diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 05b8112ffb378f..9a1a7af6a186a6 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c @@ -100,6 +100,9 @@ static void estimation_timer(struct timer_list *t) u64 rate; struct netns_ipvs *ipvs = 
from_timer(ipvs, t, est_timer); + if (!sysctl_run_estimation(ipvs)) + goto skip; + spin_lock(&ipvs->est_lock); list_for_each_entry(e, &ipvs->est_list, list) { s = container_of(e, struct ip_vs_stats, est); @@ -131,6 +134,8 @@ static void estimation_timer(struct timer_list *t) spin_unlock(&s->lock); } spin_unlock(&ipvs->est_lock); + +skip: mod_timer(&ipvs->est_timer, jiffies + 2*HZ); } diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 8f7a9837349c18..d1f2d3c8d2b180 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -155,6 +155,16 @@ unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff, } EXPORT_SYMBOL_GPL(nf_confirm); +static bool in_vrf_postrouting(const struct nf_hook_state *state) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (state->hook == NF_INET_POST_ROUTING && + netif_is_l3_master(state->out)) + return true; +#endif + return false; +} + static unsigned int ipv4_confirm(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -166,6 +176,9 @@ static unsigned int ipv4_confirm(void *priv, if (!ct || ctinfo == IP_CT_RELATED_REPLY) return nf_conntrack_confirm(skb); + if (in_vrf_postrouting(state)) + return NF_ACCEPT; + return nf_confirm(skb, skb_network_offset(skb) + ip_hdrlen(skb), ct, ctinfo); @@ -374,6 +387,9 @@ static unsigned int ipv6_confirm(void *priv, if (!ct || ctinfo == IP_CT_RELATED_REPLY) return nf_conntrack_confirm(skb); + if (in_vrf_postrouting(state)) + return NF_ACCEPT; + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off); if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index f8e3c0d2602f68..3b516cffc779bf 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -104,10 +104,13 @@ int nf_conntrack_udp_packet(struct nf_conn *ct, */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { unsigned long extra = timeouts[UDP_CT_UNREPLIED]; + bool stream = false; /* Still active after two seconds? Extend timeout. */ - if (time_after(jiffies, ct->proto.udp.stream_ts)) + if (time_after(jiffies, ct->proto.udp.stream_ts)) { extra = timeouts[UDP_CT_REPLIED]; + stream = true; + } nf_ct_refresh_acct(ct, ctinfo, skb, extra); @@ -116,7 +119,7 @@ int nf_conntrack_udp_packet(struct nf_conn *ct, return NF_ACCEPT; /* Also, more likely to be important, and not a probe */ - if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + if (stream && !test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 27311768392285..4d50d51db79627 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -699,6 +699,16 @@ unsigned int nf_nat_packet(struct nf_conn *ct, } EXPORT_SYMBOL_GPL(nf_nat_packet); +static bool in_vrf_postrouting(const struct nf_hook_state *state) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (state->hook == NF_INET_POST_ROUTING && + netif_is_l3_master(state->out)) + return true; +#endif + return false; +} + unsigned int nf_nat_inet_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -715,7 +725,7 @@ nf_nat_inet_fn(void *priv, struct sk_buff *skb, * packet filter it out, or implement conntrack/NAT for that * protocol. 
8) --RR */ - if (!ct) + if (!ct || in_vrf_postrouting(state)) return NF_ACCEPT; nat = nfct_nat(ct); diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 866cfba04d6c01..adc3480560767d 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -79,7 +79,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr, if (priv->base == NFT_PAYLOAD_NETWORK_HEADER) ptr = skb_network_header(skb); else { - if (!pkt->tprot_set) + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) return false; ptr = skb_network_header(skb) + nft_thoff(pkt); } diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index e4fe2f0780eb6f..84a7dea46efaed 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -113,13 +113,13 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb, int off = skb_network_offset(skb); unsigned int len, nh_end; - nh_end = pkt->tprot_set ? nft_thoff(pkt) : skb->len; + nh_end = pkt->flags & NFT_PKTINFO_L4PROTO ? nft_thoff(pkt) : skb->len; len = min_t(unsigned int, nh_end - skb_network_offset(skb), NFT_TRACETYPE_NETWORK_HSIZE); if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len)) return -1; - if (pkt->tprot_set) { + if (pkt->flags & NFT_PKTINFO_L4PROTO) { len = min_t(unsigned int, skb->len - nft_thoff(pkt), NFT_TRACETYPE_TRANSPORT_HSIZE); if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb, diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c index f554e2ea32eed6..d5c719c9e36ca8 100644 --- a/net/netfilter/nfnetlink_hook.c +++ b/net/netfilter/nfnetlink_hook.c @@ -185,7 +185,7 @@ static const struct nf_hook_entries * nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev) { const struct nf_hook_entries *hook_head = NULL; -#ifdef CONFIG_NETFILTER_INGRESS +#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS) struct net_device *netdev; #endif @@ -221,9 +221,9 @@ nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *de hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); break; #endif -#ifdef CONFIG_NETFILTER_INGRESS +#if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS) case NFPROTO_NETDEV: - if (hook != NF_NETDEV_INGRESS) + if (hook >= NF_NETDEV_NUMHOOKS) return ERR_PTR(-EOPNOTSUPP); if (!dev) @@ -233,7 +233,15 @@ nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *de if (!netdev) return ERR_PTR(-ENODEV); - return rcu_dereference(netdev->nf_hooks_ingress); +#ifdef CONFIG_NETFILTER_INGRESS + if (hook == NF_NETDEV_INGRESS) + return rcu_dereference(netdev->nf_hooks_ingress); +#endif +#ifdef CONFIG_NETFILTER_EGRESS + if (hook == NF_NETDEV_EGRESS) + return rcu_dereference(netdev->nf_hooks_egress); +#endif + fallthrough; #endif default: return ERR_PTR(-EPROTONOSUPPORT); diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c index 3ced0eb6b7c3bf..c3563f0be26925 100644 --- a/net/netfilter/nft_chain_filter.c +++ b/net/netfilter/nft_chain_filter.c @@ -310,9 +310,11 @@ static const struct nft_chain_type nft_chain_filter_netdev = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_NETDEV, - .hook_mask = (1 << NF_NETDEV_INGRESS), + .hook_mask = (1 << NF_NETDEV_INGRESS) | + (1 << NF_NETDEV_EGRESS), .hooks = { [NF_NETDEV_INGRESS] = nft_do_chain_netdev, + [NF_NETDEV_EGRESS] = nft_do_chain_netdev, }, }; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 
6ba3256fa84498..87f3af4645d9c7 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -198,17 +198,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, return -EBUSY; priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP])); - switch (priv->op) { - case NFT_DYNSET_OP_ADD: - case NFT_DYNSET_OP_DELETE: - break; - case NFT_DYNSET_OP_UPDATE: - if (!(set->flags & NFT_SET_TIMEOUT)) - return -EOPNOTSUPP; - break; - default: + if (priv->op > NFT_DYNSET_OP_DELETE) return -EOPNOTSUPP; - } timeout = 0; if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index a7e01e9952f171..fe91ff5f8fbe83 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -244,7 +244,11 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, case NFT_META_OIF: nft_meta_store_ifindex(dest, nft_out(pkt)); break; - case NFT_META_IIFTYPE: + case NFT_META_IFTYPE: + if (!nft_meta_store_iftype(dest, pkt->skb->dev)) + return false; + break; + case __NFT_META_IIFTYPE: if (!nft_meta_store_iftype(dest, nft_in(pkt))) return false; break; @@ -329,7 +333,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, nft_reg_store8(dest, nft_pf(pkt)); break; case NFT_META_L4PROTO: - if (!pkt->tprot_set) + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) goto err; nft_reg_store8(dest, pkt->tprot); break; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index a44b14f6c0dc01..cbfe4e4a4ad7af 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -22,6 +22,7 @@ #include #include #include +#include #include static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off, @@ -79,6 +80,45 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len) return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0; } +static int __nft_payload_inner_offset(struct nft_pktinfo *pkt) +{ + unsigned int thoff = nft_thoff(pkt); + + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) + return -1; + + switch (pkt->tprot) { + case IPPROTO_UDP: + pkt->inneroff = thoff + sizeof(struct udphdr); + break; + case IPPROTO_TCP: { + struct tcphdr *th, _tcph; + + th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph); + if (!th) + return -1; + + pkt->inneroff = thoff + __tcp_hdrlen(th); + } + break; + default: + return -1; + } + + pkt->flags |= NFT_PKTINFO_INNER; + + return 0; +} + +static int nft_payload_inner_offset(const struct nft_pktinfo *pkt) +{ + if (!(pkt->flags & NFT_PKTINFO_INNER) && + __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0) + return -1; + + return pkt->inneroff; +} + void nft_payload_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -108,10 +148,15 @@ void nft_payload_eval(const struct nft_expr *expr, offset = skb_network_offset(skb); break; case NFT_PAYLOAD_TRANSPORT_HEADER: - if (!pkt->tprot_set) + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) goto err; offset = nft_thoff(pkt); break; + case NFT_PAYLOAD_INNER_HEADER: + offset = nft_payload_inner_offset(pkt); + if (offset < 0) + goto err; + break; default: BUG(); } @@ -610,10 +655,15 @@ static void nft_payload_set_eval(const struct nft_expr *expr, offset = skb_network_offset(skb); break; case NFT_PAYLOAD_TRANSPORT_HEADER: - if (!pkt->tprot_set) + if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) goto err; offset = nft_thoff(pkt); break; + case NFT_PAYLOAD_INNER_HEADER: + offset = nft_payload_inner_offset(pkt); + if (offset < 0) + goto err; + break; default: BUG(); } @@ -622,7 +672,8 @@ static void 
nft_payload_set_eval(const struct nft_expr *expr, offset += priv->offset; if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) && - (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER || + ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER && + priv->base != NFT_PAYLOAD_INNER_HEADER) || skb->ip_summed != CHECKSUM_PARTIAL)) { fsum = skb_checksum(skb, offset, priv->len, 0); tsum = csum_partial(src, priv->len, 0); @@ -741,6 +792,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx, case NFT_PAYLOAD_LL_HEADER: case NFT_PAYLOAD_NETWORK_HEADER: case NFT_PAYLOAD_TRANSPORT_HEADER: + case NFT_PAYLOAD_INNER_HEADER: break; default: return ERR_PTR(-EOPNOTSUPP); @@ -759,7 +811,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx, len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && - base != NFT_PAYLOAD_LL_HEADER) + base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER) return &nft_payload_fast_ops; else return &nft_payload_ops; diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 0d5c422f87452f..8aec1b529364ae 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -94,11 +94,11 @@ static unsigned int xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_rateest_target_info *info = par->targinfo; - struct gnet_stats_basic_packed *stats = &info->est->bstats; + struct gnet_stats_basic_sync *stats = &info->est->bstats; spin_lock_bh(&info->est->lock); - stats->bytes += skb->len; - stats->packets++; + u64_stats_add(&stats->bytes, skb->len); + u64_stats_inc(&stats->packets); spin_unlock_bh(&info->est->lock); return XT_CONTINUE; @@ -143,6 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) if (!est) goto err1; + gnet_stats_basic_sync_init(&est->bstats); strlcpy(est->name, info->name, sizeof(est->name)); spin_lock_init(&est->lock); est->refcnt = 1; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index ada47e59647a03..4c575324a98528 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1412,8 +1412,6 @@ struct netlink_broadcast_data { int delivered; gfp_t allocation; struct sk_buff *skb, *skb2; - int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); - void *tx_data; }; static void do_one_broadcast(struct sock *sk, @@ -1467,11 +1465,6 @@ static void do_one_broadcast(struct sock *sk, p->delivery_failure = 1; goto out; } - if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { - kfree_skb(p->skb2); - p->skb2 = NULL; - goto out; - } if (sk_filter(sk, p->skb2)) { kfree_skb(p->skb2); p->skb2 = NULL; @@ -1494,10 +1487,8 @@ static void do_one_broadcast(struct sock *sk, sock_put(sk); } -int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid, - u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), - void *filter_data) +int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid, + u32 group, gfp_t allocation) { struct net *net = sock_net(ssk); struct netlink_broadcast_data info; @@ -1516,8 +1507,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid info.allocation = allocation; info.skb = skb; info.skb2 = NULL; - info.tx_filter = filter; - info.tx_data = filter_data; /* While we sleep in clone, do not allow to change socket list */ @@ -1543,14 +1532,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid } return -ESRCH; } 
-EXPORT_SYMBOL(netlink_broadcast_filtered); - -int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid, - u32 group, gfp_t allocation) -{ - return netlink_broadcast_filtered(ssk, skb, portid, group, allocation, - NULL, NULL); -} EXPORT_SYMBOL(netlink_broadcast); struct netlink_set_err_data { diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 6d16e1ab1a8aba..775064cdd0ee4d 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -633,7 +633,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr; - ax25_address *source = NULL; + const ax25_address *source = NULL; ax25_uid_assoc *user; struct net_device *dev; int err = 0; @@ -673,7 +673,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, err = -ENETUNREACH; goto out_release; } - source = (ax25_address *)dev->dev_addr; + source = (const ax25_address *)dev->dev_addr; user = ax25_findbyuid(current_euid()); if (user) { diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 29e418c8c6c30f..3aaac4a22b3876 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -108,10 +108,10 @@ static int __must_check nr_set_mac_address(struct net_device *dev, void *addr) if (err) return err; - ax25_listen_release((ax25_address *)dev->dev_addr, NULL); + ax25_listen_release((const ax25_address *)dev->dev_addr, NULL); } - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -120,7 +120,7 @@ static int nr_open(struct net_device *dev) { int err; - err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL); + err = ax25_listen_register((const ax25_address *)dev->dev_addr, NULL); if (err) return err; @@ -131,7 +131,7 @@ static int nr_open(struct net_device *dev) static int nr_close(struct net_device *dev) { - ax25_listen_release((ax25_address *)dev->dev_addr, NULL); + ax25_listen_release((const ax25_address *)dev->dev_addr, NULL); netif_stop_queue(dev); return 0; } diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index ddd5cbd455e392..baea3cbd76ca5b 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -598,7 +598,7 @@ struct net_device *nr_dev_get(ax25_address *addr) rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && - ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { + ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } @@ -825,7 +825,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) ax25s = nr_neigh->ax25; nr_neigh->ax25 = ax25_send_frame(skb, 256, - (ax25_address *)dev->dev_addr, + (const ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); if (ax25s) diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c index 3a89bd9b89fc37..af6bacb3ba9866 100644 --- a/net/nfc/hci/command.c +++ b/net/nfc/hci/command.c @@ -114,8 +114,6 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, { u8 pipe; - pr_debug("\n"); - pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; @@ -130,8 +128,6 @@ int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, { u8 pipe; - pr_debug("\n"); - pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; @@ -205,8 +201,6 @@ static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe) static int nfc_hci_close_pipe(struct nfc_hci_dev 
*hdev, u8 pipe) { - pr_debug("\n"); - return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE, NULL, 0, NULL); } @@ -242,8 +236,6 @@ static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) { - pr_debug("\n"); - return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } @@ -256,8 +248,6 @@ static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) /* TODO: Find out what the identity reference data is * and fill param with it. HCI spec 6.1.3.5 */ - pr_debug("\n"); - if (test_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &hdev->quirks)) param_len = 0; @@ -271,8 +261,6 @@ int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) int r; u8 pipe = hdev->gate2pipe[gate]; - pr_debug("\n"); - if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; @@ -296,8 +284,6 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev) { int r; - pr_debug("\n"); - r = nfc_hci_clear_all_pipes(hdev); if (r < 0) return r; @@ -314,8 +300,6 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, bool pipe_created = false; int r; - pr_debug("\n"); - if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE) return 0; diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c index aef750d7787c86..e90f70385813a8 100644 --- a/net/nfc/hci/llc_shdlc.c +++ b/net/nfc/hci/llc_shdlc.c @@ -201,8 +201,7 @@ static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr) del_timer_sync(&shdlc->t2_timer); shdlc->t2_active = false; - pr_debug - ("All sent frames acked. Stopped T2(retransmit)\n"); + pr_debug("All sent frames acked. Stopped T2(retransmit)\n"); } } else { skb = skb_peek(&shdlc->ack_pending_q); @@ -211,8 +210,7 @@ static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr) msecs_to_jiffies(SHDLC_T2_VALUE_MS)); shdlc->t2_active = true; - pr_debug - ("Start T2(retransmit) for remaining unacked sent frames\n"); + pr_debug("Start T2(retransmit) for remaining unacked sent frames\n"); } } @@ -365,8 +363,6 @@ static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc) { struct sk_buff *skb; - pr_debug("\n"); - skb = llc_shdlc_alloc_skb(shdlc, 2); if (skb == NULL) return -ENOMEM; @@ -381,8 +377,6 @@ static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc) { struct sk_buff *skb; - pr_debug("\n"); - skb = llc_shdlc_alloc_skb(shdlc, 0); if (skb == NULL) return -ENOMEM; @@ -522,12 +516,11 @@ static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc) unsigned long time_sent; if (shdlc->send_q.qlen) - pr_debug - ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", - shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, - shdlc->rnr == false ? "false" : "true", - shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr), - shdlc->ack_pending_q.qlen); + pr_debug("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", + shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, + shdlc->rnr == false ? 
"false" : "true", + shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr), + shdlc->ack_pending_q.qlen); while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w && (shdlc->rnr == false)) { @@ -573,8 +566,6 @@ static void llc_shdlc_connect_timeout(struct timer_list *t) { struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer); - pr_debug("\n"); - schedule_work(&shdlc->sm_work); } @@ -601,8 +592,6 @@ static void llc_shdlc_sm_work(struct work_struct *work) struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work); int r; - pr_debug("\n"); - mutex_lock(&shdlc->state_mutex); switch (shdlc->state) { @@ -649,8 +638,7 @@ static void llc_shdlc_sm_work(struct work_struct *work) llc_shdlc_handle_send_queue(shdlc); if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) { - pr_debug - ("Handle T1(send ack) elapsed (T1 now inactive)\n"); + pr_debug("Handle T1(send ack) elapsed (T1 now inactive)\n"); shdlc->t1_active = false; r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR, @@ -660,8 +648,7 @@ static void llc_shdlc_sm_work(struct work_struct *work) } if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) { - pr_debug - ("Handle T2(retransmit) elapsed (T2 inactive)\n"); + pr_debug("Handle T2(retransmit) elapsed (T2 inactive)\n"); shdlc->t2_active = false; @@ -686,8 +673,6 @@ static int llc_shdlc_connect(struct llc_shdlc *shdlc) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq); - pr_debug("\n"); - mutex_lock(&shdlc->state_mutex); shdlc->state = SHDLC_CONNECTING; @@ -706,8 +691,6 @@ static int llc_shdlc_connect(struct llc_shdlc *shdlc) static void llc_shdlc_disconnect(struct llc_shdlc *shdlc) { - pr_debug("\n"); - mutex_lock(&shdlc->state_mutex); shdlc->state = SHDLC_DISCONNECTED; diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 3c4172a5aeb5e1..41e3a20c893556 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -337,8 +337,6 @@ int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) struct nfc_dev *dev; struct nfc_llcp_local *local; - pr_debug("Sending DISC\n"); - local = sock->local; if (local == NULL) return -ENODEV; @@ -362,8 +360,6 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) struct nfc_llcp_local *local; u16 size = 0; - pr_debug("Sending SYMM\n"); - local = nfc_llcp_find_local(dev); if (local == NULL) return -ENODEV; @@ -399,8 +395,6 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) u16 size = 0; __be16 miux; - pr_debug("Sending CONNECT\n"); - local = sock->local; if (local == NULL) return -ENODEV; @@ -475,8 +469,6 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) u16 size = 0; __be16 miux; - pr_debug("Sending CC\n"); - local = sock->local; if (local == NULL) return -ENODEV; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index eaeb2b1cfa6ac0..5ad5157aa9c57f 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -45,8 +45,6 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) struct nfc_llcp_local *local = sock->local; struct sk_buff *s, *tmp; - pr_debug("%p\n", &sock->sk); - skb_queue_purge(&sock->tx_queue); skb_queue_purge(&sock->tx_pending_queue); @@ -1505,9 +1503,8 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) { struct nfc_llcp_local *local = (struct nfc_llcp_local *) data; - pr_debug("Received an LLCP PDU\n"); if (err < 0) { - pr_err("err %d\n", err); + pr_err("LLCP PDU receive err %d\n", err); return; } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 82ab39d80726e0..6fd873aa86bee0 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -930,8 +930,6 @@ 
static void nci_deactivate_target(struct nfc_dev *nfc_dev, struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); unsigned long nci_mode = NCI_DEACTIVATE_TYPE_IDLE_MODE; - pr_debug("entry\n"); - if (!ndev->target_active_prot) { pr_err("unable to deactivate target, no active target\n"); return; @@ -977,8 +975,6 @@ static int nci_dep_link_down(struct nfc_dev *nfc_dev) struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; - pr_debug("entry\n"); - if (nfc_dev->rf_mode == NFC_RF_INITIATOR) { nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE); } else { diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index e199912ee1e594..19703a649b5a68 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c @@ -432,8 +432,6 @@ void nci_hci_data_received_cb(void *context, struct sk_buff *frag_skb; int msg_len; - pr_debug("\n"); - if (err) { nci_req_complete(ndev, err); return; @@ -547,8 +545,6 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host, static int nci_hci_delete_pipe(struct nci_dev *ndev, u8 pipe) { - pr_debug("\n"); - return nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index c5eacaac41aeab..282c51051dccd8 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -738,8 +738,6 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, const struct nci_nfcee_discover_ntf *nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data; - pr_debug("\n"); - /* NFCForum NCI 9.2.1 HCI Network Specific Handling * If the NFCC supports the HCI Network, it SHALL return one, * and only one, NFCEE_DISCOVER_NTF with a Protocol type of @@ -751,12 +749,6 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, nci_req_complete(ndev, status); } -static void nci_nfcee_action_ntf_packet(struct nci_dev *ndev, - const struct sk_buff *skb) -{ - pr_debug("\n"); -} - void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); @@ -813,7 +805,6 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) break; case NCI_OP_RF_NFCEE_ACTION_NTF: - nci_nfcee_action_ntf_packet(ndev, skb); break; default: diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c index 502e7a3f8948b6..57500c262cc3dc 100644 --- a/net/nfc/nci/uart.c +++ b/net/nfc/nci/uart.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015, Marvell International Ltd. * - * This software file (the "File") is distributed by Marvell International - * Ltd. under the terms of the GNU General Public License Version 2, June 1991 - * (the "License"). You may use, redistribute and/or modify this File in - * accordance with the terms and conditions of the License, a copy of which - * is available on the worldwide web at - * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - * - * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE - * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE - * ARE EXPRESSLY DISCLAIMED. The License provides additional details about - * this warranty disclaimer. - */ - -/* Inspired (hugely) by HCI LDISC implementation in Bluetooth. + * Inspired (hugely) by HCI LDISC implementation in Bluetooth. 
* * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2a2bc64f75cfd8..46943a18a10d54 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -91,6 +91,7 @@ #endif #include #include +#include #include "internal.h" @@ -241,8 +242,42 @@ struct packet_skb_cb { static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); +#ifdef CONFIG_NETFILTER_EGRESS +static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb) +{ + struct sk_buff *next, *head = NULL, *tail; + int rc; + + rcu_read_lock(); + for (; skb != NULL; skb = next) { + next = skb->next; + skb_mark_not_on_list(skb); + + if (!nf_hook_egress(skb, &rc, skb->dev)) + continue; + + if (!head) + head = skb; + else + tail->next = skb; + + tail = skb; + } + rcu_read_unlock(); + + return head; +} +#endif + static int packet_direct_xmit(struct sk_buff *skb) { +#ifdef CONFIG_NETFILTER_EGRESS + if (nf_hook_egress_active()) { + skb = nf_hook_direct_egress(skb); + if (!skb) + return NET_XMIT_DROP; + } +#endif return dev_direct_xmit(skb, packet_pick_tx_queue(skb)); } diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile index 1b1411d158a732..8e0605f88a73d9 100644 --- a/net/qrtr/Makefile +++ b/net/qrtr/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_QRTR) := qrtr.o ns.o +obj-$(CONFIG_QRTR) += qrtr.o +qrtr-y := af_qrtr.o ns.o obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o qrtr-smd-y := smd.o diff --git a/net/qrtr/qrtr.c b/net/qrtr/af_qrtr.c similarity index 100% rename from net/qrtr/qrtr.c rename to net/qrtr/af_qrtr.c diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf7d974e0f619a..30a1cf4c16c673 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -109,7 +109,7 @@ char *rose2asc(char *buf, const rose_address *addr) /* * Compare two ROSE addresses, 0 == equal. */ -int rosecmp(rose_address *addr1, rose_address *addr2) +int rosecmp(const rose_address *addr1, const rose_address *addr2) { int i; @@ -123,7 +123,8 @@ int rosecmp(rose_address *addr1, rose_address *addr2) /* * Compare two ROSE addresses for only mask digits, 0 == equal. 
*/ -int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) +int rosecmpm(const rose_address *addr1, const rose_address *addr2, + unsigned short mask) { unsigned int i, j; diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 051804fbee17f9..f1a76a5820f1e0 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -66,10 +66,10 @@ static int rose_set_mac_address(struct net_device *dev, void *addr) if (err) return err; - rose_del_loopback_node((rose_address *)dev->dev_addr); + rose_del_loopback_node((const rose_address *)dev->dev_addr); } - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + dev_addr_set(dev, sa->sa_data); return 0; } @@ -78,7 +78,7 @@ static int rose_open(struct net_device *dev) { int err; - err = rose_add_loopback_node((rose_address *)dev->dev_addr); + err = rose_add_loopback_node((const rose_address *)dev->dev_addr); if (err) return err; @@ -90,7 +90,7 @@ static int rose_open(struct net_device *dev) static int rose_close(struct net_device *dev) { netif_stop_queue(dev); - rose_del_loopback_node((rose_address *)dev->dev_addr); + rose_del_loopback_node((const rose_address *)dev->dev_addr); return 0; } diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index f6102e6f51617f..8b96a56d3a49b3 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -94,11 +94,11 @@ static void rose_t0timer_expiry(struct timer_list *t) */ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) { - ax25_address *rose_call; + const ax25_address *rose_call; ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) - rose_call = (ax25_address *)neigh->dev->dev_addr; + rose_call = (const ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; @@ -117,11 +117,11 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) */ static int rose_link_up(struct rose_neigh *neigh) { - ax25_address *rose_call; + const ax25_address *rose_call; ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) - rose_call = (ax25_address *)neigh->dev->dev_addr; + rose_call = (const ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index c0e04c261a1563..e2e6b6b7857892 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -401,7 +401,7 @@ void rose_add_loopback_neigh(void) /* * Add a loopback node. */ -int rose_add_loopback_node(rose_address *address) +int rose_add_loopback_node(const rose_address *address) { struct rose_node *rose_node; int err = 0; @@ -446,7 +446,7 @@ int rose_add_loopback_node(rose_address *address) /* * Delete a loopback node. 
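/* Illustrative sketch, not part of the patch above: the netrom/rose hunks
 * replace direct memcpy() writes into dev->dev_addr with dev_addr_set()
 * and add const casts, because the address buffer is now meant to be
 * treated as read-only outside the core helpers.  The callback name below
 * is made up; only dev_addr_set() and the ndo_set_mac_address prototype
 * are taken from the kernel.
 */
#include <linux/netdevice.h>

static int example_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (netif_running(dev))
		return -EBUSY;

	dev_addr_set(dev, sa->sa_data);	/* copies dev->addr_len bytes */
	return 0;
}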
*/ -void rose_del_loopback_node(rose_address *address) +void rose_del_loopback_node(const rose_address *address) { struct rose_node *rose_node; @@ -629,7 +629,8 @@ struct net_device *rose_dev_get(rose_address *addr) rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { - if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { + if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && + rosecmp(addr, (const rose_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } @@ -646,7 +647,8 @@ static int rose_dev_exists(rose_address *addr) rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { - if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) + if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && + rosecmp(addr, (const rose_address *)dev->dev_addr) == 0) goto out; } dev = NULL; diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c index 4e565eeab42607..be61d6f5be8d15 100644 --- a/net/rxrpc/rtt.c +++ b/net/rxrpc/rtt.c @@ -22,7 +22,7 @@ static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer) static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer) { - return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us); + return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us); } static u32 rxrpc_bound_rto(u32 rto) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 7dd3a2dc5fa409..3258da3d5bed51 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -480,16 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, atomic_set(&p->tcfa_bindcnt, 1); if (cpustats) { - p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); + p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); if (!p->cpu_bstats) goto err1; - p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); + p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); if (!p->cpu_bstats_hw) goto err2; p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); if (!p->cpu_qstats) goto err3; } + gnet_stats_basic_sync_init(&p->tcfa_bstats); + gnet_stats_basic_sync_init(&p->tcfa_bstats_hw); spin_lock_init(&p->tcfa_lock); p->tcfa_index = index; p->tcfa_tm.install = jiffies; @@ -499,7 +501,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, if (est) { err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, &p->tcfa_rate_est, - &p->tcfa_lock, NULL, est); + &p->tcfa_lock, false, est); if (err) goto err4; } @@ -1126,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, u64 drops, bool hw) { if (a->cpu_bstats) { - _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); + _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); this_cpu_ptr(a->cpu_qstats)->drops += drops; if (hw) - _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), - bytes, packets); + _bstats_update(this_cpu_ptr(a->cpu_bstats_hw), + bytes, packets); return; } @@ -1171,9 +1173,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, if (err < 0) goto errout; - if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 || - gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw, - &p->tcfa_bstats_hw) < 0 || + if (gnet_stats_copy_basic(&d, p->cpu_bstats, + &p->tcfa_bstats, false) < 0 || + gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw, + &p->tcfa_bstats_hw, false) < 0 || gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || gnet_stats_copy_queue(&d, p->cpu_qstats, 
&p->tcfa_qstats, diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 5c36013339e113..f2bf896331a596 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, int action, filter_res; tcf_lastuse_update(&prog->tcf_tm); - bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb); filter = rcu_dereference(prog->filter); if (at_ingress) { diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 7064a365a1a983..b757f90a2d5892 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, u8 *tlv_data; u16 metalen; - bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb); tcf_lastuse_update(&ife->tcf_tm); if (skb_at_tc_ingress(skb)) @@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, exceed_mtu = true; } - bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb); tcf_lastuse_update(&ife->tcf_tm); if (!metalen) { /* no metadata to send */ diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index e4529b428cf44a..8faa4c58305e3f 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, int ret, mac_len; tcf_lastuse_update(&m->tcf_tm); - bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb); /* Ensure 'data' points at mac_header prior calling mpls manipulating * functions. diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 832157a840fc36..9e77ba8401e532 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -125,7 +125,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, police->common.cpu_bstats, &police->tcf_rate_est, &police->tcf_lock, - NULL, est); + false, est); if (err) goto failure; } else if (tb[TCA_POLICE_AVRATE] && @@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, int ret; tcf_lastuse_update(&police->tcf_tm); - bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb); ret = READ_ONCE(police->tcf_action); p = rcu_dereference_bh(police->params); diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 230501eb9e069f..ce859b0e0deb9d 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a, int retval; tcf_lastuse_update(&s->tcf_tm); - bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb); retval = READ_ONCE(s->tcf_action); psample_group = rcu_dereference_bh(s->psample_group); diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index cbbe1861d3a20c..e617ab4505ca46 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a, * then it would look like "hello_3" (without quotes) */ pr_info("simple: %s_%llu\n", - (char *)d->tcfd_defdata, d->tcf_bstats.packets); + (char *)d->tcfd_defdata, + u64_stats_read(&d->tcf_bstats.packets)); spin_unlock(&d->tcf_lock); return d->tcf_action; } diff --git 
a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 60541853834748..d30ecbfc8f8463 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, int action; tcf_lastuse_update(&d->tcf_tm); - bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb); params = rcu_dereference_bh(d->params); action = READ_ONCE(d->tcf_action); diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index ecb9ee6660954c..9b6b52c5e24ec8 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, u64 flags; tcf_lastuse_update(&d->tcf_tm); - bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); + bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb); action = READ_ONCE(d->tcf_action); if (unlikely(action == TC_ACT_SHOT)) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index eb6345a027e130..aab13ba1176729 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -329,7 +329,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, ARRAY_SIZE(fl_ct_info_to_flower_map), post_ct); skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); - skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); + skb_flow_dissect(skb, &mask->dissector, &skb_key, + FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP); f = fl_mask_lookup(mask, &skb_key); if (f && !tc_skip_sw(f->flags)) { diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 46254968d390fc..0a04468b731455 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -457,7 +457,7 @@ META_COLLECTOR(int_sk_fwd_alloc) *err = -1; return; } - dst->value = sk->sk_forward_alloc; + dst->value = sk_forward_alloc_get(sk); } META_COLLECTOR(int_sk_sndbuf) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 12f39a2dffd475..efcd0b5e9a323b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -507,7 +507,8 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt, list_for_each_entry(stab, &qdisc_stab_list, list) { if (memcmp(&stab->szopts, s, sizeof(*s))) continue; - if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) + if (tsize > 0 && + memcmp(stab->data, tab, flex_array_size(stab, data, tsize))) continue; stab->refcnt++; return stab; @@ -519,14 +520,14 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt, return ERR_PTR(-EINVAL); } - stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); + stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL); if (!stab) return ERR_PTR(-ENOMEM); stab->refcnt = 1; stab->szopts = *s; if (tsize > 0) - memcpy(stab->data, tab, tsize * sizeof(u16)); + memcpy(stab->data, tab, flex_array_size(stab, data, tsize)); list_add_tail(&stab->list, &qdisc_stab_list); @@ -884,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev, static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, u32 portid, u32 seq, u16 flags, int event) { - struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL; + struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL; struct gnet_stats_queue __percpu *cpu_qstats = NULL; struct tcmsg *tcm; struct nlmsghdr *nlh; @@ -942,8 +943,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, cpu_qstats = q->cpu_qstats; } - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q), - &d, cpu_bstats, &q->bstats) < 0 || + if 
(gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 || gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0) goto nla_put_failure; @@ -1264,26 +1264,17 @@ static struct Qdisc *qdisc_create(struct net_device *dev, rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { - seqcount_t *running; - err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) { NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc"); goto err_out4; } - if (sch->parent != TC_H_ROOT && - !(sch->flags & TCQ_F_INGRESS) && - (!p || !(p->flags & TCQ_F_MQROOT))) - running = qdisc_root_sleeping_running(sch); - else - running = &sch->running; - err = gen_new_estimator(&sch->bstats, sch->cpu_bstats, &sch->rate_est, NULL, - running, + true, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Failed to generate new estimator"); @@ -1359,7 +1350,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca, sch->cpu_bstats, &sch->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); } out: diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 7d8518176b45a6..4c8e994cf0a536 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -52,7 +52,7 @@ struct atm_flow_data { struct atm_qdisc_data *parent; /* parent qdisc */ struct socket *sock; /* for closing */ int ref; /* reference count */ - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; struct list_head list; struct atm_flow_data *excess; /* flow for excess traffic; @@ -548,6 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt, pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); INIT_LIST_HEAD(&p->flows); INIT_LIST_HEAD(&p->link.list); + gnet_stats_basic_sync_init(&p->link.bstats); list_add(&p->link.list, &p->flows); p->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle, extack); @@ -652,8 +653,7 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, { struct atm_flow_data *flow = (struct atm_flow_data *)arg; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &flow->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 || gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0) return -1; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index e0da15530f0e91..02d9f0dfe3564a 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -116,7 +116,7 @@ struct cbq_class { long avgidle; long deficit; /* Saved deficit for WRR */ psched_time_t penalized; - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; struct net_rate_estimator __rcu *rate_est; struct tc_cbq_xstats xstats; @@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q) long avgidle = cl->avgidle; long idle; - cl->bstats.packets++; - cl->bstats.bytes += len; + _bstats_update(&cl->bstats, len, 1); /* * (now - last) is total time between packet right edges. 
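/* Illustrative sketch, not part of the patch above: the sch_* and act_*
 * hunks convert gnet_stats_basic_packed counters to gnet_stats_basic_sync.
 * Open-coded "bstats.bytes += len; bstats.packets++" becomes
 * _bstats_update(), the struct is initialised with
 * gnet_stats_basic_sync_init(), and the counters are read back with
 * u64_stats_read().  The example_* names below are made up for the sketch.
 */
#include <net/sch_generic.h>

static struct gnet_stats_basic_sync example_stats;

static void example_init(void)
{
	gnet_stats_basic_sync_init(&example_stats);
}

static void example_account(unsigned int len)
{
	_bstats_update(&example_stats, len, 1);	/* bytes, then packets */
}

static u64 example_bytes_snapshot(void)
{
	return u64_stats_read(&example_stats.bytes);
}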
@@ -1384,8 +1383,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, if (cl->undertime != PSCHED_PASTPERFECT) cl->xstats.undertime = cl->undertime - q->now; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) return -1; @@ -1519,7 +1517,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator"); @@ -1611,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (cl == NULL) goto failure; + gnet_stats_basic_sync_init(&cl->bstats); err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); if (err) { kfree(cl); @@ -1619,9 +1618,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, - NULL, - qdisc_root_sleeping_running(sch), - tca[TCA_RATE]); + NULL, true, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Couldn't create new estimator"); tcf_block_put(cl->block); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 642cd179b7a75a..18e4f7a0b29124 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -19,7 +19,7 @@ struct drr_class { struct Qdisc_class_common common; unsigned int filter_cnt; - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; struct net_rate_estimator __rcu *rate_est; struct list_head alist; @@ -85,8 +85,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, - NULL, - qdisc_root_sleeping_running(sch), + NULL, true, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Failed to replace estimator"); @@ -106,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl == NULL) return -ENOBUFS; + gnet_stats_basic_sync_init(&cl->bstats); cl->common.classid = classid; cl->quantum = quantum; cl->qdisc = qdisc_create_dflt(sch->dev_queue, @@ -118,9 +118,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, - NULL, - qdisc_root_sleeping_running(sch), - tca[TCA_RATE]); + NULL, true, tca[TCA_RATE]); if (err) { NL_SET_ERR_MSG(extack, "Failed to replace estimator"); qdisc_put(cl->qdisc); @@ -267,8 +265,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, if (qlen) xstats.deficit = cl->deficit; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0) return -1; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 1f857ffd1ac238..0eae9ff5edf6ff 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -41,7 +41,7 @@ struct ets_class { struct Qdisc *qdisc; u32 quantum; u32 deficit; - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; }; @@ -325,8 +325,7 @@ static int 
ets_class_dump_stats(struct Qdisc *sch, unsigned long arg, struct ets_class *cl = ets_class_from_arg(sch, arg); struct Qdisc *cl_q = cl->qdisc; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl_q->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 || qdisc_qstats_copy(d, cl_q) < 0) return -1; @@ -661,7 +660,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, q->nbands = nbands; for (i = nstrict; i < q->nstrict; i++) { - INIT_LIST_HEAD(&q->classes[i].alist); if (q->classes[i].qdisc->q.qlen) { list_add_tail(&q->classes[i].alist, &q->active); q->classes[i].deficit = quanta[i]; @@ -687,7 +685,11 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, ets_offload_change(sch); for (i = q->nbands; i < oldbands; i++) { qdisc_put(q->classes[i].qdisc); - memset(&q->classes[i], 0, sizeof(q->classes[i])); + q->classes[i].qdisc = NULL; + q->classes[i].quantum = 0; + q->classes[i].deficit = 0; + gnet_stats_basic_sync_init(&q->classes[i].bstats); + memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats)); } return 0; } @@ -696,7 +698,7 @@ static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct ets_sched *q = qdisc_priv(sch); - int err; + int err, i; if (!opt) return -EINVAL; @@ -706,6 +708,9 @@ static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt, return err; INIT_LIST_HEAD(&q->active); + for (i = 0; i < TCQ_ETS_MAX_BANDS; i++) + INIT_LIST_HEAD(&q->classes[i].alist); + return ets_qdisc_change(sch, opt, extack); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index bb0cd6d3d2c274..839e1235db053b 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -362,6 +362,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 }, [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 }, [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 }, + [TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 }, }; static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, @@ -408,6 +410,11 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT; } + if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]) + q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]); + if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]) + q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]); + if (tb[TCA_FQ_CODEL_INTERVAL]) { u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); @@ -544,10 +551,15 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) q->flows_cnt)) goto nla_put_failure; - if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD && - nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD, - codel_time_to_us(q->cparams.ce_threshold))) - goto nla_put_failure; + if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) { + if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD, + codel_time_to_us(q->cparams.ce_threshold))) + goto nla_put_failure; + if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector)) + goto nla_put_failure; + if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask)) + goto nla_put_failure; + } return nla_nest_end(skb, opts); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a8dd06c74e318c..3b0f620958037e 100644 --- 
a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -304,8 +304,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, /* * Transmit possibly several skbs, and handle the return status as - * required. Owning running seqcount bit guarantees that - * only one CPU can execute this function. + * required. Owning qdisc running bit guarantees that only one CPU + * can execute this function. * * Returns to the caller: * false - hardware queue frozen backoff @@ -606,7 +606,6 @@ struct Qdisc noop_qdisc = { .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, - .running = SEQCNT_ZERO(noop_qdisc.running), .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), .gso_skb = { .next = (struct sk_buff *)&noop_qdisc.gso_skb, @@ -867,7 +866,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = { EXPORT_SYMBOL(pfifo_fast_ops); static struct lock_class_key qdisc_tx_busylock; -static struct lock_class_key qdisc_running_key; struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, @@ -892,11 +890,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, __skb_queue_head_init(&sch->gso_skb); __skb_queue_head_init(&sch->skb_bad_txq); qdisc_skb_head_init(&sch->q); + gnet_stats_basic_sync_init(&sch->bstats); spin_lock_init(&sch->q.lock); if (ops->static_flags & TCQ_F_CPUSTATS) { sch->cpu_bstats = - netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); + netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); if (!sch->cpu_bstats) goto errout1; @@ -916,10 +915,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, lockdep_set_class(&sch->seqlock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); - seqcount_init(&sch->running); - lockdep_set_class(&sch->running, - dev->qdisc_running_key ?: &qdisc_running_key); - sch->ops = ops; sch->flags = ops->static_flags; sch->enqueue = ops->enqueue; @@ -1330,6 +1325,39 @@ static int qdisc_change_tx_queue_len(struct net_device *dev, return 0; } +void dev_qdisc_change_real_num_tx(struct net_device *dev, + unsigned int new_real_tx) +{ + struct Qdisc *qdisc = dev->qdisc; + + if (qdisc->ops->change_real_num_tx) + qdisc->ops->change_real_num_tx(qdisc, new_real_tx); +} + +void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) +{ +#ifdef CONFIG_NET_SCHED + struct net_device *dev = qdisc_dev(sch); + struct Qdisc *qdisc; + unsigned int i; + + for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; + /* Only update the default qdiscs we created, + * qdiscs with handles are always hashed. + */ + if (qdisc != &noop_qdisc && !qdisc->handle) + qdisc_hash_del(qdisc); + } + for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { + qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; + if (qdisc != &noop_qdisc && !qdisc->handle) + qdisc_hash_add(qdisc, false); + } +#endif +} +EXPORT_SYMBOL(mq_change_real_num_tx); + int dev_qdisc_change_tx_queue_len(struct net_device *dev) { bool up = dev->flags & IFF_UP; @@ -1459,10 +1487,6 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64) } EXPORT_SYMBOL(psched_ppscfg_precompute); -static void mini_qdisc_rcu_func(struct rcu_head *head) -{ -} - void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, struct tcf_proto *tp_head) { @@ -1475,28 +1499,30 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, if (!tp_head) { RCU_INIT_POINTER(*miniqp->p_miniq, NULL); - /* Wait for flying RCU callback before it is freed. 
*/ - rcu_barrier(); - return; - } + } else { + miniq = miniq_old != &miniqp->miniq1 ? + &miniqp->miniq1 : &miniqp->miniq2; - miniq = !miniq_old || miniq_old == &miniqp->miniq2 ? - &miniqp->miniq1 : &miniqp->miniq2; + /* We need to make sure that readers won't see the miniq + * we are about to modify. So ensure that at least one RCU + * grace period has elapsed since the miniq was made + * inactive. + */ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + cond_synchronize_rcu(miniq->rcu_state); + else if (!poll_state_synchronize_rcu(miniq->rcu_state)) + synchronize_rcu_expedited(); - /* We need to make sure that readers won't see the miniq - * we are about to modify. So wait until previous call_rcu callback - * is done. - */ - rcu_barrier(); - miniq->filter_list = tp_head; - rcu_assign_pointer(*miniqp->p_miniq, miniq); + miniq->filter_list = tp_head; + rcu_assign_pointer(*miniqp->p_miniq, miniq); + } if (miniq_old) - /* This is counterpart of the rcu barriers above. We need to + /* This is counterpart of the rcu sync above. We need to * block potential new user of miniq_old until all readers * are not seeing it. */ - call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func); + miniq_old->rcu_state = start_poll_synchronize_rcu(); } EXPORT_SYMBOL(mini_qdisc_pair_swap); @@ -1515,6 +1541,8 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; + miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); + miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; miniqp->p_miniq = p_miniq; } EXPORT_SYMBOL(mini_qdisc_pair_init); diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 621dc6afde8f33..1073c76d05c45f 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -56,6 +56,7 @@ struct gred_sched { u32 DPs; u32 def; struct red_vars wred_set; + struct tc_gred_qopt_offload *opt; }; static inline int gred_wred_mode(struct gred_sched *table) @@ -311,48 +312,50 @@ static void gred_offload(struct Qdisc *sch, enum tc_gred_command command) { struct gred_sched *table = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - struct tc_gred_qopt_offload opt = { - .command = command, - .handle = sch->handle, - .parent = sch->parent, - }; + struct tc_gred_qopt_offload *opt = table->opt; if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) return; + memset(opt, 0, sizeof(*opt)); + opt->command = command; + opt->handle = sch->handle; + opt->parent = sch->parent; + if (command == TC_GRED_REPLACE) { unsigned int i; - opt.set.grio_on = gred_rio_mode(table); - opt.set.wred_on = gred_wred_mode(table); - opt.set.dp_cnt = table->DPs; - opt.set.dp_def = table->def; + opt->set.grio_on = gred_rio_mode(table); + opt->set.wred_on = gred_wred_mode(table); + opt->set.dp_cnt = table->DPs; + opt->set.dp_def = table->def; for (i = 0; i < table->DPs; i++) { struct gred_sched_data *q = table->tab[i]; if (!q) continue; - opt.set.tab[i].present = true; - opt.set.tab[i].limit = q->limit; - opt.set.tab[i].prio = q->prio; - opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog; - opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog; - opt.set.tab[i].is_ecn = gred_use_ecn(q); - opt.set.tab[i].is_harddrop = gred_use_harddrop(q); - opt.set.tab[i].probability = q->parms.max_P; - opt.set.tab[i].backlog = &q->backlog; + opt->set.tab[i].present = true; + opt->set.tab[i].limit = q->limit; + opt->set.tab[i].prio = q->prio; + opt->set.tab[i].min = q->parms.qth_min >> 
q->parms.Wlog; + opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog; + opt->set.tab[i].is_ecn = gred_use_ecn(q); + opt->set.tab[i].is_harddrop = gred_use_harddrop(q); + opt->set.tab[i].probability = q->parms.max_P; + opt->set.tab[i].backlog = &q->backlog; } - opt.set.qstats = &sch->qstats; + opt->set.qstats = &sch->qstats; } - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt); } static int gred_offload_dump_stats(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); struct tc_gred_qopt_offload *hw_stats; + u64 bytes = 0, packets = 0; unsigned int i; int ret; @@ -364,9 +367,11 @@ static int gred_offload_dump_stats(struct Qdisc *sch) hw_stats->handle = sch->handle; hw_stats->parent = sch->parent; - for (i = 0; i < MAX_DPs; i++) + for (i = 0; i < MAX_DPs; i++) { + gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]); if (table->tab[i]) hw_stats->stats.xstats[i] = &table->tab[i]->stats; + } ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats); /* Even if driver returns failure adjust the stats - in case offload @@ -375,19 +380,19 @@ static int gred_offload_dump_stats(struct Qdisc *sch) for (i = 0; i < MAX_DPs; i++) { if (!table->tab[i]) continue; - table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets; - table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes; + table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets); + table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes); table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; - _bstats_update(&sch->bstats, - hw_stats->stats.bstats[i].bytes, - hw_stats->stats.bstats[i].packets); + bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes); + packets += u64_stats_read(&hw_stats->stats.bstats[i].packets); sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; sch->qstats.drops += hw_stats->stats.qstats[i].drops; sch->qstats.requeues += hw_stats->stats.qstats[i].requeues; sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; } + _bstats_update(&sch->bstats, bytes, packets); kfree(hw_stats); return ret; @@ -728,6 +733,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt, static int gred_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { + struct gred_sched *table = qdisc_priv(sch); struct nlattr *tb[TCA_GRED_MAX + 1]; int err; @@ -751,6 +757,12 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt, sch->limit = qdisc_dev(sch)->tx_queue_len * psched_mtu(qdisc_dev(sch)); + if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) { + table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL); + if (!table->opt) + return -ENOMEM; + } + return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); } @@ -907,6 +919,7 @@ static void gred_destroy(struct Qdisc *sch) gred_destroy_vq(table->tab[i]); } gred_offload(sch, TC_GRED_DESTROY); + kfree(table->opt); } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b7ac30cca035d1..d3979a6000e7d2 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -111,7 +111,7 @@ enum hfsc_class_flags { struct hfsc_class { struct Qdisc_class_common cl_common; - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; struct net_rate_estimator __rcu *rate_est; struct tcf_proto __rcu *filter_list; /* filter list */ @@ -965,7 +965,7 @@ 
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); if (err) return err; @@ -1033,9 +1033,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, - NULL, - qdisc_root_sleeping_running(sch), - tca[TCA_RATE]); + NULL, true, tca[TCA_RATE]); if (err) { tcf_block_put(cl->block); kfree(cl); @@ -1328,7 +1326,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, xstats.work = cl->cl_total; xstats.rtwork = cl->cl_cumul; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) return -1; @@ -1406,6 +1404,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt, if (err) return err; + gnet_stats_basic_sync_init(&q->root.bstats); q->root.cl_common.classid = sch->handle; q->root.sched = q; q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 5067a6e5d4fded..9267922ea9c375 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -113,8 +113,8 @@ struct htb_class { /* * Written often fields */ - struct gnet_stats_basic_packed bstats; - struct gnet_stats_basic_packed bstats_bias; + struct gnet_stats_basic_sync bstats; + struct gnet_stats_basic_sync bstats_bias; struct tc_htb_xstats xstats; /* our special stats */ /* token bucket parameters */ @@ -1084,11 +1084,15 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]); if (offload) { - if (sch->parent != TC_H_ROOT) + if (sch->parent != TC_H_ROOT) { + NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload"); return -EOPNOTSUPP; + } - if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) + if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) { + NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on"); return -EOPNOTSUPP; + } q->num_direct_qdiscs = dev->real_num_tx_queues; q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, @@ -1308,10 +1312,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, static void htb_offload_aggregate_stats(struct htb_sched *q, struct htb_class *cl) { + u64 bytes = 0, packets = 0; struct htb_class *c; unsigned int i; - memset(&cl->bstats, 0, sizeof(cl->bstats)); + gnet_stats_basic_sync_init(&cl->bstats); for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { @@ -1323,14 +1328,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q, if (p != cl) continue; - cl->bstats.bytes += c->bstats_bias.bytes; - cl->bstats.packets += c->bstats_bias.packets; + bytes += u64_stats_read(&c->bstats_bias.bytes); + packets += u64_stats_read(&c->bstats_bias.packets); if (c->level == 0) { - cl->bstats.bytes += c->leaf.q->bstats.bytes; - cl->bstats.packets += c->leaf.q->bstats.packets; + bytes += u64_stats_read(&c->leaf.q->bstats.bytes); + packets += u64_stats_read(&c->leaf.q->bstats.packets); } } } + _bstats_update(&cl->bstats, bytes, packets); } static int @@ -1357,16 +1363,16 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) if (cl->leaf.q) cl->bstats = cl->leaf.q->bstats; else - memset(&cl->bstats, 0, sizeof(cl->bstats)); - cl->bstats.bytes 
+= cl->bstats_bias.bytes; - cl->bstats.packets += cl->bstats_bias.packets; + gnet_stats_basic_sync_init(&cl->bstats); + _bstats_update(&cl->bstats, + u64_stats_read(&cl->bstats_bias.bytes), + u64_stats_read(&cl->bstats_bias.packets)); } else { htb_offload_aggregate_stats(q, cl); } } - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) return -1; @@ -1578,8 +1584,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, WARN_ON(old != q); if (cl->parent) { - cl->parent->bstats_bias.bytes += q->bstats.bytes; - cl->parent->bstats_bias.packets += q->bstats.packets; + _bstats_update(&cl->parent->bstats_bias, + u64_stats_read(&q->bstats.bytes), + u64_stats_read(&q->bstats.packets)); } offload_opt = (struct tc_htb_qopt_offload) { @@ -1849,6 +1856,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, if (!cl) goto failure; + gnet_stats_basic_sync_init(&cl->bstats); + gnet_stats_basic_sync_init(&cl->bstats_bias); + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); if (err) { kfree(cl); @@ -1858,7 +1868,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE] ? : &est.nla); if (err) goto err_block_put; @@ -1922,8 +1932,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, htb_graft_helper(dev_queue, old_q); goto err_kill_estimator; } - parent->bstats_bias.bytes += old_q->bstats.bytes; - parent->bstats_bias.packets += old_q->bstats.packets; + _bstats_update(&parent->bstats_bias, + u64_stats_read(&old_q->bstats.bytes), + u64_stats_read(&old_q->bstats.packets)); qdisc_put(old_q); } new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, @@ -1983,7 +1994,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); if (err) return err; diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index e79f1afe0cfd6f..83d2e54bf303a4 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -130,10 +130,9 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) struct net_device *dev = qdisc_dev(sch); struct Qdisc *qdisc; unsigned int ntx; - __u32 qlen = 0; sch->q.qlen = 0; - memset(&sch->bstats, 0, sizeof(sch->bstats)); + gnet_stats_basic_sync_init(&sch->bstats); memset(&sch->qstats, 0, sizeof(sch->qstats)); /* MQ supports lockless qdiscs. 
However, statistics accounting needs @@ -145,25 +144,11 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; spin_lock_bh(qdisc_lock(qdisc)); - if (qdisc_is_percpu_stats(qdisc)) { - qlen = qdisc_qlen_sum(qdisc); - __gnet_stats_copy_basic(NULL, &sch->bstats, - qdisc->cpu_bstats, - &qdisc->bstats); - __gnet_stats_copy_queue(&sch->qstats, - qdisc->cpu_qstats, - &qdisc->qstats, qlen); - sch->q.qlen += qlen; - } else { - sch->q.qlen += qdisc->q.qlen; - sch->bstats.bytes += qdisc->bstats.bytes; - sch->bstats.packets += qdisc->bstats.packets; - sch->qstats.qlen += qdisc->qstats.qlen; - sch->qstats.backlog += qdisc->qstats.backlog; - sch->qstats.drops += qdisc->qstats.drops; - sch->qstats.requeues += qdisc->qstats.requeues; - sch->qstats.overlimits += qdisc->qstats.overlimits; - } + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, + &qdisc->bstats, false); + gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats, + &qdisc->qstats); + sch->q.qlen += qdisc_qlen(qdisc); spin_unlock_bh(qdisc_lock(qdisc)); } @@ -246,8 +231,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct netdev_queue *dev_queue = mq_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats, - &sch->bstats) < 0 || + if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; return 0; @@ -288,6 +272,7 @@ struct Qdisc_ops mq_qdisc_ops __read_mostly = { .init = mq_init, .destroy = mq_destroy, .attach = mq_attach, + .change_real_num_tx = mq_change_real_num_tx, .dump = mq_dump, .owner = THIS_MODULE, }; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 5eb3b1b7ae5e78..b29f3453c6eafe 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) unsigned int ntx, tc; sch->q.qlen = 0; - memset(&sch->bstats, 0, sizeof(sch->bstats)); + gnet_stats_basic_sync_init(&sch->bstats); memset(&sch->qstats, 0, sizeof(sch->qstats)); /* MQ supports lockless qdiscs. 
However, statistics accounting needs @@ -402,25 +402,11 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; spin_lock_bh(qdisc_lock(qdisc)); - if (qdisc_is_percpu_stats(qdisc)) { - __u32 qlen = qdisc_qlen_sum(qdisc); - - __gnet_stats_copy_basic(NULL, &sch->bstats, - qdisc->cpu_bstats, - &qdisc->bstats); - __gnet_stats_copy_queue(&sch->qstats, - qdisc->cpu_qstats, - &qdisc->qstats, qlen); - sch->q.qlen += qlen; - } else { - sch->q.qlen += qdisc->q.qlen; - sch->bstats.bytes += qdisc->bstats.bytes; - sch->bstats.packets += qdisc->bstats.packets; - sch->qstats.backlog += qdisc->qstats.backlog; - sch->qstats.drops += qdisc->qstats.drops; - sch->qstats.requeues += qdisc->qstats.requeues; - sch->qstats.overlimits += qdisc->qstats.overlimits; - } + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, + &qdisc->bstats, false); + gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats, + &qdisc->qstats); + sch->q.qlen += qdisc_qlen(qdisc); spin_unlock_bh(qdisc_lock(qdisc)); } @@ -512,12 +498,13 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, { if (cl >= TC_H_MIN_PRIORITY) { int i; - __u32 qlen = 0; + __u32 qlen; struct gnet_stats_queue qstats = {0}; - struct gnet_stats_basic_packed bstats = {0}; + struct gnet_stats_basic_sync bstats; struct net_device *dev = qdisc_dev(sch); struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK]; + gnet_stats_basic_sync_init(&bstats); /* Drop lock here it will be reclaimed before touching * statistics this is required because the d->lock we * hold here is the look on dev_queue->qdisc_sleeping @@ -532,40 +519,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, spin_lock_bh(qdisc_lock(qdisc)); - if (qdisc_is_percpu_stats(qdisc)) { - qlen = qdisc_qlen_sum(qdisc); - - __gnet_stats_copy_basic(NULL, &bstats, - qdisc->cpu_bstats, - &qdisc->bstats); - __gnet_stats_copy_queue(&qstats, - qdisc->cpu_qstats, - &qdisc->qstats, - qlen); - } else { - qlen += qdisc->q.qlen; - bstats.bytes += qdisc->bstats.bytes; - bstats.packets += qdisc->bstats.packets; - qstats.backlog += qdisc->qstats.backlog; - qstats.drops += qdisc->qstats.drops; - qstats.requeues += qdisc->qstats.requeues; - qstats.overlimits += qdisc->qstats.overlimits; - } + gnet_stats_add_basic(&bstats, qdisc->cpu_bstats, + &qdisc->bstats, false); + gnet_stats_add_queue(&qstats, qdisc->cpu_qstats, + &qdisc->qstats); + sch->q.qlen += qdisc_qlen(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); } + qlen = qdisc_qlen(sch) + qstats.qlen; /* Reclaim root sleeping lock before completing stats */ if (d->lock) spin_lock_bh(d->lock); - if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 || gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0) return -1; } else { struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, - sch->cpu_bstats, &sch->bstats) < 0 || + if (gnet_stats_copy_basic(d, sch->cpu_bstats, + &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; } @@ -629,6 +604,7 @@ static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = { .init = mqprio_init, .destroy = mqprio_destroy, .attach = mqprio_attach, + .change_real_num_tx = mq_change_real_num_tx, .dump = mqprio_dump, .owner = THIS_MODULE, }; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index e282e7382117a5..cd8ab90c4765d4 100644 --- a/net/sched/sch_multiq.c +++ 
b/net/sched/sch_multiq.c @@ -338,8 +338,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || + if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 || qdisc_qstats_copy(d, cl_q) < 0) return -1; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0c345e43a09a37..ecbb10db111168 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -785,7 +785,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, if (!n || n > NETEM_DIST_MAX) return -EINVAL; - d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); + d = kvmalloc(struct_size(d, table, n), GFP_KERNEL); if (!d) return -ENOMEM; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 03fdf31ccb6aff..3b8d7197c06bff 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -361,8 +361,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || + if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, + &cl_q->bstats, true) < 0 || qdisc_qstats_copy(d, cl_q) < 0) return -1; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 58a9d42b52b8ff..0b7f9ba28deb05 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -131,7 +131,7 @@ struct qfq_class { unsigned int filter_cnt; - struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; struct net_rate_estimator __rcu *rate_est; struct Qdisc *qdisc; @@ -451,7 +451,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); if (err) return err; @@ -465,6 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl == NULL) return -ENOBUFS; + gnet_stats_basic_sync_init(&cl->bstats); cl->common.classid = classid; cl->deficit = lmax; @@ -477,7 +478,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, NULL, - qdisc_root_sleeping_running(sch), + true, tca[TCA_RATE]); if (err) goto destroy_class; @@ -639,8 +640,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, xstats.weight = cl->agg->class_weight; xstats.lmax = cl->agg->lmax; - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), - d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || qdisc_qstats_copy(d, cl->qdisc) < 0) return -1; @@ -1234,8 +1234,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - cl->bstats.bytes += len; - cl->bstats.packets += gso_segs; + _bstats_update(&cl->bstats, len, gso_segs); sch->qstats.backlog += len; ++sch->q.qlen; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index b9fd18d986464f..9ab068fa2672be 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1977,7 +1977,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || + if 
(gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; return 0; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 78e79029dc631a..72102277449e1b 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -184,6 +184,20 @@ static int tbf_offload_dump(struct Qdisc *sch) return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt); } +static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new, + struct Qdisc *old, struct netlink_ext_ack *extack) +{ + struct tc_tbf_qopt_offload graft_offload = { + .handle = sch->handle, + .parent = sch->parent, + .child_handle = new->handle, + .command = TC_TBF_GRAFT, + }; + + qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old, + TC_SETUP_QDISC_TBF, &graft_offload, extack); +} + /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ @@ -547,6 +561,8 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, new = &noop_qdisc; *old = qdisc_replace(sch, new, &q->qdisc); + + tbf_offload_graft(sch, new, *old, extack); return 0; } diff --git a/net/sctp/output.c b/net/sctp/output.c index 4dfb5ea82b05b0..cdfdbd353c6786 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -581,13 +581,16 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); sk = chunk->skb->sk; - /* check gso */ if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) { - if (!sk_can_gso(sk)) { - pr_err_once("Trying to GSO but underlying device doesn't support it."); - goto out; + if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */ + packet->ipfragok = 1; + } else { + if (!sk_can_gso(sk)) { /* check gso */ + pr_err_once("Trying to GSO but underlying device doesn't support it."); + goto out; + } + gso = 1; } - gso = 1; } /* alloc head skb */ diff --git a/net/sctp/transport.c b/net/sctp/transport.c index a3d3ca6dd63dd3..133f1719bf1b73 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -269,7 +269,7 @@ bool sctp_transport_pl_send(struct sctp_transport *t) if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ - t->pl.pmtu = SCTP_MIN_PLPMTU; + t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } @@ -366,8 +366,9 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) { t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ - t->pl.pmtu = SCTP_MIN_PLPMTU; + t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + return true; } } else if (t->pl.state == SCTP_PL_SEARCH) { if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { @@ -378,11 +379,10 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + return true; } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) { t->pl.probe_size = pmtu; t->pl.probe_count = 0; - - return false; } } else if (t->pl.state == SCTP_PL_COMPLETE) { if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { @@ -393,10 +393,11 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) t->pl.probe_high = 0; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); + return true; } } - return true; + return 
false; } bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) diff --git a/net/smc/Makefile b/net/smc/Makefile index 99a0186cba5be3..196fb6f01b14f3 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only +ccflags-y += -I$(src) obj-$(CONFIG_SMC) += smc.o obj-$(CONFIG_SMC_DIAG) += smc_diag.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o +smc-y += smc_tracepoint.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 78b663dbfa1f91..0cf7ed2f5d41b5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -50,6 +50,7 @@ #include "smc_rx.h" #include "smc_close.h" #include "smc_stats.h" +#include "smc_tracepoint.h" static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group * creation on server @@ -439,6 +440,47 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc) return 0; } +static bool smc_isascii(char *hostname) +{ + int i; + + for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++) + if (!isascii(hostname[i])) + return false; + return true; +} + +static void smc_conn_save_peer_info_fce(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)clc; + struct smc_clc_first_contact_ext *fce; + int clc_v2_len; + + if (clc->hdr.version == SMC_V1 || + !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) + return; + + if (smc->conn.lgr->is_smcd) { + memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid, + SMC_MAX_EID_LEN); + clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, + d1); + } else { + memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid, + SMC_MAX_EID_LEN); + clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, + r1); + } + fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len); + smc->conn.lgr->peer_os = fce->os_type; + smc->conn.lgr->peer_smc_release = fce->release; + if (smc_isascii(fce->hostname)) + memcpy(smc->conn.lgr->peer_hostname, fce->hostname, + SMC_MAX_HOSTNAME_LEN); +} + static void smcr_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { @@ -451,16 +493,6 @@ static void smcr_conn_save_peer_info(struct smc_sock *smc, smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); } -static bool smc_isascii(char *hostname) -{ - int i; - - for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++) - if (!isascii(hostname[i])) - return false; - return true; -} - static void smcd_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { @@ -472,22 +504,6 @@ static void smcd_conn_save_peer_info(struct smc_sock *smc, smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; - if (clc->hdr.version > SMC_V1 && - (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) { - struct smc_clc_msg_accept_confirm_v2 *clc_v2 = - (struct smc_clc_msg_accept_confirm_v2 *)clc; - struct smc_clc_first_contact_ext *fce = - (struct smc_clc_first_contact_ext *) - (((u8 *)clc_v2) + sizeof(*clc_v2)); - - memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid, - SMC_MAX_EID_LEN); - smc->conn.lgr->peer_os = fce->os_type; - smc->conn.lgr->peer_smc_release = fce->release; - if (smc_isascii(fce->hostname)) - memcpy(smc->conn.lgr->peer_hostname, fce->hostname, - SMC_MAX_HOSTNAME_LEN); - } } static void smc_conn_save_peer_info(struct smc_sock *smc, @@ 
-497,14 +513,16 @@ static void smc_conn_save_peer_info(struct smc_sock *smc, smcd_conn_save_peer_info(smc, clc); else smcr_conn_save_peer_info(smc, clc); + smc_conn_save_peer_info_fce(smc, clc); } static void smc_link_save_peer_info(struct smc_link *link, - struct smc_clc_msg_accept_confirm *clc) + struct smc_clc_msg_accept_confirm *clc, + struct smc_init_info *ini) { link->peer_qpn = ntoh24(clc->r0.qpn); - memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE); - memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac)); + memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE); + memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac)); link->peer_psn = ntoh24(clc->r0.psn); link->peer_mtu = clc->r0.qp_mtu; } @@ -547,6 +565,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) smc->use_fallback = true; smc->fallback_rsn = reason_code; smc_stat_fallback(smc); + trace_smc_switch_to_fallback(smc, reason_code); if (smc->sk.sk_socket && smc->sk.sk_socket->file) { smc->clcsock->file = smc->sk.sk_socket->file; smc->clcsock->file->private_data = smc->clcsock; @@ -608,7 +627,9 @@ static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini) * used for the internal TCP socket */ smc_pnet_find_roce_resource(smc->clcsock->sk, ini); - if (!ini->ib_dev) + if (!ini->check_smcrv2 && !ini->ib_dev) + return SMC_CLC_DECL_NOSMCRDEV; + if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2) return SMC_CLC_DECL_NOSMCRDEV; return 0; } @@ -692,27 +713,42 @@ static int smc_find_proposal_devices(struct smc_sock *smc, int rc = 0; /* check if there is an ism device available */ - if (ini->smcd_version & SMC_V1) { - if (smc_find_ism_device(smc, ini) || - smc_connect_ism_vlan_setup(smc, ini)) { - if (ini->smc_type_v1 == SMC_TYPE_B) - ini->smc_type_v1 = SMC_TYPE_R; - else - ini->smc_type_v1 = SMC_TYPE_N; - } /* else ISM V1 is supported for this connection */ - if (smc_find_rdma_device(smc, ini)) { - if (ini->smc_type_v1 == SMC_TYPE_B) - ini->smc_type_v1 = SMC_TYPE_D; - else - ini->smc_type_v1 = SMC_TYPE_N; - } /* else RDMA is supported for this connection */ - } - if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini)) - ini->smc_type_v2 = SMC_TYPE_N; + if (!(ini->smcd_version & SMC_V1) || + smc_find_ism_device(smc, ini) || + smc_connect_ism_vlan_setup(smc, ini)) + ini->smcd_version &= ~SMC_V1; + /* else ISM V1 is supported for this connection */ + + /* check if there is an rdma device available */ + if (!(ini->smcr_version & SMC_V1) || + smc_find_rdma_device(smc, ini)) + ini->smcr_version &= ~SMC_V1; + /* else RDMA is supported for this connection */ + + ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1, + ini->smcr_version & SMC_V1); + + /* check if there is an ism v2 device available */ + if (!(ini->smcd_version & SMC_V2) || + !smc_ism_is_v2_capable() || + smc_find_ism_v2_device_clnt(smc, ini)) + ini->smcd_version &= ~SMC_V2; + + /* check if there is an rdma v2 device available */ + ini->check_smcrv2 = true; + ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr; + if (!(ini->smcr_version & SMC_V2) || + smc->clcsock->sk->sk_family != AF_INET || + !smc_clc_ueid_count() || + smc_find_rdma_device(smc, ini)) + ini->smcr_version &= ~SMC_V2; + ini->check_smcrv2 = false; + + ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2, + ini->smcr_version & SMC_V2); /* if neither ISM nor RDMA are supported, fallback */ - if (!smcr_indicated(ini->smc_type_v1) && - ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N) + if (ini->smc_type_v1 
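
smc_find_proposal_devices() above now keeps ini->smcd_version and ini->smcr_version as SMC_V1/SMC_V2 bit masks and uses smc_indicated_type() (added to smc_clc.h later in this patch) to fold the per-version availability back into a proposal type; illustrative values only:

	/* SMC-D usable, SMC-R not usable for this version: */
	ini->smc_type_v1 = smc_indicated_type(1, 0);	/* SMC_TYPE_D */
	/* only SMC-R usable: */
	ini->smc_type_v2 = smc_indicated_type(0, 1);	/* SMC_TYPE_R */
	/* neither mode usable: */
	ini->smc_type_v2 = smc_indicated_type(0, 0);	/* SMC_TYPE_N */
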
== SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N) rc = SMC_CLC_DECL_NOSMCDEV; return rc; @@ -752,6 +788,64 @@ static int smc_connect_clc(struct smc_sock *smc, SMC_CLC_ACCEPT, CLC_WAIT_TIME); } +void smc_fill_gid_list(struct smc_link_group *lgr, + struct smc_gidlist *gidlist, + struct smc_ib_device *known_dev, u8 *known_gid) +{ + struct smc_init_info *alt_ini = NULL; + + memset(gidlist, 0, sizeof(*gidlist)); + memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE); + + alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL); + if (!alt_ini) + goto out; + + alt_ini->vlan_id = lgr->vlan_id; + alt_ini->check_smcrv2 = true; + alt_ini->smcrv2.saddr = lgr->saddr; + smc_pnet_find_alt_roce(lgr, alt_ini, known_dev); + + if (!alt_ini->smcrv2.ib_dev_v2) + goto out; + + memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2, + SMC_GID_SIZE); + +out: + kfree(alt_ini); +} + +static int smc_connect_rdma_v2_prepare(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *aclc, + struct smc_init_info *ini) +{ + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)aclc; + struct smc_clc_first_contact_ext *fce = + (struct smc_clc_first_contact_ext *) + (((u8 *)clc_v2) + sizeof(*clc_v2)); + + if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1) + return 0; + + if (fce->v2_direct) { + memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN); + ini->smcrv2.uses_gateway = false; + } else { + if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr, + smc_ib_gid_to_ipv4(aclc->r0.lcl.gid), + ini->smcrv2.nexthop_mac, + &ini->smcrv2.uses_gateway)) + return SMC_CLC_DECL_NOROUTE; + if (!ini->smcrv2.uses_gateway) { + /* mismatch: peer claims indirect, but its direct */ + return SMC_CLC_DECL_NOINDIRECT; + } + } + return 0; +} + /* setup for RDMA connection of client */ static int smc_connect_rdma(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, @@ -759,11 +853,18 @@ static int smc_connect_rdma(struct smc_sock *smc, { int i, reason_code = 0; struct smc_link *link; + u8 *eid = NULL; ini->is_smcd = false; - ini->ib_lcl = &aclc->r0.lcl; ini->ib_clcqpn = ntoh24(aclc->r0.qpn); ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK; + memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE); + memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN); + + reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini); + if (reason_code) + return reason_code; mutex_lock(&smc_client_lgr_pending); reason_code = smc_conn_create(smc, ini); @@ -785,8 +886,9 @@ static int smc_connect_rdma(struct smc_sock *smc, if (l->peer_qpn == ntoh24(aclc->r0.qpn) && !memcmp(l->peer_gid, &aclc->r0.lcl.gid, SMC_GID_SIZE) && - !memcmp(l->peer_mac, &aclc->r0.lcl.mac, - sizeof(l->peer_mac))) { + (aclc->hdr.version > SMC_V1 || + !memcmp(l->peer_mac, &aclc->r0.lcl.mac, + sizeof(l->peer_mac)))) { link = l; break; } @@ -805,7 +907,7 @@ static int smc_connect_rdma(struct smc_sock *smc, } if (ini->first_contact_local) - smc_link_save_peer_info(link, aclc); + smc_link_save_peer_info(link, aclc, ini); if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) { reason_code = SMC_CLC_DECL_ERR_RTOK; @@ -828,8 +930,18 @@ static int smc_connect_rdma(struct smc_sock *smc, } smc_rmb_sync_sg_for_device(&smc->conn); + if (aclc->hdr.version > SMC_V1) { + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)aclc; + + eid = clc_v2->r1.eid; + if (ini->first_contact_local) + smc_fill_gid_list(link->lgr, 
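
smc_connect_rdma_v2_prepare() above derives the peer IPv4 address for the route lookup from the RoCE GID via smc_ib_gid_to_ipv4(); a minimal sketch of that conversion, assuming the peer advertises an IPv4-mapped (RoCEv2-style) GID of the form ::ffff:a.b.c.d:

	static __be32 sketch_gid_to_ipv4(const u8 gid[SMC_GID_SIZE])
	{
		__be32 ip4;

		/* the IPv4 address sits in the last four bytes of the GID */
		memcpy(&ip4, &gid[12], sizeof(ip4));
		return ip4;
	}
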
&ini->smcrv2.gidlist, + link->smcibdev, link->gid); + } + reason_code = smc_clc_send_confirm(smc, ini->first_contact_local, - SMC_V1); + aclc->hdr.version, eid, ini); if (reason_code) goto connect_abort; @@ -869,7 +981,7 @@ smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc, int i; for (i = 0; i < ini->ism_offered_cnt + 1; i++) { - if (ini->ism_chid[i] == ntohs(aclc->chid)) { + if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) { ini->ism_selected = i; return 0; } @@ -883,6 +995,7 @@ static int smc_connect_ism(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, struct smc_init_info *ini) { + u8 *eid = NULL; int rc = 0; ini->is_smcd = true; @@ -918,8 +1031,15 @@ static int smc_connect_ism(struct smc_sock *smc, smc_rx_init(smc); smc_tx_init(smc); + if (aclc->hdr.version > SMC_V1) { + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)aclc; + + eid = clc_v2->d1.eid; + } + rc = smc_clc_send_confirm(smc, ini->first_contact_local, - aclc->hdr.version); + aclc->hdr.version, eid, NULL); if (rc) goto connect_abort; mutex_unlock(&smc_server_lgr_pending); @@ -942,17 +1062,24 @@ static int smc_connect_ism(struct smc_sock *smc, static int smc_connect_check_aclc(struct smc_init_info *ini, struct smc_clc_msg_accept_confirm *aclc) { - if ((aclc->hdr.typev1 == SMC_TYPE_R && - !smcr_indicated(ini->smc_type_v1)) || - (aclc->hdr.typev1 == SMC_TYPE_D && - ((!smcd_indicated(ini->smc_type_v1) && - !smcd_indicated(ini->smc_type_v2)) || - (aclc->hdr.version == SMC_V1 && - !smcd_indicated(ini->smc_type_v1)) || - (aclc->hdr.version == SMC_V2 && - !smcd_indicated(ini->smc_type_v2))))) + if (aclc->hdr.typev1 != SMC_TYPE_R && + aclc->hdr.typev1 != SMC_TYPE_D) return SMC_CLC_DECL_MODEUNSUPP; + if (aclc->hdr.version >= SMC_V2) { + if ((aclc->hdr.typev1 == SMC_TYPE_R && + !smcr_indicated(ini->smc_type_v2)) || + (aclc->hdr.typev1 == SMC_TYPE_D && + !smcd_indicated(ini->smc_type_v2))) + return SMC_CLC_DECL_MODEUNSUPP; + } else { + if ((aclc->hdr.typev1 == SMC_TYPE_R && + !smcr_indicated(ini->smc_type_v1)) || + (aclc->hdr.typev1 == SMC_TYPE_D && + !smcd_indicated(ini->smc_type_v1))) + return SMC_CLC_DECL_MODEUNSUPP; + } + return 0; } @@ -983,14 +1110,15 @@ static int __smc_connect(struct smc_sock *smc) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM, version); - ini->smcd_version = SMC_V1; - ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0; + ini->smcd_version = SMC_V1 | SMC_V2; + ini->smcr_version = SMC_V1 | SMC_V2; ini->smc_type_v1 = SMC_TYPE_B; - ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N; + ini->smc_type_v2 = SMC_TYPE_B; /* get vlan id from IP device */ if (smc_vlan_by_tcpsk(smc->clcsock, ini)) { ini->smcd_version &= ~SMC_V1; + ini->smcr_version = 0; ini->smc_type_v1 = SMC_TYPE_N; if (!ini->smcd_version) { rc = SMC_CLC_DECL_GETVLANERR; @@ -1018,15 +1146,17 @@ static int __smc_connect(struct smc_sock *smc) /* check if smc modes and versions of CLC proposal and accept match */ rc = smc_connect_check_aclc(ini, aclc); version = aclc->hdr.version == SMC_V1 ? 
SMC_V1 : SMC_V2; - ini->smcd_version = version; if (rc) goto vlan_cleanup; /* depending on previous steps, connect using rdma or ism */ - if (aclc->hdr.typev1 == SMC_TYPE_R) + if (aclc->hdr.typev1 == SMC_TYPE_R) { + ini->smcr_version = version; rc = smc_connect_rdma(smc, aclc, ini); - else if (aclc->hdr.typev1 == SMC_TYPE_D) + } else if (aclc->hdr.typev1 == SMC_TYPE_D) { + ini->smcd_version = version; rc = smc_connect_ism(smc, aclc, ini); + } if (rc) goto vlan_cleanup; @@ -1307,7 +1437,7 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc) smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE); /* initial contact - try to establish second link */ - smc_llc_srv_add_link(link); + smc_llc_srv_add_link(link, NULL); return 0; } @@ -1387,33 +1517,48 @@ static int smc_listen_v2_check(struct smc_sock *new_smc, ini->smc_type_v1 = pclc->hdr.typev1; ini->smc_type_v2 = pclc->hdr.typev2; - ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0; - if (pclc->hdr.version > SMC_V1) - ini->smcd_version |= - ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0; - if (!(ini->smcd_version & SMC_V2)) { + ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0; + ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0; + if (pclc->hdr.version > SMC_V1) { + if (smcd_indicated(ini->smc_type_v2)) + ini->smcd_version |= SMC_V2; + if (smcr_indicated(ini->smc_type_v2)) + ini->smcr_version |= SMC_V2; + } + if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) { rc = SMC_CLC_DECL_PEERNOSMC; goto out; } - if (!smc_ism_is_v2_capable()) { - ini->smcd_version &= ~SMC_V2; - rc = SMC_CLC_DECL_NOISM2SUPP; - goto out; - } pclc_v2_ext = smc_get_clc_v2_ext(pclc); if (!pclc_v2_ext) { ini->smcd_version &= ~SMC_V2; + ini->smcr_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOV2EXT; goto out; } pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext); - if (!pclc_smcd_v2_ext) { - ini->smcd_version &= ~SMC_V2; - rc = SMC_CLC_DECL_NOV2DEXT; + if (ini->smcd_version & SMC_V2) { + if (!smc_ism_is_v2_capable()) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOISM2SUPP; + } else if (!pclc_smcd_v2_ext) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOV2DEXT; + } else if (!pclc_v2_ext->hdr.eid_cnt && + !pclc_v2_ext->hdr.flag.seid) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOUEID; + } + } + if (ini->smcr_version & SMC_V2) { + if (!pclc_v2_ext->hdr.eid_cnt) { + ini->smcr_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOUEID; + } } out: - if (!ini->smcd_version) + if (!ini->smcd_version && !ini->smcr_version) return rc; return 0; @@ -1533,11 +1678,6 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, pclc_smcd = smc_get_clc_msg_smcd(pclc); smc_v2_ext = smc_get_clc_v2_ext(pclc); smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext); - if (!smcd_v2_ext || - !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */ - smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini); - goto not_found; - } mutex_lock(&smcd_dev_list.mutex); if (pclc_smcd->ism.chid) @@ -1555,14 +1695,16 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, } mutex_unlock(&smcd_dev_list.mutex); - if (ini->ism_dev[0]) { - smc_ism_get_system_eid(ini->ism_dev[0], &eid); - if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN)) - goto not_found; - } else { + if (!ini->ism_dev[0]) { + smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini); goto not_found; } + smc_ism_get_system_eid(&eid); + if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, + smcd_v2_ext->system_eid, eid)) + goto not_found; + /* separate - 
outside the smcd_dev_list.lock */ smcd_version = ini->smcd_version; for (i = 0; i < matches; i++) { @@ -1579,6 +1721,7 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, } /* no V2 ISM device could be initialized */ ini->smcd_version = smcd_version; /* restore original value */ + ini->negotiated_eid[0] = 0; not_found: ini->smcd_version &= ~SMC_V2; @@ -1608,6 +1751,7 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc, not_found: smc_find_ism_store_rc(rc, ini); + ini->smcd_version &= ~SMC_V1; ini->ism_dev[0] = NULL; ini->is_smcd = false; } @@ -1626,24 +1770,69 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first) return 0; } +static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, + struct smc_clc_msg_proposal *pclc, + struct smc_init_info *ini) +{ + struct smc_clc_v2_extension *smc_v2_ext; + u8 smcr_version; + int rc; + + if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2)) + goto not_found; + + smc_v2_ext = smc_get_clc_v2_ext(pclc); + if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL)) + goto not_found; + + /* prepare RDMA check */ + memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE); + memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); + ini->check_smcrv2 = true; + ini->smcrv2.clc_sk = new_smc->clcsock->sk; + ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr; + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce); + rc = smc_find_rdma_device(new_smc, ini); + if (rc) { + smc_find_ism_store_rc(rc, ini); + goto not_found; + } + if (!ini->smcrv2.uses_gateway) + memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN); + + smcr_version = ini->smcr_version; + ini->smcr_version = SMC_V2; + rc = smc_listen_rdma_init(new_smc, ini); + if (!rc) + rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); + if (!rc) + return; + ini->smcr_version = smcr_version; + smc_find_ism_store_rc(rc, ini); + +not_found: + ini->smcr_version &= ~SMC_V2; + ini->check_smcrv2 = false; +} + static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { int rc; - if (!smcr_indicated(ini->smc_type_v1)) + if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1)) return SMC_CLC_DECL_NOSMCDEV; /* prepare RDMA check */ - ini->ib_lcl = &pclc->lcl; + memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE); + memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); rc = smc_find_rdma_device(new_smc, ini); if (rc) { /* no RDMA device found */ - if (ini->smc_type_v1 == SMC_TYPE_B) - /* neither ISM nor RDMA device found */ - rc = SMC_CLC_DECL_NOSMCDEV; - return rc; + return SMC_CLC_DECL_NOSMCDEV; } rc = smc_listen_rdma_init(new_smc, ini); if (rc) @@ -1656,51 +1845,60 @@ static int smc_listen_find_device(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { - int rc; + int prfx_rc; /* check for ISM device matching V2 proposed device */ smc_find_ism_v2_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0; - if (!(ini->smcd_version & SMC_V1)) - return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV; - - /* check for matching IP prefix and subnet length */ - rc = smc_listen_prfx_check(new_smc, pclc); - if (rc) - return ini->rc ?: rc; + /* check for matching IP prefix and subnet length (V1) */ + prfx_rc = smc_listen_prfx_check(new_smc, pclc); + if (prfx_rc) + 
smc_find_ism_store_rc(prfx_rc, ini); /* get vlan id from IP device */ if (smc_vlan_by_tcpsk(new_smc->clcsock, ini)) return ini->rc ?: SMC_CLC_DECL_GETVLANERR; /* check for ISM device matching V1 proposed device */ - smc_find_ism_v1_device_serv(new_smc, pclc, ini); + if (!prfx_rc) + smc_find_ism_v1_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0; - if (pclc->hdr.typev1 == SMC_TYPE_D) + if (!smcr_indicated(pclc->hdr.typev1) && + !smcr_indicated(pclc->hdr.typev2)) /* skip RDMA and decline */ return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV; - /* check if RDMA is available */ - rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); - smc_find_ism_store_rc(rc, ini); + /* check if RDMA V2 is available */ + smc_find_rdma_v2_device_serv(new_smc, pclc, ini); + if (ini->smcrv2.ib_dev_v2) + return 0; - return (!rc) ? 0 : ini->rc; + /* check if RDMA V1 is available */ + if (!prfx_rc) { + int rc; + + rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); + smc_find_ism_store_rc(rc, ini); + return (!rc) ? 0 : ini->rc; + } + return SMC_CLC_DECL_NOSMCDEV; } /* listen worker: finish RDMA setup */ static int smc_listen_rdma_finish(struct smc_sock *new_smc, struct smc_clc_msg_accept_confirm *cclc, - bool local_first) + bool local_first, + struct smc_init_info *ini) { struct smc_link *link = new_smc->conn.lnk; int reason_code = 0; if (local_first) - smc_link_save_peer_info(link, cclc); + smc_link_save_peer_info(link, cclc, ini); if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc)) return SMC_CLC_DECL_ERR_RTOK; @@ -1721,12 +1919,13 @@ static void smc_listen_work(struct work_struct *work) { struct smc_sock *new_smc = container_of(work, struct smc_sock, smc_listen_work); - u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1; struct socket *newclcsock = new_smc->clcsock; struct smc_clc_msg_accept_confirm *cclc; struct smc_clc_msg_proposal_area *buf; struct smc_clc_msg_proposal *pclc; struct smc_init_info *ini = NULL; + u8 proposal_version = SMC_V1; + u8 accept_version; int rc = 0; if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) @@ -1757,7 +1956,9 @@ static void smc_listen_work(struct work_struct *work) SMC_CLC_PROPOSAL, CLC_WAIT_TIME); if (rc) goto out_decl; - version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version; + + if (pclc->hdr.version > SMC_V1) + proposal_version = SMC_V2; /* IPSec connections opt out of SMC optimizations */ if (using_ipsec(new_smc)) { @@ -1787,8 +1988,9 @@ static void smc_listen_work(struct work_struct *work) goto out_unlock; /* send SMC Accept CLC message */ + accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version; rc = smc_clc_send_accept(new_smc, ini->first_contact_local, - ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1); + accept_version, ini->negotiated_eid); if (rc) goto out_unlock; @@ -1810,7 +2012,7 @@ static void smc_listen_work(struct work_struct *work) /* finish worker */ if (!ini->is_smcd) { rc = smc_listen_rdma_finish(new_smc, cclc, - ini->first_contact_local); + ini->first_contact_local, ini); if (rc) goto out_unlock; mutex_unlock(&smc_server_lgr_pending); @@ -1824,7 +2026,7 @@ static void smc_listen_work(struct work_struct *work) mutex_unlock(&smc_server_lgr_pending); out_decl: smc_listen_decline(new_smc, rc, ini ? 
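
For reference, the reworked smc_listen_find_device() above tries the device classes in a fixed order; summarizing the code, not adding behavior beyond it:

	/* 1. SMC-D (ISM) V2 device matching the proposed GIDs/EIDs
	 * 2. SMC-D (ISM) V1 device, only if the V1 prefix check passed
	 * 3. SMC-R (RDMA) V2 device
	 * 4. SMC-R (RDMA) V1 device, only if the V1 prefix check passed
	 * otherwise decline with the first stored reason code
	 */
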
ini->first_contact_local : 0, - version); + proposal_version); out_free: kfree(ini); kfree(buf); @@ -2662,6 +2864,7 @@ static void __exit smc_exit(void) proto_unregister(&smc_proto); smc_pnet_exit(); smc_nl_exit(); + smc_clc_exit(); unregister_pernet_subsys(&smc_net_stat_ops); unregister_pernet_subsys(&smc_net_ops); rcu_barrier(); diff --git a/net/smc/smc.h b/net/smc/smc.h index d65e15f0c944c1..f4286ca1f22836 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -29,9 +29,6 @@ * devices */ -#define SMC_MAX_HOSTNAME_LEN 32 -#define SMC_MAX_EID_LEN 32 - extern struct proto smc_proto; extern struct proto smc_proto6; @@ -59,7 +56,20 @@ enum smc_state { /* possible states of an SMC socket */ struct smc_link_group; struct smc_wr_rx_hdr { /* common prefix part of LLC and CDC to demultiplex */ - u8 type; + union { + u8 type; +#if defined(__BIG_ENDIAN_BITFIELD) + struct { + u8 llc_version:4, + llc_type:4; + }; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + struct { + u8 llc_type:4, + llc_version:4; + }; +#endif + }; } __aligned(1); struct smc_cdc_conn_state_flags { @@ -289,7 +299,12 @@ static inline bool using_ipsec(struct smc_sock *smc) } #endif +struct smc_gidlist; + struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock); void smc_close_non_accepted(struct sock *sk); +void smc_fill_gid_list(struct smc_link_group *lgr, + struct smc_gidlist *gidlist, + struct smc_ib_device *known_dev, u8 *known_gid); #endif /* __SMC_H */ diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 6ec1ebe878ae0e..8409ab71a5e426 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -26,10 +26,12 @@ #include "smc_clc.h" #include "smc_ib.h" #include "smc_ism.h" +#include "smc_netlink.h" #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 #define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78 +#define SMCR_CLC_ACCEPT_CONFIRM_LEN_V2 108 #define SMC_CLC_RECV_BUF_LEN 100 /* eye catcher "SMCR" EBCDIC for CLC messages */ @@ -39,6 +41,297 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'}; static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN]; +struct smc_clc_eid_table { + rwlock_t lock; + struct list_head list; + u8 ueid_cnt; + u8 seid_enabled; +}; + +static struct smc_clc_eid_table smc_clc_eid_table; + +struct smc_clc_eid_entry { + struct list_head list; + u8 eid[SMC_MAX_EID_LEN]; +}; + +/* The size of a user EID is 32 characters. + * Valid characters should be (single-byte character set) A-Z, 0-9, '.' and '-'. + * Blanks should only be used to pad to the expected size. + * First character must be alphanumeric. + */ +static bool smc_clc_ueid_valid(char *ueid) +{ + char *end = ueid + SMC_MAX_EID_LEN; + + while (--end >= ueid && isspace(*end)) + ; + if (end < ueid) + return false; + if (!isalnum(*ueid) || islower(*ueid)) + return false; + while (ueid <= end) { + if ((!isalnum(*ueid) || islower(*ueid)) && *ueid != '.' 
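
The smc_wr_rx_hdr union added to smc.h above splits the one-byte LLC/CDC type field into two nibbles; with either bit-field layout shown, the low nibble carries the LLC type and the high nibble the LLC version, roughly:

	struct smc_wr_rx_hdr hdr;

	hdr.type = 0x27;	/* example wire value */
	/* hdr.llc_type    == 0x7	(hdr.type & 0x0f) */
	/* hdr.llc_version == 0x2	(hdr.type >> 4)   */
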
&& + *ueid != '-') + return false; + ueid++; + } + return true; +} + +static int smc_clc_ueid_add(char *ueid) +{ + struct smc_clc_eid_entry *new_ueid, *tmp_ueid; + int rc; + + if (!smc_clc_ueid_valid(ueid)) + return -EINVAL; + + /* add a new ueid entry to the ueid table if there isn't one */ + new_ueid = kzalloc(sizeof(*new_ueid), GFP_KERNEL); + if (!new_ueid) + return -ENOMEM; + memcpy(new_ueid->eid, ueid, SMC_MAX_EID_LEN); + + write_lock(&smc_clc_eid_table.lock); + if (smc_clc_eid_table.ueid_cnt >= SMC_MAX_UEID) { + rc = -ERANGE; + goto err_out; + } + list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) { + if (!memcmp(tmp_ueid->eid, ueid, SMC_MAX_EID_LEN)) { + rc = -EEXIST; + goto err_out; + } + } + list_add_tail(&new_ueid->list, &smc_clc_eid_table.list); + smc_clc_eid_table.ueid_cnt++; + write_unlock(&smc_clc_eid_table.lock); + return 0; + +err_out: + write_unlock(&smc_clc_eid_table.lock); + kfree(new_ueid); + return rc; +} + +int smc_clc_ueid_count(void) +{ + int count; + + read_lock(&smc_clc_eid_table.lock); + count = smc_clc_eid_table.ueid_cnt; + read_unlock(&smc_clc_eid_table.lock); + + return count; +} + +int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY]; + char *ueid; + + if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1) + return -EINVAL; + ueid = (char *)nla_data(nla_ueid); + + return smc_clc_ueid_add(ueid); +} + +/* remove one or all ueid entries from the table */ +static int smc_clc_ueid_remove(char *ueid) +{ + struct smc_clc_eid_entry *lst_ueid, *tmp_ueid; + int rc = -ENOENT; + + /* remove table entry */ + write_lock(&smc_clc_eid_table.lock); + list_for_each_entry_safe(lst_ueid, tmp_ueid, &smc_clc_eid_table.list, + list) { + if (!ueid || !memcmp(lst_ueid->eid, ueid, SMC_MAX_EID_LEN)) { + list_del(&lst_ueid->list); + smc_clc_eid_table.ueid_cnt--; + kfree(lst_ueid); + rc = 0; + } + } + if (!rc && !smc_clc_eid_table.ueid_cnt) { + smc_clc_eid_table.seid_enabled = 1; + rc = -EAGAIN; /* indicate success and enabling of seid */ + } + write_unlock(&smc_clc_eid_table.lock); + return rc; +} + +int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY]; + char *ueid; + + if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1) + return -EINVAL; + ueid = (char *)nla_data(nla_ueid); + + return smc_clc_ueid_remove(ueid); +} + +int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info) +{ + smc_clc_ueid_remove(NULL); + return 0; +} + +static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq, + u32 flags, char *ueid) +{ + char ueid_str[SMC_MAX_EID_LEN + 1]; + void *hdr; + + hdr = genlmsg_put(skb, portid, seq, &smc_gen_nl_family, + flags, SMC_NETLINK_DUMP_UEID); + if (!hdr) + return -ENOMEM; + snprintf(ueid_str, sizeof(ueid_str), "%s", ueid); + if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) { + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; + } + genlmsg_end(skb, hdr); + return 0; +} + +static int _smc_nl_ueid_dump(struct sk_buff *skb, u32 portid, u32 seq, + int start_idx) +{ + struct smc_clc_eid_entry *lst_ueid; + int idx = 0; + + read_lock(&smc_clc_eid_table.lock); + list_for_each_entry(lst_ueid, &smc_clc_eid_table.list, list) { + if (idx++ < start_idx) + continue; + if (smc_nl_ueid_dumpinfo(skb, portid, seq, NLM_F_MULTI, + lst_ueid->eid)) { + --idx; + break; + } + } + read_unlock(&smc_clc_eid_table.lock); + return idx; +} + +int smc_nl_dump_ueid(struct sk_buff *skb, struct 
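
With the rules implemented in smc_clc_ueid_valid() above (32 bytes, upper-case A-Z, 0-9, '.' and '-', blanks only as trailing padding, first character alphanumeric), some illustrative inputs:

	/* accepted:  "PROD-EID.01" padded with blanks to 32 bytes        */
	/* rejected:  "prod-eid.01"  (lower-case characters)              */
	/* rejected:  "-PROD-EID"    (first character not alphanumeric)   */
	/* rejected:  "PROD EID"     (blank inside the identifier)        */
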
netlink_callback *cb) +{ + struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); + int idx; + + idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb_ctx->pos[0]); + + cb_ctx->pos[0] = idx; + return skb->len; +} + +int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); + char seid_str[SMC_MAX_EID_LEN + 1]; + u8 seid_enabled; + void *hdr; + u8 *seid; + + if (cb_ctx->pos[0]) + return skb->len; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &smc_gen_nl_family, NLM_F_MULTI, + SMC_NETLINK_DUMP_SEID); + if (!hdr) + return -ENOMEM; + if (!smc_ism_is_v2_capable()) + goto end; + + smc_ism_get_system_eid(&seid); + snprintf(seid_str, sizeof(seid_str), "%s", seid); + if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str)) + goto err; + read_lock(&smc_clc_eid_table.lock); + seid_enabled = smc_clc_eid_table.seid_enabled; + read_unlock(&smc_clc_eid_table.lock); + if (nla_put_u8(skb, SMC_NLA_SEID_ENABLED, seid_enabled)) + goto err; +end: + genlmsg_end(skb, hdr); + cb_ctx->pos[0]++; + return skb->len; +err: + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info) +{ + write_lock(&smc_clc_eid_table.lock); + smc_clc_eid_table.seid_enabled = 1; + write_unlock(&smc_clc_eid_table.lock); + return 0; +} + +int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info) +{ + int rc = 0; + + write_lock(&smc_clc_eid_table.lock); + if (!smc_clc_eid_table.ueid_cnt) + rc = -ENOENT; + else + smc_clc_eid_table.seid_enabled = 0; + write_unlock(&smc_clc_eid_table.lock); + return rc; +} + +static bool _smc_clc_match_ueid(u8 *peer_ueid) +{ + struct smc_clc_eid_entry *tmp_ueid; + + list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) { + if (!memcmp(tmp_ueid->eid, peer_ueid, SMC_MAX_EID_LEN)) + return true; + } + return false; +} + +bool smc_clc_match_eid(u8 *negotiated_eid, + struct smc_clc_v2_extension *smc_v2_ext, + u8 *peer_eid, u8 *local_eid) +{ + bool match = false; + int i; + + negotiated_eid[0] = 0; + read_lock(&smc_clc_eid_table.lock); + if (peer_eid && local_eid && + smc_clc_eid_table.seid_enabled && + smc_v2_ext->hdr.flag.seid && + !memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) { + memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN); + match = true; + goto out; + } + + for (i = 0; i < smc_v2_ext->hdr.eid_cnt; i++) { + if (_smc_clc_match_ueid(smc_v2_ext->user_eids[i])) { + memcpy(negotiated_eid, smc_v2_ext->user_eids[i], + SMC_MAX_EID_LEN); + match = true; + goto out; + } + } +out: + read_unlock(&smc_clc_eid_table.lock); + return match; +} + /* check arriving CLC proposal */ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc) { @@ -100,6 +393,27 @@ smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2) (ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 + sizeof(struct smc_clc_first_contact_ext))) return false; + if (hdr->typev1 == SMC_TYPE_R && + ntohs(hdr->length) < SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) + return false; + } + return true; +} + +/* check arriving CLC decline */ +static bool +smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc) +{ + struct smc_clc_msg_hdr *hdr = &dclc->hdr; + + if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D) + return false; + if (hdr->version == SMC_V1) { + if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline)) + return false; + } else { + if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline_v2)) + return false; } return 
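
The EID negotiation in smc_clc_match_eid() above follows a fixed preference order; summarizing the code:

	/* 1. SMC-D only: if SEID use is enabled locally, the peer set the
	 *    seid flag and both system EIDs are identical -> take the SEID
	 * 2. otherwise: the first proposed user EID that is also present in
	 *    the local UEID table -> take that UEID
	 * 3. no match -> negotiated_eid stays empty and the caller treats
	 *    this device class as unusable
	 */
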
true; } @@ -145,9 +459,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl) break; case SMC_CLC_DECLINE: dclc = (struct smc_clc_msg_decline *)clcm; - if (ntohs(dclc->hdr.length) != sizeof(*dclc)) + if (!smc_clc_msg_decl_valid(dclc)) return false; - trl = &dclc->trl; + check_trl = false; break; default: return false; @@ -446,15 +760,16 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, /* send CLC DECLINE message across internal TCP socket */ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version) { - struct smc_clc_msg_decline dclc; + struct smc_clc_msg_decline *dclc_v1; + struct smc_clc_msg_decline_v2 dclc; struct msghdr msg; + int len, send_len; struct kvec vec; - int len; + dclc_v1 = (struct smc_clc_msg_decline *)&dclc; memset(&dclc, 0, sizeof(dclc)); memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); dclc.hdr.type = SMC_CLC_DECLINE; - dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline)); dclc.hdr.version = version; dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX; dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? @@ -464,14 +779,22 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version) memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); dclc.peer_diagnosis = htonl(peer_diag_info); - memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + if (version == SMC_V1) { + memcpy(dclc_v1->trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + send_len = sizeof(*dclc_v1); + } else { + memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + send_len = sizeof(dclc); + } + dclc.hdr.length = htons(send_len); memset(&msg, 0, sizeof(msg)); vec.iov_base = &dclc; - vec.iov_len = sizeof(struct smc_clc_msg_decline); - len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, - sizeof(struct smc_clc_msg_decline)); - if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) + vec.iov_len = send_len; + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, send_len); + if (len < 0 || len < send_len) len = -EPROTO; return len > 0 ? 
0 : len; } @@ -551,9 +874,10 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) if (ini->smc_type_v2 == SMC_TYPE_N) { pclc_smcd->v2_ext_offset = 0; } else { + struct smc_clc_eid_entry *ueident; u16 v2_ext_offset; - u8 *eid = NULL; + v2_ext->hdr.flag.release = SMC_RELEASE; v2_ext_offset = sizeof(*pclc_smcd) - offsetofend(struct smc_clc_msg_smcd, v2_ext_offset); if (ini->smc_type_v1 != SMC_TYPE_N) @@ -561,21 +885,31 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) pclc_prfx->ipv6_prefixes_cnt * sizeof(ipv6_prfx[0]); pclc_smcd->v2_ext_offset = htons(v2_ext_offset); - v2_ext->hdr.eid_cnt = 0; + plen += sizeof(*v2_ext); + + read_lock(&smc_clc_eid_table.lock); + v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt; + plen += smc_clc_eid_table.ueid_cnt * SMC_MAX_EID_LEN; + i = 0; + list_for_each_entry(ueident, &smc_clc_eid_table.list, list) { + memcpy(v2_ext->user_eids[i++], ueident->eid, + sizeof(ueident->eid)); + } + read_unlock(&smc_clc_eid_table.lock); + } + if (smcd_indicated(ini->smc_type_v2)) { + u8 *eid = NULL; + + v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled; v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt; - v2_ext->hdr.flag.release = SMC_RELEASE; - v2_ext->hdr.flag.seid = 1; v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) - offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset) + v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN); - if (ini->ism_dev[0]) - smc_ism_get_system_eid(ini->ism_dev[0], &eid); - else - smc_ism_get_system_eid(ini->ism_dev[1], &eid); - if (eid) + smc_ism_get_system_eid(&eid); + if (eid && v2_ext->hdr.flag.seid) memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN); - plen += sizeof(*v2_ext) + sizeof(*smcd_v2_ext); + plen += sizeof(*smcd_v2_ext); if (ini->ism_offered_cnt) { for (i = 1; i <= ini->ism_offered_cnt; i++) { gidchids[i - 1].gid = @@ -587,6 +921,9 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) sizeof(struct smc_clc_smcd_gid_chid); } } + if (smcr_indicated(ini->smc_type_v2)) + memcpy(v2_ext->roce, ini->smcrv2.ib_gid_v2, SMC_GID_SIZE); + pclc_base->hdr.length = htons(plen); memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); @@ -608,13 +945,16 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) } if (ini->smc_type_v2 != SMC_TYPE_N) { vec[i].iov_base = v2_ext; - vec[i++].iov_len = sizeof(*v2_ext); - vec[i].iov_base = smcd_v2_ext; - vec[i++].iov_len = sizeof(*smcd_v2_ext); - if (ini->ism_offered_cnt) { - vec[i].iov_base = gidchids; - vec[i++].iov_len = ini->ism_offered_cnt * + vec[i++].iov_len = sizeof(*v2_ext) + + (v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN); + if (smcd_indicated(ini->smc_type_v2)) { + vec[i].iov_base = smcd_v2_ext; + vec[i++].iov_len = sizeof(*smcd_v2_ext); + if (ini->ism_offered_cnt) { + vec[i].iov_base = gidchids; + vec[i++].iov_len = ini->ism_offered_cnt * sizeof(struct smc_clc_smcd_gid_chid); + } } } vec[i].iov_base = trl; @@ -636,13 +976,15 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) /* build and send CLC CONFIRM / ACCEPT message */ static int smc_clc_send_confirm_accept(struct smc_sock *smc, struct smc_clc_msg_accept_confirm_v2 *clc_v2, - int first_contact, u8 version) + int first_contact, u8 version, + u8 *eid, struct smc_init_info *ini) { struct smc_connection *conn = &smc->conn; struct smc_clc_msg_accept_confirm *clc; struct smc_clc_first_contact_ext fce; + struct smc_clc_fce_gid_ext gle; struct smc_clc_msg_trail trl; - struct kvec vec[3]; + struct kvec vec[5]; struct 
msghdr msg; int i, len; @@ -664,12 +1006,10 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, if (version == SMC_V1) { clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); } else { - u8 *eid = NULL; - - clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd)); - smc_ism_get_system_eid(conn->lgr->smcd, &eid); - if (eid) - memcpy(clc_v2->eid, eid, SMC_MAX_EID_LEN); + clc_v2->d1.chid = + htons(smc_ism_get_chid(conn->lgr->smcd)); + if (eid && eid[0]) + memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN); len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2; if (first_contact) smc_clc_fill_fce(&fce, &len); @@ -708,6 +1048,26 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address (conn->rmb_desc->sgt[link->link_idx].sgl)); hton24(clc->r0.psn, link->psn_initial); + if (version == SMC_V1) { + clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); + } else { + if (eid && eid[0]) + memcpy(clc_v2->r1.eid, eid, SMC_MAX_EID_LEN); + len = SMCR_CLC_ACCEPT_CONFIRM_LEN_V2; + if (first_contact) { + smc_clc_fill_fce(&fce, &len); + fce.v2_direct = !link->lgr->uses_gateway; + memset(&gle, 0, sizeof(gle)); + if (ini && clc->hdr.type == SMC_CLC_CONFIRM) { + gle.gid_cnt = ini->smcrv2.gidlist.len; + len += sizeof(gle); + len += gle.gid_cnt * sizeof(gle.gid[0]); + } else { + len += sizeof(gle.reserved); + } + } + clc_v2->hdr.length = htons(len); + } memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); } @@ -715,7 +1075,10 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, i = 0; vec[i].iov_base = clc_v2; if (version > SMC_V1) - vec[i++].iov_len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 - sizeof(trl); + vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ? + SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 : + SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) - + sizeof(trl); else vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ? 
SMCD_CLC_ACCEPT_CONFIRM_LEN : @@ -724,6 +1087,18 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, if (version > SMC_V1 && first_contact) { vec[i].iov_base = &fce; vec[i++].iov_len = sizeof(fce); + if (!conn->lgr->is_smcd) { + if (clc->hdr.type == SMC_CLC_CONFIRM) { + vec[i].iov_base = &gle; + vec[i++].iov_len = sizeof(gle); + vec[i].iov_base = &ini->smcrv2.gidlist.list; + vec[i++].iov_len = gle.gid_cnt * + sizeof(gle.gid[0]); + } else { + vec[i].iov_base = &gle.reserved; + vec[i++].iov_len = sizeof(gle.reserved); + } + } } vec[i].iov_base = &trl; vec[i++].iov_len = sizeof(trl); @@ -733,7 +1108,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc, /* send CLC CONFIRM message across internal TCP socket */ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact, - u8 version) + u8 version, u8 *eid, struct smc_init_info *ini) { struct smc_clc_msg_accept_confirm_v2 cclc_v2; int reason_code = 0; @@ -743,7 +1118,7 @@ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact, memset(&cclc_v2, 0, sizeof(cclc_v2)); cclc_v2.hdr.type = SMC_CLC_CONFIRM; len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact, - version); + version, eid, ini); if (len < ntohs(cclc_v2.hdr.length)) { if (len >= 0) { reason_code = -ENETUNREACH; @@ -758,7 +1133,7 @@ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact, /* send CLC ACCEPT message across internal TCP socket */ int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact, - u8 version) + u8 version, u8 *negotiated_eid) { struct smc_clc_msg_accept_confirm_v2 aclc_v2; int len; @@ -766,7 +1141,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact, memset(&aclc_v2, 0, sizeof(aclc_v2)); aclc_v2.hdr.type = SMC_CLC_ACCEPT; len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact, - version); + version, negotiated_eid, NULL); if (len < ntohs(aclc_v2.hdr.length)) len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err; @@ -786,4 +1161,14 @@ void __init smc_clc_init(void) u = utsname(); memcpy(smc_hostname, u->nodename, min_t(size_t, strlen(u->nodename), sizeof(smc_hostname))); + + INIT_LIST_HEAD(&smc_clc_eid_table.list); + rwlock_init(&smc_clc_eid_table.lock); + smc_clc_eid_table.ueid_cnt = 0; + smc_clc_eid_table.seid_enabled = 1; +} + +void smc_clc_exit(void) +{ + smc_clc_ueid_remove(NULL); } diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 32d37f7b70f2b5..83f02f131fc090 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -14,8 +14,10 @@ #define _SMC_CLC_H #include +#include #include "smc.h" +#include "smc_netlink.h" #define SMC_CLC_PROPOSAL 0x01 #define SMC_CLC_ACCEPT 0x02 @@ -42,6 +44,7 @@ #define SMC_CLC_DECL_NOV2DEXT 0x03030005 /* peer sent no clc SMC-Dv2 ext. */ #define SMC_CLC_DECL_NOSEID 0x03030006 /* peer sent no SEID */ #define SMC_CLC_DECL_NOSMCD2DEV 0x03030007 /* no SMC-Dv2 device found */ +#define SMC_CLC_DECL_NOUEID 0x03030008 /* peer sent no UEID */ #define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D)*/ #define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */ #define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */ @@ -52,6 +55,8 @@ #define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */ #define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */ #define SMC_CLC_DECL_MAX_DMB 0x030d0000 /* SMC-D DMB limit exceeded */ +#define SMC_CLC_DECL_NOROUTE 0x030e0000 /* SMC-Rv2 conn. 
no route to peer */ +#define SMC_CLC_DECL_NOINDIRECT 0x030f0000 /* SMC-Rv2 conn. indirect mismatch*/ #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ #define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */ #define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */ @@ -158,6 +163,7 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */ } __aligned(4); #define SMC_CLC_MAX_V6_PREFIX 8 +#define SMC_CLC_MAX_UEID 8 struct smc_clc_msg_proposal_area { struct smc_clc_msg_proposal pclc_base; @@ -165,6 +171,7 @@ struct smc_clc_msg_proposal_area { struct smc_clc_msg_proposal_prefix pclc_prfx; struct smc_clc_ipv6_prefix pclc_prfx_ipv6[SMC_CLC_MAX_V6_PREFIX]; struct smc_clc_v2_extension pclc_v2_ext; + u8 user_eids[SMC_CLC_MAX_UEID][SMC_MAX_EID_LEN]; struct smc_clc_smcd_v2_extension pclc_smcd_v2_ext; struct smc_clc_smcd_gid_chid pclc_gidchids[SMC_MAX_ISM_DEVS]; struct smc_clc_msg_trail pclc_trl; @@ -209,11 +216,14 @@ struct smcd_clc_msg_accept_confirm_common { /* SMCD accept/confirm */ #define SMC_CLC_OS_AIX 3 struct smc_clc_first_contact_ext { - u8 reserved1; #if defined(__BIG_ENDIAN_BITFIELD) + u8 v2_direct : 1, + reserved : 7; u8 os_type : 4, release : 4; #elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 7, + v2_direct : 1; u8 release : 4, os_type : 4; #endif @@ -221,6 +231,13 @@ struct smc_clc_first_contact_ext { u8 hostname[SMC_MAX_HOSTNAME_LEN]; }; +struct smc_clc_fce_gid_ext { + u8 reserved[16]; + u8 gid_cnt; + u8 reserved2[3]; + u8 gid[][SMC_GID_SIZE]; +}; + struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */ struct smc_clc_msg_hdr hdr; union { @@ -235,13 +252,17 @@ struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */ struct smc_clc_msg_accept_confirm_v2 { /* clc accept / confirm message */ struct smc_clc_msg_hdr hdr; union { - struct smcr_clc_msg_accept_confirm r0; /* SMC-R */ + struct { /* SMC-R */ + struct smcr_clc_msg_accept_confirm r0; + u8 eid[SMC_MAX_EID_LEN]; + u8 reserved6[8]; + } r1; struct { /* SMC-D */ struct smcd_clc_msg_accept_confirm_common d0; __be16 chid; u8 eid[SMC_MAX_EID_LEN]; u8 reserved5[8]; - }; + } d1; }; }; @@ -260,6 +281,24 @@ struct smc_clc_msg_decline { /* clc decline message */ struct smc_clc_msg_trail trl; /* eye catcher "SMCD" or "SMCR" EBCDIC */ } __aligned(4); +#define SMC_DECL_DIAG_COUNT_V2 4 /* no. 
of additional peer diagnosis codes */ + +struct smc_clc_msg_decline_v2 { /* clc decline message */ + struct smc_clc_msg_hdr hdr; + u8 id_for_peer[SMC_SYSTEMID_LEN]; /* sender peer_id */ + __be32 peer_diagnosis; /* diagnosis information */ +#if defined(__BIG_ENDIAN_BITFIELD) + u8 os_type : 4, + reserved : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 4, + os_type : 4; +#endif + u8 reserved2[3]; + __be32 peer_diagnosis_v2[SMC_DECL_DIAG_COUNT_V2]; + struct smc_clc_msg_trail trl; /* eye catcher "SMCD" or "SMCR" EBCDIC */ +} __aligned(4); + /* determine start of the prefix area within the proposal message */ static inline struct smc_clc_msg_proposal_prefix * smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc) @@ -278,6 +317,17 @@ static inline bool smcd_indicated(int smc_type) return smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B; } +static inline u8 smc_indicated_type(int is_smcd, int is_smcr) +{ + if (is_smcd && is_smcr) + return SMC_TYPE_B; + if (is_smcd) + return SMC_TYPE_D; + if (is_smcr) + return SMC_TYPE_R; + return SMC_TYPE_N; +} + /* get SMC-D info from proposal message */ static inline struct smc_clc_msg_smcd * smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop) @@ -330,10 +380,22 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version); int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini); int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact, - u8 version); + u8 version, u8 *eid, struct smc_init_info *ini); int smc_clc_send_accept(struct smc_sock *smc, bool srv_first_contact, - u8 version); + u8 version, u8 *negotiated_eid); void smc_clc_init(void) __init; +void smc_clc_exit(void); void smc_clc_get_hostname(u8 **host); +bool smc_clc_match_eid(u8 *negotiated_eid, + struct smc_clc_v2_extension *smc_v2_ext, + u8 *peer_eid, u8 *local_eid); +int smc_clc_ueid_count(void); +int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb); +int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info); +int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info); +int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info); +int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb); +int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info); +int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info); #endif diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index d2206743dc7148..49b8ba3bb6835e 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -34,6 +34,7 @@ #include "smc_ism.h" #include "smc_netlink.h" #include "smc_stats.h" +#include "smc_tracepoint.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) @@ -223,7 +224,6 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); char hostname[SMC_MAX_HOSTNAME_LEN + 1]; char smc_seid[SMC_MAX_EID_LEN + 1]; - struct smcd_dev *smcd_dev; struct nlattr *attrs; u8 *seid = NULL; u8 *host = NULL; @@ -245,6 +245,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable())) goto errattr; + if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true)) + goto errattr; smc_clc_get_hostname(&host); if (host) { memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN); @@ -252,13 +254,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) if 
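
Because the V2 accept/confirm payload is now a union of an SMC-R (r1) and an SMC-D (d1) variant, the EID must be read from the member matching the negotiated mode, as the connect and listen paths above do; a minimal accessor sketch:

	static u8 *sketch_aclc_v2_eid(struct smc_clc_msg_accept_confirm_v2 *clc)
	{
		return clc->hdr.typev1 == SMC_TYPE_D ? clc->d1.eid : clc->r1.eid;
	}
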
(nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname)) goto errattr; } - mutex_lock(&smcd_dev_list.mutex); - smcd_dev = list_first_entry_or_null(&smcd_dev_list.list, - struct smcd_dev, list); - if (smcd_dev) - smc_ism_get_system_eid(smcd_dev, &seid); - mutex_unlock(&smcd_dev_list.mutex); - if (seid && smc_ism_is_v2_capable()) { + if (smc_ism_is_v2_capable()) { + smc_ism_get_system_eid(&seid); memcpy(smc_seid, seid, SMC_MAX_EID_LEN); smc_seid[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid)) @@ -277,12 +274,65 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +/* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */ +static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr, + struct sk_buff *skb, + struct netlink_callback *cb, + struct nlattr *v2_attrs) +{ + char smc_host[SMC_MAX_HOSTNAME_LEN + 1]; + char smc_eid[SMC_MAX_EID_LEN + 1]; + + if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version)) + goto errv2attr; + if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release)) + goto errv2attr; + if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os)) + goto errv2attr; + memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN); + smc_host[SMC_MAX_HOSTNAME_LEN] = 0; + if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host)) + goto errv2attr; + memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN); + smc_eid[SMC_MAX_EID_LEN] = 0; + if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid)) + goto errv2attr; + + nla_nest_end(skb, v2_attrs); + return 0; + +errv2attr: + nla_nest_cancel(skb, v2_attrs); + return -EMSGSIZE; +} + +static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct nlattr *v2_attrs; + + v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2); + if (!v2_attrs) + goto errattr; + if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway)) + goto errv2attr; + + nla_nest_end(skb, v2_attrs); + return 0; + +errv2attr: + nla_nest_cancel(skb, v2_attrs); +errattr: + return -EMSGSIZE; +} + static int smc_nl_fill_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { char smc_target[SMC_MAX_PNETID_LEN + 1]; - struct nlattr *attrs; + struct nlattr *attrs, *v2_attrs; attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR); if (!attrs) @@ -302,6 +352,15 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr, smc_target[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target)) goto errattr; + if (lgr->smc_version > SMC_V1) { + v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON); + if (!v2_attrs) + goto errattr; + if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) + goto errattr; + if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb)) + goto errattr; + } nla_nest_end(skb, attrs); return 0; @@ -434,10 +493,7 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { - char smc_host[SMC_MAX_HOSTNAME_LEN + 1]; char smc_pnet[SMC_MAX_PNETID_LEN + 1]; - char smc_eid[SMC_MAX_EID_LEN + 1]; - struct nlattr *v2_attrs; struct nlattr *attrs; void *nlh; @@ -469,32 +525,19 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr, smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet)) goto errattr; + if (lgr->smc_version > SMC_V1) { + struct nlattr *v2_attrs; - v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_V2); - if (!v2_attrs) - goto errattr; - if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, 
lgr->smc_version)) - goto errv2attr; - if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release)) - goto errv2attr; - if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os)) - goto errv2attr; - memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN); - smc_host[SMC_MAX_HOSTNAME_LEN] = 0; - if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host)) - goto errv2attr; - memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN); - smc_eid[SMC_MAX_EID_LEN] = 0; - if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid)) - goto errv2attr; - - nla_nest_end(skb, v2_attrs); + v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON); + if (!v2_attrs) + goto errattr; + if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) + goto errattr; + } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; -errv2attr: - nla_nest_cancel(skb, v2_attrs); errattr: nla_nest_cancel(skb, attrs); errout: @@ -690,24 +733,30 @@ static void smcr_copy_dev_info_to_link(struct smc_link *link) int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, u8 link_idx, struct smc_init_info *ini) { + struct smc_ib_device *smcibdev; u8 rndvec[3]; int rc; - get_device(&ini->ib_dev->ibdev->dev); - atomic_inc(&ini->ib_dev->lnk_cnt); + if (lgr->smc_version == SMC_V2) { + lnk->smcibdev = ini->smcrv2.ib_dev_v2; + lnk->ibport = ini->smcrv2.ib_port_v2; + } else { + lnk->smcibdev = ini->ib_dev; + lnk->ibport = ini->ib_port; + } + get_device(&lnk->smcibdev->ibdev->dev); + atomic_inc(&lnk->smcibdev->lnk_cnt); + lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu; lnk->link_id = smcr_next_link_id(lgr); lnk->lgr = lgr; lnk->link_idx = link_idx; - lnk->smcibdev = ini->ib_dev; - lnk->ibport = ini->ib_port; smc_ibdev_cnt_inc(lnk); smcr_copy_dev_info_to_link(lnk); - lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu; atomic_set(&lnk->conn_cnt, 0); smc_llc_link_set_uid(lnk); INIT_WORK(&lnk->link_down_wrk, smc_link_down_work); - if (!ini->ib_dev->initialized) { - rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev); + if (!lnk->smcibdev->initialized) { + rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev); if (rc) goto out; } @@ -715,7 +764,9 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport, - ini->vlan_id, lnk->gid, &lnk->sgid_index); + ini->vlan_id, lnk->gid, &lnk->sgid_index, + lgr->smc_version == SMC_V2 ? + &ini->smcrv2 : NULL); if (rc) goto out; rc = smc_llc_link_init(lnk); @@ -746,11 +797,12 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, smc_llc_link_clear(lnk, false); out: smc_ibdev_cnt_dec(lnk); - put_device(&ini->ib_dev->ibdev->dev); + put_device(&lnk->smcibdev->ibdev->dev); + smcibdev = lnk->smcibdev; memset(lnk, 0, sizeof(struct smc_link)); lnk->state = SMC_LNK_UNUSED; - if (!atomic_dec_return(&ini->ib_dev->lnk_cnt)) - wake_up(&ini->ib_dev->lnks_deleted); + if (!atomic_dec_return(&smcibdev->lnk_cnt)) + wake_up(&smcibdev->lnks_deleted); return rc; } @@ -814,18 +866,37 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt); } else { /* SMC-R specific settings */ + struct smc_ib_device *ibdev; + int ibport; + lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; - memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer, + lgr->smc_version = ini->smcr_version; + memcpy(lgr->peer_systemid, ini->peer_systemid, SMC_SYSTEMID_LEN); - memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1], + if (lgr->smc_version == SMC_V2) { + ibdev = ini->smcrv2.ib_dev_v2; + ibport = ini->smcrv2.ib_port_v2; + lgr->saddr = ini->smcrv2.saddr; + lgr->uses_gateway = ini->smcrv2.uses_gateway; + memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac, + ETH_ALEN); + } else { + ibdev = ini->ib_dev; + ibport = ini->ib_port; + } + memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1], SMC_MAX_PNETID_LEN); + if (smc_wr_alloc_lgr_mem(lgr)) + goto free_wq; smc_llc_lgr_init(lgr, smc); link_idx = SMC_SINGLE_LINK; lnk = &lgr->lnk[link_idx]; rc = smcr_link_init(lgr, lnk, link_idx, ini); - if (rc) + if (rc) { + smc_wr_free_lgr_mem(lgr); goto free_wq; + } lgr_list = &smc_lgr_list.list; lgr_lock = &smc_lgr_list.lock; atomic_inc(&lgr_cnt); @@ -1232,6 +1303,7 @@ static void smc_lgr_free(struct smc_link_group *lgr) if (!atomic_dec_return(&lgr->smcd->lgr_cnt)) wake_up(&lgr->smcd->lgrs_deleted); } else { + smc_wr_free_lgr_mem(lgr); if (!atomic_dec_return(&lgr_cnt)) wake_up(&lgrs_deleted); } @@ -1549,15 +1621,19 @@ static void smcr_link_down(struct smc_link *lnk) /* must be called under lgr->llc_conf_mutex lock */ void smcr_link_down_cond(struct smc_link *lnk) { - if (smc_link_downing(&lnk->state)) + if (smc_link_downing(&lnk->state)) { + trace_smcr_link_down(lnk, __builtin_return_address(0)); smcr_link_down(lnk); + } } /* will get the lgr->llc_conf_mutex lock */ void smcr_link_down_cond_sched(struct smc_link *lnk) { - if (smc_link_downing(&lnk->state)) + if (smc_link_downing(&lnk->state)) { + trace_smcr_link_down(lnk, __builtin_return_address(0)); schedule_work(&lnk->link_down_wrk); + } } void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport) @@ -1642,13 +1718,15 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) return rc; } -static bool smcr_lgr_match(struct smc_link_group *lgr, - struct smc_clc_msg_local *lcl, +static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version, + u8 peer_systemid[], + u8 peer_gid[], + u8 peer_mac_v1[], enum smc_lgr_role role, u32 clcqpn) { int i; - if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) || + if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) || lgr->role != role) return false; @@ -1656,8 +1734,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, if (!smc_link_active(&lgr->lnk[i])) continue; if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) && - !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) && - !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac))) + !memcmp(lgr->lnk[i].peer_gid, peer_gid, SMC_GID_SIZE) && + (smcr_version == SMC_V2 || + !memcmp(lgr->lnk[i].peer_mac, peer_mac_v1, ETH_ALEN))) return true; } return false; @@ -1696,7 +1775,10 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) if ((ini->is_smcd ? 
smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected], ini->ism_peer_gid[ini->ism_selected]) : - smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) && + smcr_lgr_match(lgr, ini->smcr_version, + ini->peer_systemid, + ini->peer_gid, ini->peer_mac, role, + ini->ib_clcqpn)) && !lgr->sync_err && (ini->smcd_version == SMC_V2 || lgr->vlan_id == ini->vlan_id) && diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index c043ecdca5c445..59cef3b830d835 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -42,11 +42,16 @@ enum smc_link_state { /* possible states of a link */ }; #define SMC_WR_BUF_SIZE 48 /* size of work request buffer */ +#define SMC_WR_BUF_V2_SIZE 8192 /* size of v2 work request buffer */ struct smc_wr_buf { u8 raw[SMC_WR_BUF_SIZE]; }; +struct smc_wr_v2_buf { + u8 raw[SMC_WR_BUF_V2_SIZE]; +}; + #define SMC_WR_REG_MR_WAIT_TIME (5 * HZ)/* wait time for ib_wr_reg_mr result */ enum smc_wr_reg_state { @@ -92,7 +97,11 @@ struct smc_link { struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ struct completion *wr_tx_compl; /* WR send CQE completion */ /* above four vectors have wr_tx_cnt elements and use the same index */ + struct ib_send_wr *wr_tx_v2_ib; /* WR send v2 meta data */ + struct ib_sge *wr_tx_v2_sge; /* WR send v2 gather meta data*/ + struct smc_wr_tx_pend *wr_tx_v2_pend; /* WR send v2 waiting for CQE */ dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ + dma_addr_t wr_tx_v2_dma_addr; /* DMA address of v2 tx buf*/ atomic_long_t wr_tx_id; /* seq # of last sent WR */ unsigned long *wr_tx_mask; /* bit mask of used indexes */ u32 wr_tx_cnt; /* number of WR send buffers */ @@ -104,6 +113,7 @@ struct smc_link { struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */ /* above three vectors have wr_rx_cnt elements and use the same index */ dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ + dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/ u64 wr_rx_id; /* seq # of last recv WR */ u32 wr_rx_cnt; /* number of WR recv buffers */ unsigned long wr_rx_tstamp; /* jiffies when last buf rx */ @@ -208,6 +218,7 @@ enum smc_llc_flowtype { SMC_LLC_FLOW_NONE = 0, SMC_LLC_FLOW_ADD_LINK = 2, SMC_LLC_FLOW_DEL_LINK = 4, + SMC_LLC_FLOW_REQ_ADD_LINK = 5, SMC_LLC_FLOW_RKEY = 6, }; @@ -250,6 +261,10 @@ struct smc_link_group { /* client or server */ struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */ + struct smc_wr_v2_buf *wr_rx_buf_v2; + /* WR v2 recv payload buffer */ + struct smc_wr_v2_buf *wr_tx_buf_v2; + /* WR v2 send payload buffer */ char peer_systemid[SMC_SYSTEMID_LEN]; /* unique system_id of peer */ struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] @@ -288,6 +303,9 @@ struct smc_link_group { /* link keep alive time */ u32 llc_termination_rsn; /* rsn code for termination */ + u8 nexthop_mac[ETH_ALEN]; + u8 uses_gateway; + __be32 saddr; }; struct { /* SMC-D */ u64 peer_gid; @@ -302,6 +320,31 @@ struct smc_link_group { struct smc_clc_msg_local; +#define GID_LIST_SIZE 2 + +struct smc_gidlist { + u8 len; + u8 list[GID_LIST_SIZE][SMC_GID_SIZE]; +}; + +struct smc_init_info_smcrv2 { + /* Input fields */ + __be32 saddr; + struct sock *clc_sk; + __be32 daddr; + + /* Output fields when saddr is set */ + struct smc_ib_device *ib_dev_v2; + u8 ib_port_v2; + u8 ib_gid_v2[SMC_GID_SIZE]; + + /* Additional output fields when clc_sk and daddr is set as well */ + u8 uses_gateway; + u8 nexthop_mac[ETH_ALEN]; + + struct smc_gidlist gidlist; +}; + struct smc_init_info { u8 is_smcd; u8 smc_type_v1; @@ -310,12 +353,18 @@ struct smc_init_info { u8 
first_contact_local; unsigned short vlan_id; u32 rc; + u8 negotiated_eid[SMC_MAX_EID_LEN]; /* SMC-R */ - struct smc_clc_msg_local *ib_lcl; + u8 smcr_version; + u8 check_smcrv2; + u8 peer_gid[SMC_GID_SIZE]; + u8 peer_mac[ETH_ALEN]; + u8 peer_systemid[SMC_SYSTEMID_LEN]; struct smc_ib_device *ib_dev; u8 ib_gid[SMC_GID_SIZE]; u8 ib_port; u32 ib_clcqpn; + struct smc_init_info_smcrv2 smcrv2; /* SMC-D */ u64 ism_peer_gid[SMC_MAX_ISM_DEVS + 1]; struct smcd_dev *ism_dev[SMC_MAX_ISM_DEVS + 1]; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index a8845343d183e8..d93055ec17ae86 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -62,16 +63,23 @@ static int smc_ib_modify_qp_rtr(struct smc_link *lnk) IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; struct ib_qp_attr qp_attr; + u8 hop_lim = 1; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_RTR; qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu); qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport); - rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0); + if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) + hop_lim = IPV6_DEFAULT_HOPLIMIT; + rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0); rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid); - memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, - sizeof(lnk->peer_mac)); + if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) + memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac, + sizeof(lnk->lgr->nexthop_mac)); + else + memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, + sizeof(lnk->peer_mac)); qp_attr.dest_qp_num = lnk->peer_qpn; qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */ qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming @@ -183,9 +191,81 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; } +int smc_ib_find_route(__be32 saddr, __be32 daddr, + u8 nexthop_mac[], u8 *uses_gateway) +{ + struct neighbour *neigh = NULL; + struct rtable *rt = NULL; + struct flowi4 fl4 = { + .saddr = saddr, + .daddr = daddr + }; + + if (daddr == cpu_to_be32(INADDR_NONE)) + goto out; + rt = ip_route_output_flow(&init_net, &fl4, NULL); + if (IS_ERR(rt)) + goto out; + if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET) + goto out; + neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr); + if (neigh) { + memcpy(nexthop_mac, neigh->ha, ETH_ALEN); + *uses_gateway = rt->rt_uses_gateway; + return 0; + } +out: + return -ENOENT; +} + +static int smc_ib_determine_gid_rcu(const struct net_device *ndev, + const struct ib_gid_attr *attr, + u8 gid[], u8 *sgid_index, + struct smc_init_info_smcrv2 *smcrv2) +{ + if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) { + if (gid) + memcpy(gid, &attr->gid, SMC_GID_SIZE); + if (sgid_index) + *sgid_index = attr->index; + return 0; + } + if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) { + struct in_device *in_dev = __in_dev_get_rcu(ndev); + const struct in_ifaddr *ifa; + bool subnet_match = false; + + if (!in_dev) + goto out; + in_dev_for_each_ifa_rcu(ifa, in_dev) { + if (!inet_ifa_match(smcrv2->saddr, ifa)) + continue; + subnet_match = true; + break; + } + if (!subnet_match) + goto out; + if (smcrv2->daddr && 
smc_ib_find_route(smcrv2->saddr, + smcrv2->daddr, + smcrv2->nexthop_mac, + &smcrv2->uses_gateway)) + goto out; + + if (gid) + memcpy(gid, &attr->gid, SMC_GID_SIZE); + if (sgid_index) + *sgid_index = attr->index; + return 0; + } +out: + return -ENODEV; +} + /* determine the gid for an ib-device port and vlan id */ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, - unsigned short vlan_id, u8 gid[], u8 *sgid_index) + unsigned short vlan_id, u8 gid[], u8 *sgid_index, + struct smc_init_info_smcrv2 *smcrv2) { const struct ib_gid_attr *attr; const struct net_device *ndev; @@ -201,15 +281,13 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, if (!IS_ERR(ndev) && ((!vlan_id && !is_vlan_dev(ndev)) || (vlan_id && is_vlan_dev(ndev) && - vlan_dev_vlan_id(ndev) == vlan_id)) && - attr->gid_type == IB_GID_TYPE_ROCE) { - rcu_read_unlock(); - if (gid) - memcpy(gid, &attr->gid, SMC_GID_SIZE); - if (sgid_index) - *sgid_index = attr->index; - rdma_put_gid_attr(attr); - return 0; + vlan_dev_vlan_id(ndev) == vlan_id))) { + if (!smc_ib_determine_gid_rcu(ndev, attr, gid, + sgid_index, smcrv2)) { + rcu_read_unlock(); + rdma_put_gid_attr(attr); + return 0; + } } rcu_read_unlock(); rdma_put_gid_attr(attr); @@ -217,6 +295,58 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, return -ENODEV; } +/* check if gid is still defined on smcibdev */ +static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2, + struct smc_ib_device *smcibdev, u8 ibport) +{ + const struct ib_gid_attr *attr; + bool rc = false; + int i; + + for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { + attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); + if (IS_ERR(attr)) + continue; + + rcu_read_lock(); + if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) || + (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + !(ipv6_addr_type((const struct in6_addr *)&attr->gid) + & IPV6_ADDR_LINKLOCAL))) + if (!memcmp(gid, &attr->gid, SMC_GID_SIZE)) + rc = true; + rcu_read_unlock(); + rdma_put_gid_attr(attr); + } + return rc; +} + +/* check all links if the gid is still defined on smcibdev */ +static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport) +{ + struct smc_link_group *lgr; + int i; + + spin_lock_bh(&smc_lgr_list.lock); + list_for_each_entry(lgr, &smc_lgr_list.list, list) { + if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, + SMC_MAX_PNETID_LEN)) + continue; /* lgr is not affected */ + if (list_empty(&lgr->list)) + continue; + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { + if (lgr->lnk[i].state == SMC_LNK_UNUSED || + lgr->lnk[i].smcibdev != smcibdev) + continue; + if (!smc_ib_check_link_gid(lgr->lnk[i].gid, + lgr->smc_version == SMC_V2, + smcibdev, ibport)) + smcr_port_err(smcibdev, ibport); + } + } + spin_unlock_bh(&smc_lgr_list.lock); +} + static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) { int rc; @@ -255,6 +385,7 @@ static void smc_ib_port_event_work(struct work_struct *work) } else { clear_bit(port_idx, smcibdev->ports_going_away); smcr_port_add(smcibdev, port_idx + 1); + smc_ib_gid_check(smcibdev, port_idx + 1); } } } @@ -523,6 +654,7 @@ void smc_ib_destroy_queue_pair(struct smc_link *lnk) /* create a queue pair within the protection domain for a link */ int smc_ib_create_queue_pair(struct smc_link *lnk) { + int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 
2 : 1; struct ib_qp_init_attr qp_attr = { .event_handler = smc_ib_qp_event_handler, .qp_context = lnk, @@ -536,7 +668,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .max_send_wr = SMC_WR_BUF_CNT * 3, .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, - .max_recv_sge = 1, + .max_recv_sge = sges_per_buf, }, .sq_sig_type = IB_SIGNAL_REQ_WR, .qp_type = IB_QPT_RC, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 3085f5180da79a..07585937370ebc 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -59,6 +59,17 @@ struct smc_ib_device { /* ib-device infos for smc */ int ndev_ifidx[SMC_MAX_PORTS]; /* ndev if indexes */ }; +static inline __be32 smc_ib_gid_to_ipv4(u8 gid[SMC_GID_SIZE]) +{ + struct in6_addr *addr6 = (struct in6_addr *)gid; + + if (ipv6_addr_v4mapped(addr6) || + !(addr6->s6_addr32[0] | addr6->s6_addr32[1] | addr6->s6_addr32[2])) + return addr6->s6_addr32[3]; + return cpu_to_be32(INADDR_NONE); +} + +struct smc_init_info_smcrv2; struct smc_buf_desc; struct smc_link; @@ -90,7 +101,10 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, - unsigned short vlan_id, u8 gid[], u8 *sgid_index); + unsigned short vlan_id, u8 gid[], u8 *sgid_index, + struct smc_init_info_smcrv2 *smcrv2); +int smc_ib_find_route(__be32 saddr, __be32 daddr, + u8 nexthop_mac[], u8 *uses_gateway); bool smc_ib_is_valid_local_systemid(void); int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb); #endif diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index 9cb2df28996364..fd28cc498b984b 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c @@ -23,6 +23,7 @@ struct smcd_dev_list smcd_dev_list = { }; static bool smc_ism_v2_capable; +static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN]; /* Test if an ISM communication is possible - same CPC */ int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd) @@ -42,9 +43,12 @@ int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos, return rc < 0 ? 
rc : 0; } -void smc_ism_get_system_eid(struct smcd_dev *smcd, u8 **eid) +void smc_ism_get_system_eid(u8 **eid) { - smcd->ops->get_system_eid(smcd, eid); + if (!smc_ism_v2_capable) + *eid = NULL; + else + *eid = smc_ism_v2_system_eid; } u16 smc_ism_get_chid(struct smcd_dev *smcd) @@ -435,9 +439,12 @@ int smcd_register_dev(struct smcd_dev *smcd) if (list_empty(&smcd_dev_list.list)) { u8 *system_eid = NULL; - smc_ism_get_system_eid(smcd, &system_eid); - if (system_eid[24] != '0' || system_eid[28] != '0') + smcd->ops->get_system_eid(smcd, &system_eid); + if (system_eid[24] != '0' || system_eid[28] != '0') { smc_ism_v2_capable = true; + memcpy(smc_ism_v2_system_eid, system_eid, + SMC_MAX_EID_LEN); + } } /* sort list: devices without pnetid before devices with pnetid */ if (smcd->pnetid[0]) @@ -533,4 +540,5 @@ EXPORT_SYMBOL_GPL(smcd_handle_irq); void __init smc_ism_init(void) { smc_ism_v2_capable = false; + memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN); } diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h index 113efc7352ed67..004b22a13ffadd 100644 --- a/net/smc/smc_ism.h +++ b/net/smc/smc_ism.h @@ -48,7 +48,7 @@ int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, void *data, size_t len); int smc_ism_signal_shutdown(struct smc_link_group *lgr); -void smc_ism_get_system_eid(struct smcd_dev *dev, u8 **eid); +void smc_ism_get_system_eid(u8 **eid); u16 smc_ism_get_chid(struct smcd_dev *dev); bool smc_ism_is_v2_capable(void); void smc_ism_init(void); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index f1d323439a2af3..b102680296b898 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -23,16 +23,24 @@ struct smc_llc_hdr { struct smc_wr_rx_hdr common; - u8 length; /* 44 */ -#if defined(__BIG_ENDIAN_BITFIELD) - u8 reserved:4, - add_link_rej_rsn:4; + union { + struct { + u8 length; /* 44 */ + #if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:4, + add_link_rej_rsn:4; #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 add_link_rej_rsn:4, - reserved:4; + u8 add_link_rej_rsn:4, + reserved:4; #endif + }; + u16 length_v2; /* 44 - 8192*/ + }; u8 flags; -}; +} __packed; /* format defined in + * IBM Shared Memory Communications Version 2 + * (https://www.ibm.com/support/pages/node/6326337) + */ #define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03 @@ -76,6 +84,32 @@ struct smc_llc_msg_add_link_cont_rt { __be64 rmb_vaddr_new; }; +struct smc_llc_msg_add_link_v2_ext { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 v2_direct : 1, + reserved : 7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 7, + v2_direct : 1; +#endif + u8 reserved2; + u8 client_target_gid[SMC_GID_SIZE]; + u8 reserved3[8]; + u16 num_rkeys; + struct smc_llc_msg_add_link_cont_rt rt[]; +} __packed; /* format defined in + * IBM Shared Memory Communications Version 2 + * (https://www.ibm.com/support/pages/node/6326337) + */ + +struct smc_llc_msg_req_add_link_v2 { + struct smc_llc_hdr hd; + u8 reserved[20]; + u8 gid_cnt; + u8 reserved2[3]; + u8 gid[][SMC_GID_SIZE]; +}; + #define SMC_LLC_RKEYS_PER_CONT_MSG 2 struct smc_llc_msg_add_link_cont { /* type 0x03 */ @@ -114,7 +148,8 @@ struct smc_rmb_rtoken { __be64 rmb_vaddr; } __packed; /* format defined in RFC7609 */ -#define SMC_LLC_RKEYS_PER_MSG 3 +#define SMC_LLC_RKEYS_PER_MSG 3 +#define SMC_LLC_RKEYS_PER_MSG_V2 255 struct smc_llc_msg_confirm_rkey { /* type 0x06 */ struct smc_llc_hdr hd; @@ -135,9 +170,18 @@ struct smc_llc_msg_delete_rkey { /* type 0x09 */ u8 reserved2[4]; }; +struct 
smc_llc_msg_delete_rkey_v2 { /* type 0x29 */ + struct smc_llc_hdr hd; + u8 num_rkeys; + u8 num_inval_rkeys; + u8 reserved[2]; + __be32 rkey[]; +}; + union smc_llc_msg { struct smc_llc_msg_confirm_link confirm_link; struct smc_llc_msg_add_link add_link; + struct smc_llc_msg_req_add_link_v2 req_add_link; struct smc_llc_msg_add_link_cont add_link_cont; struct smc_llc_msg_del_link delete_link; @@ -189,7 +233,7 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow, static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type, struct smc_llc_qentry *qentry) { - u8 msg_type = qentry->msg.raw.hdr.common.type; + u8 msg_type = qentry->msg.raw.hdr.common.llc_type; if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) && flow_type != msg_type && !lgr->delayed_event) { @@ -219,7 +263,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow, spin_unlock_bh(&lgr->llc_flow_lock); return false; } - switch (qentry->msg.raw.hdr.common.type) { + switch (qentry->msg.raw.hdr.common.llc_type) { case SMC_LLC_ADD_LINK: flow->type = SMC_LLC_FLOW_ADD_LINK; break; @@ -306,7 +350,7 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr, smc_llc_flow_qentry_del(flow); goto out; } - rcv_msg = flow->qentry->msg.raw.hdr.common.type; + rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type; if (exp_msg && rcv_msg != exp_msg) { if (exp_msg == SMC_LLC_ADD_LINK && rcv_msg == SMC_LLC_DELETE_LINK) { @@ -374,6 +418,30 @@ static int smc_llc_add_pending_send(struct smc_link *link, return 0; } +static int smc_llc_add_pending_send_v2(struct smc_link *link, + struct smc_wr_v2_buf **wr_buf, + struct smc_wr_tx_pend_priv **pend) +{ + int rc; + + rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend); + if (rc < 0) + return rc; + return 0; +} + +static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr, + struct smc_link_group *lgr, size_t len) +{ + if (lgr->smc_version == SMC_V2) { + hdr->common.llc_version = SMC_V2; + hdr->length_v2 = len; + } else { + hdr->common.llc_version = 0; + hdr->length = len; + } +} + /* high-level API to send LLC confirm link */ int smc_llc_send_confirm_link(struct smc_link *link, enum smc_llc_reqresp reqresp) @@ -390,8 +458,8 @@ int smc_llc_send_confirm_link(struct smc_link *link, goto put_out; confllc = (struct smc_llc_msg_confirm_link *)wr_buf; memset(confllc, 0, sizeof(*confllc)); - confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; - confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link); + confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK; + smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc)); confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; if (reqresp == SMC_LLC_RESP) confllc->hd.flags |= SMC_LLC_FLAG_RESP; @@ -426,8 +494,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link, goto put_out; rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); - rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY; - rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey); + rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY; + smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc)); rtok_ix = 1; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { @@ -471,8 +539,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link, goto put_out; rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); - rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY; - rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey); + rkeyllc->hd.common.llc_type = 
SMC_LLC_DELETE_RKEY; + smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc)); rkeyllc->num_rkeys = 1; rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey); /* send llc message */ @@ -482,26 +550,116 @@ static int smc_llc_send_delete_rkey(struct smc_link *link, return rc; } +/* return first buffer from any of the next buf lists */ +static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + struct smc_buf_desc *buf_pos; + + while (*buf_lst < SMC_RMBE_SIZES) { + buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], + struct smc_buf_desc, list); + if (buf_pos) + return buf_pos; + (*buf_lst)++; + } + return NULL; +} + +/* return next rmb from buffer lists */ +static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst, + struct smc_buf_desc *buf_pos) +{ + struct smc_buf_desc *buf_next; + + if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { + (*buf_lst)++; + return _smc_llc_get_next_rmb(lgr, buf_lst); + } + buf_next = list_next_entry(buf_pos, list); + return buf_next; +} + +static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + *buf_lst = 0; + return smc_llc_get_next_rmb(lgr, buf_lst, NULL); +} + +static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext, + struct smc_link *link, struct smc_link *link_new) +{ + struct smc_link_group *lgr = link->lgr; + struct smc_buf_desc *buf_pos; + int prim_lnk_idx, lnk_idx, i; + struct smc_buf_desc *rmb; + int len = sizeof(*ext); + int buf_lst; + + ext->v2_direct = !lgr->uses_gateway; + memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE); + + prim_lnk_idx = link->link_idx; + lnk_idx = link_new->link_idx; + mutex_lock(&lgr->rmbs_lock); + ext->num_rkeys = lgr->conns_num; + if (!ext->num_rkeys) + goto out; + buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); + for (i = 0; i < ext->num_rkeys; i++) { + if (!buf_pos) + break; + rmb = buf_pos; + ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey); + ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey); + ext->rt[i].rmb_vaddr_new = + cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); + while (buf_pos && !(buf_pos)->used) + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); + } + len += i * sizeof(ext->rt[0]); +out: + mutex_unlock(&lgr->rmbs_lock); + return len; +} + /* send ADD LINK request or response */ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], struct smc_link *link_new, enum smc_llc_reqresp reqresp) { + struct smc_llc_msg_add_link_v2_ext *ext = NULL; struct smc_llc_msg_add_link *addllc; struct smc_wr_tx_pend_priv *pend; - struct smc_wr_buf *wr_buf; + int len = sizeof(*addllc); int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; - rc = smc_llc_add_pending_send(link, &wr_buf, &pend); - if (rc) - goto put_out; - addllc = (struct smc_llc_msg_add_link *)wr_buf; + if (link->lgr->smc_version == SMC_V2) { + struct smc_wr_v2_buf *wr_buf; + + rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); + if (rc) + goto put_out; + addllc = (struct smc_llc_msg_add_link *)wr_buf; + ext = (struct smc_llc_msg_add_link_v2_ext *) + &wr_buf->raw[sizeof(*addllc)]; + memset(ext, 0, SMC_WR_TX_SIZE); + } else { + struct smc_wr_buf *wr_buf; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + goto put_out; + addllc = (struct smc_llc_msg_add_link *)wr_buf; + } memset(addllc, 0, sizeof(*addllc)); - addllc->hd.common.type = SMC_LLC_ADD_LINK; - 
addllc->hd.length = sizeof(struct smc_llc_msg_add_link); + addllc->hd.common.llc_type = SMC_LLC_ADD_LINK; if (reqresp == SMC_LLC_RESP) addllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(addllc->sender_mac, mac, ETH_ALEN); @@ -516,8 +674,14 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], addllc->qp_mtu = min(link_new->path_mtu, link_new->peer_mtu); } + if (ext && link_new) + len += smc_llc_fill_ext_v2(ext, link, link_new); + smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len); /* send llc message */ - rc = smc_wr_tx_send(link, pend); + if (link->lgr->smc_version == SMC_V2) + rc = smc_wr_tx_v2_send(link, pend, len); + else + rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; @@ -541,8 +705,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id, delllc = (struct smc_llc_msg_del_link *)wr_buf; memset(delllc, 0, sizeof(*delllc)); - delllc->hd.common.type = SMC_LLC_DELETE_LINK; - delllc->hd.length = sizeof(struct smc_llc_msg_del_link); + delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc)); if (reqresp == SMC_LLC_RESP) delllc->hd.flags |= SMC_LLC_FLAG_RESP; if (orderly) @@ -574,8 +738,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16]) goto put_out; testllc = (struct smc_llc_msg_test_link *)wr_buf; memset(testllc, 0, sizeof(*testllc)); - testllc->hd.common.type = SMC_LLC_TEST_LINK; - testllc->hd.length = sizeof(struct smc_llc_msg_test_link); + testllc->hd.common.llc_type = SMC_LLC_TEST_LINK; + smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc)); memcpy(testllc->user_data, user_data, sizeof(testllc->user_data)); /* send llc message */ rc = smc_wr_tx_send(link, pend); @@ -651,44 +815,6 @@ static int smc_llc_alloc_alt_link(struct smc_link_group *lgr, return -EMLINK; } -/* return first buffer from any of the next buf lists */ -static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, - int *buf_lst) -{ - struct smc_buf_desc *buf_pos; - - while (*buf_lst < SMC_RMBE_SIZES) { - buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], - struct smc_buf_desc, list); - if (buf_pos) - return buf_pos; - (*buf_lst)++; - } - return NULL; -} - -/* return next rmb from buffer lists */ -static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, - int *buf_lst, - struct smc_buf_desc *buf_pos) -{ - struct smc_buf_desc *buf_next; - - if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { - (*buf_lst)++; - return _smc_llc_get_next_rmb(lgr, buf_lst); - } - buf_next = list_next_entry(buf_pos, list); - return buf_next; -} - -static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, - int *buf_lst) -{ - *buf_lst = 0; - return smc_llc_get_next_rmb(lgr, buf_lst, NULL); -} - /* send one add_link_continue msg */ static int smc_llc_add_link_cont(struct smc_link *link, struct smc_link *link_new, u8 *num_rkeys_todo, @@ -734,7 +860,7 @@ static int smc_llc_add_link_cont(struct smc_link *link, while (*buf_pos && !(*buf_pos)->used) *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); } - addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT; + addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); if (lgr->role == SMC_CLNT) addc_llc->hd.flags |= SMC_LLC_FLAG_RESP; @@ -793,6 +919,8 @@ static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry) qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; qentry->msg.raw.hdr.flags |= 
SMC_LLC_FLAG_ADD_LNK_REJ; qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; + smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, + sizeof(qentry->msg)); return smc_llc_send_message(qentry->link, &qentry->msg); } @@ -813,7 +941,7 @@ static int smc_llc_cli_conf_link(struct smc_link *link, SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } - if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { + if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* received DELETE_LINK instead */ qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, &qentry->msg); @@ -854,6 +982,26 @@ static int smc_llc_cli_conf_link(struct smc_link *link, return 0; } +static void smc_llc_save_add_link_rkeys(struct smc_link *link, + struct smc_link *link_new) +{ + struct smc_llc_msg_add_link_v2_ext *ext; + struct smc_link_group *lgr = link->lgr; + int max, i; + + ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 + + SMC_WR_TX_SIZE); + max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); + mutex_lock(&lgr->rmbs_lock); + for (i = 0; i < max; i++) { + smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, + ext->rt[i].rmb_key, + ext->rt[i].rmb_vaddr_new, + ext->rt[i].rmb_key_new); + } + mutex_unlock(&lgr->rmbs_lock); +} + static void smc_llc_save_add_link_info(struct smc_link *link, struct smc_llc_msg_add_link *add_llc) { @@ -870,31 +1018,47 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) struct smc_llc_msg_add_link *llc = &qentry->msg.add_link; enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_init_info *ini = NULL; struct smc_link *lnk_new = NULL; - struct smc_init_info ini; int lnk_idx, rc = 0; if (!llc->qp_mtu) goto out_reject; - ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) { + rc = -ENOMEM; + goto out_reject; + } + + ini->vlan_id = lgr->vlan_id; + if (lgr->smc_version == SMC_V2) { + ini->check_smcrv2 = true; + ini->smcrv2.saddr = lgr->saddr; + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid); + } + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && - !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) { - if (!ini.ib_dev) + (lgr->smc_version == SMC_V2 || + !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) { + if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2) goto out_reject; lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } - if (!ini.ib_dev) { + if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; - ini.ib_dev = link->smcibdev; - ini.ib_port = link->ibport; + ini->smcrv2.ib_dev_v2 = link->smcibdev; + ini->smcrv2.ib_port_v2 = link->ibport; + } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini->ib_dev = link->smcibdev; + ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); if (lnk_idx < 0) goto out_reject; lnk_new = &lgr->lnk[lnk_idx]; - rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini); + rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini); if (rc) goto out_reject; smc_llc_save_add_link_info(lnk_new, llc); @@ -910,16 +1074,20 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) goto out_clear_lnk; rc = smc_llc_send_add_link(link, - lnk_new->smcibdev->mac[ini.ib_port - 1], + lnk_new->smcibdev->mac[lnk_new->ibport - 1], lnk_new->gid, lnk_new, 
SMC_LLC_RESP); if (rc) goto out_clear_lnk; - rc = smc_llc_cli_rkey_exchange(link, lnk_new); - if (rc) { - rc = 0; - goto out_clear_lnk; + if (lgr->smc_version == SMC_V2) { + smc_llc_save_add_link_rkeys(link, lnk_new); + } else { + rc = smc_llc_cli_rkey_exchange(link, lnk_new); + if (rc) { + rc = 0; + goto out_clear_lnk; + } } - rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t); + rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t); if (!rc) goto out; out_clear_lnk: @@ -928,29 +1096,78 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) out_reject: smc_llc_cli_add_link_reject(qentry); out: + kfree(ini); kfree(qentry); return rc; } +static void smc_llc_send_request_add_link(struct smc_link *link) +{ + struct smc_llc_msg_req_add_link_v2 *llc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_v2_buf *wr_buf; + struct smc_gidlist gidlist; + int rc, len, i; + + if (!smc_wr_tx_link_hold(link)) + return; + if (link->lgr->type == SMC_LGR_SYMMETRIC || + link->lgr->type == SMC_LGR_ASYMMETRIC_PEER) + goto put_out; + + smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid); + if (gidlist.len <= 1) + goto put_out; + + rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); + if (rc) + goto put_out; + llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf; + memset(llc, 0, SMC_WR_TX_SIZE); + + llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK; + for (i = 0; i < gidlist.len; i++) + memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0])); + llc->gid_cnt = gidlist.len; + len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0])); + smc_llc_init_msg_hdr(&llc->hd, link->lgr, len); + rc = smc_wr_tx_v2_send(link, pend, len); + if (!rc) + /* set REQ_ADD_LINK flow and wait for response from peer */ + link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK; +put_out: + smc_wr_tx_link_put(link); +} + /* as an SMC client, invite server to start the add_link processing */ static void smc_llc_cli_add_link_invite(struct smc_link *link, struct smc_llc_qentry *qentry) { struct smc_link_group *lgr = smc_get_lgr(link); - struct smc_init_info ini; + struct smc_init_info *ini = NULL; + + if (lgr->smc_version == SMC_V2) { + smc_llc_send_request_add_link(link); + goto out; + } if (lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER) goto out; - ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); - if (!ini.ib_dev) + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) + goto out; + + ini->vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); + if (!ini->ib_dev) goto out; - smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1], - ini.ib_gid, NULL, SMC_LLC_REQ); + smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1], + ini->ib_gid, NULL, SMC_LLC_REQ); out: + kfree(ini); kfree(qentry); } @@ -966,7 +1183,7 @@ static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc) static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) { - if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && + if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK && smc_llc_is_empty_llc_message(llc)) return true; return false; @@ -1133,7 +1350,7 @@ static int smc_llc_srv_conf_link(struct smc_link *link, /* receive CONFIRM LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); if (!qentry || - qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { + qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* send DELETE LINK */ 
smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); @@ -1152,37 +1369,80 @@ static int smc_llc_srv_conf_link(struct smc_link *link, return 0; } -int smc_llc_srv_add_link(struct smc_link *link) +static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry) +{ + qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; + smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, + sizeof(qentry->msg)); + memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data)); + smc_llc_send_message(qentry->link, &qentry->msg); +} + +int smc_llc_srv_add_link(struct smc_link *link, + struct smc_llc_qentry *req_qentry) { enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = link->lgr; struct smc_llc_msg_add_link *add_llc; struct smc_llc_qentry *qentry = NULL; - struct smc_link *link_new; - struct smc_init_info ini; + bool send_req_add_link_resp = false; + struct smc_link *link_new = NULL; + struct smc_init_info *ini = NULL; int lnk_idx, rc = 0; + if (req_qentry && + req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK) + send_req_add_link_resp = true; + + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) { + rc = -ENOMEM; + goto out; + } + /* ignore client add link recommendation, start new flow */ - ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); - if (!ini.ib_dev) { + ini->vlan_id = lgr->vlan_id; + if (lgr->smc_version == SMC_V2) { + ini->check_smcrv2 = true; + ini->smcrv2.saddr = lgr->saddr; + if (send_req_add_link_resp) { + struct smc_llc_msg_req_add_link_v2 *req_add = + &req_qentry->msg.req_add_link; + + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]); + } + } + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); + if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini->smcrv2.ib_dev_v2 = link->smcibdev; + ini->smcrv2.ib_port_v2 = link->ibport; + } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; - ini.ib_dev = link->smcibdev; - ini.ib_port = link->ibport; + ini->ib_dev = link->smcibdev; + ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); - if (lnk_idx < 0) - return 0; + if (lnk_idx < 0) { + rc = 0; + goto out; + } - rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini); + rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini); if (rc) - return rc; + goto out; link_new = &lgr->lnk[lnk_idx]; + + rc = smcr_buf_map_lgr(link_new); + if (rc) + goto out_err; + rc = smc_llc_send_add_link(link, - link_new->smcibdev->mac[ini.ib_port - 1], + link_new->smcibdev->mac[link_new->ibport-1], link_new->gid, link_new, SMC_LLC_REQ); if (rc) goto out_err; + send_req_add_link_resp = false; /* receive ADD LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK); if (!qentry) { @@ -1197,48 +1457,59 @@ int smc_llc_srv_add_link(struct smc_link *link) } if (lgr->type == SMC_LGR_SINGLE && (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && - !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) { + (lgr->smc_version == SMC_V2 || + !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) { lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } smc_llc_save_add_link_info(link_new, add_llc); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); rc = smc_ib_ready_link(link_new); - if (rc) - goto out_err; - rc = smcr_buf_map_lgr(link_new); if (rc) goto out_err; rc = smcr_buf_reg_lgr(link_new); if (rc) goto out_err; - rc = 
smc_llc_srv_rkey_exchange(link, link_new); - if (rc) - goto out_err; + if (lgr->smc_version == SMC_V2) { + smc_llc_save_add_link_rkeys(link, link_new); + } else { + rc = smc_llc_srv_rkey_exchange(link, link_new); + if (rc) + goto out_err; + } rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); if (rc) goto out_err; + kfree(ini); return 0; out_err: - link_new->state = SMC_LNK_INACTIVE; - smcr_link_clear(link_new, false); + if (link_new) { + link_new->state = SMC_LNK_INACTIVE; + smcr_link_clear(link_new, false); + } +out: + kfree(ini); + if (send_req_add_link_resp) + smc_llc_send_req_add_link_response(req_qentry); return rc; } static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) { struct smc_link *link = lgr->llc_flow_lcl.qentry->link; + struct smc_llc_qentry *qentry; int rc; - smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); mutex_lock(&lgr->llc_conf_mutex); - rc = smc_llc_srv_add_link(link); + rc = smc_llc_srv_add_link(link, qentry); if (!rc && lgr->type == SMC_LGR_SYMMETRIC) { /* delete any asymmetric link */ smc_llc_delete_asym_link(lgr); } mutex_unlock(&lgr->llc_conf_mutex); + kfree(qentry); } /* enqueue a local add_link req to trigger a new add_link flow */ @@ -1246,8 +1517,8 @@ void smc_llc_add_link_local(struct smc_link *link) { struct smc_llc_msg_add_link add_llc = {}; - add_llc.hd.length = sizeof(add_llc); - add_llc.hd.common.type = SMC_LLC_ADD_LINK; + add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK; + smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc)); /* no dev and port needed */ smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); } @@ -1269,7 +1540,8 @@ static void smc_llc_add_link_work(struct work_struct *work) else smc_llc_process_srv_add_link(lgr); out: - smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); + if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK) + smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } /* enqueue a local del_link msg to trigger a new del_link flow, @@ -1279,8 +1551,8 @@ void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id) { struct smc_llc_msg_del_link del_llc = {}; - del_llc.hd.length = sizeof(del_llc); - del_llc.hd.common.type = SMC_LLC_DELETE_LINK; + del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc)); del_llc.link_num = del_link_id; del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH); del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; @@ -1350,8 +1622,8 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn) struct smc_llc_msg_del_link delllc = {}; int i; - delllc.hd.common.type = SMC_LLC_DELETE_LINK; - delllc.hd.length = sizeof(delllc); + delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc)); if (ord) delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; @@ -1467,6 +1739,8 @@ static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr) link = qentry->link; num_entries = llc->rtoken[0].num_rkeys; + if (num_entries > SMC_LLC_RKEYS_PER_MSG) + goto out_err; /* first rkey entry is for receiving link */ rk_idx = smc_rtoken_add(link, llc->rtoken[0].rmb_vaddr, @@ -1485,6 +1759,7 @@ static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr) llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY; out: llc->hd.flags |= SMC_LLC_FLAG_RESP; + smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc)); smc_llc_send_message(link, &qentry->msg); smc_llc_flow_qentry_del(&lgr->llc_flow_rmt); } @@ -1502,6 
+1777,28 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr) llc = &qentry->msg.delete_rkey; link = qentry->link; + if (lgr->smc_version == SMC_V2) { + struct smc_llc_msg_delete_rkey_v2 *llcv2; + + memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc)); + llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2; + llcv2->num_inval_rkeys = 0; + + max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); + for (i = 0; i < max; i++) { + if (smc_rtoken_delete(link, llcv2->rkey[i])) + llcv2->num_inval_rkeys++; + } + memset(&llc->rkey[0], 0, sizeof(llc->rkey)); + memset(&llc->reserved2, 0, sizeof(llc->reserved2)); + smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc)); + if (llcv2->num_inval_rkeys) { + llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; + llc->err_mask = llcv2->num_inval_rkeys; + } + goto finish; + } + max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX); for (i = 0; i < max; i++) { if (smc_rtoken_delete(link, llc->rkey[i])) @@ -1511,6 +1808,7 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr) llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; llc->err_mask = err_mask; } +finish: llc->hd.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, &qentry->msg); smc_llc_flow_qentry_del(&lgr->llc_flow_rmt); @@ -1546,7 +1844,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) if (!smc_link_usable(link)) goto out; - switch (llc->raw.hdr.common.type) { + switch (llc->raw.hdr.common.llc_type) { case SMC_LLC_TEST_LINK: llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, llc); @@ -1571,8 +1869,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); wake_up(&lgr->llc_msg_waiter); - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, - qentry)) { + return; + } + if (lgr->llc_flow_lcl.type == + SMC_LLC_FLOW_REQ_ADD_LINK) { + /* server started add_link processing */ + lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK; + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, + qentry); + schedule_work(&lgr->llc_add_link_work); + return; + } + if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { schedule_work(&lgr->llc_add_link_work); } } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { @@ -1620,6 +1928,23 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt); } return; + case SMC_LLC_REQ_ADD_LINK: + /* handle response here, smc_llc_flow_stop() cannot be called + * in tasklet context + */ + if (lgr->role == SMC_CLNT && + lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK && + (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) { + smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl); + } else if (lgr->role == SMC_SERV) { + if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { + /* as smc server, handle client suggestion */ + lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK; + schedule_work(&lgr->llc_add_link_work); + } + return; + } + break; default: smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type); break; @@ -1663,7 +1988,7 @@ static void smc_llc_rx_response(struct smc_link *link, { enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type; struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl; - u8 llc_type = qentry->msg.raw.hdr.common.type; + u8 llc_type = qentry->msg.raw.hdr.common.llc_type; switch (llc_type) { case SMC_LLC_TEST_LINK: @@ -1689,7 +2014,8 @@ static void smc_llc_rx_response(struct smc_link *link, /* not used because max links is 3 */ break; default: - smc_llc_protocol_violation(link->lgr, llc_type); + 
smc_llc_protocol_violation(link->lgr, + qentry->msg.raw.hdr.common.type); break; } kfree(qentry); @@ -1714,7 +2040,8 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg)); /* process responses immediately */ - if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) { + if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) && + llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) { smc_llc_rx_response(link, qentry); return; } @@ -1734,8 +2061,13 @@ static void smc_llc_rx_handler(struct ib_wc *wc, void *buf) if (wc->byte_len < sizeof(*llc)) return; /* short message */ - if (llc->raw.hdr.length != sizeof(*llc)) - return; /* invalid message */ + if (!llc->raw.hdr.common.llc_version) { + if (llc->raw.hdr.length != sizeof(*llc)) + return; /* invalid message */ + } else { + if (llc->raw.hdr.length_v2 < sizeof(*llc)) + return; /* invalid message */ + } smc_llc_enqueue(link, llc); } @@ -1954,6 +2286,35 @@ static struct smc_wr_rx_handler smc_llc_rx_handlers[] = { .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_RKEY }, + /* V2 types */ + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_CONFIRM_LINK_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_TEST_LINK_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_ADD_LINK_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_DELETE_LINK_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_REQ_ADD_LINK_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_CONFIRM_RKEY_V2 + }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_DELETE_RKEY_V2 + }, { .handler = NULL, } diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index cc00a2ec4e92e8..4404e52b3346fb 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -30,10 +30,19 @@ enum smc_llc_msg_type { SMC_LLC_ADD_LINK = 0x02, SMC_LLC_ADD_LINK_CONT = 0x03, SMC_LLC_DELETE_LINK = 0x04, + SMC_LLC_REQ_ADD_LINK = 0x05, SMC_LLC_CONFIRM_RKEY = 0x06, SMC_LLC_TEST_LINK = 0x07, SMC_LLC_CONFIRM_RKEY_CONT = 0x08, SMC_LLC_DELETE_RKEY = 0x09, + /* V2 types */ + SMC_LLC_CONFIRM_LINK_V2 = 0x21, + SMC_LLC_ADD_LINK_V2 = 0x22, + SMC_LLC_DELETE_LINK_V2 = 0x24, + SMC_LLC_REQ_ADD_LINK_V2 = 0x25, + SMC_LLC_CONFIRM_RKEY_V2 = 0x26, + SMC_LLC_TEST_LINK_V2 = 0x27, + SMC_LLC_DELETE_RKEY_V2 = 0x29, }; #define smc_link_downing(state) \ @@ -102,7 +111,8 @@ void smc_llc_flow_qentry_del(struct smc_llc_flow *flow); void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn); int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); -int smc_llc_srv_add_link(struct smc_link *link); +int smc_llc_srv_add_link(struct smc_link *link, + struct smc_llc_qentry *req_qentry); void smc_llc_add_link_local(struct smc_link *link); int smc_llc_init(void) __init; diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c index 6fb6f96c1d1775..f13ab0661ed5ee 100644 --- a/net/smc/smc_netlink.c +++ b/net/smc/smc_netlink.c @@ -19,11 +19,19 @@ #include "smc_core.h" #include "smc_ism.h" #include "smc_ib.h" +#include "smc_clc.h" #include "smc_stats.h" #include "smc_netlink.h" -#define SMC_CMD_MAX_ATTR 1 +const struct nla_policy +smc_gen_ueid_policy[SMC_NLA_EID_TABLE_MAX + 1] = { + [SMC_NLA_EID_TABLE_UNSPEC] = { .type = NLA_UNSPEC }, + [SMC_NLA_EID_TABLE_ENTRY] = { .type = NLA_STRING, + .len = SMC_MAX_EID_LEN, + }, +}; +#define SMC_CMD_MAX_ATTR 1 /* SMC_GENL generic netlink operation definition */ static const struct genl_ops smc_gen_nl_ops[] = { { @@ -66,6 +74,43 @@ static const struct genl_ops 
smc_gen_nl_ops[] = { /* can be retrieved by unprivileged users */ .dumpit = smc_nl_get_fback_stats, }, + { + .cmd = SMC_NETLINK_DUMP_UEID, + /* can be retrieved by unprivileged users */ + .dumpit = smc_nl_dump_ueid, + }, + { + .cmd = SMC_NETLINK_ADD_UEID, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_add_ueid, + .policy = smc_gen_ueid_policy, + }, + { + .cmd = SMC_NETLINK_REMOVE_UEID, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_remove_ueid, + .policy = smc_gen_ueid_policy, + }, + { + .cmd = SMC_NETLINK_FLUSH_UEID, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_flush_ueid, + }, + { + .cmd = SMC_NETLINK_DUMP_SEID, + /* can be retrieved by unprivileged users */ + .dumpit = smc_nl_dump_seid, + }, + { + .cmd = SMC_NETLINK_ENABLE_SEID, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_enable_seid, + }, + { + .cmd = SMC_NETLINK_DISABLE_SEID, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_disable_seid, + }, }; static const struct nla_policy smc_gen_nl_policy[2] = { diff --git a/net/smc/smc_netlink.h b/net/smc/smc_netlink.h index 5ce2c0a89ccda1..e8c6c3f0e98ca1 100644 --- a/net/smc/smc_netlink.h +++ b/net/smc/smc_netlink.h @@ -17,6 +17,8 @@ extern struct genl_family smc_gen_nl_family; +extern const struct nla_policy smc_gen_ueid_policy[]; + struct smc_nl_dmp_ctx { int pos[3]; }; diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 4a964e9190b02a..67e9d9fde08540 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -953,6 +953,26 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, return rc; } +static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i, + struct smc_init_info *ini) +{ + if (!ini->check_smcrv2 && + !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->ib_gid, NULL, + NULL)) { + ini->ib_dev = ibdev; + ini->ib_port = i; + return 0; + } + if (ini->check_smcrv2 && + !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->smcrv2.ib_gid_v2, + NULL, &ini->smcrv2)) { + ini->smcrv2.ib_dev_v2 = ibdev; + ini->smcrv2.ib_port_v2 = i; + return 0; + } + return -ENODEV; +} + /* find a roce device for the given pnetid */ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, struct smc_init_info *ini, @@ -961,7 +981,6 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, struct smc_ib_device *ibdev; int i; - ini->ib_dev = NULL; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (ibdev == known_dev) @@ -971,12 +990,9 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, continue; if (smc_pnet_match(ibdev->pnetid[i - 1], pnet_id) && smc_ib_port_active(ibdev, i) && - !test_bit(i - 1, ibdev->ports_going_away) && - !smc_ib_determine_gid(ibdev, i, ini->vlan_id, - ini->ib_gid, NULL)) { - ini->ib_dev = ibdev; - ini->ib_port = i; - goto out; + !test_bit(i - 1, ibdev->ports_going_away)) { + if (!smc_pnet_determine_gid(ibdev, i, ini)) + goto out; } } } @@ -1016,12 +1032,9 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev, dev_put(ndev); if (netdev == ndev && smc_ib_port_active(ibdev, i) && - !test_bit(i - 1, ibdev->ports_going_away) && - !smc_ib_determine_gid(ibdev, i, ini->vlan_id, - ini->ib_gid, NULL)) { - ini->ib_dev = ibdev; - ini->ib_port = i; - break; + !test_bit(i - 1, ibdev->ports_going_away)) { + if (!smc_pnet_determine_gid(ibdev, i, ini)) + break; } } } @@ -1083,8 +1096,6 @@ void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(sk); - ini->ib_dev = NULL; - ini->ib_port = 0; if (!dst) goto out; if (!dst->dev) diff --git a/net/smc/smc_rx.c 
b/net/smc/smc_rx.c index 170b733bc73675..51e8eb2933ff47 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -22,6 +22,7 @@ #include "smc_tx.h" /* smc_tx_consumer_update() */ #include "smc_rx.h" #include "smc_stats.h" +#include "smc_tracepoint.h" /* callback implementation to wakeup consumers blocked with smc_rx_wait(). * indirectly called by smc_cdc_msg_recv_action(). @@ -438,6 +439,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, if (msg && smc_rx_update_consumer(smc, cons, copylen)) goto out; } + + trace_smc_rx_recvmsg(smc, copylen); } while (read_remaining); out: return read_done; diff --git a/net/smc/smc_tracepoint.c b/net/smc/smc_tracepoint.c new file mode 100644 index 00000000000000..8d47ced5a49236 --- /dev/null +++ b/net/smc/smc_tracepoint.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define CREATE_TRACE_POINTS +#include "smc_tracepoint.h" + +EXPORT_TRACEPOINT_SYMBOL(smc_switch_to_fallback); +EXPORT_TRACEPOINT_SYMBOL(smc_tx_sendmsg); +EXPORT_TRACEPOINT_SYMBOL(smc_rx_recvmsg); +EXPORT_TRACEPOINT_SYMBOL(smcr_link_down); diff --git a/net/smc/smc_tracepoint.h b/net/smc/smc_tracepoint.h new file mode 100644 index 00000000000000..b4c36795a9280f --- /dev/null +++ b/net/smc/smc_tracepoint.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM smc + +#if !defined(_TRACE_SMC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SMC_H + +#include +#include +#include +#include +#include "smc.h" +#include "smc_core.h" + +TRACE_EVENT(smc_switch_to_fallback, + + TP_PROTO(const struct smc_sock *smc, int fallback_rsn), + + TP_ARGS(smc, fallback_rsn), + + TP_STRUCT__entry( + __field(const void *, sk) + __field(const void *, clcsk) + __field(int, fallback_rsn) + ), + + TP_fast_assign( + const struct sock *sk = &smc->sk; + const struct sock *clcsk = smc->clcsock->sk; + + __entry->sk = sk; + __entry->clcsk = clcsk; + __entry->fallback_rsn = fallback_rsn; + ), + + TP_printk("sk=%p clcsk=%p fallback_rsn=%d", + __entry->sk, __entry->clcsk, __entry->fallback_rsn) +); + +DECLARE_EVENT_CLASS(smc_msg_event, + + TP_PROTO(const struct smc_sock *smc, size_t len), + + TP_ARGS(smc, len), + + TP_STRUCT__entry( + __field(const void *, smc) + __field(size_t, len) + __string(name, smc->conn.lnk->ibname) + ), + + TP_fast_assign( + __entry->smc = smc; + __entry->len = len; + __assign_str(name, smc->conn.lnk->ibname); + ), + + TP_printk("smc=%p len=%zu dev=%s", + __entry->smc, __entry->len, + __get_str(name)) +); + +DEFINE_EVENT(smc_msg_event, smc_tx_sendmsg, + + TP_PROTO(const struct smc_sock *smc, size_t len), + + TP_ARGS(smc, len) +); + +DEFINE_EVENT(smc_msg_event, smc_rx_recvmsg, + + TP_PROTO(const struct smc_sock *smc, size_t len), + + TP_ARGS(smc, len) +); + +TRACE_EVENT(smcr_link_down, + + TP_PROTO(const struct smc_link *lnk, void *location), + + TP_ARGS(lnk, location), + + TP_STRUCT__entry( + __field(const void *, lnk) + __field(const void *, lgr) + __field(int, state) + __string(name, lnk->ibname) + __field(void *, location) + ), + + TP_fast_assign( + const struct smc_link_group *lgr = lnk->lgr; + + __entry->lnk = lnk; + __entry->lgr = lgr; + __entry->state = lnk->state; + __assign_str(name, lnk->ibname); + __entry->location = location; + ), + + TP_printk("lnk=%p lgr=%p state=%d dev=%s location=%p", + __entry->lnk, __entry->lgr, + __entry->state, __get_str(name), + __entry->location) +); + +#endif /* _TRACE_SMC_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE smc_tracepoint + +#include <trace/define_trace.h> diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 738a4a99c82797..be241d53020f1e 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -28,6 +28,7 @@ #include "smc_ism.h" #include "smc_tx.h" #include "smc_stats.h" +#include "smc_tracepoint.h" #define SMC_TX_WORK_DELAY 0 #define SMC_TX_CORK_DELAY (HZ >> 2) /* 250 ms */ @@ -245,6 +246,8 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) SMC_TX_CORK_DELAY); else smc_tx_sndbuf_nonempty(conn); + + trace_smc_tx_sendmsg(smc, copylen); } /* while (msg_data_left(msg)) */ return send_done; diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index a419e9af36b982..600ab58892271f 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -101,19 +101,32 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) } pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); - if (pnd_snd_idx == link->wr_tx_cnt) - return; - link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; - if (link->wr_tx_pends[pnd_snd_idx].compl_requested) - complete(&link->wr_tx_compl[pnd_snd_idx]); - memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd)); - /* clear the full struct smc_wr_tx_pend including .priv */ - memset(&link->wr_tx_pends[pnd_snd_idx], 0, - sizeof(link->wr_tx_pends[pnd_snd_idx])); - memset(&link->wr_tx_bufs[pnd_snd_idx], 0, - sizeof(link->wr_tx_bufs[pnd_snd_idx])); - if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) - return; + if (pnd_snd_idx == link->wr_tx_cnt) { + if (link->lgr->smc_version != SMC_V2 || + link->wr_tx_v2_pend->wr_id != wc->wr_id) + return; + link->wr_tx_v2_pend->wc_status = wc->status; + memcpy(&pnd_snd, link->wr_tx_v2_pend, sizeof(pnd_snd)); + /* clear the full struct smc_wr_tx_pend including .priv */ + memset(link->wr_tx_v2_pend, 0, + sizeof(*link->wr_tx_v2_pend)); + memset(link->lgr->wr_tx_buf_v2, 0, + sizeof(*link->lgr->wr_tx_buf_v2)); + } else { + link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; + if (link->wr_tx_pends[pnd_snd_idx].compl_requested) + complete(&link->wr_tx_compl[pnd_snd_idx]); + memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], + sizeof(pnd_snd)); + /* clear the full struct smc_wr_tx_pend including .priv */ + memset(&link->wr_tx_pends[pnd_snd_idx], 0, + sizeof(link->wr_tx_pends[pnd_snd_idx])); + memset(&link->wr_tx_bufs[pnd_snd_idx], 0, + sizeof(link->wr_tx_bufs[pnd_snd_idx])); + if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) + return; + } + if (wc->status) { for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { /* clear full struct smc_wr_tx_pend including .priv */ @@ -123,6 +136,12 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) sizeof(link->wr_tx_bufs[i])); clear_bit(i, link->wr_tx_mask); } + if (link->lgr->smc_version == SMC_V2) { + memset(link->wr_tx_v2_pend, 0, + sizeof(*link->wr_tx_v2_pend)); + memset(link->lgr->wr_tx_buf_v2, 0, + sizeof(*link->lgr->wr_tx_buf_v2)); + } /* terminate link */ smcr_link_down_cond_sched(link); } @@ -239,6 +258,33 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, return 0; } +int smc_wr_tx_get_v2_slot(struct smc_link *link, + smc_wr_tx_handler handler, + struct smc_wr_v2_buf **wr_buf, + struct smc_wr_tx_pend_priv **wr_pend_priv) +{ + struct smc_wr_tx_pend *wr_pend; + struct ib_send_wr *wr_ib; + u64 wr_id; + + if (link->wr_tx_v2_pend->idx == link->wr_tx_cnt) + return -EBUSY; + + *wr_buf = NULL; + *wr_pend_priv = NULL; + wr_id = smc_wr_tx_get_next_wr_id(link); + wr_pend = link->wr_tx_v2_pend; + wr_pend->wr_id = wr_id; +
wr_pend->handler = handler; + wr_pend->link = link; + wr_pend->idx = link->wr_tx_cnt; + wr_ib = link->wr_tx_v2_ib; + wr_ib->wr_id = wr_id; + *wr_buf = link->lgr->wr_tx_buf_v2; + *wr_pend_priv = &wr_pend->priv; + return 0; +} + int smc_wr_tx_put_slot(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv) { @@ -256,6 +302,14 @@ int smc_wr_tx_put_slot(struct smc_link *link, test_and_clear_bit(idx, link->wr_tx_mask); wake_up(&link->wr_tx_wait); return 1; + } else if (link->lgr->smc_version == SMC_V2 && + pend->idx == link->wr_tx_cnt) { + /* Large v2 buffer */ + memset(&link->wr_tx_v2_pend, 0, + sizeof(link->wr_tx_v2_pend)); + memset(&link->lgr->wr_tx_buf_v2, 0, + sizeof(link->lgr->wr_tx_buf_v2)); + return 1; } return 0; @@ -280,6 +334,22 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) return rc; } +int smc_wr_tx_v2_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, + int len) +{ + int rc; + + link->wr_tx_v2_ib->sg_list[0].length = len; + ib_req_notify_cq(link->smcibdev->roce_cq_send, + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); + rc = ib_post_send(link->roce_qp, link->wr_tx_v2_ib, NULL); + if (rc) { + smc_wr_tx_put_slot(link, priv); + smcr_link_down_cond_sched(link); + } + return rc; +} + /* Send prepared WR slot via ib_post_send and wait for send completion * notification. * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer @@ -517,6 +587,7 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk) static void smc_wr_init_sge(struct smc_link *lnk) { + int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1; u32 i; for (i = 0; i < lnk->wr_tx_cnt; i++) { @@ -545,14 +616,44 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; } + + if (lnk->lgr->smc_version == SMC_V2) { + lnk->wr_tx_v2_sge->addr = lnk->wr_tx_v2_dma_addr; + lnk->wr_tx_v2_sge->length = SMC_WR_BUF_V2_SIZE; + lnk->wr_tx_v2_sge->lkey = lnk->roce_pd->local_dma_lkey; + + lnk->wr_tx_v2_ib->next = NULL; + lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge; + lnk->wr_tx_v2_ib->num_sge = 1; + lnk->wr_tx_v2_ib->opcode = IB_WR_SEND; + lnk->wr_tx_v2_ib->send_flags = + IB_SEND_SIGNALED | IB_SEND_SOLICITED; + } + + /* With SMC-Rv2 there can be messages larger than SMC_WR_TX_SIZE. + * Each ib_recv_wr gets 2 sges, the second one is a spillover buffer + * and the same buffer for all sges. When a larger message arrived then + * the content of the first small sge is copied to the beginning of + * the larger spillover buffer, allowing easy data mapping. 
+ */ for (i = 0; i < lnk->wr_rx_cnt; i++) { - lnk->wr_rx_sges[i].addr = + int x = i * sges_per_buf; + + lnk->wr_rx_sges[x].addr = lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; - lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE; - lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; + lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE; + lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey; + if (lnk->lgr->smc_version == SMC_V2) { + lnk->wr_rx_sges[x + 1].addr = + lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE; + lnk->wr_rx_sges[x + 1].length = + SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE; + lnk->wr_rx_sges[x + 1].lkey = + lnk->roce_pd->local_dma_lkey; + } lnk->wr_rx_ibs[i].next = NULL; - lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i]; - lnk->wr_rx_ibs[i].num_sge = 1; + lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x]; + lnk->wr_rx_ibs[i].num_sge = sges_per_buf; } lnk->wr_reg.wr.next = NULL; lnk->wr_reg.wr.num_sge = 0; @@ -585,16 +686,45 @@ void smc_wr_free_link(struct smc_link *lnk) DMA_FROM_DEVICE); lnk->wr_rx_dma_addr = 0; } + if (lnk->wr_rx_v2_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr, + SMC_WR_BUF_V2_SIZE, + DMA_FROM_DEVICE); + lnk->wr_rx_v2_dma_addr = 0; + } if (lnk->wr_tx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, DMA_TO_DEVICE); lnk->wr_tx_dma_addr = 0; } + if (lnk->wr_tx_v2_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr, + SMC_WR_BUF_V2_SIZE, + DMA_TO_DEVICE); + lnk->wr_tx_v2_dma_addr = 0; + } +} + +void smc_wr_free_lgr_mem(struct smc_link_group *lgr) +{ + if (lgr->smc_version < SMC_V2) + return; + + kfree(lgr->wr_rx_buf_v2); + lgr->wr_rx_buf_v2 = NULL; + kfree(lgr->wr_tx_buf_v2); + lgr->wr_tx_buf_v2 = NULL; } void smc_wr_free_link_mem(struct smc_link *lnk) { + kfree(lnk->wr_tx_v2_ib); + lnk->wr_tx_v2_ib = NULL; + kfree(lnk->wr_tx_v2_sge); + lnk->wr_tx_v2_sge = NULL; + kfree(lnk->wr_tx_v2_pend); + lnk->wr_tx_v2_pend = NULL; kfree(lnk->wr_tx_compl); lnk->wr_tx_compl = NULL; kfree(lnk->wr_tx_pends); @@ -619,8 +749,26 @@ void smc_wr_free_link_mem(struct smc_link *lnk) lnk->wr_rx_bufs = NULL; } +int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr) +{ + if (lgr->smc_version < SMC_V2) + return 0; + + lgr->wr_rx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL); + if (!lgr->wr_rx_buf_v2) + return -ENOMEM; + lgr->wr_tx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL); + if (!lgr->wr_tx_buf_v2) { + kfree(lgr->wr_rx_buf_v2); + return -ENOMEM; + } + return 0; +} + int smc_wr_alloc_link_mem(struct smc_link *link) { + int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 
2 : 1; + /* allocate link related memory */ link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_tx_bufs) @@ -653,7 +801,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link) if (!link->wr_tx_sges) goto no_mem_wr_tx_rdma_sges; link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, - sizeof(link->wr_rx_sges[0]), + sizeof(link->wr_rx_sges[0]) * sges_per_buf, GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; @@ -672,8 +820,29 @@ int smc_wr_alloc_link_mem(struct smc_link *link) GFP_KERNEL); if (!link->wr_tx_compl) goto no_mem_wr_tx_pends; + + if (link->lgr->smc_version == SMC_V2) { + link->wr_tx_v2_ib = kzalloc(sizeof(*link->wr_tx_v2_ib), + GFP_KERNEL); + if (!link->wr_tx_v2_ib) + goto no_mem_tx_compl; + link->wr_tx_v2_sge = kzalloc(sizeof(*link->wr_tx_v2_sge), + GFP_KERNEL); + if (!link->wr_tx_v2_sge) + goto no_mem_v2_ib; + link->wr_tx_v2_pend = kzalloc(sizeof(*link->wr_tx_v2_pend), + GFP_KERNEL); + if (!link->wr_tx_v2_pend) + goto no_mem_v2_sge; + } return 0; +no_mem_v2_sge: + kfree(link->wr_tx_v2_sge); +no_mem_v2_ib: + kfree(link->wr_tx_v2_ib); +no_mem_tx_compl: + kfree(link->wr_tx_compl); no_mem_wr_tx_pends: kfree(link->wr_tx_pends); no_mem_wr_tx_mask: @@ -725,6 +894,24 @@ int smc_wr_create_link(struct smc_link *lnk) rc = -EIO; goto out; } + if (lnk->lgr->smc_version == SMC_V2) { + lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev, + lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) { + lnk->wr_rx_v2_dma_addr = 0; + rc = -EIO; + goto dma_unmap; + } + lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev, + lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ibdev, lnk->wr_tx_v2_dma_addr)) { + lnk->wr_tx_v2_dma_addr = 0; + rc = -EIO; + goto dma_unmap; + } + } lnk->wr_tx_dma_addr = ib_dma_map_single( ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, DMA_TO_DEVICE); @@ -742,6 +929,18 @@ int smc_wr_create_link(struct smc_link *lnk) return rc; dma_unmap: + if (lnk->wr_rx_v2_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr, + SMC_WR_BUF_V2_SIZE, + DMA_FROM_DEVICE); + lnk->wr_rx_v2_dma_addr = 0; + } + if (lnk->wr_tx_v2_dma_addr) { + ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr, + SMC_WR_BUF_V2_SIZE, + DMA_TO_DEVICE); + lnk->wr_tx_v2_dma_addr = 0; + } ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, DMA_FROM_DEVICE); diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 2bc626f230a56d..f353311e6f84b1 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -101,8 +101,10 @@ static inline int smc_wr_rx_post(struct smc_link *link) int smc_wr_create_link(struct smc_link *lnk); int smc_wr_alloc_link_mem(struct smc_link *lnk); +int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr); void smc_wr_free_link(struct smc_link *lnk); void smc_wr_free_link_mem(struct smc_link *lnk); +void smc_wr_free_lgr_mem(struct smc_link_group *lgr); void smc_wr_remember_qp_attr(struct smc_link *lnk); void smc_wr_remove_dev(struct smc_ib_device *smcibdev); void smc_wr_add_dev(struct smc_ib_device *smcibdev); @@ -111,10 +113,16 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, struct smc_wr_buf **wr_buf, struct smc_rdma_wr **wrs, struct smc_wr_tx_pend_priv **wr_pend_priv); +int smc_wr_tx_get_v2_slot(struct smc_link *link, + smc_wr_tx_handler handler, + struct smc_wr_v2_buf **wr_buf, + struct smc_wr_tx_pend_priv **wr_pend_priv); int smc_wr_tx_put_slot(struct smc_link *link, struct smc_wr_tx_pend_priv 
*wr_pend_priv); int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv); +int smc_wr_tx_v2_send(struct smc_link *link, + struct smc_wr_tx_pend_priv *priv, int len); int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, unsigned long timeout); void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context); diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 0b2c18efc07912..83460470e88315 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -428,17 +428,17 @@ switchdev_lower_dev_find(struct net_device *dev, return switchdev_priv.lower_dev; } -static int __switchdev_handle_fdb_add_to_device(struct net_device *dev, - const struct net_device *orig_dev, +static int __switchdev_handle_fdb_event_to_device(struct net_device *dev, + struct net_device *orig_dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), - int (*add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)) { const struct switchdev_notifier_info *info = &fdb_info->info; @@ -447,17 +447,17 @@ static int __switchdev_handle_fdb_add_to_device(struct net_device *dev, int err = -EOPNOTSUPP; if (check_cb(dev)) - return add_cb(dev, orig_dev, info->ctx, fdb_info); + return mod_cb(dev, orig_dev, event, info->ctx, fdb_info); if (netif_is_lag_master(dev)) { if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb)) goto maybe_bridged_with_us; /* This is a LAG interface that we offload */ - if (!lag_add_cb) + if (!lag_mod_cb) return -EOPNOTSUPP; - return lag_add_cb(dev, orig_dev, info->ctx, fdb_info); + return lag_mod_cb(dev, orig_dev, event, info->ctx, fdb_info); } /* Recurse through lower interfaces in case the FDB entry is pointing @@ -481,10 +481,10 @@ static int __switchdev_handle_fdb_add_to_device(struct net_device *dev, foreign_dev_check_cb)) continue; - err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev, - fdb_info, check_cb, - foreign_dev_check_cb, - add_cb, lag_add_cb); + err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev, + event, fdb_info, check_cb, + foreign_dev_check_cb, + mod_cb, lag_mod_cb); if (err && err != -EOPNOTSUPP) return err; } @@ -503,140 +503,34 @@ static int __switchdev_handle_fdb_add_to_device(struct net_device *dev, if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb)) return 0; - return __switchdev_handle_fdb_add_to_device(br, orig_dev, fdb_info, - check_cb, foreign_dev_check_cb, - add_cb, lag_add_cb); + return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info, + check_cb, foreign_dev_check_cb, + mod_cb, lag_mod_cb); } -int switchdev_handle_fdb_add_to_device(struct net_device *dev, +int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct 
net_device *foreign_dev), - int (*add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_add_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, + int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, + unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)) { int err; - err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info, - check_cb, - foreign_dev_check_cb, - add_cb, lag_add_cb); + err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info, + check_cb, foreign_dev_check_cb, + mod_cb, lag_mod_cb); if (err == -EOPNOTSUPP) err = 0; return err; } -EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device); - -static int __switchdev_handle_fdb_del_to_device(struct net_device *dev, - const struct net_device *orig_dev, - const struct switchdev_notifier_fdb_info *fdb_info, - bool (*check_cb)(const struct net_device *dev), - bool (*foreign_dev_check_cb)(const struct net_device *dev, - const struct net_device *foreign_dev), - int (*del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)) -{ - const struct switchdev_notifier_info *info = &fdb_info->info; - struct net_device *br, *lower_dev; - struct list_head *iter; - int err = -EOPNOTSUPP; - - if (check_cb(dev)) - return del_cb(dev, orig_dev, info->ctx, fdb_info); - - if (netif_is_lag_master(dev)) { - if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb)) - goto maybe_bridged_with_us; - - /* This is a LAG interface that we offload */ - if (!lag_del_cb) - return -EOPNOTSUPP; - - return lag_del_cb(dev, orig_dev, info->ctx, fdb_info); - } - - /* Recurse through lower interfaces in case the FDB entry is pointing - * towards a bridge device. - */ - if (netif_is_bridge_master(dev)) { - if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb)) - return 0; - - /* This is a bridge interface that we offload */ - netdev_for_each_lower_dev(dev, lower_dev, iter) { - /* Do not propagate FDB entries across bridges */ - if (netif_is_bridge_master(lower_dev)) - continue; - - /* Bridge ports might be either us, or LAG interfaces - * that we offload. - */ - if (!check_cb(lower_dev) && - !switchdev_lower_dev_find(lower_dev, check_cb, - foreign_dev_check_cb)) - continue; - - err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev, - fdb_info, check_cb, - foreign_dev_check_cb, - del_cb, lag_del_cb); - if (err && err != -EOPNOTSUPP) - return err; - } - - return 0; - } - -maybe_bridged_with_us: - /* Event is neither on a bridge nor a LAG. Check whether it is on an - * interface that is in a bridge with us. 
- */ - br = netdev_master_upper_dev_get_rcu(dev); - if (!br || !netif_is_bridge_master(br)) - return 0; - - if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb)) - return 0; - - return __switchdev_handle_fdb_del_to_device(br, orig_dev, fdb_info, - check_cb, foreign_dev_check_cb, - del_cb, lag_del_cb); -} - -int switchdev_handle_fdb_del_to_device(struct net_device *dev, - const struct switchdev_notifier_fdb_info *fdb_info, - bool (*check_cb)(const struct net_device *dev), - bool (*foreign_dev_check_cb)(const struct net_device *dev, - const struct net_device *foreign_dev), - int (*del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_del_cb)(struct net_device *dev, - const struct net_device *orig_dev, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)) -{ - int err; - - err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info, - check_cb, - foreign_dev_check_cb, - del_cb, lag_del_cb); - if (err == -EOPNOTSUPP) - err = 0; - - return err; -} -EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device); +EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device); static int __switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 443f8e5b94777e..60bc74b76adc59 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -462,7 +462,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, b->bcast_addr.media_id = b->media->type_id; b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT; b->mtu = dev->mtu; - b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr); + b->media->raw2addr(b, &b->addr, (const char *)dev->dev_addr); rcu_assign_pointer(dev->tipc_ptr, b); return 0; } @@ -703,7 +703,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, break; case NETDEV_CHANGEADDR: b->media->raw2addr(b, &b->addr, - (char *)dev->dev_addr); + (const char *)dev->dev_addr); tipc_reset_bearer(net, b); break; case NETDEV_UNREGISTER: diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 57c6a1a719e243..490ad6e5f7a3c8 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -117,7 +117,7 @@ struct tipc_media { char *msg); int (*raw2addr)(struct tipc_bearer *b, struct tipc_media_addr *addr, - char *raw); + const char *raw); u32 priority; u32 tolerance; u32 min_win; diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index c68019697cfe1d..cb0d185e06afd4 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -60,7 +60,7 @@ static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) /* Convert raw mac address format to media addr format */ static int tipc_eth_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, - char *msg) + const char *msg) { memset(addr, 0, sizeof(*addr)); ether_addr_copy(addr->value, msg); diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c index 7aa9ff88458d64..b9ad0434c3cdca 100644 --- a/net/tipc/ib_media.c +++ b/net/tipc/ib_media.c @@ -67,7 +67,7 @@ static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) /* Convert raw InfiniBand address format to media addr format */ static int tipc_ib_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, - char *msg) + const char *msg) { memset(addr, 0, sizeof(*addr)); memcpy(addr->value, msg, INFINIBAND_ALEN); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 9ab81db8a65453..acfba9f1ba72fa 100644 --- a/net/tls/tls_main.c 
+++ b/net/tls/tls_main.c @@ -421,6 +421,88 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EFAULT; break; } + case TLS_CIPHER_AES_CCM_128: { + struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 = + container_of(crypto_info, + struct tls12_crypto_info_aes_ccm_128, info); + + if (len != sizeof(*aes_ccm_128)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(aes_ccm_128->iv, + cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE, + TLS_CIPHER_AES_CCM_128_IV_SIZE); + memcpy(aes_ccm_128->rec_seq, cctx->rec_seq, + TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_CHACHA20_POLY1305: { + struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 = + container_of(crypto_info, + struct tls12_crypto_info_chacha20_poly1305, + info); + + if (len != sizeof(*chacha20_poly1305)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(chacha20_poly1305->iv, + cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE, + TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE); + memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq, + TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, chacha20_poly1305, + sizeof(*chacha20_poly1305))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_gcm, info); + + if (len != sizeof(*sm4_gcm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_gcm_info->iv, + cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE, + TLS_CIPHER_SM4_GCM_IV_SIZE); + memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_ccm, info); + + if (len != sizeof(*sm4_ccm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_ccm_info->iv, + cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE, + TLS_CIPHER_SM4_CCM_IV_SIZE); + memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info))) + rc = -EFAULT; + break; + } default: rc = -EINVAL; } @@ -524,6 +606,12 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, case TLS_CIPHER_CHACHA20_POLY1305: optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305); break; + case TLS_CIPHER_SM4_GCM: + optsize = sizeof(struct tls12_crypto_info_sm4_gcm); + break; + case TLS_CIPHER_SM4_CCM: + optsize = sizeof(struct tls12_crypto_info_sm4_ccm); + break; default: rc = -EINVAL; goto err_crypto_info; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 1b08b877a89000..d81564078557b9 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -507,9 +507,15 @@ static int tls_do_encryption(struct sock *sk, int rc, iv_offset = 0; /* For CCM based ciphers, first byte of IV is a constant */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; } memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, @@ -1466,10 +1472,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff 
*skb, aad = (u8 *)(sgout + n_sgout); iv = aad + prot->aad_size; - /* For CCM based ciphers, first byte of nonce+iv is always '2' */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { - iv[0] = 2; + /* For CCM based ciphers, first byte of nonce+iv is a constant */ + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: + iv[0] = TLS_AES_CCM_IV_B0_BYTE; + iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + iv[0] = TLS_SM4_CCM_IV_B0_BYTE; iv_offset = 1; + break; } /* Prepare IV */ @@ -2433,6 +2445,40 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) cipher_name = "rfc7539(chacha20,poly1305)"; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info; + + sm4_gcm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + iv = sm4_gcm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE; + rec_seq = sm4_gcm_info->rec_seq; + keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE; + key = sm4_gcm_info->key; + salt = sm4_gcm_info->salt; + salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE; + cipher_name = "gcm(sm4)"; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info; + + sm4_ccm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + iv = sm4_ccm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE; + rec_seq = sm4_ccm_info->rec_seq; + keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE; + key = sm4_ccm_info->key; + salt = sm4_ccm_info->salt; + salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE; + cipher_name = "ccm(sm4)"; + break; + } default: rc = -EINVAL; goto free_priv; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index e2c0cfb334d20c..7d851eb3a68307 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1614,13 +1614,18 @@ static int vsock_connectible_setsockopt(struct socket *sock, vsock_update_buffer_size(vsk, transport, vsk->buffer_size); break; - case SO_VM_SOCKETS_CONNECT_TIMEOUT: { - struct __kernel_old_timeval tv; - COPY_IN(tv); + case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: + case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: { + struct __kernel_sock_timeval tv; + + err = sock_copy_user_timeval(&tv, optval, optlen, + optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); + if (err) + break; if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { vsk->connect_timeout = tv.tv_sec * HZ + - DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ)); + DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ)); if (vsk->connect_timeout == 0) vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; @@ -1648,68 +1653,59 @@ static int vsock_connectible_getsockopt(struct socket *sock, char __user *optval, int __user *optlen) { - int err; + struct sock *sk = sock->sk; + struct vsock_sock *vsk = vsock_sk(sk); + + union { + u64 val64; + struct old_timeval32 tm32; + struct __kernel_old_timeval tm; + struct __kernel_sock_timeval stm; + } v; + + int lv = sizeof(v.val64); int len; - struct sock *sk; - struct vsock_sock *vsk; - u64 val; if (level != AF_VSOCK) return -ENOPROTOOPT; - err = get_user(len, optlen); - if (err != 0) - return err; - -#define COPY_OUT(_v) \ - do { \ - if (len < sizeof(_v)) \ - return -EINVAL; \ - \ - len = sizeof(_v); \ - if (copy_to_user(optval, &_v, len) != 0) \ - return -EFAULT; \ - \ - } while (0) + if (get_user(len, optlen)) + return -EFAULT; - err = 0; - sk = sock->sk; - vsk = 
vsock_sk(sk); + memset(&v, 0, sizeof(v)); switch (optname) { case SO_VM_SOCKETS_BUFFER_SIZE: - val = vsk->buffer_size; - COPY_OUT(val); + v.val64 = vsk->buffer_size; break; case SO_VM_SOCKETS_BUFFER_MAX_SIZE: - val = vsk->buffer_max_size; - COPY_OUT(val); + v.val64 = vsk->buffer_max_size; break; case SO_VM_SOCKETS_BUFFER_MIN_SIZE: - val = vsk->buffer_min_size; - COPY_OUT(val); + v.val64 = vsk->buffer_min_size; break; - case SO_VM_SOCKETS_CONNECT_TIMEOUT: { - struct __kernel_old_timeval tv; - tv.tv_sec = vsk->connect_timeout / HZ; - tv.tv_usec = - (vsk->connect_timeout - - tv.tv_sec * HZ) * (1000000 / HZ); - COPY_OUT(tv); + case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: + case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: + lv = sock_get_timeout(vsk->connect_timeout, &v, + optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); break; - } + default: return -ENOPROTOOPT; } - err = put_user(len, optlen); - if (err != 0) + if (len < lv) + return -EINVAL; + if (len > lv) + len = lv; + if (copy_to_user(optval, &v, len)) return -EFAULT; -#undef COPY_OUT + if (put_user(len, optlen)) + return -EFAULT; return 0; } diff --git a/net/wireless/Makefile b/net/wireless/Makefile index af590ae606b69c..756e7de7e33f30 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -26,7 +26,7 @@ endif $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex) @$(kecho) " GEN $@" - @(echo '#include "reg.h"'; \ + $(Q)(echo '#include "reg.h"'; \ echo 'const u8 shipped_regdb_certs[] = {'; \ echo | cat - $^ ; \ echo '};'; \ @@ -36,7 +36,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex) $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) @$(kecho) " GEN $@" - @(set -e; \ + $(Q)(set -e; \ allf=""; \ for f in $^ ; do \ test -f $$f || continue;\ diff --git a/net/wireless/core.c b/net/wireless/core.c index aaba847d79eb29..eb297e1015e050 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1081,6 +1081,16 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&rdev->wiphy, &scan->pub); mutex_destroy(&rdev->wiphy.mtx); + + /* + * The 'regd' can only be non-NULL if we never finished + * initializing the wiphy and thus never went through the + * unregister path - e.g. in failure scenarios. Thus, it + * cannot have been visible to anyone if non-NULL, so we + * can just free it here. 
+ */ + kfree(rcu_dereference_raw(rdev->wiphy.regd)); + kfree(rdev); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index bf7cd47525472a..81232b73df8f99 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -437,6 +437,16 @@ sar_policy[NL80211_SAR_ATTR_MAX + 1] = { [NL80211_SAR_ATTR_SPECS] = NLA_POLICY_NESTED_ARRAY(sar_specs_policy), }; +static const struct nla_policy +nl80211_mbssid_config_policy[NL80211_MBSSID_CONFIG_ATTR_MAX + 1] = { + [NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES] = NLA_POLICY_MIN(NLA_U8, 2), + [NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY] = + NLA_POLICY_MIN(NLA_U8, 1), + [NL80211_MBSSID_CONFIG_ATTR_INDEX] = { .type = NLA_U8 }, + [NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX] = { .type = NLA_U32 }, + [NL80211_MBSSID_CONFIG_ATTR_EMA] = { .type = NLA_FLAG }, +}; + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@ -763,6 +773,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_COLOR_CHANGE_COUNT] = { .type = NLA_U8 }, [NL80211_ATTR_COLOR_CHANGE_COLOR] = { .type = NLA_U8 }, [NL80211_ATTR_COLOR_CHANGE_ELEMS] = NLA_POLICY_NESTED(nl80211_policy), + [NL80211_ATTR_MBSSID_CONFIG] = + NLA_POLICY_NESTED(nl80211_mbssid_config_policy), + [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED }, }; /* policy for the key attributes */ @@ -853,6 +866,7 @@ nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = { [NL80211_BAND_5GHZ] = { .type = NLA_S32 }, [NL80211_BAND_6GHZ] = { .type = NLA_S32 }, [NL80211_BAND_60GHZ] = { .type = NLA_S32 }, + [NL80211_BAND_LC] = { .type = NLA_S32 }, }; static const struct nla_policy @@ -2207,6 +2221,35 @@ nl80211_put_sar_specs(struct cfg80211_registered_device *rdev, return -ENOBUFS; } +static int nl80211_put_mbssid_support(struct wiphy *wiphy, struct sk_buff *msg) +{ + struct nlattr *config; + + if (!wiphy->mbssid_max_interfaces) + return 0; + + config = nla_nest_start(msg, NL80211_ATTR_MBSSID_CONFIG); + if (!config) + return -ENOBUFS; + + if (nla_put_u8(msg, NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES, + wiphy->mbssid_max_interfaces)) + goto fail; + + if (wiphy->ema_max_profile_periodicity && + nla_put_u8(msg, + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY, + wiphy->ema_max_profile_periodicity)) + goto fail; + + nla_nest_end(msg, config); + return 0; + +fail: + nla_nest_cancel(msg, config); + return -ENOBUFS; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@ -2792,6 +2835,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, if (nl80211_put_sar_specs(rdev, msg)) goto nla_put_failure; + if (nl80211_put_mbssid_support(&rdev->wiphy, msg)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -4981,6 +5027,96 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev, return 0; } +static int nl80211_parse_mbssid_config(struct wiphy *wiphy, + struct net_device *dev, + struct nlattr *attrs, + struct cfg80211_mbssid_config *config, + u8 num_elems) +{ + struct nlattr *tb[NL80211_MBSSID_CONFIG_ATTR_MAX + 1]; + + if (!wiphy->mbssid_max_interfaces) + return -EOPNOTSUPP; + + if (nla_parse_nested(tb, NL80211_MBSSID_CONFIG_ATTR_MAX, attrs, NULL, + NULL) || + !tb[NL80211_MBSSID_CONFIG_ATTR_INDEX]) + return -EINVAL; + + config->ema = nla_get_flag(tb[NL80211_MBSSID_CONFIG_ATTR_EMA]); + if (config->ema) { + if (!wiphy->ema_max_profile_periodicity) + return -EOPNOTSUPP; + + if (num_elems > 
wiphy->ema_max_profile_periodicity) + return -EINVAL; + } + + config->index = nla_get_u8(tb[NL80211_MBSSID_CONFIG_ATTR_INDEX]); + if (config->index >= wiphy->mbssid_max_interfaces || + (!config->index && !num_elems)) + return -EINVAL; + + if (tb[NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX]) { + u32 tx_ifindex = + nla_get_u32(tb[NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX]); + + if ((!config->index && tx_ifindex != dev->ifindex) || + (config->index && tx_ifindex == dev->ifindex)) + return -EINVAL; + + if (tx_ifindex != dev->ifindex) { + struct net_device *tx_netdev = + dev_get_by_index(wiphy_net(wiphy), tx_ifindex); + + if (!tx_netdev || !tx_netdev->ieee80211_ptr || + tx_netdev->ieee80211_ptr->wiphy != wiphy || + tx_netdev->ieee80211_ptr->iftype != + NL80211_IFTYPE_AP) { + dev_put(tx_netdev); + return -EINVAL; + } + + config->tx_wdev = tx_netdev->ieee80211_ptr; + } else { + config->tx_wdev = dev->ieee80211_ptr; + } + } else if (!config->index) { + config->tx_wdev = dev->ieee80211_ptr; + } else { + return -EINVAL; + } + + return 0; +} + +static struct cfg80211_mbssid_elems * +nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs) +{ + struct nlattr *nl_elems; + struct cfg80211_mbssid_elems *elems; + int rem_elems; + u8 i = 0, num_elems = 0; + + if (!wiphy->mbssid_max_interfaces) + return ERR_PTR(-EINVAL); + + nla_for_each_nested(nl_elems, attrs, rem_elems) + num_elems++; + + elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); + if (!elems) + return ERR_PTR(-ENOMEM); + + nla_for_each_nested(nl_elems, attrs, rem_elems) { + elems->elem[i].data = nla_data(nl_elems); + elems->elem[i].len = nla_len(nl_elems); + i++; + } + elems->cnt = num_elems; + return elems; +} + static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, struct nlattr *attrs[], struct cfg80211_beacon_data *bcn) @@ -5061,6 +5197,17 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, bcn->ftm_responder = -1; } + if (attrs[NL80211_ATTR_MBSSID_ELEMS]) { + struct cfg80211_mbssid_elems *mbssid = + nl80211_parse_mbssid_elems(&rdev->wiphy, + attrs[NL80211_ATTR_MBSSID_ELEMS]); + + if (IS_ERR(mbssid)) + return PTR_ERR(mbssid); + + bcn->mbssid_ies = mbssid; + } + return 0; } @@ -5192,21 +5339,21 @@ nl80211_parse_unsol_bcast_probe_resp(struct cfg80211_registered_device *rdev, } static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, - const u8 *rates) + const struct element *rates) { int i; if (!rates) return; - for (i = 0; i < rates[1]; i++) { - if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + for (i = 0; i < rates->datalen; i++) { + if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY) params->ht_required = true; - if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY) + if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY) params->vht_required = true; - if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HE_PHY) + if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_HE_PHY) params->he_required = true; - if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_SAE_H2E) + if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_SAE_H2E) params->sae_h2e_required = true; } } @@ -5221,27 +5368,27 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params) const struct cfg80211_beacon_data *bcn = ¶ms->beacon; size_t ies_len = bcn->tail_len; const u8 *ies = bcn->tail; - const u8 *rates; - const u8 *cap; + const struct element *rates; + const struct element *cap; - rates = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies, ies_len); + rates = cfg80211_find_elem(WLAN_EID_SUPP_RATES, ies, 
ies_len); nl80211_check_ap_rate_selectors(params, rates); - rates = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies, ies_len); + rates = cfg80211_find_elem(WLAN_EID_EXT_SUPP_RATES, ies, ies_len); nl80211_check_ap_rate_selectors(params, rates); - cap = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len); - if (cap && cap[1] >= sizeof(*params->ht_cap)) - params->ht_cap = (void *)(cap + 2); - cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len); - if (cap && cap[1] >= sizeof(*params->vht_cap)) - params->vht_cap = (void *)(cap + 2); - cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len); - if (cap && cap[1] >= sizeof(*params->he_cap) + 1) - params->he_cap = (void *)(cap + 3); - cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ies, ies_len); - if (cap && cap[1] >= sizeof(*params->he_oper) + 1) - params->he_oper = (void *)(cap + 3); + cap = cfg80211_find_elem(WLAN_EID_HT_CAPABILITY, ies, ies_len); + if (cap && cap->datalen >= sizeof(*params->ht_cap)) + params->ht_cap = (void *)cap->data; + cap = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY, ies, ies_len); + if (cap && cap->datalen >= sizeof(*params->vht_cap)) + params->vht_cap = (void *)cap->data; + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len); + if (cap && cap->datalen >= sizeof(*params->he_cap) + 1) + params->he_cap = (void *)(cap->data + 1); + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ies, ies_len); + if (cap && cap->datalen >= sizeof(*params->he_oper) + 1) + params->he_oper = (void *)(cap->data + 1); } static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, @@ -5323,7 +5470,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_ap_settings params; + struct cfg80211_ap_settings *params; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && @@ -5336,27 +5483,29 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (wdev->beacon_interval) return -EALREADY; - memset(&params, 0, sizeof(params)); - /* these are required for START_AP */ if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || !info->attrs[NL80211_ATTR_BEACON_HEAD]) return -EINVAL; - err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon); + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + err = nl80211_parse_beacon(rdev, info->attrs, &params->beacon); if (err) - return err; + goto out; - params.beacon_interval = + params->beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); - params.dtim_period = + params->dtim_period = nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype, - params.beacon_interval); + params->beacon_interval); if (err) - return err; + goto out; /* * In theory, some of these attributes should be required here @@ -5366,129 +5515,157 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) * additional information -- drivers must check!
*/ if (info->attrs[NL80211_ATTR_SSID]) { - params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); - params.ssid_len = + params->ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + params->ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); - if (params.ssid_len == 0) - return -EINVAL; + if (params->ssid_len == 0) { + err = -EINVAL; + goto out; + } } if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) - params.hidden_ssid = nla_get_u32( + params->hidden_ssid = nla_get_u32( info->attrs[NL80211_ATTR_HIDDEN_SSID]); - params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; + params->privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { - params.auth_type = nla_get_u32( + params->auth_type = nla_get_u32( info->attrs[NL80211_ATTR_AUTH_TYPE]); - if (!nl80211_valid_auth_type(rdev, params.auth_type, - NL80211_CMD_START_AP)) - return -EINVAL; + if (!nl80211_valid_auth_type(rdev, params->auth_type, + NL80211_CMD_START_AP)) { + err = -EINVAL; + goto out; + } } else - params.auth_type = NL80211_AUTHTYPE_AUTOMATIC; + params->auth_type = NL80211_AUTHTYPE_AUTOMATIC; - err = nl80211_crypto_settings(rdev, info, &params.crypto, + err = nl80211_crypto_settings(rdev, info, &params->crypto, NL80211_MAX_NR_CIPHER_SUITES); if (err) - return err; + goto out; if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) { - if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER)) - return -EOPNOTSUPP; - params.inactivity_timeout = nla_get_u16( + if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER)) { + err = -EOPNOTSUPP; + goto out; + } + params->inactivity_timeout = nla_get_u16( info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]); } if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) { - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) - return -EINVAL; - params.p2p_ctwindow = + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { + err = -EINVAL; + goto out; + } + params->p2p_ctwindow = nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); - if (params.p2p_ctwindow != 0 && - !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) - return -EINVAL; + if (params->p2p_ctwindow != 0 && + !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) { + err = -EINVAL; + goto out; + } } if (info->attrs[NL80211_ATTR_P2P_OPPPS]) { u8 tmp; - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) - return -EINVAL; + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { + err = -EINVAL; + goto out; + } tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); - params.p2p_opp_ps = tmp; - if (params.p2p_opp_ps != 0 && - !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) - return -EINVAL; + params->p2p_opp_ps = tmp; + if (params->p2p_opp_ps != 0 && + !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) { + err = -EINVAL; + goto out; + } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { - err = nl80211_parse_chandef(rdev, info, &params.chandef); + err = nl80211_parse_chandef(rdev, info, &params->chandef); if (err) - return err; + goto out; } else if (wdev->preset_chandef.chan) { - params.chandef = wdev->preset_chandef; - } else if (!nl80211_get_ap_channel(rdev, &params)) - return -EINVAL; + params->chandef = wdev->preset_chandef; + } else if (!nl80211_get_ap_channel(rdev, params)) { + err = -EINVAL; + goto out; + } - if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, - wdev->iftype)) - return -EINVAL; + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef, + wdev->iftype)) { + err = -EINVAL; + goto out; + } if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info,
info->attrs, NL80211_ATTR_TX_RATES, - &params.beacon_rate, + &params->beacon_rate, dev, false); if (err) - return err; + goto out; - err = validate_beacon_tx_rate(rdev, params.chandef.chan->band, - &params.beacon_rate); + err = validate_beacon_tx_rate(rdev, params->chandef.chan->band, + &params->beacon_rate); if (err) - return err; + goto out; } if (info->attrs[NL80211_ATTR_SMPS_MODE]) { - params.smps_mode = + params->smps_mode = nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]); - switch (params.smps_mode) { + switch (params->smps_mode) { case NL80211_SMPS_OFF: break; case NL80211_SMPS_STATIC: if (!(rdev->wiphy.features & - NL80211_FEATURE_STATIC_SMPS)) - return -EINVAL; + NL80211_FEATURE_STATIC_SMPS)) { + err = -EINVAL; + goto out; + } break; case NL80211_SMPS_DYNAMIC: if (!(rdev->wiphy.features & - NL80211_FEATURE_DYNAMIC_SMPS)) - return -EINVAL; + NL80211_FEATURE_DYNAMIC_SMPS)) { + err = -EINVAL; + goto out; + } break; default: - return -EINVAL; + err = -EINVAL; + goto out; } } else { - params.smps_mode = NL80211_SMPS_OFF; + params->smps_mode = NL80211_SMPS_OFF; } - params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); - if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) - return -EOPNOTSUPP; + params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); + if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { + err = -EOPNOTSUPP; + goto out; + } if (info->attrs[NL80211_ATTR_ACL_POLICY]) { - params.acl = parse_acl_data(&rdev->wiphy, info); - if (IS_ERR(params.acl)) - return PTR_ERR(params.acl); + params->acl = parse_acl_data(&rdev->wiphy, info); + if (IS_ERR(params->acl)) { + err = PTR_ERR(params->acl); + params->acl = NULL; + goto out; + } } - params.twt_responder = + params->twt_responder = nla_get_flag(info->attrs[NL80211_ATTR_TWT_RESPONDER]); if (info->attrs[NL80211_ATTR_HE_OBSS_PD]) { err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], - &params.he_obss_pd); + &params->he_obss_pd); if (err) goto out; } @@ -5496,7 +5673,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) { err = nl80211_parse_he_bss_color( info->attrs[NL80211_ATTR_HE_BSS_COLOR], - &params.he_bss_color); + &params->he_bss_color); if (err) goto out; } @@ -5504,7 +5681,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) { err = nl80211_parse_fils_discovery(rdev, info->attrs[NL80211_ATTR_FILS_DISCOVERY], - &params); + params); if (err) goto out; } @@ -5512,24 +5689,35 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) { err = nl80211_parse_unsol_bcast_probe_resp( rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP], - &params); + params); + if (err) + goto out; + } + + if (info->attrs[NL80211_ATTR_MBSSID_CONFIG]) { + err = nl80211_parse_mbssid_config(&rdev->wiphy, dev, + info->attrs[NL80211_ATTR_MBSSID_CONFIG], + &params->mbssid_config, + params->beacon.mbssid_ies ?
+ params->beacon.mbssid_ies->cnt : + 0); if (err) goto out; } - nl80211_calculate_ap_params(&params); + nl80211_calculate_ap_params(params); if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) - params.flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT; + params->flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT; wdev_lock(wdev); - err = rdev_start_ap(rdev, dev, &params); + err = rdev_start_ap(rdev, dev, params); if (!err) { - wdev->preset_chandef = params.chandef; - wdev->beacon_interval = params.beacon_interval; - wdev->chandef = params.chandef; - wdev->ssid_len = params.ssid_len; - memcpy(wdev->ssid, params.ssid, wdev->ssid_len); + wdev->preset_chandef = params->chandef; + wdev->beacon_interval = params->beacon_interval; + wdev->chandef = params->chandef; + wdev->ssid_len = params->ssid_len; + memcpy(wdev->ssid, params->ssid, wdev->ssid_len); if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) wdev->conn_owner_nlportid = info->snd_portid; @@ -5537,7 +5725,13 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) wdev_unlock(wdev); out: - kfree(params.acl); + kfree(params->acl); + kfree(params->beacon.mbssid_ies); + if (params->mbssid_config.tx_wdev && + params->mbssid_config.tx_wdev->netdev && + params->mbssid_config.tx_wdev->netdev != dev) + dev_put(params->mbssid_config.tx_wdev->netdev); + kfree(params); return err; } @@ -5562,12 +5756,14 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_beacon(rdev, info->attrs, &params); if (err) - return err; + goto out; wdev_lock(wdev); err = rdev_change_beacon(rdev, dev, &params); wdev_unlock(wdev); +out: + kfree(params.mbssid_ies); return err; } @@ -9244,12 +9440,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after); if (err) - return err; + goto free; csa_attrs = kcalloc(NL80211_ATTR_MAX + 1, sizeof(*csa_attrs), GFP_KERNEL); - if (!csa_attrs) - return -ENOMEM; + if (!csa_attrs) { + err = -ENOMEM; + goto free; + } err = nla_parse_nested_deprecated(csa_attrs, NL80211_ATTR_MAX, info->attrs[NL80211_ATTR_CSA_IES], @@ -9367,6 +9565,8 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) wdev_unlock(wdev); free: + kfree(params.beacon_after.mbssid_ies); + kfree(params.beacon_csa.mbssid_ies); kfree(csa_attrs); return err; } @@ -11767,8 +11967,9 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, if (n_thresholds) { struct cfg80211_cqm_config *cqm_config; - cqm_config = kzalloc(sizeof(struct cfg80211_cqm_config) + - n_thresholds * sizeof(s32), GFP_KERNEL); + cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds, + n_thresholds), + GFP_KERNEL); if (!cqm_config) { err = -ENOMEM; goto unlock; } @@ -11777,7 +11978,8 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, cqm_config->rssi_hyst = hysteresis; cqm_config->n_rssi_thresholds = n_thresholds; memcpy(cqm_config->rssi_thresholds, thresholds, - n_thresholds * sizeof(s32)); + flex_array_size(cqm_config, rssi_thresholds, + n_thresholds)); wdev->cqm_config = cqm_config; } @@ -14900,10 +15102,35 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info) wdev_unlock(wdev); out: + kfree(params.beacon_next.mbssid_ies); + kfree(params.beacon_color_change.mbssid_ies); kfree(tb); return err; } +static int nl80211_set_fils_aad(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct cfg80211_fils_aad fils_aad = {}; +
u8 *nonces; + + if (!info->attrs[NL80211_ATTR_MAC] || + !info->attrs[NL80211_ATTR_FILS_KEK] || + !info->attrs[NL80211_ATTR_FILS_NONCES]) + return -EINVAL; + + fils_aad.macaddr = nla_data(info->attrs[NL80211_ATTR_MAC]); + fils_aad.kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]); + fils_aad.kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]); + nonces = nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]); + fils_aad.snonce = nonces; + fils_aad.anonce = nonces + FILS_NONCE_LEN; + + return rdev_set_fils_aad(rdev, dev, &fils_aad); +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -15081,9 +15308,7 @@ static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info) if (specs > rdev->wiphy.sar_capa->num_freq_ranges) return -EINVAL; - sar_spec = kzalloc(sizeof(*sar_spec) + - specs * sizeof(struct cfg80211_sar_sub_specs), - GFP_KERNEL); + sar_spec = kzalloc(struct_size(sar_spec, sub_specs, specs), GFP_KERNEL); if (!sar_spec) return -ENOMEM; @@ -15907,6 +16132,13 @@ static const struct genl_small_ops nl80211_small_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_SET_FILS_AAD, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = nl80211_set_fils_aad, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP, + }, }; static struct genl_family nl80211_fam __ro_after_init = { diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index ce6bf218a1a3b2..cc1efec4b27b62 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1381,4 +1381,18 @@ static inline int rdev_color_change(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_set_fils_aad(struct cfg80211_registered_device *rdev, + struct net_device *dev, struct cfg80211_fils_aad *fils_aad) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_set_fils_aad(&rdev->wiphy, dev, fils_aad); + if (rdev->ops->set_fils_aad) + ret = rdev->ops->set_fils_aad(&rdev->wiphy, dev, fils_aad); + trace_rdev_return_int(&rdev->wiphy, ret); + + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index adc0d14cfd8609..22e92be6193885 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -383,7 +383,7 @@ static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, const u8 *ssid, size_t ssid_len) { const struct cfg80211_bss_ies *ies; - const u8 *ssidie; + const struct element *ssid_elem; if (bssid && !ether_addr_equal(a->bssid, bssid)) return false; @@ -394,12 +394,12 @@ static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, ies = rcu_access_pointer(a->ies); if (!ies) return false; - ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); - if (!ssidie) + ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len); + if (!ssid_elem) return false; - if (ssidie[1] != ssid_len) + if (ssid_elem->datalen != ssid_len) return false; - return memcmp(ssidie + 2, ssid, ssid_len) == 0; + return memcmp(ssid_elem->data, ssid, ssid_len) == 0; } static int @@ -1794,25 +1794,13 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, return NULL; } -/* - * Update RX channel information based on the available frame payload - * information. 
This is mainly for the 2.4 GHz band where frames can be received - * from neighboring channels and the Beacon frames use the DSSS Parameter Set - * element to indicate the current (transmitting) channel, but this might also - * be needed on other bands if RX frequency does not match with the actual - * operating channel of a BSS. - */ -static struct ieee80211_channel * -cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, - struct ieee80211_channel *channel, - enum nl80211_bss_scan_width scan_width) +int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, + enum nl80211_band band) { const u8 *tmp; - u32 freq; int channel_number = -1; - struct ieee80211_channel *alt_channel; - if (channel->band == NL80211_BAND_S1GHZ) { + if (band == NL80211_BAND_S1GHZ) { tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen); if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) { struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2); @@ -1833,6 +1821,29 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, } } + return channel_number; +} +EXPORT_SYMBOL(cfg80211_get_ies_channel_number); + +/* + * Update RX channel information based on the available frame payload + * information. This is mainly for the 2.4 GHz band where frames can be received + * from neighboring channels and the Beacon frames use the DSSS Parameter Set + * element to indicate the current (transmitting) channel, but this might also + * be needed on other bands if RX frequency does not match with the actual + * operating channel of a BSS. + */ +static struct ieee80211_channel * +cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width) +{ + u32 freq; + int channel_number; + struct ieee80211_channel *alt_channel; + + channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band); + if (channel_number < 0) { /* No channel information in frame payload */ return channel; @@ -2075,12 +2086,12 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, if (!non_tx_data) return; - if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return; if (!wiphy->support_mbssid) return; if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return; new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); @@ -2447,10 +2458,10 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); if (!res || !wiphy->support_mbssid || - !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return res; non_tx_data.tx_bss = res; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 19b78d47228349..ad6c16a06bcb04 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -167,6 +167,19 @@ __entry->center_freq1, __entry->freq1_offset, \ __entry->center_freq2 +#define FILS_AAD_ASSIGN(fa) \ + do { \ + if (fa) { \ + ether_addr_copy(__entry->macaddr, fa->macaddr); \ + __entry->kek_len = fa->kek_len; \ + } else { \ + eth_zero_addr(__entry->macaddr); \ + __entry->kek_len = 0; \ + } \ + } while (0) +#define FILS_AAD_PR_FMT \ + 
"macaddr: %pM, kek_len: %d" + #define SINFO_ENTRY __field(int, generation) \ __field(u32, connected_time) \ __field(u32, inactive_time) \ @@ -2614,6 +2627,24 @@ DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_abort_pmsr, TP_ARGS(wiphy, wdev, cookie) ); +TRACE_EVENT(rdev_set_fils_aad, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_fils_aad *fils_aad), + TP_ARGS(wiphy, netdev, fils_aad), + TP_STRUCT__entry(WIPHY_ENTRY + NETDEV_ENTRY + __array(u8, macaddr, ETH_ALEN) + __field(u8, kek_len) + ), + TP_fast_assign(WIPHY_ASSIGN; + NETDEV_ASSIGN; + FILS_AAD_ASSIGN(fils_aad); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " FILS_AAD_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, + __entry->kek_len) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ diff --git a/net/wireless/util.c b/net/wireless/util.c index a1a99a5749844a..5ff1f8726faf8d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -80,6 +80,7 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band) return 0; /* not supported */ switch (band) { case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: if (chan == 14) return MHZ_TO_KHZ(2484); else if (chan < 14) @@ -209,6 +210,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) WARN_ON(want); break; case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: want = 7; for (i = 0; i < sband->n_bitrates; i++) { switch (sband->bitrates[i].bitrate) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index d6b500dc420847..f16074eb53c72a 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -134,21 +134,6 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool, return 0; } -void xp_release(struct xdp_buff_xsk *xskb) -{ - xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; -} - -static u64 xp_get_handle(struct xdp_buff_xsk *xskb) -{ - u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start; - - offset += xskb->pool->headroom; - if (!xskb->pool->unaligned) - return xskb->orig_addr + offset; - return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); -} - static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 8de01aaac4a084..90c4e1e819d38b 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -44,12 +44,13 @@ void xp_destroy(struct xsk_buff_pool *pool) struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem) { + bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; struct xsk_buff_pool *pool; struct xdp_buff_xsk *xskb; - u32 i; + u32 i, entries; - pool = kvzalloc(struct_size(pool, free_heads, umem->chunks), - GFP_KERNEL); + entries = unaligned ? 
umem->chunks : 0; + pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL); if (!pool) goto out; @@ -63,7 +64,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, pool->free_heads_cnt = umem->chunks; pool->headroom = umem->headroom; pool->chunk_size = umem->chunk_size; - pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; + pool->chunk_shift = ffs(umem->chunk_size) - 1; + pool->unaligned = unaligned; pool->frame_len = umem->chunk_size - umem->headroom - XDP_PACKET_HEADROOM; pool->umem = umem; @@ -81,7 +83,10 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, xskb = &pool->heads[i]; xskb->pool = pool; xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; - pool->free_heads[i] = xskb; + if (pool->unaligned) + pool->free_heads[i] = xskb; + else + xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); } return pool; @@ -406,6 +411,12 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, if (pool->unaligned) xp_check_dma_contiguity(dma_map); + else + for (i = 0; i < pool->heads_cnt; i++) { + struct xdp_buff_xsk *xskb = &pool->heads[i]; + + xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); + } err = xp_init_dma_info(pool, dma_map); if (err) { @@ -448,12 +459,9 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) if (pool->free_heads_cnt == 0) return NULL; - xskb = pool->free_heads[--pool->free_heads_cnt]; - for (;;) { if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { pool->fq->queue_empty_descs++; - xp_release(xskb); return NULL; } @@ -466,17 +474,17 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) } break; } - xskq_cons_release(pool->fq); - xskb->orig_addr = addr; - xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; - if (pool->dma_pages_cnt) { - xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] & - ~XSK_NEXT_PG_CONTIG_MASK) + - (addr & ~PAGE_MASK); - xskb->dma = xskb->frame_dma + pool->headroom + - XDP_PACKET_HEADROOM; + if (pool->unaligned) { + xskb = pool->free_heads[--pool->free_heads_cnt]; + xp_init_xskb_addr(xskb, pool, addr); + if (pool->dma_pages_cnt) + xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); + } else { + xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; } + + xskq_cons_release(pool->fq); return xskb; } @@ -507,6 +515,96 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) } EXPORT_SYMBOL(xp_alloc); +static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +{ + u32 i, cached_cons, nb_entries; + + if (max > pool->free_heads_cnt) + max = pool->free_heads_cnt; + max = xskq_cons_nb_entries(pool->fq, max); + + cached_cons = pool->fq->cached_cons; + nb_entries = max; + i = max; + while (i--) { + struct xdp_buff_xsk *xskb; + u64 addr; + bool ok; + + __xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr); + + ok = pool->unaligned ? 
xp_check_unaligned(pool, &addr) : + xp_check_aligned(pool, &addr); + if (unlikely(!ok)) { + pool->fq->invalid_descs++; + nb_entries--; + continue; + } + + if (pool->unaligned) { + xskb = pool->free_heads[--pool->free_heads_cnt]; + xp_init_xskb_addr(xskb, pool, addr); + if (pool->dma_pages_cnt) + xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); + } else { + xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; + } + + *xdp = &xskb->xdp; + xdp++; + } + + xskq_cons_release_n(pool->fq, max); + return nb_entries; +} + +static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries) +{ + struct xdp_buff_xsk *xskb; + u32 i; + + nb_entries = min_t(u32, nb_entries, pool->free_list_cnt); + + i = nb_entries; + while (i--) { + xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node); + list_del(&xskb->free_list_node); + + *xdp = &xskb->xdp; + xdp++; + } + pool->free_list_cnt -= nb_entries; + + return nb_entries; +} + +u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +{ + u32 nb_entries1 = 0, nb_entries2; + + if (unlikely(pool->dma_need_sync)) { + /* Slow path */ + *xdp = xp_alloc(pool); + return !!*xdp; + } + + if (unlikely(pool->free_list_cnt)) { + nb_entries1 = xp_alloc_reused(pool, xdp, max); + if (nb_entries1 == max) + return nb_entries1; + + max -= nb_entries1; + xdp += nb_entries1; + } + + nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max); + if (!nb_entries2) + pool->fq->queue_empty_descs++; + + return nb_entries1 + nb_entries2; +} +EXPORT_SYMBOL(xp_alloc_batch); + bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count) { if (pool->free_list_cnt >= count) diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 9ae13cccfb28d2..e9aa2c23635618 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -111,14 +111,18 @@ struct xsk_queue { /* Functions that read and validate content from consumer rings. 
*/ -static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) +static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; + u32 idx = cached_cons & q->ring_mask; - if (q->cached_cons != q->cached_prod) { - u32 idx = q->cached_cons & q->ring_mask; + *addr = ring->desc[idx]; +} - *addr = ring->desc[idx]; +static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) +{ + if (q->cached_cons != q->cached_prod) { + __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); return true; } diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 3df0861d4390f6..70a8c36f0ba6e6 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -530,7 +530,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) goto drop; } - if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { + if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); goto drop; } @@ -560,7 +560,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) } seq = 0; - if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { + if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) { secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); goto drop; diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore index fcba217f0ae2a2..0e7bfdbff80a64 100644 --- a/samples/bpf/.gitignore +++ b/samples/bpf/.gitignore @@ -57,3 +57,7 @@ testfile.img hbm_out.log iperf.* *.out +*.skel.h +/vmlinux.h +/bpftool/ +/libbpf/ diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 5fd48a8d4f10a5..a886dff1ba8991 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -3,6 +3,8 @@ BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src)) TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools +pound := \# + # List of programs to build tprogs-y := test_lru_dist tprogs-y += sock_example @@ -59,7 +61,11 @@ tprogs-y += xdp_redirect tprogs-y += xdp_monitor # Libbpf dependencies -LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a +LIBBPF_SRC = $(TOOLS_PATH)/lib/bpf +LIBBPF_OUTPUT = $(abspath $(BPF_SAMPLES_PATH))/libbpf +LIBBPF_DESTDIR = $(LIBBPF_OUTPUT) +LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include +LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o @@ -198,7 +204,7 @@ TPROGS_CFLAGS += -Wstrict-prototypes TPROGS_CFLAGS += -I$(objtree)/usr/include TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ -TPROGS_CFLAGS += -I$(srctree)/tools/lib/ +TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE) TPROGS_CFLAGS += -I$(srctree)/tools/include TPROGS_CFLAGS += -I$(srctree)/tools/perf TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0 @@ -223,6 +229,7 @@ CLANG ?= clang OPT ?= opt LLVM_DIS ?= llvm-dis LLVM_OBJCOPY ?= llvm-objcopy +LLVM_READELF ?= llvm-readelf BTF_PAHOLE ?= pahole # Detect that we're cross compiling and use the cross compiler @@ -232,7 +239,7 @@ endif # Don't evaluate probes and warnings if we need to run make recursively ifneq ($(src),) -HDR_PROBE := $(shell printf "\#include \n struct list_head { int a; }; int main() { return 0; }" | \ +HDR_PROBE := $(shell printf "$(pound)include \n struct list_head { int a; }; int main() { return 0; }" | \ $(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \ -o /dev/null 2>/dev/null && echo okay) @@ -246,7 +253,7 @@ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) 
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \ - readelf -S ./llvm_btf_verify.o | grep BTF; \ + $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \ /bin/rm -f ./llvm_btf_verify.o) BPF_EXTRA_CFLAGS += -fno-stack-protector @@ -268,16 +275,27 @@ all: clean: $(MAKE) -C ../../ M=$(CURDIR) clean @find $(CURDIR) -type f -name '*~' -delete + @$(RM) -r $(CURDIR)/libbpf $(CURDIR)/bpftool -$(LIBBPF): FORCE +$(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT) # Fix up variables inherited from Kbuild that tools/ build system won't like - $(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \ - LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O= + $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \ + LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ \ + O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \ + $@ install_headers BPFTOOLDIR := $(TOOLS_PATH)/bpf/bpftool -BPFTOOL := $(BPFTOOLDIR)/bpftool -$(BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) - $(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ +BPFTOOL_OUTPUT := $(abspath $(BPF_SAMPLES_PATH))/bpftool +BPFTOOL := $(BPFTOOL_OUTPUT)/bpftool +$(BPFTOOL): $(LIBBPF) $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT) + $(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \ + OUTPUT=$(BPFTOOL_OUTPUT)/ \ + LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \ + LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/ + +$(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT): + $(call msg,MKDIR,$@) + $(Q)mkdir -p $@ $(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE $(call filechk,offsets,__SYSCALL_NRS_H__) @@ -309,6 +327,11 @@ verify_target_bpf: verify_cmds $(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF) $(src)/*.c: verify_target_bpf $(LIBBPF) +libbpf_hdrs: $(LIBBPF) +$(obj)/$(TRACE_HELPERS): | libbpf_hdrs + +.PHONY: libbpf_hdrs + $(obj)/xdp_redirect_cpu_user.o: $(obj)/xdp_redirect_cpu.skel.h $(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h $(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h @@ -366,7 +389,7 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/x $(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(SRCARCH) \ -Wno-compare-distinct-pointer-types -I$(srctree)/include \ -I$(srctree)/samples/bpf -I$(srctree)/tools/include \ - -I$(srctree)/tools/lib $(CLANG_SYS_INCLUDES) \ + -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \ -c $(filter %.bpf.c,$^) -o $@ LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \ @@ -403,7 +426,7 @@ $(obj)/%.o: $(src)/%.c @echo " CLANG-bpf " $@ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \ - -I$(srctree)/tools/lib/ \ + -I$(LIBBPF_INCLUDE) \ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 116e39f6b66662..8675fa5273df88 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -128,7 +128,7 @@ int main(int argc, char **argv) if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) return 1; - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (!map) { printf("finding a map in obj file 
failed\n"); return 1; diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index 6e25fba64c72ba..d84e6949007cc9 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -325,7 +325,6 @@ int main(int argc, char **argv) int add_cpu = -1; int ifindex = -1; int *cpu, i, opt; - char *ifname; __u32 qsize; int n_cpus; @@ -393,9 +392,8 @@ int main(int argc, char **argv) fprintf(stderr, "-d/--dev name too long\n"); goto end_cpu; } - ifname = (char *)&ifname_buf; - safe_strncpy(ifname, optarg, sizeof(ifname)); - ifindex = if_nametoindex(ifname); + safe_strncpy(ifname_buf, optarg, strlen(ifname_buf)); + ifindex = if_nametoindex(ifname_buf); if (!ifindex) ifindex = strtoul(optarg, NULL, 0); if (!ifindex) { diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c index b5f03cb17a3c04..cfaf7e50e43169 100644 --- a/samples/bpf/xdp_router_ipv4_user.c +++ b/samples/bpf/xdp_router_ipv4_user.c @@ -155,7 +155,7 @@ static void read_route(struct nlmsghdr *nh, int nll) printf("%d\n", nh->nlmsg_type); memset(&route, 0, sizeof(route)); - printf("Destination\t\tGateway\t\tGenmask\t\tMetric\t\tIface\n"); + printf("Destination Gateway Genmask Metric Iface\n"); for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { rt_msg = (struct rtmsg *)NLMSG_DATA(nh); rtm_family = rt_msg->rtm_family; @@ -207,6 +207,7 @@ static void read_route(struct nlmsghdr *nh, int nll) int metric; __be32 gw; } *prefix_value; + struct in_addr dst_addr, gw_addr, mask_addr; prefix_key = alloca(sizeof(*prefix_key) + 3); prefix_value = alloca(sizeof(*prefix_value)); @@ -234,14 +235,17 @@ static void read_route(struct nlmsghdr *nh, int nll) for (i = 0; i < 4; i++) prefix_key->data[i] = (route.dst >> i * 8) & 0xff; - printf("%3d.%d.%d.%d\t\t%3x\t\t%d\t\t%d\t\t%s\n", - (int)prefix_key->data[0], - (int)prefix_key->data[1], - (int)prefix_key->data[2], - (int)prefix_key->data[3], - route.gw, route.dst_len, + dst_addr.s_addr = route.dst; + printf("%-16s", inet_ntoa(dst_addr)); + + gw_addr.s_addr = route.gw; + printf("%-16s", inet_ntoa(gw_addr)); + + mask_addr.s_addr = htonl(~(0xffffffffU >> route.dst_len)); + printf("%-16s%-7d%s\n", inet_ntoa(mask_addr), route.metric, route.iface_name); + if (bpf_map_lookup_elem(lpm_map_fd, prefix_key, prefix_value) < 0) { for (i = 0; i < 4; i++) @@ -393,8 +397,12 @@ static void read_arp(struct nlmsghdr *nh, int nll) if (nh->nlmsg_type == RTM_GETNEIGH) printf("READING arp entry\n"); - printf("Address\tHwAddress\n"); + printf("Address HwAddress\n"); for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { + struct in_addr dst_addr; + char mac_str[18]; + int len = 0, i; + rt_msg = (struct ndmsg *)NLMSG_DATA(nh); rt_attr = (struct rtattr *)RTM_RTA(rt_msg); ndm_family = rt_msg->ndm_family; @@ -415,7 +423,14 @@ static void read_arp(struct nlmsghdr *nh, int nll) } arp_entry.dst = atoi(dsts); arp_entry.mac = atol(mac); - printf("%x\t\t%llx\n", arp_entry.dst, arp_entry.mac); + + dst_addr.s_addr = arp_entry.dst; + for (i = 0; i < 6; i++) + len += snprintf(mac_str + len, 18 - len, "%02llx%s", + ((arp_entry.mac >> i * 8) & 0xff), + i < 5 ? 
":" : ""); + printf("%-16s%s\n", inet_ntoa(dst_addr), mac_str); + if (ndm_family == AF_INET) { if (bpf_map_lookup_elem(exact_match_map_fd, &arp_entry.dst, @@ -672,7 +687,7 @@ int main(int ac, char **argv) if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) return 1; - printf("\n**************loading bpf file*********************\n\n\n"); + printf("\n******************loading bpf file*********************\n"); if (!prog_fd) { printf("bpf_prog_load_xattr: %s\n", strerror(errno)); return 1; @@ -722,9 +737,9 @@ int main(int ac, char **argv) signal(SIGINT, int_exit); signal(SIGTERM, int_exit); - printf("*******************ROUTE TABLE*************************\n\n\n"); + printf("\n*******************ROUTE TABLE*************************\n"); get_route_table(AF_INET); - printf("*******************ARP TABLE***************************\n\n\n"); + printf("\n*******************ARP TABLE***************************\n"); get_arp_table(AF_INET); if (monitor_route() < 0) { printf("Error in receiving route update"); diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c index 495e09897bd3d5..f4382ccdcbb183 100644 --- a/samples/bpf/xdp_sample_pkts_user.c +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -154,7 +154,7 @@ int main(int argc, char **argv) return 1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (!map) { printf("finding a map in obj file failed\n"); return 1; diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h index 0cc9816fe8e828..417e48a4c4df2b 100644 --- a/samples/seccomp/bpf-helper.h +++ b/samples/seccomp/bpf-helper.h @@ -62,9 +62,9 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count); #define EXPAND(...) __VA_ARGS__ /* Ensure that we load the logically correct offset. */ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32) #else #error "Unknown endianness" @@ -85,10 +85,10 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count); #elif __BITS_PER_LONG == 64 /* Ensure that we load the logically correct offset. 
*/ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define ENDIAN(_lo, _hi) _lo, _hi #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32) -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define ENDIAN(_lo, _hi) _hi, _lo #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) #endif diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index ff805777431ce7..7f39599e9faedd 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -40,7 +40,8 @@ quiet_cmd_ld_ko_o = LD [M] $@ quiet_cmd_btf_ko = BTF [M] $@ cmd_btf_ko = \ if [ -f vmlinux ]; then \ - LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \ + LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \ + $(RESOLVE_BTFIDS) -b vmlinux $@; \ else \ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ fi; diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index 00ac7b79cddb47..a6403ddf5de721 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -537,6 +537,7 @@ def __init__(self, parser): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct unix_sock', 'struct task_struct', 'struct __sk_buff', @@ -589,6 +590,7 @@ def __init__(self, parser): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct unix_sock', 'struct task_struct', 'struct path', 'struct btf_ptr', diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index d74cee5c4326a5..3ea7cece7c97d9 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -205,7 +205,6 @@ vmlinux_link() gen_btf() { local pahole_ver - local extra_paholeopt= if ! [ -x "$(command -v ${PAHOLE})" ]; then echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" @@ -220,16 +219,8 @@ gen_btf() vmlinux_link ${1} - if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then - # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars - extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars" - fi - if [ "${pahole_ver}" -ge "121" ]; then - extra_paholeopt="${extra_paholeopt} --btf_gen_floats" - fi - info "BTF" ${2} - LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1} + LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1} # Create ${2} which contains just .BTF section but no symbols. Add # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh new file mode 100755 index 00000000000000..e6093adf4c06d7 --- /dev/null +++ b/scripts/pahole-flags.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +extra_paholeopt= + +if ! 
[ -x "$(command -v ${PAHOLE})" ]; then + exit 0 +fi + +pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/') + +if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then + # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars" +fi +if [ "${pahole_ver}" -ge "121" ]; then + extra_paholeopt="${extra_paholeopt} --btf_gen_floats" +fi + +echo ${extra_paholeopt} diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index d73232be1e9912..c0c30e56988f2c 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -14,33 +14,43 @@ else Q = @ endif -BPF_DIR = $(srctree)/tools/lib/bpf/ +BPF_DIR = $(srctree)/tools/lib/bpf ifneq ($(OUTPUT),) - LIBBPF_OUTPUT = $(OUTPUT)/libbpf/ - LIBBPF_PATH = $(LIBBPF_OUTPUT) - BOOTSTRAP_OUTPUT = $(OUTPUT)/bootstrap/ + _OUTPUT := $(OUTPUT) else - LIBBPF_OUTPUT = - LIBBPF_PATH = $(BPF_DIR) - BOOTSTRAP_OUTPUT = $(CURDIR)/bootstrap/ + _OUTPUT := $(CURDIR) endif +BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/ +LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/ +LIBBPF_DESTDIR := $(LIBBPF_OUTPUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include +LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf -LIBBPF = $(LIBBPF_PATH)libbpf.a +LIBBPF = $(LIBBPF_OUTPUT)libbpf.a LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/ LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a +# We need to copy hashmap.h and nlattr.h which is not otherwise exported by +# libbpf, but still required by bpftool. +LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h) + ifeq ($(BPFTOOL_VERSION),) BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion) endif -$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT): +$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR): $(QUIET_MKDIR)mkdir -p $@ -$(LIBBPF): FORCE | $(LIBBPF_OUTPUT) - $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a +$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT) + $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= $(LIBBPF) install_headers + +$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR) + $(call QUIET_INSTALL, $@) + $(Q)install -m 644 -t $(LIBBPF_HDRS_DIR) $< -$(LIBBPF_BOOTSTRAP): FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT) +$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT) $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \ ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@ @@ -60,11 +70,10 @@ CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS)) CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ -I$(if $(OUTPUT),$(OUTPUT),.) 
\ + -I$(LIBBPF_INCLUDE) \ -I$(srctree)/kernel/bpf/ \ -I$(srctree)/tools/include \ - -I$(srctree)/tools/include/uapi \ - -I$(srctree)/tools/lib \ - -I$(srctree)/tools/perf + -I$(srctree)/tools/include/uapi CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"' ifneq ($(EXTRA_CFLAGS),) CFLAGS += $(EXTRA_CFLAGS) @@ -137,7 +146,10 @@ endif BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o) +$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP) + OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o +$(OBJS): $(LIBBPF) $(LIBBPF_INTERNAL_HDRS) VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ @@ -164,8 +176,7 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) $(QUIET_CLANG)$(CLANG) \ -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/tools/include/uapi/ \ - -I$(LIBBPF_PATH) \ - -I$(srctree)/tools/lib \ + -I$(LIBBPF_INCLUDE) \ -g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) @@ -186,7 +197,10 @@ $(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< -$(OUTPUT)feature.o: | zdep +$(OUTPUT)feature.o: +ifneq ($(feature-zlib), 1) + $(error "No zlib found") +endif $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP) $(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \ @@ -195,7 +209,7 @@ $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP) $(OUTPUT)bpftool: $(OBJS) $(LIBBPF) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS) -$(BOOTSTRAP_OUTPUT)%.o: %.c | $(BOOTSTRAP_OUTPUT) +$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT) $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $< $(OUTPUT)%.o: %.c @@ -214,10 +228,12 @@ clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool $(Q)$(RM) -r -- $(OUTPUT)feature/ -install: $(OUTPUT)bpftool +install-bin: $(OUTPUT)bpftool $(call QUIET_INSTALL, bpftool) $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin $(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool + +install: install-bin $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir) $(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir) @@ -240,10 +256,7 @@ doc-uninstall: FORCE: -zdep: - @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi - .SECONDARY: -.PHONY: all FORCE clean install uninstall zdep +.PHONY: all FORCE clean install-bin install uninstall .PHONY: doc doc-clean doc-install doc-uninstall .DEFAULT_GOAL := all diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index f7e5ff3586c9b4..015d2758f82647 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -8,14 +8,15 @@ #include #include #include -#include -#include -#include #include -#include #include #include +#include +#include +#include +#include + #include "json_writer.h" #include "main.h" @@ -37,16 +38,12 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", -}; - -struct btf_attach_table { - DECLARE_HASHTABLE(table, 16); + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; struct btf_attach_point { __u32 obj_id; __u32 btf_id; - struct hlist_node hash; }; static const char *btf_int_enc_str(__u8 encoding) @@ -328,7 +325,7 @@ static int dump_btf_type(const 
struct btf *btf, __u32 id, printf("\n\ttype_id=%u offset=%u size=%u", v->type, v->offset, v->size); - if (v->type <= btf__get_nr_types(btf)) { + if (v->type < btf__type_cnt(btf)) { vt = btf__type_by_id(btf, v->type); printf(" (%s '%s')", btf_kind_str[btf_kind_safe(btf_kind(vt))], @@ -347,6 +344,17 @@ static int dump_btf_type(const struct btf *btf, __u32 id, printf(" size=%u", t->size); break; } + case BTF_KIND_DECL_TAG: { + const struct btf_decl_tag *tag = (const void *)(t + 1); + + if (json_output) { + jsonw_uint_field(w, "type_id", t->type); + jsonw_int_field(w, "component_idx", tag->component_idx); + } else { + printf(" type_id=%u component_idx=%d", t->type, tag->component_idx); + } + break; + } default: break; } @@ -378,14 +386,14 @@ static int dump_btf_raw(const struct btf *btf, } } else { const struct btf *base; - int cnt = btf__get_nr_types(btf); + int cnt = btf__type_cnt(btf); int start_id = 1; base = btf__base_btf(btf); if (base) - start_id = btf__get_nr_types(base) + 1; + start_id = btf__type_cnt(base); - for (i = start_id; i <= cnt; i++) { + for (i = start_id; i < cnt; i++) { t = btf__type_by_id(btf, i); dump_btf_type(btf, i, t); } @@ -428,9 +436,9 @@ static int dump_btf_c(const struct btf *btf, goto done; } } else { - int cnt = btf__get_nr_types(btf); + int cnt = btf__type_cnt(btf); - for (i = 1; i <= cnt; i++) { + for (i = 1; i < cnt; i++) { err = btf_dump__dump_type(d, i); if (err) goto done; @@ -633,21 +641,8 @@ static int btf_parse_fd(int *argc, char ***argv) return fd; } -static void delete_btf_table(struct btf_attach_table *tab) -{ - struct btf_attach_point *obj; - struct hlist_node *tmp; - - unsigned int bkt; - - hash_for_each_safe(tab->table, bkt, tmp, obj, hash) { - hash_del(&obj->hash); - free(obj); - } -} - static int -build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type, +build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type, void *info, __u32 *len) { static const char * const names[] = { @@ -655,7 +650,6 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type, [BPF_OBJ_PROG] = "prog", [BPF_OBJ_MAP] = "map", }; - struct btf_attach_point *obj_node; __u32 btf_id, id = 0; int err; int fd; @@ -729,28 +723,25 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type, if (!btf_id) continue; - obj_node = calloc(1, sizeof(*obj_node)); - if (!obj_node) { - p_err("failed to allocate memory: %s", strerror(errno)); - err = -ENOMEM; + err = hashmap__append(tab, u32_as_hash_field(btf_id), + u32_as_hash_field(id)); + if (err) { + p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s", + btf_id, id, strerror(errno)); goto err_free; } - - obj_node->obj_id = id; - obj_node->btf_id = btf_id; - hash_add(tab->table, &obj_node->hash, obj_node->btf_id); } return 0; err_free: - delete_btf_table(tab); + hashmap__free(tab); return err; } static int -build_btf_tables(struct btf_attach_table *btf_prog_table, - struct btf_attach_table *btf_map_table) +build_btf_tables(struct hashmap *btf_prog_table, + struct hashmap *btf_map_table) { struct bpf_prog_info prog_info; __u32 prog_len = sizeof(prog_info); @@ -766,7 +757,7 @@ build_btf_tables(struct btf_attach_table *btf_prog_table, err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info, &map_len); if (err) { - delete_btf_table(btf_prog_table); + hashmap__free(btf_prog_table); return err; } @@ -775,10 +766,10 @@ build_btf_tables(struct btf_attach_table *btf_prog_table, static void show_btf_plain(struct bpf_btf_info *info, int fd, - struct 
btf_attach_table *btf_prog_table, - struct btf_attach_table *btf_map_table) + struct hashmap *btf_prog_table, + struct hashmap *btf_map_table) { - struct btf_attach_point *obj; + struct hashmap_entry *entry; const char *name = u64_to_ptr(info->name); int n; @@ -792,29 +783,30 @@ show_btf_plain(struct bpf_btf_info *info, int fd, printf("size %uB", info->btf_size); n = 0; - hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) { - if (obj->btf_id == info->id) - printf("%s%u", n++ == 0 ? " prog_ids " : ",", - obj->obj_id); + hashmap__for_each_key_entry(btf_prog_table, entry, + u32_as_hash_field(info->id)) { + printf("%s%u", n++ == 0 ? " prog_ids " : ",", + hash_field_as_u32(entry->value)); } n = 0; - hash_for_each_possible(btf_map_table->table, obj, hash, info->id) { - if (obj->btf_id == info->id) - printf("%s%u", n++ == 0 ? " map_ids " : ",", - obj->obj_id); + hashmap__for_each_key_entry(btf_map_table, entry, + u32_as_hash_field(info->id)) { + printf("%s%u", n++ == 0 ? " map_ids " : ",", + hash_field_as_u32(entry->value)); } - emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + + emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); printf("\n"); } static void show_btf_json(struct bpf_btf_info *info, int fd, - struct btf_attach_table *btf_prog_table, - struct btf_attach_table *btf_map_table) + struct hashmap *btf_prog_table, + struct hashmap *btf_map_table) { - struct btf_attach_point *obj; + struct hashmap_entry *entry; const char *name = u64_to_ptr(info->name); jsonw_start_object(json_wtr); /* btf object */ @@ -823,23 +815,21 @@ show_btf_json(struct bpf_btf_info *info, int fd, jsonw_name(json_wtr, "prog_ids"); jsonw_start_array(json_wtr); /* prog_ids */ - hash_for_each_possible(btf_prog_table->table, obj, hash, - info->id) { - if (obj->btf_id == info->id) - jsonw_uint(json_wtr, obj->obj_id); + hashmap__for_each_key_entry(btf_prog_table, entry, + u32_as_hash_field(info->id)) { + jsonw_uint(json_wtr, hash_field_as_u32(entry->value)); } jsonw_end_array(json_wtr); /* prog_ids */ jsonw_name(json_wtr, "map_ids"); jsonw_start_array(json_wtr); /* map_ids */ - hash_for_each_possible(btf_map_table->table, obj, hash, - info->id) { - if (obj->btf_id == info->id) - jsonw_uint(json_wtr, obj->obj_id); + hashmap__for_each_key_entry(btf_map_table, entry, + u32_as_hash_field(info->id)) { + jsonw_uint(json_wtr, hash_field_as_u32(entry->value)); } jsonw_end_array(json_wtr); /* map_ids */ - emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */ + emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */ jsonw_bool_field(json_wtr, "kernel", info->kernel_btf); @@ -850,8 +840,8 @@ show_btf_json(struct bpf_btf_info *info, int fd, } static int -show_btf(int fd, struct btf_attach_table *btf_prog_table, - struct btf_attach_table *btf_map_table) +show_btf(int fd, struct hashmap *btf_prog_table, + struct hashmap *btf_map_table) { struct bpf_btf_info info; __u32 len = sizeof(info); @@ -888,8 +878,8 @@ show_btf(int fd, struct btf_attach_table *btf_prog_table, static int do_show(int argc, char **argv) { - struct btf_attach_table btf_prog_table; - struct btf_attach_table btf_map_table; + struct hashmap *btf_prog_table; + struct hashmap *btf_map_table; int err, fd = -1; __u32 id = 0; @@ -905,9 +895,19 @@ static int do_show(int argc, char **argv) return BAD_ARG(); } - hash_init(btf_prog_table.table); - hash_init(btf_map_table.table); - err = build_btf_tables(&btf_prog_table, &btf_map_table); + btf_prog_table = hashmap__new(hash_fn_for_key_as_id, + equal_fn_for_key_as_id, NULL); + 
btf_map_table = hashmap__new(hash_fn_for_key_as_id, + equal_fn_for_key_as_id, NULL); + if (!btf_prog_table || !btf_map_table) { + hashmap__free(btf_prog_table); + hashmap__free(btf_map_table); + if (fd >= 0) + close(fd); + p_err("failed to create hashmap for object references"); + return -1; + } + err = build_btf_tables(btf_prog_table, btf_map_table); if (err) { if (fd >= 0) close(fd); @@ -916,7 +916,7 @@ static int do_show(int argc, char **argv) build_obj_refs_table(&refs_table, BPF_OBJ_BTF); if (fd >= 0) { - err = show_btf(fd, &btf_prog_table, &btf_map_table); + err = show_btf(fd, btf_prog_table, btf_map_table); close(fd); goto exit_free; } @@ -948,7 +948,7 @@ static int do_show(int argc, char **argv) break; } - err = show_btf(fd, &btf_prog_table, &btf_map_table); + err = show_btf(fd, btf_prog_table, btf_map_table); close(fd); if (err) break; @@ -958,9 +958,9 @@ static int do_show(int argc, char **argv) jsonw_end_array(json_wtr); /* root array */ exit_free: - delete_btf_table(&btf_prog_table); - delete_btf_table(&btf_map_table); - delete_obj_refs_table(&refs_table); + hashmap__free(btf_prog_table); + hashmap__free(btf_map_table); + delete_obj_refs_table(refs_table); return err; } diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index d42d930a3ec4d4..511eccdbdfe6df 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -22,6 +22,7 @@ #include #include +#include #include /* libbpf_num_possible_cpus */ #include "main.h" @@ -393,7 +394,7 @@ void print_hex_data_json(uint8_t *data, size_t len) } /* extra params for nftw cb */ -static struct pinned_obj_table *build_fn_table; +static struct hashmap *build_fn_table; static enum bpf_obj_type build_fn_type; static int do_build_table_cb(const char *fpath, const struct stat *sb, @@ -401,9 +402,9 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb, { struct bpf_prog_info pinned_info; __u32 len = sizeof(pinned_info); - struct pinned_obj *obj_node; enum bpf_obj_type objtype; int fd, err = 0; + char *path; if (typeflag != FTW_F) goto out_ret; @@ -420,28 +421,26 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb, if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len)) goto out_close; - obj_node = calloc(1, sizeof(*obj_node)); - if (!obj_node) { + path = strdup(fpath); + if (!path) { err = -1; goto out_close; } - obj_node->id = pinned_info.id; - obj_node->path = strdup(fpath); - if (!obj_node->path) { - err = -1; - free(obj_node); + err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path); + if (err) { + p_err("failed to append entry to hashmap for ID %u, path '%s': %s", + pinned_info.id, path, strerror(errno)); goto out_close; } - hash_add(build_fn_table->table, &obj_node->hash, obj_node->id); out_close: close(fd); out_ret: return err; } -int build_pinned_obj_table(struct pinned_obj_table *tab, +int build_pinned_obj_table(struct hashmap *tab, enum bpf_obj_type type) { struct mntent *mntent = NULL; @@ -470,17 +469,18 @@ int build_pinned_obj_table(struct pinned_obj_table *tab, return err; } -void delete_pinned_obj_table(struct pinned_obj_table *tab) +void delete_pinned_obj_table(struct hashmap *map) { - struct pinned_obj *obj; - struct hlist_node *tmp; - unsigned int bkt; + struct hashmap_entry *entry; + size_t bkt; - hash_for_each_safe(tab->table, bkt, tmp, obj, hash) { - hash_del(&obj->hash); - free(obj->path); - free(obj); - } + if (!map) + return; + + hashmap__for_each_entry(map, entry, bkt) + free(entry->value); + + hashmap__free(map); } unsigned int 
get_page_size(void) @@ -962,3 +962,13 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) return fd; } + +size_t hash_fn_for_key_as_id(const void *key, void *ctx) +{ + return (size_t)key; +} + +bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx) +{ + return k1 == k2; +} diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 7f36385aa9e2e3..ade44577688edb 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -624,6 +624,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, */ switch (id) { case BPF_FUNC_trace_printk: + case BPF_FUNC_trace_vprintk: case BPF_FUNC_probe_write_user: if (!full_mode) continue; diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index d40d92bbf0e484..5c18351290f002 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "json_writer.h" #include "main.h" @@ -34,6 +33,11 @@ static void sanitize_identifier(char *name) name[i] = '_'; } +static bool str_has_prefix(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} + static bool str_has_suffix(const char *str, const char *suffix) { size_t i, n1 = strlen(str), n2 = strlen(suffix); @@ -68,23 +72,47 @@ static void get_header_guard(char *guard, const char *obj_name) guard[i] = toupper(guard[i]); } -static const char *get_map_ident(const struct bpf_map *map) +static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz) { + static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" }; const char *name = bpf_map__name(map); + int i, n; + + if (!bpf_map__is_internal(map)) { + snprintf(buf, buf_sz, "%s", name); + return true; + } + + for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) { + const char *sfx = sfxs[i], *p; + + p = strstr(name, sfx); + if (p) { + snprintf(buf, buf_sz, "%s", p + 1); + sanitize_identifier(buf); + return true; + } + } + + return false; +} + +static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz) +{ + static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" }; + int i, n; + + for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) { + const char *pfx = pfxs[i]; + + if (str_has_prefix(sec_name, pfx)) { + snprintf(buf, buf_sz, "%s", sec_name + 1); + sanitize_identifier(buf); + return true; + } + } - if (!bpf_map__is_internal(map)) - return name; - - if (str_has_suffix(name, ".data")) - return "data"; - else if (str_has_suffix(name, ".rodata")) - return "rodata"; - else if (str_has_suffix(name, ".bss")) - return "bss"; - else if (str_has_suffix(name, ".kconfig")) - return "kconfig"; - else - return NULL; + return false; } static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) @@ -101,24 +129,14 @@ static int codegen_datasec_def(struct bpf_object *obj, const char *sec_name = btf__name_by_offset(btf, sec->name_off); const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec); int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); - const char *sec_ident; - char var_ident[256]; + char var_ident[256], sec_ident[256]; bool strip_mods = false; - if (strcmp(sec_name, ".data") == 0) { - sec_ident = "data"; - strip_mods = true; - } else if (strcmp(sec_name, ".bss") == 0) { - sec_ident = "bss"; - strip_mods = true; - } else if (strcmp(sec_name, ".rodata") == 0) { - sec_ident = "rodata"; - strip_mods = true; - } else if (strcmp(sec_name, ".kconfig") == 0) { - sec_ident = "kconfig"; - } else { + 
if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident))) return 0; - } + + if (strcmp(sec_name, ".kconfig") != 0) + strip_mods = true; printf(" struct %s__%s {\n", obj_name, sec_ident); for (i = 0; i < vlen; i++, sec_var++) { @@ -193,24 +211,63 @@ static int codegen_datasec_def(struct bpf_object *obj, static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) { struct btf *btf = bpf_object__btf(obj); - int n = btf__get_nr_types(btf); + int n = btf__type_cnt(btf); struct btf_dump *d; + struct bpf_map *map; + const struct btf_type *sec; + char sec_ident[256], map_ident[256]; int i, err = 0; d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf); if (IS_ERR(d)) return PTR_ERR(d); - for (i = 1; i <= n; i++) { - const struct btf_type *t = btf__type_by_id(btf, i); + bpf_object__for_each_map(map, obj) { + /* only generate definitions for memory-mapped internal maps */ + if (!bpf_map__is_internal(map)) + continue; + if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) + continue; - if (!btf_is_datasec(t)) + if (!get_map_ident(map, map_ident, sizeof(map_ident))) continue; - err = codegen_datasec_def(obj, btf, d, t, obj_name); - if (err) - goto out; + sec = NULL; + for (i = 1; i < n; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name; + + if (!btf_is_datasec(t)) + continue; + + name = btf__str_by_offset(btf, t->name_off); + if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident))) + continue; + + if (strcmp(sec_ident, map_ident) == 0) { + sec = t; + break; + } + } + + /* In some cases (e.g., sections like .rodata.cst16 containing + * compiler allocated string constants only) there will be + * special internal maps with no corresponding DATASEC BTF + * type. In such case, generate empty structs for each such + * map. It will still be memory-mapped and its contents + * accessible from user-space through BPF skeleton. + */ + if (!sec) { + printf(" struct %s__%s {\n", obj_name, map_ident); + printf(" } *%s;\n", map_ident); + } else { + err = codegen_datasec_def(obj, btf, d, sec, obj_name); + if (err) + goto out; + } } + + out: btf_dump__free(d); return err; @@ -238,8 +295,8 @@ static void codegen(const char *template, ...) 
} else if (c == '\n') { break; } else { - p_err("unrecognized character at pos %td in template '%s'", - src - template - 1, template); + p_err("unrecognized character at pos %td in template '%s': '%c'", + src - template - 1, template, c); free(s); exit(-1); } @@ -386,6 +443,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name) { struct bpf_program *prog; struct bpf_map *map; + char ident[256]; codegen("\ \n\ @@ -406,10 +464,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name) } bpf_object__for_each_map(map, obj) { - const char * ident; - - ident = get_map_ident(map); - if (!ident) + if (!get_map_ident(map, ident, sizeof(ident))) continue; if (bpf_map__is_internal(map) && (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) @@ -433,6 +488,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h struct bpf_object_load_attr load_attr = {}; DECLARE_LIBBPF_OPTS(gen_loader_opts, opts); struct bpf_map *map; + char ident[256]; int err = 0; err = bpf_object__gen_loader(obj, &opts); @@ -478,12 +534,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h ", obj_name, opts.data_sz); bpf_object__for_each_map(map, obj) { - const char *ident; const void *mmap_data = NULL; size_t mmap_size = 0; - ident = get_map_ident(map); - if (!ident) + if (!get_map_ident(map, ident, sizeof(ident))) continue; if (!bpf_map__is_internal(map) || @@ -545,15 +599,15 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h return err; \n\ ", obj_name); bpf_object__for_each_map(map, obj) { - const char *ident, *mmap_flags; + const char *mmap_flags; - ident = get_map_ident(map); - if (!ident) + if (!get_map_ident(map, ident, sizeof(ident))) continue; if (!bpf_map__is_internal(map) || !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) continue; + if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG) mmap_flags = "PROT_READ"; else @@ -603,7 +657,8 @@ static int do_skeleton(int argc, char **argv) DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; struct bpf_object *obj = NULL; - const char *file, *ident; + const char *file; + char ident[256]; struct bpf_program *prog; int fd, err = -1; struct bpf_map *map; @@ -674,8 +729,7 @@ static int do_skeleton(int argc, char **argv) } bpf_object__for_each_map(map, obj) { - ident = get_map_ident(map); - if (!ident) { + if (!get_map_ident(map, ident, sizeof(ident))) { p_err("ignoring unrecognized internal map '%s'...", bpf_map__name(map)); continue; @@ -728,8 +782,7 @@ static int do_skeleton(int argc, char **argv) if (map_cnt) { printf("\tstruct {\n"); bpf_object__for_each_map(map, obj) { - ident = get_map_ident(map); - if (!ident) + if (!get_map_ident(map, ident, sizeof(ident))) continue; if (use_loader) printf("\t\tstruct bpf_map_desc %s;\n", ident); @@ -803,7 +856,10 @@ static int do_skeleton(int argc, char **argv) } \n\ \n\ err = %1$s__create_skeleton(obj); \n\ - err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\ + if (err) \n\ + goto err_out; \n\ + \n\ + err = bpf_object__open_skeleton(obj->skeleton, opts);\n\ if (err) \n\ goto err_out; \n\ \n\ @@ -862,6 +918,8 @@ static int do_skeleton(int argc, char **argv) codegen("\ \n\ \n\ + static inline const void *%1$s__elf_bytes(size_t *sz); \n\ + \n\ static inline int \n\ %1$s__create_skeleton(struct %1$s *obj) \n\ { \n\ @@ -893,9 +951,7 @@ static int do_skeleton(int argc, char **argv) ); i = 0; bpf_object__for_each_map(map, obj) { - ident = 
get_map_ident(map); - - if (!ident) + if (!get_map_ident(map, ident, sizeof(ident))) continue; codegen("\ @@ -943,10 +999,20 @@ static int do_skeleton(int argc, char **argv) codegen("\ \n\ \n\ - s->data_sz = %d; \n\ - s->data = (void *)\"\\ \n\ - ", - file_sz); + s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\ + \n\ + return 0; \n\ + err: \n\ + bpf_object__destroy_skeleton(s); \n\ + return -ENOMEM; \n\ + } \n\ + \n\ + static inline const void *%2$s__elf_bytes(size_t *sz) \n\ + { \n\ + *sz = %1$d; \n\ + return (const void *)\"\\ \n\ + " + , file_sz, obj_name); /* embed contents of BPF object file */ print_hex(obj_data, file_sz); @@ -954,11 +1020,6 @@ static int do_skeleton(int argc, char **argv) codegen("\ \n\ \"; \n\ - \n\ - return 0; \n\ - err: \n\ - bpf_object__destroy_skeleton(s); \n\ - return -ENOMEM; \n\ } \n\ \n\ #endif /* %s */ \n\ diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c index 84a9b01d956dc3..6c0de647b8ad52 100644 --- a/tools/bpf/bpftool/iter.c +++ b/tools/bpf/bpftool/iter.c @@ -57,7 +57,7 @@ static int do_pin(int argc, char **argv) goto close_obj; } - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (!prog) { p_err("can't find bpf program in objfile %s", objfile); goto close_obj; diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 8cc3e36f8cc695..2c258db0d3521a 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -7,6 +7,7 @@ #include #include +#include #include "json_writer.h" #include "main.h" @@ -20,6 +21,8 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_NETNS] = "netns", }; +static struct hashmap *link_table; + static int link_parse_fd(int *argc, char ***argv) { int fd; @@ -156,19 +159,18 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) break; } - if (!hash_empty(link_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(link_table)) { + struct hashmap_entry *entry; jsonw_name(json_wtr, "pinned"); jsonw_start_array(json_wtr); - hash_for_each_possible(link_table.table, obj, hash, info->id) { - if (obj->id == info->id) - jsonw_string(json_wtr, obj->path); - } + hashmap__for_each_key_entry(link_table, entry, + u32_as_hash_field(info->id)) + jsonw_string(json_wtr, entry->value); jsonw_end_array(json_wtr); } - emit_obj_refs_json(&refs_table, info->id, json_wtr); + emit_obj_refs_json(refs_table, info->id, json_wtr); jsonw_end_object(json_wtr); @@ -244,15 +246,14 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) break; } - if (!hash_empty(link_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(link_table)) { + struct hashmap_entry *entry; - hash_for_each_possible(link_table.table, obj, hash, info->id) { - if (obj->id == info->id) - printf("\n\tpinned %s", obj->path); - } + hashmap__for_each_key_entry(link_table, entry, + u32_as_hash_field(info->id)) + printf("\n\tpinned %s", (char *)entry->value); } - emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); printf("\n"); @@ -302,8 +303,15 @@ static int do_show(int argc, char **argv) __u32 id = 0; int err, fd; - if (show_pinned) - build_pinned_obj_table(&link_table, BPF_OBJ_LINK); + if (show_pinned) { + link_table = hashmap__new(hash_fn_for_key_as_id, + equal_fn_for_key_as_id, NULL); + if (!link_table) { + p_err("failed to create hashmap for pinned paths"); + return -1; + } + build_pinned_obj_table(link_table, BPF_OBJ_LINK); + } build_obj_refs_table(&refs_table, BPF_OBJ_LINK); if (argc == 2) { 
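
The bpftool changes above replace the open-coded kernel-style hash tables with libbpf's generic hashmap, keyed directly by the 32-bit object ID. The sketch below is illustrative only and is not part of the patch: it assumes libbpf's internal tools/lib/bpf/hashmap.h (installed as <bpf/hashmap.h> for bpftool by this series) and shows the ID-as-key pattern that hash_fn_for_key_as_id(), equal_fn_for_key_as_id() and u32_as_hash_field() implement.

	/*
	 * Illustrative sketch only -- not part of the patch. The u32 object ID
	 * is stored directly in the hashmap key pointer, so the hash and
	 * equality callbacks can use the key value itself; no per-key
	 * allocation is needed.
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>
	#include <bpf/hashmap.h>

	static size_t id_hash_fn(const void *key, void *ctx)
	{
		return (size_t)key;		/* the key already is the ID */
	}

	static bool id_equal_fn(const void *k1, const void *k2, void *ctx)
	{
		return k1 == k2;		/* IDs compare as pointer values */
	}

	static void *id_as_key(uint32_t id)
	{
		return (void *)(uintptr_t)id;
	}

	int pinned_path_example(void)
	{
		struct hashmap *paths;
		struct hashmap_entry *entry;

		paths = hashmap__new(id_hash_fn, id_equal_fn, NULL);
		if (!paths)
			return -1;

		/* one object ID may map to several pinned paths, hence append */
		hashmap__append(paths, id_as_key(42), (void *)"/sys/fs/bpf/prog_a");
		hashmap__append(paths, id_as_key(42), (void *)"/sys/fs/bpf/prog_b");

		hashmap__for_each_key_entry(paths, entry, id_as_key(42))
			printf("pinned %s\n", (const char *)entry->value);

		hashmap__free(paths);
		return 0;
	}

Because only the values are heap-allocated in this scheme, teardown just has to free each entry->value before hashmap__free(), which is exactly what the reworked delete_pinned_obj_table() above does.
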
@@ -344,7 +352,10 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); - delete_obj_refs_table(&refs_table); + delete_obj_refs_table(refs_table); + + if (show_pinned) + delete_pinned_obj_table(link_table); return errno == ENOENT ? 0 : -1; } diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 02eaaf065f6515..28237d7cef67fa 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -10,8 +10,9 @@ #include #include -#include #include +#include +#include #include "main.h" @@ -31,10 +32,7 @@ bool verifier_logs; bool relaxed_maps; bool use_loader; struct btf *base_btf; -struct pinned_obj_table prog_table; -struct pinned_obj_table map_table; -struct pinned_obj_table link_table; -struct obj_refs_table refs_table; +struct hashmap *refs_table; static void __noreturn clean_and_exit(int i) { @@ -409,10 +407,6 @@ int main(int argc, char **argv) block_mount = false; bin_name = argv[0]; - hash_init(prog_table.table); - hash_init(map_table.table); - hash_init(link_table.table); - opterr = 0; while ((opt = getopt_long(argc, argv, "VhpjfLmndB:", options, NULL)) >= 0) { @@ -479,11 +473,6 @@ int main(int argc, char **argv) if (json_output) jsonw_destroy(&json_wtr); - if (show_pinned) { - delete_pinned_obj_table(&prog_table); - delete_pinned_obj_table(&map_table); - delete_pinned_obj_table(&link_table); - } btf__free(base_btf); return ret; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 90caa42aac4cf3..383835c2604d05 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -11,9 +11,9 @@ #include #include #include -#include #include +#include #include #include "json_writer.h" @@ -91,10 +91,7 @@ extern bool verifier_logs; extern bool relaxed_maps; extern bool use_loader; extern struct btf *base_btf; -extern struct pinned_obj_table prog_table; -extern struct pinned_obj_table map_table; -extern struct pinned_obj_table link_table; -extern struct obj_refs_table refs_table; +extern struct hashmap *refs_table; void __printf(1, 2) p_err(const char *fmt, ...); void __printf(1, 2) p_info(const char *fmt, ...); @@ -108,28 +105,12 @@ void set_max_rlimit(void); int mount_tracefs(const char *target); -struct pinned_obj_table { - DECLARE_HASHTABLE(table, 16); -}; - -struct pinned_obj { - __u32 id; - char *path; - struct hlist_node hash; -}; - -struct obj_refs_table { - DECLARE_HASHTABLE(table, 16); -}; - struct obj_ref { int pid; char comm[16]; }; struct obj_refs { - struct hlist_node node; - __u32 id; int ref_cnt; struct obj_ref *refs; }; @@ -137,15 +118,15 @@ struct obj_refs { struct btf; struct bpf_line_info; -int build_pinned_obj_table(struct pinned_obj_table *table, +int build_pinned_obj_table(struct hashmap *table, enum bpf_obj_type type); -void delete_pinned_obj_table(struct pinned_obj_table *tab); -__weak int build_obj_refs_table(struct obj_refs_table *table, +void delete_pinned_obj_table(struct hashmap *table); +__weak int build_obj_refs_table(struct hashmap **table, enum bpf_obj_type type); -__weak void delete_obj_refs_table(struct obj_refs_table *table); -__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, +__weak void delete_obj_refs_table(struct hashmap *table); +__weak void emit_obj_refs_json(struct hashmap *table, __u32 id, json_writer_t *json_wtr); -__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, +__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id, const char *prefix); void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); void 
print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); @@ -259,4 +240,23 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind, int print_all_levels(__maybe_unused enum libbpf_print_level level, const char *format, va_list args); + +size_t hash_fn_for_key_as_id(const void *key, void *ctx); +bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx); + +static inline void *u32_as_hash_field(__u32 x) +{ + return (void *)(uintptr_t)x; +} + +static inline __u32 hash_field_as_u32(const void *x) +{ + return (__u32)(uintptr_t)x; +} + +static inline bool hashmap__empty(struct hashmap *map) +{ + return map ? hashmap__size(map) == 0 : true; +} + #endif diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 407071d54ab1ce..cae1f11192969a 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -17,6 +17,7 @@ #include #include +#include #include "json_writer.h" #include "main.h" @@ -56,6 +57,8 @@ const char * const map_type_name[] = { const size_t map_type_name_size = ARRAY_SIZE(map_type_name); +static struct hashmap *map_table; + static bool map_is_per_cpu(__u32 type) { return type == BPF_MAP_TYPE_PERCPU_HASH || @@ -535,19 +538,18 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) if (info->btf_id) jsonw_int_field(json_wtr, "btf_id", info->btf_id); - if (!hash_empty(map_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(map_table)) { + struct hashmap_entry *entry; jsonw_name(json_wtr, "pinned"); jsonw_start_array(json_wtr); - hash_for_each_possible(map_table.table, obj, hash, info->id) { - if (obj->id == info->id) - jsonw_string(json_wtr, obj->path); - } + hashmap__for_each_key_entry(map_table, entry, + u32_as_hash_field(info->id)) + jsonw_string(json_wtr, entry->value); jsonw_end_array(json_wtr); } - emit_obj_refs_json(&refs_table, info->id, json_wtr); + emit_obj_refs_json(refs_table, info->id, json_wtr); jsonw_end_object(json_wtr); @@ -610,13 +612,12 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) } close(fd); - if (!hash_empty(map_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(map_table)) { + struct hashmap_entry *entry; - hash_for_each_possible(map_table.table, obj, hash, info->id) { - if (obj->id == info->id) - printf("\n\tpinned %s", obj->path); - } + hashmap__for_each_key_entry(map_table, entry, + u32_as_hash_field(info->id)) + printf("\n\tpinned %s", (char *)entry->value); } printf("\n"); @@ -636,7 +637,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) if (frozen) printf("%sfrozen", info->btf_id ? " " : ""); - emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); printf("\n"); return 0; @@ -694,8 +695,15 @@ static int do_show(int argc, char **argv) int err; int fd; - if (show_pinned) - build_pinned_obj_table(&map_table, BPF_OBJ_MAP); + if (show_pinned) { + map_table = hashmap__new(hash_fn_for_key_as_id, + equal_fn_for_key_as_id, NULL); + if (!map_table) { + p_err("failed to create hashmap for pinned paths"); + return -1; + } + build_pinned_obj_table(map_table, BPF_OBJ_MAP); + } build_obj_refs_table(&refs_table, BPF_OBJ_MAP); if (argc == 2) @@ -740,7 +748,10 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); - delete_obj_refs_table(&refs_table); + delete_obj_refs_table(refs_table); + + if (show_pinned) + delete_pinned_obj_table(map_table); return errno == ENOENT ? 
0 : -1; } diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index 825f29f93a57a9..b98ea702d2849a 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c @@ -22,7 +22,6 @@ #include #include -#include #include "main.h" diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 477e55d59c3483..56b598eee043a9 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -6,35 +6,37 @@ #include #include #include + #include +#include #include "main.h" #include "skeleton/pid_iter.h" #ifdef BPFTOOL_WITHOUT_SKELETONS -int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type) { return -ENOTSUP; } -void delete_obj_refs_table(struct obj_refs_table *table) {} -void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} -void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {} +void delete_obj_refs_table(struct hashmap *map) {} +void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {} +void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {} #else /* BPFTOOL_WITHOUT_SKELETONS */ #include "pid_iter.skel.h" -static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) +static void add_ref(struct hashmap *map, struct pid_iter_entry *e) { + struct hashmap_entry *entry; struct obj_refs *refs; struct obj_ref *ref; + int err, i; void *tmp; - int i; - hash_for_each_possible(table->table, refs, node, e->id) { - if (refs->id != e->id) - continue; + hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) { + refs = entry->value; for (i = 0; i < refs->ref_cnt; i++) { if (refs->refs[i].pid == e->pid) @@ -64,7 +66,6 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) return; } - refs->id = e->id; refs->refs = malloc(sizeof(*refs->refs)); if (!refs->refs) { free(refs); @@ -76,7 +77,11 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) ref->pid = e->pid; memcpy(ref->comm, e->comm, sizeof(ref->comm)); refs->ref_cnt = 1; - hash_add(table->table, &refs->node, e->id); + + err = hashmap__append(map, u32_as_hash_field(e->id), refs); + if (err) + p_err("failed to append entry to hashmap for ID %u: %s", + e->id, strerror(errno)); } static int __printf(2, 0) @@ -87,7 +92,7 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level, return 0; } -int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type) { struct pid_iter_entry *e; char buf[4096 / sizeof(*e) * sizeof(*e)]; @@ -95,7 +100,11 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) int err, ret, fd = -1, i; libbpf_print_fn_t default_print; - hash_init(table->table); + *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL); + if (!*map) { + p_err("failed to create hashmap for PID references"); + return -1; + } set_max_rlimit(); skel = pid_iter_bpf__open(); @@ -151,7 +160,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) e = (void *)buf; for (i = 0; i < ret; i++, e++) { - add_ref(table, e); + add_ref(*map, e); } } err = 0; @@ -162,39 +171,44 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) return err; } -void delete_obj_refs_table(struct obj_refs_table *table) +void delete_obj_refs_table(struct 
hashmap *map) { - struct obj_refs *refs; - struct hlist_node *tmp; - unsigned int bkt; + struct hashmap_entry *entry; + size_t bkt; + + if (!map) + return; + + hashmap__for_each_entry(map, entry, bkt) { + struct obj_refs *refs = entry->value; - hash_for_each_safe(table->table, bkt, tmp, refs, node) { - hash_del(&refs->node); free(refs->refs); free(refs); } + + hashmap__free(map); } -void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, +void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) { - struct obj_refs *refs; - struct obj_ref *ref; - int i; + struct hashmap_entry *entry; - if (hash_empty(table->table)) + if (hashmap__empty(map)) return; - hash_for_each_possible(table->table, refs, node, id) { - if (refs->id != id) - continue; + hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) { + struct obj_refs *refs = entry->value; + int i; + if (refs->ref_cnt == 0) break; jsonw_name(json_writer, "pids"); jsonw_start_array(json_writer); for (i = 0; i < refs->ref_cnt; i++) { - ref = &refs->refs[i]; + struct obj_ref *ref = &refs->refs[i]; + jsonw_start_object(json_writer); jsonw_int_field(json_writer, "pid", ref->pid); jsonw_string_field(json_writer, "comm", ref->comm); @@ -205,24 +219,24 @@ void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, } } -void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) +void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) { - struct obj_refs *refs; - struct obj_ref *ref; - int i; + struct hashmap_entry *entry; - if (hash_empty(table->table)) + if (hashmap__empty(map)) return; - hash_for_each_possible(table->table, refs, node, id) { - if (refs->id != id) - continue; + hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) { + struct obj_refs *refs = entry->value; + int i; + if (refs->ref_cnt == 0) break; printf("%s", prefix); for (i = 0; i < refs->ref_cnt; i++) { - ref = &refs->refs[i]; + struct obj_ref *ref = &refs->refs[i]; + printf("%s%s(%d)", i == 0 ? 
"" : ", ", ref->comm, ref->pid); } break; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 9c3e343b7d872a..515d2295260264 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -24,8 +24,8 @@ #include #include +#include #include -#include #include #include "cfg.h" @@ -85,6 +85,8 @@ static const char * const attach_type_strings[] = { [__MAX_BPF_ATTACH_TYPE] = NULL, }; +static struct hashmap *prog_table; + static enum bpf_attach_type parse_attach_type(const char *str) { enum bpf_attach_type type; @@ -308,18 +310,12 @@ static void show_prog_metadata(int fd, __u32 num_maps) if (printed_header) jsonw_end_object(json_wtr); } else { - json_writer_t *btf_wtr = jsonw_new(stdout); + json_writer_t *btf_wtr; struct btf_dumper d = { .btf = btf, - .jw = btf_wtr, .is_plain_text = true, }; - if (!btf_wtr) { - p_err("jsonw alloc failed"); - goto out_free; - } - for (i = 0; i < vlen; i++, vsi++) { t_var = btf__type_by_id(btf, vsi->type); name = btf__name_by_offset(btf, t_var->name_off); @@ -329,6 +325,14 @@ static void show_prog_metadata(int fd, __u32 num_maps) if (!printed_header) { printf("\tmetadata:"); + + btf_wtr = jsonw_new(stdout); + if (!btf_wtr) { + p_err("jsonw alloc failed"); + goto out_free; + } + d.jw = btf_wtr, + printed_header = true; } @@ -415,19 +419,18 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) if (info->btf_id) jsonw_int_field(json_wtr, "btf_id", info->btf_id); - if (!hash_empty(prog_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(prog_table)) { + struct hashmap_entry *entry; jsonw_name(json_wtr, "pinned"); jsonw_start_array(json_wtr); - hash_for_each_possible(prog_table.table, obj, hash, info->id) { - if (obj->id == info->id) - jsonw_string(json_wtr, obj->path); - } + hashmap__for_each_key_entry(prog_table, entry, + u32_as_hash_field(info->id)) + jsonw_string(json_wtr, entry->value); jsonw_end_array(json_wtr); } - emit_obj_refs_json(&refs_table, info->id, json_wtr); + emit_obj_refs_json(refs_table, info->id, json_wtr); show_prog_metadata(fd, info->nr_map_ids); @@ -487,19 +490,18 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) if (info->nr_map_ids) show_prog_maps(fd, info->nr_map_ids); - if (!hash_empty(prog_table.table)) { - struct pinned_obj *obj; + if (!hashmap__empty(prog_table)) { + struct hashmap_entry *entry; - hash_for_each_possible(prog_table.table, obj, hash, info->id) { - if (obj->id == info->id) - printf("\n\tpinned %s", obj->path); - } + hashmap__for_each_key_entry(prog_table, entry, + u32_as_hash_field(info->id)) + printf("\n\tpinned %s", (char *)entry->value); } if (info->btf_id) printf("\n\tbtf_id %d", info->btf_id); - emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); printf("\n"); @@ -566,8 +568,15 @@ static int do_show(int argc, char **argv) int err; int fd; - if (show_pinned) - build_pinned_obj_table(&prog_table, BPF_OBJ_PROG); + if (show_pinned) { + prog_table = hashmap__new(hash_fn_for_key_as_id, + equal_fn_for_key_as_id, NULL); + if (!prog_table) { + p_err("failed to create hashmap for pinned paths"); + return -1; + } + build_pinned_obj_table(prog_table, BPF_OBJ_PROG); + } build_obj_refs_table(&refs_table, BPF_OBJ_PROG); if (argc == 2) @@ -610,7 +619,10 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); - delete_obj_refs_table(&refs_table); + delete_obj_refs_table(refs_table); + + if (show_pinned) + delete_pinned_obj_table(prog_table); return err; } @@ -1601,7 +1613,7 
@@ static int load_with_options(int argc, char **argv, bool first_prog_only) goto err_close_obj; if (first_prog_only) { - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (!prog) { p_err("object file doesn't contain any bpf program"); goto err_close_obj; diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index bb9fa8de7e625a..751643f860b291 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -26,27 +26,33 @@ LIBBPF_SRC := $(srctree)/tools/lib/bpf/ SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ BPFOBJ := $(OUTPUT)/libbpf/libbpf.a +LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/ SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a +LIBBPF_DESTDIR := $(LIBBPF_OUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include + BINARY := $(OUTPUT)/resolve_btfids BINARY_IN := $(BINARY)-in.o all: $(BINARY) -$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd: +$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT): $(call msg,MKDIR,,$@) $(Q)mkdir -p $(@) $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) -$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf - $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT) + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= \ + $(abspath $@) install_headers CFLAGS := -g \ -I$(srctree)/tools/include \ -I$(srctree)/tools/include/uapi \ - -I$(LIBBPF_SRC) \ + -I$(LIBBPF_INCLUDE) \ -I$(SUBCMD_SRC) LIBS = -lelf -lz @@ -54,7 +60,7 @@ LIBS = -lelf -lz export srctree OUTPUT CFLAGS Q include $(srctree)/tools/build/Makefile.include -$(BINARY_IN): fixdep FORCE | $(OUTPUT) +$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT) $(Q)$(MAKE) $(build)=resolve_btfids $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) @@ -64,7 +70,8 @@ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) clean_objects := $(wildcard $(OUTPUT)/*.o \ $(OUTPUT)/.*.o.cmd \ $(OUTPUT)/.*.o.d \ - $(OUTPUT)/libbpf \ + $(LIBBPF_OUT) \ + $(LIBBPF_DESTDIR) \ $(OUTPUT)/libsubcmd \ $(OUTPUT)/resolve_btfids) diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index de6365b53c9ca0..a59cb0ee609cd9 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -60,8 +60,8 @@ #include #include #include -#include -#include +#include +#include #include #define BTF_IDS_SECTION ".BTF_ids" @@ -89,6 +89,7 @@ struct btf_id { struct object { const char *path; const char *btf; + const char *base_btf_path; struct { int fd; @@ -477,25 +478,36 @@ static int symbols_resolve(struct object *obj) int nr_structs = obj->nr_structs; int nr_unions = obj->nr_unions; int nr_funcs = obj->nr_funcs; + struct btf *base_btf = NULL; int err, type_id; struct btf *btf; __u32 nr_types; - btf = btf__parse(obj->btf ?: obj->path, NULL); + if (obj->base_btf_path) { + base_btf = btf__parse(obj->base_btf_path, NULL); + err = libbpf_get_error(base_btf); + if (err) { + pr_err("FAILED: load base BTF from %s: %s\n", + obj->base_btf_path, strerror(-err)); + return -1; + } + } + + btf = btf__parse_split(obj->btf ?: obj->path, base_btf); err = libbpf_get_error(btf); if (err) { pr_err("FAILED: load BTF from %s: %s\n", obj->btf ?: obj->path, strerror(-err)); - return -1; + goto out; } err = -1; - nr_types = btf__get_nr_types(btf); + nr_types = btf__type_cnt(btf); /* * Iterate all the BTF 
types and search for collected symbol IDs. */ - for (type_id = 1; type_id <= nr_types; type_id++) { + for (type_id = 1; type_id < nr_types; type_id++) { const struct btf_type *type; struct rb_root *root; struct btf_id *id; @@ -545,6 +557,7 @@ static int symbols_resolve(struct object *obj) err = 0; out: + btf__free(base_btf); btf__free(btf); return err; } @@ -678,7 +691,6 @@ static const char * const resolve_btfids_usage[] = { int main(int argc, const char **argv) { - bool no_fail = false; struct object obj = { .efile = { .idlist_shndx = -1, @@ -695,8 +707,8 @@ int main(int argc, const char **argv) "be more verbose (show errors, etc)"), OPT_STRING(0, "btf", &obj.btf, "BTF data", "BTF data"), - OPT_BOOLEAN(0, "no-fail", &no_fail, - "do not fail if " BTF_IDS_SECTION " section is not found"), + OPT_STRING('b', "btf_base", &obj.base_btf_path, "file", + "path of file providing base BTF"), OPT_END() }; int err = -1; @@ -717,10 +729,8 @@ int main(int argc, const char **argv) */ if (obj.efile.idlist_shndx == -1 || obj.efile.symbols_shndx == -1) { - if (no_fail) - return 0; - pr_err("FAILED to find needed sections\n"); - return -1; + pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n"); + return 0; } if (symbols_collect(&obj)) diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index 3818ec511fd25d..bbd1150578f7ae 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -9,9 +9,9 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL) LIBBPF_SRC := $(abspath ../../lib/bpf) BPFOBJ_OUTPUT := $(OUTPUT)libbpf/ BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a -BPF_INCLUDE := $(BPFOBJ_OUTPUT) -INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \ - -I$(abspath ../../include/uapi) +BPF_DESTDIR := $(BPFOBJ_OUTPUT) +BPF_INCLUDE := $(BPF_DESTDIR)/include +INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) CFLAGS := -g -Wall # Try to detect best kernel BTF source @@ -33,7 +33,7 @@ endif .DELETE_ON_ERROR: -.PHONY: all clean runqslower +.PHONY: all clean runqslower libbpf_hdrs all: runqslower runqslower: $(OUTPUT)/runqslower @@ -46,13 +46,15 @@ clean: $(Q)$(RM) $(OUTPUT)runqslower $(Q)$(RM) -r .output +libbpf_hdrs: $(BPFOBJ) + $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ - $(OUTPUT)/runqslower.bpf.o + $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs -$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h +$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs $(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL) $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@ @@ -81,8 +83,10 @@ else endif $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT) - $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@ + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \ + DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers -$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT) +$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \ - CC=$(HOSTCC) LD=$(HOSTLD) + LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \ + LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 791f31dd0abee7..ba5af15e25f5c1 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -906,6 +906,7 @@ enum bpf_map_type { BPF_MAP_TYPE_RINGBUF, 
BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, + BPF_MAP_TYPE_BLOOM_FILTER, }; /* Note that tracing related programs such as @@ -1274,6 +1275,13 @@ union bpf_attr { * struct stored as the * map value */ + /* Any per-map-type extra fields + * + * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the + * number of hash functions (if 0, the bloom filter will default + * to using 5 hash functions). + */ + __u64 map_extra; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -1629,7 +1637,7 @@ union bpf_attr { * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that - * all programs run with preemption disabled, which means that the + * all programs run with migration disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return @@ -4046,7 +4054,7 @@ union bpf_attr { * arguments. The *data* are a **u64** array and corresponding format string * values are stored in the array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* array. - * The *data_len* is the size of *data* in bytes. + * The *data_len* is the size of *data* in bytes - must be a multiple of 8. * * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. * Reading kernel memory may fail due to either invalid address or @@ -4751,7 +4759,8 @@ union bpf_attr { * Each format specifier in **fmt** corresponds to one u64 element * in the **data** array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* - * array. The *data_len* is the size of *data* in bytes. + * array. The *data_len* is the size of *data* in bytes - must be + * a multiple of 8. * * Formats **%s** and **%p{i,I}{4,6}** require to read kernel * memory. Reading kernel memory may fail due to either invalid @@ -4877,6 +4886,58 @@ union bpf_attr { * Get the struct pt_regs associated with **task**. * Return * A pointer to struct pt_regs. + * + * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) + * Description + * Get branch trace from hardware engines like Intel LBR. The + * hardware engine is stopped shortly after the helper is + * called. Therefore, the user need to filter branch entries + * based on the actual use case. To capture branch trace + * before the trigger point of the BPF program, the helper + * should be called at the beginning of the BPF program. + * + * The data is stored as struct perf_branch_entry into output + * buffer *entries*. *size* is the size of *entries* in bytes. + * *flags* is reserved for now and must be zero. + * + * Return + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-ENOENT** if architecture does not support branch records. + * + * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * Description + * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 + * to format and can handle more format args as a result. + * + * Arguments are to be used as in **bpf_seq_printf**\ () helper. + * Return + * The number of bytes written to the buffer, or a negative error + * in case of failure. + * + * struct unix_sock *bpf_skc_to_unix_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *unix_sock* pointer. + * Return + * *sk* if casting is valid, or **NULL** otherwise. 
+ * + * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res) + * Description + * Get the address of a kernel symbol, returned in *res*. *res* is + * set to 0 if the symbol is not found. + * Return + * On success, zero. On error, a negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-EINVAL** if string *name* is not the same size as *name_sz*. + * + * **-ENOENT** if symbol is not found. + * + * **-EPERM** if caller does not have permission to obtain kernel address. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5055,6 +5116,10 @@ union bpf_attr { FN(get_func_ip), \ FN(get_attach_cookie), \ FN(task_pt_regs), \ + FN(get_branch_snapshot), \ + FN(trace_vprintk), \ + FN(skc_to_unix_sock), \ + FN(kallsyms_lookup_name), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5284,6 +5349,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; + __u32 :32; /* Padding, future use. */ + __u64 hwtstamp; }; struct bpf_tunnel_key { @@ -5577,6 +5644,7 @@ struct bpf_prog_info { __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; + __u32 verified_insns; } __attribute__((aligned(8))); struct bpf_map_info { @@ -5594,6 +5662,8 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; + __u32 :32; /* alignment pad */ + __u64 map_extra; } __attribute__((aligned(8))); struct bpf_btf_info { diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index d27b1708efe919..deb12f755f0fa4 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO and VAR. + * FUNC, FUNC_PROTO, VAR and DECL_TAG. * "type" is a type_id referring to another type. 
*/ union { @@ -56,25 +56,29 @@ struct btf_type { #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) -#define BTF_KIND_UNKN 0 /* Unknown */ -#define BTF_KIND_INT 1 /* Integer */ -#define BTF_KIND_PTR 2 /* Pointer */ -#define BTF_KIND_ARRAY 3 /* Array */ -#define BTF_KIND_STRUCT 4 /* Struct */ -#define BTF_KIND_UNION 5 /* Union */ -#define BTF_KIND_ENUM 6 /* Enumeration */ -#define BTF_KIND_FWD 7 /* Forward */ -#define BTF_KIND_TYPEDEF 8 /* Typedef */ -#define BTF_KIND_VOLATILE 9 /* Volatile */ -#define BTF_KIND_CONST 10 /* Const */ -#define BTF_KIND_RESTRICT 11 /* Restrict */ -#define BTF_KIND_FUNC 12 /* Function */ -#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ -#define BTF_KIND_VAR 14 /* Variable */ -#define BTF_KIND_DATASEC 15 /* Section */ -#define BTF_KIND_FLOAT 16 /* Floating point */ -#define BTF_KIND_MAX BTF_KIND_FLOAT -#define NR_BTF_KINDS (BTF_KIND_MAX + 1) +enum { + BTF_KIND_UNKN = 0, /* Unknown */ + BTF_KIND_INT = 1, /* Integer */ + BTF_KIND_PTR = 2, /* Pointer */ + BTF_KIND_ARRAY = 3, /* Array */ + BTF_KIND_STRUCT = 4, /* Struct */ + BTF_KIND_UNION = 5, /* Union */ + BTF_KIND_ENUM = 6, /* Enumeration */ + BTF_KIND_FWD = 7, /* Forward */ + BTF_KIND_TYPEDEF = 8, /* Typedef */ + BTF_KIND_VOLATILE = 9, /* Volatile */ + BTF_KIND_CONST = 10, /* Const */ + BTF_KIND_RESTRICT = 11, /* Restrict */ + BTF_KIND_FUNC = 12, /* Function */ + BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ + BTF_KIND_VAR = 14, /* Variable */ + BTF_KIND_DATASEC = 15, /* Section */ + BTF_KIND_FLOAT = 16, /* Floating point */ + BTF_KIND_DECL_TAG = 17, /* Decl Tag */ + + NR_BTF_KINDS, + BTF_KIND_MAX = NR_BTF_KINDS - 1, +}; /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. @@ -170,4 +174,15 @@ struct btf_var_secinfo { __u32 size; }; +/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe + * additional information related to the tag applied location. + * If component_idx == -1, the tag is applied to a struct, union, + * variable or function. Otherwise, it is applied to a struct/union + * member or a func argument, and component_idx indicates which member + * or argument (0 ... vlen-1). + */ +struct btf_decl_tag { + __s32 component_idx; +}; + #endif /* _UAPI__LINUX_BTF_H__ */ diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore index 5d4cfac671d540..0da84cb9e66d0e 100644 --- a/tools/lib/bpf/.gitignore +++ b/tools/lib/bpf/.gitignore @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -libbpf_version.h libbpf.pc libbpf.so.* TAGS diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 74c3b73a5fbe89..b393b5e8238042 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -8,7 +8,8 @@ VERSION_SCRIPT := libbpf.map LIBBPF_VERSION := $(shell \ grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \ sort -rV | head -n1 | cut -d'_' -f2) -LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION))) +LIBBPF_MAJOR_VERSION := $(word 1,$(subst ., ,$(LIBBPF_VERSION))) +LIBBPF_MINOR_VERSION := $(word 2,$(subst ., ,$(LIBBPF_VERSION))) MAKEFLAGS += --no-print-directory @@ -59,7 +60,8 @@ ifndef VERBOSE VERBOSE = 0 endif -INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi +INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.) 
\ + -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi export prefix libdir src obj @@ -112,6 +114,7 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/ BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h +BPF_GENERATED := $(BPF_HELPER_DEFS) LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) @@ -136,25 +139,19 @@ all: fixdep all_cmd: $(CMD_TARGETS) check -$(BPF_IN_SHARED): force $(BPF_HELPER_DEFS) +$(BPF_IN_SHARED): force $(BPF_GENERATED) @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true @(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \ (diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true - @(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \ - (diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \ - echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true - @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \ - (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \ - echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \ (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" -$(BPF_IN_STATIC): force $(BPF_HELPER_DEFS) +$(BPF_IN_STATIC): force $(BPF_GENERATED) $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h @@ -179,7 +176,7 @@ $(OUTPUT)libbpf.pc: -e "s|@VERSION@|$(LIBBPF_VERSION)|" \ < libbpf.pc.template > $@ -check: check_abi +check: check_abi check_version check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \ @@ -205,6 +202,21 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) exit 1; \ fi +HDR_MAJ_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3) +HDR_MIN_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3) + +check_version: $(VERSION_SCRIPT) libbpf_version.h + @if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \ + echo "Error: libbpf major version mismatch detected: " \ + "'$(HDR_MAJ_VERSION)' != '$(LIBBPF_MAJOR_VERSION)'" >&2; \ + exit 1; \ + fi + @if [ "$(HDR_MIN_VERSION)" != "$(LIBBPF_MINOR_VERSION)" ]; then \ + echo "Error: libbpf minor version mismatch 
detected: " \ + "'$(HDR_MIN_VERSION)' != '$(LIBBPF_MINOR_VERSION)'" >&2; \ + exit 1; \ + fi + define do_install_mkdir if [ ! -d '$(DESTDIR_SQ)$1' ]; then \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \ @@ -223,14 +235,24 @@ install_lib: all_cmd $(call do_install_mkdir,$(libdir_SQ)); \ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) -INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \ - bpf_helpers.h $(BPF_HELPER_DEFS) bpf_tracing.h \ - bpf_endian.h bpf_core_read.h skel_internal.h +SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \ + bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \ + skel_internal.h libbpf_version.h +GEN_HDRS := $(BPF_GENERATED) + +INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf +INSTALL_SRC_HDRS := $(addprefix $(INSTALL_PFX)/, $(SRC_HDRS)) +INSTALL_GEN_HDRS := $(addprefix $(INSTALL_PFX)/, $(notdir $(GEN_HDRS))) + +$(INSTALL_SRC_HDRS): $(INSTALL_PFX)/%.h: %.h + $(call QUIET_INSTALL, $@) \ + $(call do_install,$<,$(prefix)/include/bpf,644) + +$(INSTALL_GEN_HDRS): $(INSTALL_PFX)/%.h: $(OUTPUT)%.h + $(call QUIET_INSTALL, $@) \ + $(call do_install,$<,$(prefix)/include/bpf,644) -install_headers: $(BPF_HELPER_DEFS) - $(call QUIET_INSTALL, headers) \ - $(foreach hdr,$(INSTALL_HEADERS), \ - $(call do_install,$(hdr),$(prefix)/include/bpf,644);) +install_headers: $(BPF_GENERATED) $(INSTALL_SRC_HDRS) $(INSTALL_GEN_HDRS) install_pkgconfig: $(PC_FILE) $(call QUIET_INSTALL, $(PC_FILE)) \ @@ -240,12 +262,12 @@ install: install_lib install_pkgconfig install_headers clean: $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \ - *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \ + *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \ $(SHARED_OBJDIR) $(STATIC_OBJDIR) \ $(addprefix $(OUTPUT), \ *.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc) -PHONY += force cscope tags +PHONY += force cscope tags check check_abi check_version force: cscope: diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 2401fad090c526..c09cbb868c9f12 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -65,19 +65,28 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, return syscall(__NR_bpf, cmd, attr, size); } +static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, + unsigned int size) +{ + int fd; + + fd = sys_bpf(cmd, attr, size); + return ensure_good_fd(fd); +} + static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) { int retries = 5; int fd; do { - fd = sys_bpf(BPF_PROG_LOAD, attr, size); + fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); } while (fd < 0 && errno == EAGAIN && retries-- > 0); return fd; } -int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) +int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr) { union bpf_attr attr; int fd; @@ -102,11 +111,36 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) create_attr->btf_vmlinux_value_type_id; else attr.inner_map_fd = create_attr->inner_map_fd; + attr.map_extra = create_attr->map_extra; - fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); return libbpf_err_errno(fd); } +int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) +{ + struct bpf_create_map_params p = {}; + + p.map_type = create_attr->map_type; + p.key_size = create_attr->key_size; + p.value_size = create_attr->value_size; + p.max_entries = create_attr->max_entries; + p.map_flags = create_attr->map_flags; + p.name = create_attr->name; + p.numa_node = 
create_attr->numa_node; + p.btf_fd = create_attr->btf_fd; + p.btf_key_type_id = create_attr->btf_key_type_id; + p.btf_value_type_id = create_attr->btf_value_type_id; + p.map_ifindex = create_attr->map_ifindex; + if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS) + p.btf_vmlinux_value_type_id = + create_attr->btf_vmlinux_value_type_id; + else + p.inner_map_fd = create_attr->inner_map_fd; + + return libbpf__bpf_create_map_xattr(&p); +} + int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node) @@ -181,7 +215,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, attr.numa_node = node; } - fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -264,6 +298,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) attr.line_info_rec_size = load_attr->line_info_rec_size; attr.line_info_cnt = load_attr->line_info_cnt; attr.line_info = ptr_to_u64(load_attr->line_info); + attr.fd_array = ptr_to_u64(load_attr->fd_array); if (load_attr->name) memcpy(attr.prog_name, load_attr->name, @@ -608,7 +643,7 @@ int bpf_obj_get(const char *pathname) memset(&attr, 0, sizeof(attr)); attr.pathname = ptr_to_u64((void *)pathname); - fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -719,7 +754,7 @@ int bpf_link_create(int prog_fd, int target_fd, break; } proceed: - fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -762,7 +797,7 @@ int bpf_iter_create(int link_fd) memset(&attr, 0, sizeof(attr)); attr.iter_create.link_fd = link_fd; - fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -920,7 +955,7 @@ int bpf_prog_get_fd_by_id(__u32 id) memset(&attr, 0, sizeof(attr)); attr.prog_id = id; - fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -932,7 +967,7 @@ int bpf_map_get_fd_by_id(__u32 id) memset(&attr, 0, sizeof(attr)); attr.map_id = id; - fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -944,7 +979,7 @@ int bpf_btf_get_fd_by_id(__u32 id) memset(&attr, 0, sizeof(attr)); attr.btf_id = id; - fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -956,7 +991,7 @@ int bpf_link_get_fd_by_id(__u32 id) memset(&attr, 0, sizeof(attr)); attr.link_id = id; - fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -987,7 +1022,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) attr.raw_tracepoint.name = ptr_to_u64(name); attr.raw_tracepoint.prog_fd = prog_fd; - fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); return libbpf_err_errno(fd); } @@ -1007,7 +1042,7 @@ int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_s attr.btf_log_buf = ptr_to_u64(log_buf); } - fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, 
sizeof(attr)); if (fd < 0 && !do_log && log_buf && log_buf_size) { do_log = true; @@ -1049,7 +1084,7 @@ int bpf_enable_stats(enum bpf_stats_type type) memset(&attr, 0, sizeof(attr)); attr.enable_stats.type = type; - fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr)); + fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr)); return libbpf_err_errno(fd); } diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index 09ebe3db5f2f8e..e4aa9996a55011 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -40,7 +40,7 @@ enum bpf_enum_value_kind { #define __CORE_RELO(src, field, info) \ __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ bpf_probe_read_kernel( \ (void *)dst, \ diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index 615400391e5780..d26e5472fe5016 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -7,6 +7,21 @@ struct ksym_relo_desc { const char *name; int kind; int insn_idx; + bool is_weak; + bool is_typeless; +}; + +struct ksym_desc { + const char *name; + int ref; + int kind; + union { + /* used for kfunc */ + int off; + /* used for typeless ksym */ + bool typeless; + }; + int insn; }; struct bpf_gen { @@ -24,18 +39,23 @@ struct bpf_gen { int relo_cnt; char attach_target[128]; int attach_kind; + struct ksym_desc *ksyms; + __u32 nr_ksyms; + int fd_array; + int nr_fd_array; }; void bpf_gen__init(struct bpf_gen *gen, int log_level); int bpf_gen__finish(struct bpf_gen *gen); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); -void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx); +void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); struct bpf_prog_load_params; void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx); void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size); void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); -void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx); +void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, + bool is_typeless, int kind, int insn_idx); #endif diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index b9987c3efa3c43..963b1060d9441f 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -14,14 +14,6 @@ #define __type(name, val) typeof(val) *name #define __array(name, val) typeof(val) *name[] -/* Helper macro to print out debug messages */ -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - /* * Helper macro to place programs, maps, license in * different sections in elf_bpf file. Section names @@ -224,4 +216,47 @@ enum libbpf_tristate { ___param, sizeof(___param)); \ }) +#ifdef BPF_NO_GLOBAL_DATA +#define BPF_PRINTK_FMT_MOD +#else +#define BPF_PRINTK_FMT_MOD static const +#endif + +#define __bpf_printk(fmt, ...) 
\ +({ \ + BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +/* + * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments + * instead of an array of u64. + */ +#define __bpf_vprintk(fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_trace_vprintk(___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ +}) + +/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args + * Otherwise use __bpf_vprintk + */ +#define ___bpf_pick_printk(...) \ + ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\ + __bpf_printk /*1*/, __bpf_printk /*0*/) + +/* Helper macro to print out debug messages */ +#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) + #endif diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index d6bfbe009296c1..db05a593710560 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -24,6 +24,9 @@ #elif defined(__TARGET_ARCH_sparc) #define bpf_target_sparc #define bpf_target_defined +#elif defined(__TARGET_ARCH_riscv) + #define bpf_target_riscv + #define bpf_target_defined #else /* Fall back to what the compiler says */ @@ -48,6 +51,9 @@ #elif defined(__sparc__) #define bpf_target_sparc #define bpf_target_defined +#elif defined(__riscv) && __riscv_xlen == 64 + #define bpf_target_riscv + #define bpf_target_defined #endif /* no compiler target */ #endif @@ -288,6 +294,32 @@ struct pt_regs; #define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc) #endif +#elif defined(bpf_target_riscv) + +struct pt_regs; +#define PT_REGS_RV const volatile struct user_regs_struct +#define PT_REGS_PARM1(x) (((PT_REGS_RV *)(x))->a0) +#define PT_REGS_PARM2(x) (((PT_REGS_RV *)(x))->a1) +#define PT_REGS_PARM3(x) (((PT_REGS_RV *)(x))->a2) +#define PT_REGS_PARM4(x) (((PT_REGS_RV *)(x))->a3) +#define PT_REGS_PARM5(x) (((PT_REGS_RV *)(x))->a4) +#define PT_REGS_RET(x) (((PT_REGS_RV *)(x))->ra) +#define PT_REGS_FP(x) (((PT_REGS_RV *)(x))->s5) +#define PT_REGS_RC(x) (((PT_REGS_RV *)(x))->a5) +#define PT_REGS_SP(x) (((PT_REGS_RV *)(x))->sp) +#define PT_REGS_IP(x) (((PT_REGS_RV *)(x))->epc) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a0) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a1) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a2) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a3) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a4) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), ra) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), fp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a5) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), epc) + #endif #if defined(bpf_target_powerpc) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 77dc24d58302dc..7e4c5586bd8775 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -57,7 +57,7 @@ struct btf { * representation is broken up into three independently allocated * memory regions to be able to modify them independently. 
* raw_data is nulled out at that point, but can be later allocated - * and cached again if user calls btf__get_raw_data(), at which point + * and cached again if user calls btf__raw_data(), at which point * raw_data will contain a contiguous copy of header, types, and * strings: * @@ -189,12 +189,17 @@ int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_ return 0; } +static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt) +{ + return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), + btf->nr_types, BTF_MAX_NR_TYPES, add_cnt); +} + static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) { __u32 *p; - p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), - btf->nr_types, BTF_MAX_NR_TYPES, 1); + p = btf_add_type_offs_mem(btf, 1); if (!p) return -ENOMEM; @@ -231,17 +236,23 @@ static int btf_parse_hdr(struct btf *btf) } btf_bswap_hdr(hdr); } else if (hdr->magic != BTF_MAGIC) { - pr_debug("Invalid BTF magic:%x\n", hdr->magic); + pr_debug("Invalid BTF magic: %x\n", hdr->magic); + return -EINVAL; + } + + if (btf->raw_size < hdr->hdr_len) { + pr_debug("BTF header len %u larger than data size %u\n", + hdr->hdr_len, btf->raw_size); return -EINVAL; } - meta_left = btf->raw_size - sizeof(*hdr); - if (meta_left < hdr->str_off + hdr->str_len) { - pr_debug("Invalid BTF total size:%u\n", btf->raw_size); + meta_left = btf->raw_size - hdr->hdr_len; + if (meta_left < (long long)hdr->str_off + hdr->str_len) { + pr_debug("Invalid BTF total size: %u\n", btf->raw_size); return -EINVAL; } - if (hdr->type_off + hdr->type_len > hdr->str_off) { + if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) { pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n", hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); return -EINVAL; @@ -304,6 +315,8 @@ static int btf_type_size(const struct btf_type *t) return base_size + sizeof(struct btf_var); case BTF_KIND_DATASEC: return base_size + vlen * sizeof(struct btf_var_secinfo); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); return -EINVAL; @@ -376,6 +389,9 @@ static int btf_bswap_type_rest(struct btf_type *t) v->size = bswap_32(v->size); } return 0; + case BTF_KIND_DECL_TAG: + btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx); + return 0; default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); return -EINVAL; @@ -425,6 +441,11 @@ __u32 btf__get_nr_types(const struct btf *btf) return btf->start_id + btf->nr_types - 1; } +__u32 btf__type_cnt(const struct btf *btf) +{ + return btf->start_id + btf->nr_types; +} + const struct btf *btf__base_btf(const struct btf *btf) { return btf->base_btf; @@ -456,8 +477,8 @@ static int determine_ptr_size(const struct btf *btf) if (btf->base_btf && btf->base_btf->ptr_sz > 0) return btf->base_btf->ptr_sz; - n = btf__get_nr_types(btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (!btf_is_int(t)) continue; @@ -517,9 +538,9 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) static bool is_host_big_endian(void) { -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return false; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return true; #else # error "Unrecognized __BYTE_ORDER__" @@ -586,6 +607,7 @@ __s64 btf__resolve_size(const struct 
btf *btf, __u32 type_id) case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: type_id = t->type; break; case BTF_KIND_ARRAY: @@ -673,12 +695,12 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id) __s32 btf__find_by_name(const struct btf *btf, const char *type_name) { - __u32 i, nr_types = btf__get_nr_types(btf); + __u32 i, nr_types = btf__type_cnt(btf); if (!strcmp(type_name, "void")) return 0; - for (i = 1; i <= nr_types; i++) { + for (i = 1; i < nr_types; i++) { const struct btf_type *t = btf__type_by_id(btf, i); const char *name = btf__name_by_offset(btf, t->name_off); @@ -689,15 +711,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) return libbpf_err(-ENOENT); } -__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, - __u32 kind) +static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id, + const char *type_name, __u32 kind) { - __u32 i, nr_types = btf__get_nr_types(btf); + __u32 i, nr_types = btf__type_cnt(btf); if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) return 0; - for (i = 1; i <= nr_types; i++) { + for (i = start_id; i < nr_types; i++) { const struct btf_type *t = btf__type_by_id(btf, i); const char *name; @@ -711,6 +733,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, return libbpf_err(-ENOENT); } +__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, + __u32 kind) +{ + return btf_find_by_name_kind(btf, btf->start_id, type_name, kind); +} + +__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, + __u32 kind) +{ + return btf_find_by_name_kind(btf, 1, type_name, kind); +} + static bool btf_is_modifiable(const struct btf *btf) { return (void *)btf->hdr != btf->raw_data; @@ -758,7 +792,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) if (base_btf) { btf->base_btf = base_btf; - btf->start_id = btf__get_nr_types(base_btf) + 1; + btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; } @@ -808,7 +842,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) if (base_btf) { btf->base_btf = base_btf; - btf->start_id = btf__get_nr_types(base_btf) + 1; + btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; } @@ -863,7 +897,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } - fd = open(path, O_RDONLY); + fd = open(path, O_RDONLY | O_CLOEXEC); if (fd < 0) { err = -errno; pr_warn("failed to open %s: %s\n", path, strerror(errno)); @@ -1084,99 +1118,6 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf) return libbpf_ptr(btf_parse(path, base_btf, NULL)); } -static int compare_vsi_off(const void *_a, const void *_b) -{ - const struct btf_var_secinfo *a = _a; - const struct btf_var_secinfo *b = _b; - - return a->offset - b->offset; -} - -static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, - struct btf_type *t) -{ - __u32 size = 0, off = 0, i, vars = btf_vlen(t); - const char *name = btf__name_by_offset(btf, t->name_off); - const struct btf_type *t_var; - struct btf_var_secinfo *vsi; - const struct btf_var *var; - int ret; - - if (!name) { - pr_debug("No name found in string section for DATASEC kind.\n"); - return -ENOENT; - } - - /* .extern datasec size and var offsets were set correctly during - * extern collection step, so just skip straight to sorting variables - */ - if (t->size) - goto 
sort_vars; - - ret = bpf_object__section_size(obj, name, &size); - if (ret || !size || (t->size && t->size != size)) { - pr_debug("Invalid size for section %s: %u bytes\n", name, size); - return -ENOENT; - } - - t->size = size; - - for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { - t_var = btf__type_by_id(btf, vsi->type); - var = btf_var(t_var); - - if (!btf_is_var(t_var)) { - pr_debug("Non-VAR type seen in section %s\n", name); - return -EINVAL; - } - - if (var->linkage == BTF_VAR_STATIC) - continue; - - name = btf__name_by_offset(btf, t_var->name_off); - if (!name) { - pr_debug("No name found in string section for VAR kind\n"); - return -ENOENT; - } - - ret = bpf_object__variable_offset(obj, name, &off); - if (ret) { - pr_debug("No offset found in symbol table for VAR %s\n", - name); - return -ENOENT; - } - - vsi->offset = off; - } - -sort_vars: - qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); - return 0; -} - -int btf__finalize_data(struct bpf_object *obj, struct btf *btf) -{ - int err = 0; - __u32 i; - - for (i = 1; i <= btf->nr_types; i++) { - struct btf_type *t = btf_type_by_id(btf, i); - - /* Loader needs to fix up some of the things compiler - * couldn't get its hands on while emitting BTF. This - * is section size and global variable offset. We use - * the info from the ELF itself for this purpose. - */ - if (btf_is_datasec(t)) { - err = btf_fixup_datasec(obj, btf, t); - if (err) - break; - } - } - - return libbpf_err(err); -} - static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); int btf__load_into_kernel(struct btf *btf) @@ -1294,7 +1235,7 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi return NULL; } -const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size) +const void *btf__raw_data(const struct btf *btf_ro, __u32 *size) { struct btf *btf = (struct btf *)btf_ro; __u32 data_sz; @@ -1302,7 +1243,7 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size) data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); if (!data) - return errno = -ENOMEM, NULL; + return errno = ENOMEM, NULL; btf->raw_size = data_sz; if (btf->swapped_endian) @@ -1313,6 +1254,9 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size) return data; } +__attribute__((alias("btf__raw_data"))) +const void *btf__get_raw_data(const struct btf *btf, __u32 *size); + const char *btf__str_by_offset(const struct btf *btf, __u32 offset) { if (offset < btf->start_str_off) @@ -1685,6 +1629,111 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t return btf_commit_type(btf, sz); } +static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) +{ + struct btf *btf = ctx; + + if (!*type_id) /* nothing to do for VOID references */ + return 0; + + /* we haven't updated btf's type count yet, so + * btf->start_id + btf->nr_types - 1 is the type ID offset we should + * add to all newly added BTF types + */ + *type_id += btf->start_id + btf->nr_types - 1; + return 0; +} + +int btf__add_btf(struct btf *btf, const struct btf *src_btf) +{ + struct btf_pipe p = { .src = src_btf, .dst = btf }; + int data_sz, sz, cnt, i, err, old_strs_len; + __u32 *off; + void *t; + + /* appending split BTF isn't supported yet */ + if (src_btf->base_btf) + return libbpf_err(-ENOTSUP); + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + /* remember original strings section size if we have to roll back + 
* partial strings section changes + */ + old_strs_len = btf->hdr->str_len; + + data_sz = src_btf->hdr->type_len; + cnt = btf__type_cnt(src_btf) - 1; + + /* pre-allocate enough memory for new types */ + t = btf_add_type_mem(btf, data_sz); + if (!t) + return libbpf_err(-ENOMEM); + + /* pre-allocate enough memory for type offset index for new types */ + off = btf_add_type_offs_mem(btf, cnt); + if (!off) + return libbpf_err(-ENOMEM); + + /* bulk copy types data for all types from src_btf */ + memcpy(t, src_btf->types_data, data_sz); + + for (i = 0; i < cnt; i++) { + sz = btf_type_size(t); + if (sz < 0) { + /* unlikely, has to be corrupted src_btf */ + err = sz; + goto err_out; + } + + /* fill out type ID to type offset mapping for lookups by type ID */ + *off = t - btf->types_data; + + /* add, dedup, and remap strings referenced by this BTF type */ + err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); + if (err) + goto err_out; + + /* remap all type IDs referenced from this BTF type */ + err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf); + if (err) + goto err_out; + + /* go to next type data and type offset index entry */ + t += sz; + off++; + } + + /* Up until now any of the copied type data was effectively invisible, + * so if we exited early before this point due to error, BTF would be + * effectively unmodified. There would be extra internal memory + * pre-allocated, but it would not be available for querying. But now + * that we've copied and rewritten all the data successfully, we can + * update type count and various internal offsets and sizes to + * "commit" the changes and made them visible to the outside world. + */ + btf->hdr->type_len += data_sz; + btf->hdr->str_off += data_sz; + btf->nr_types += cnt; + + /* return type ID of the first added BTF type */ + return btf->start_id + btf->nr_types - cnt; +err_out: + /* zero out preallocated memory as if it was just allocated with + * libbpf_add_mem() + */ + memset(btf->types_data + btf->hdr->type_len, 0, data_sz); + memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len); + + /* and now restore original strings section size; types data size + * wasn't modified, so doesn't need restoring, see big comment above */ + btf->hdr->str_len = old_strs_len; + + return libbpf_err(err); +} + /* * Append new BTF_KIND_INT type with: * - *name* - non-empty, non-NULL type name; @@ -1933,7 +1982,7 @@ int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) static struct btf_type *btf_last_type(struct btf *btf) { - return btf_type_by_id(btf, btf__get_nr_types(btf)); + return btf_type_by_id(btf, btf__type_cnt(btf) - 1); } /* @@ -2440,6 +2489,48 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __ return 0; } +/* + * Append new BTF_KIND_DECL_TAG type with: + * - *value* - non-empty/non-NULL string; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * - *component_idx* - -1 for tagging reference type, otherwise struct/union + * member or function argument index; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx) +{ + struct btf_type *t; + int sz, value_off; + + if (!value || !value[0] || component_idx < -1) + return libbpf_err(-EINVAL); + + if (validate_type_id(ref_type_id)) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + value_off = btf__add_str(btf, value); + if (value_off < 0) + return value_off; + + t->name_off = value_off; + t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false); + t->type = ref_type_id; + btf_decl_tag(t)->component_idx = component_idx; + + return btf_commit_type(btf, sz); +} + struct btf_ext_sec_setup_param { __u32 off; __u32 len; @@ -2914,8 +3005,10 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, return libbpf_err(-EINVAL); } - if (btf_ensure_modifiable(btf)) - return libbpf_err(-ENOMEM); + if (btf_ensure_modifiable(btf)) { + err = -ENOMEM; + goto done; + } err = btf_dedup_prep(d); if (err) { @@ -3095,7 +3188,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, goto done; } - type_cnt = btf__get_nr_types(btf) + 1; + type_cnt = btf__type_cnt(btf); d->map = malloc(sizeof(__u32) * type_cnt); if (!d->map) { err = -ENOMEM; @@ -3256,8 +3349,8 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) t1->size == t2->size; } -/* Calculate type signature hash of INT. */ -static long btf_hash_int(struct btf_type *t) +/* Calculate type signature hash of INT or TAG. */ +static long btf_hash_int_decl_tag(struct btf_type *t) { __u32 info = *(__u32 *)(t + 1); long h; @@ -3267,8 +3360,8 @@ static long btf_hash_int(struct btf_type *t) return h; } -/* Check structural equality of two INTs. */ -static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) +/* Check structural equality of two INTs or TAGs. 
*/ +static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2) { __u32 info1, info2; @@ -3535,7 +3628,8 @@ static int btf_dedup_prep(struct btf_dedup *d) h = btf_hash_common(t); break; case BTF_KIND_INT: - h = btf_hash_int(t); + case BTF_KIND_DECL_TAG: + h = btf_hash_int_decl_tag(t); break; case BTF_KIND_ENUM: h = btf_hash_enum(t); @@ -3590,14 +3684,15 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_FUNC_PROTO: case BTF_KIND_VAR: case BTF_KIND_DATASEC: + case BTF_KIND_DECL_TAG: return 0; case BTF_KIND_INT: - h = btf_hash_int(t); + h = btf_hash_int_decl_tag(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); - if (btf_equal_int(t, cand)) { + if (btf_equal_int_tag(t, cand)) { new_id = cand_id; break; } @@ -3881,7 +3976,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, switch (cand_kind) { case BTF_KIND_INT: - return btf_equal_int(cand_type, canon_type); + return btf_equal_int_tag(cand_type, canon_type); case BTF_KIND_ENUM: if (d->opts.dont_resolve_fwds) @@ -4210,6 +4305,23 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) } break; + case BTF_KIND_DECL_TAG: + ref_type_id = btf_dedup_ref_type(d, t->type); + if (ref_type_id < 0) + return ref_type_id; + t->type = ref_type_id; + + h = btf_hash_int_decl_tag(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_int_tag(t, cand)) { + new_id = cand_id; + break; + } + } + break; + case BTF_KIND_ARRAY: { struct btf_array *info = btf_array(t); @@ -4482,6 +4594,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: return visit(&t->type, ctx); case BTF_KIND_ARRAY: { diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 4a711f990904b4..bc005ba3ceec3d 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2018 Facebook */ +/*! \file */ #ifndef __LIBBPF_BTF_H #define __LIBBPF_BTF_H @@ -30,11 +31,80 @@ enum btf_endianness { BTF_BIG_ENDIAN = 1, }; +/** + * @brief **btf__free()** frees all data of a BTF object + * @param btf BTF object to free + */ LIBBPF_API void btf__free(struct btf *btf); +/** + * @brief **btf__new()** creates a new instance of a BTF object from the raw + * bytes of an ELF's BTF section + * @param data raw bytes + * @param size number of bytes passed in `data` + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ LIBBPF_API struct btf *btf__new(const void *data, __u32 size); + +/** + * @brief **btf__new_split()** create a new instance of a BTF object from the + * provided raw data bytes. 
It takes another BTF instance, **base_btf**, which + * serves as a base BTF, which is extended by types in a newly created BTF + * instance + * @param data raw bytes + * @param size length of raw bytes + * @param base_btf the base BTF object + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and + * creates non-split BTF. + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf); + +/** + * @brief **btf__new_empty()** creates an empty BTF object. Use + * `btf__add_*()` to populate such BTF object. + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ LIBBPF_API struct btf *btf__new_empty(void); + +/** + * @brief **btf__new_empty_split()** creates an unpopulated BTF object from an + * ELF BTF section except with a base BTF on top of which split BTF should be + * based + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to + * `btf__new_empty()` and creates non-split BTF. + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. 
+ */ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); @@ -50,16 +120,21 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void); LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id); LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf); +LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead") LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); +LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only") LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); +LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead") LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API int btf__load_into_kernel(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, const char *type_name); LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, __u32 kind); +LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1") LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); +LIBBPF_API __u32 btf__type_cnt(const struct btf *btf); LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); @@ -72,7 +147,9 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); LIBBPF_API void btf__set_fd(struct btf *btf, int fd); +LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead") LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); +LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, @@ -101,6 +178,28 @@ LIBBPF_API int btf__find_str(struct btf *btf, const char *s); LIBBPF_API int btf__add_str(struct btf *btf, const char *s); LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type); +/** + * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf* + * @param btf BTF object which all the BTF types and strings are added to + * @param src_btf BTF object which all BTF types and referenced strings are copied from + * @return BTF type ID of the first appended BTF type, or negative error code + * + * **btf__add_btf()** can be used to simply and efficiently append the entire + * contents of one BTF object to another one. All the BTF type data is copied + * over, all referenced type IDs are adjusted by adding a necessary ID offset. + * Only strings referenced from BTF types are copied over and deduplicated, so + * if there were some unused strings in *src_btf*, those won't be copied over, + * which is consistent with the general string deduplication semantics of BTF + * writing APIs. + * + * If any error is encountered during this process, the contents of *btf* is + * left intact, which means that **btf__add_btf()** follows the transactional + * semantics and the operation as a whole is all-or-nothing. 
+ * + * *src_btf* has to be non-split BTF, as of now copying types from split BTF + * is not supported and will result in -ENOTSUP error code returned. + */ +LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf); LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding); LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz); @@ -141,6 +240,10 @@ LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz); +/* tag construction API */ +LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx); + struct btf_dedup_opts { unsigned int dedup_table_size; bool dont_resolve_fwds; @@ -328,6 +431,11 @@ static inline bool btf_is_float(const struct btf_type *t) return btf_kind(t) == BTF_KIND_FLOAT; } +static inline bool btf_is_decl_tag(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_DECL_TAG; +} + static inline __u8 btf_int_encoding(const struct btf_type *t) { return BTF_INT_ENCODING(*(__u32 *)(t + 1)); @@ -396,6 +504,12 @@ btf_var_secinfos(const struct btf_type *t) return (struct btf_var_secinfo *)(t + 1); } +struct btf_decl_tag; +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) +{ + return (struct btf_decl_tag *)(t + 1); +} + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index e4b483f15fb99a..17db62b5002e88 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -188,7 +188,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf, static int btf_dump_resize(struct btf_dump *d) { - int err, last_id = btf__get_nr_types(d->btf); + int err, last_id = btf__type_cnt(d->btf) - 1; if (last_id <= d->last_id) return 0; @@ -262,7 +262,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id) { int err, i; - if (id > btf__get_nr_types(d->btf)) + if (id >= btf__type_cnt(d->btf)) return libbpf_err(-EINVAL); err = btf_dump_resize(d); @@ -294,11 +294,11 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id) */ static int btf_dump_mark_referenced(struct btf_dump *d) { - int i, j, n = btf__get_nr_types(d->btf); + int i, j, n = btf__type_cnt(d->btf); const struct btf_type *t; __u16 vlen; - for (i = d->last_id + 1; i <= n; i++) { + for (i = d->last_id + 1; i < n; i++) { t = btf__type_by_id(d->btf, i); vlen = btf_vlen(t); @@ -316,6 +316,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: d->type_states[t->type].referenced = 1; break; @@ -583,6 +584,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DATASEC: + case BTF_KIND_DECL_TAG: d->type_states[id].order_state = ORDERED; return 0; @@ -1560,29 +1562,28 @@ static int btf_dump_get_bitfield_value(struct btf_dump *d, __u64 *value) { __u16 left_shift_bits, right_shift_bits; - __u8 nr_copy_bits, nr_copy_bytes; const __u8 *bytes = data; - int sz = t->size; + __u8 nr_copy_bits; __u64 num = 0; int i; /* Maximum supported bitfield size is 64 bits */ - if (sz > 8) { - pr_warn("unexpected bitfield size %d\n", sz); + if (t->size > 8) { + pr_warn("unexpected bitfield size %d\n", t->size); return -EINVAL; } /* Bitfield value retrieval is done in two steps; first relevant bytes are * stored in num, then we left/right shift num to 
eliminate irrelevant bits. */ - nr_copy_bits = bit_sz + bits_offset; - nr_copy_bytes = t->size; -#if __BYTE_ORDER == __LITTLE_ENDIAN - for (i = nr_copy_bytes - 1; i >= 0; i--) +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + for (i = t->size - 1; i >= 0; i--) num = num * 256 + bytes[i]; -#elif __BYTE_ORDER == __BIG_ENDIAN - for (i = 0; i < nr_copy_bytes; i++) + nr_copy_bits = bit_sz + bits_offset; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + for (i = 0; i < t->size; i++) num = num * 256 + bytes[i]; + nr_copy_bits = t->size * 8 - bits_offset; #else # error "Unrecognized __BYTE_ORDER__" #endif @@ -1656,9 +1657,15 @@ static int btf_dump_base_type_check_zero(struct btf_dump *d, return 0; } -static bool ptr_is_aligned(const void *data, int data_sz) +static bool ptr_is_aligned(const struct btf *btf, __u32 type_id, + const void *data) { - return ((uintptr_t)data) % data_sz == 0; + int alignment = btf__align_of(btf, type_id); + + if (alignment == 0) + return false; + + return ((uintptr_t)data) % alignment == 0; } static int btf_dump_int_data(struct btf_dump *d, @@ -1669,9 +1676,10 @@ static int btf_dump_int_data(struct btf_dump *d, { __u8 encoding = btf_int_encoding(t); bool sign = encoding & BTF_INT_SIGNED; + char buf[16] __attribute__((aligned(16))); int sz = t->size; - if (sz == 0) { + if (sz == 0 || sz > sizeof(buf)) { pr_warn("unexpected size %d for id [%u]\n", sz, type_id); return -EINVAL; } @@ -1679,8 +1687,10 @@ static int btf_dump_int_data(struct btf_dump *d, /* handle packed int data - accesses of integers not aligned on * int boundaries can cause problems on some platforms. */ - if (!ptr_is_aligned(data, sz)) - return btf_dump_bitfield_data(d, t, data, 0, 0); + if (!ptr_is_aligned(d->btf, type_id, data)) { + memcpy(buf, data, sz); + data = buf; + } switch (sz) { case 16: { @@ -1690,10 +1700,10 @@ static int btf_dump_int_data(struct btf_dump *d, /* avoid use of __int128 as some 32-bit platforms do not * support it. 
*/ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ lsi = ints[0]; msi = ints[1]; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ lsi = ints[1]; msi = ints[0]; #else @@ -1766,7 +1776,7 @@ static int btf_dump_float_data(struct btf_dump *d, int sz = t->size; /* handle unaligned data; copy to local union */ - if (!ptr_is_aligned(data, sz)) { + if (!ptr_is_aligned(d->btf, type_id, data)) { memcpy(&fl, data, sz); flp = &fl; } @@ -1929,7 +1939,7 @@ static int btf_dump_ptr_data(struct btf_dump *d, __u32 id, const void *data) { - if (ptr_is_aligned(data, d->ptr_sz) && d->ptr_sz == sizeof(void *)) { + if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) { btf_dump_type_values(d, "%p", *(void **)data); } else { union ptr_data pt; @@ -1949,10 +1959,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d, __u32 id, __s64 *value) { - int sz = t->size; - /* handle unaligned enum value */ - if (!ptr_is_aligned(data, sz)) { + if (!ptr_is_aligned(d->btf, id, data)) { __u64 val; int err; @@ -2215,6 +2223,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d, case BTF_KIND_FWD: case BTF_KIND_FUNC: case BTF_KIND_FUNC_PROTO: + case BTF_KIND_DECL_TAG: err = btf_dump_unsupported_data(d, t, id); break; case BTF_KIND_INT: diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index 8df718a6b142df..502dea53a742d7 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "btf.h" #include "bpf.h" #include "libbpf.h" @@ -12,9 +13,12 @@ #include "hashmap.h" #include "bpf_gen_internal.h" #include "skel_internal.h" +#include -#define MAX_USED_MAPS 64 -#define MAX_USED_PROGS 32 +#define MAX_USED_MAPS 64 +#define MAX_USED_PROGS 32 +#define MAX_KFUNC_DESCS 256 +#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS) /* The following structure describes the stack layout of the loader program. * In addition R6 contains the pointer to context. @@ -29,7 +33,6 @@ */ struct loader_stack { __u32 btf_fd; - __u32 map_fd[MAX_USED_MAPS]; __u32 prog_fd[MAX_USED_PROGS]; __u32 inner_map_fd; }; @@ -135,16 +138,56 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level) static int add_data(struct bpf_gen *gen, const void *data, __u32 size) { + __u32 size8 = roundup(size, 8); + __u64 zero = 0; void *prev; - if (realloc_data_buf(gen, size)) + if (realloc_data_buf(gen, size8)) return 0; prev = gen->data_cur; - memcpy(gen->data_cur, data, size); - gen->data_cur += size; + if (data) { + memcpy(gen->data_cur, data, size); + memcpy(gen->data_cur + size, &zero, size8 - size); + } else { + memset(gen->data_cur, 0, size8); + } + gen->data_cur += size8; return prev - gen->data_start; } +/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative + * to start of fd_array. Caller can decide if it is usable or not. 
+ */ +static int add_map_fd(struct bpf_gen *gen) +{ + if (!gen->fd_array) + gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); + if (gen->nr_maps == MAX_USED_MAPS) { + pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS); + gen->error = -E2BIG; + return 0; + } + return gen->nr_maps++; +} + +static int add_kfunc_btf_fd(struct bpf_gen *gen) +{ + int cur; + + if (!gen->fd_array) + gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); + if (gen->nr_fd_array == MAX_KFUNC_DESCS) { + cur = add_data(gen, NULL, sizeof(int)); + return (cur - gen->fd_array) / sizeof(int); + } + return MAX_USED_MAPS + gen->nr_fd_array++; +} + +static int blob_fd_array_off(struct bpf_gen *gen, int index) +{ + return gen->fd_array + index * sizeof(int); +} + static int insn_bytes_to_bpf_size(__u32 sz) { switch (sz) { @@ -166,14 +209,22 @@ static void emit_rel_store(struct bpf_gen *gen, int off, int data) emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0)); } -/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */ -static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off) +static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off) { - emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10)); - emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_off)); + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, off)); - emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); +} + +static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off) +{ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_off)); + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off)); } static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off, @@ -321,11 +372,11 @@ int bpf_gen__finish(struct bpf_gen *gen) offsetof(struct bpf_prog_desc, prog_fd), 4, stack_off(prog_fd[i])); for (i = 0; i < gen->nr_maps; i++) - move_stack2ctx(gen, - sizeof(struct bpf_loader_ctx) + - sizeof(struct bpf_map_desc) * i + - offsetof(struct bpf_map_desc, map_fd), 4, - stack_off(map_fd[i])); + move_blob2ctx(gen, + sizeof(struct bpf_loader_ctx) + + sizeof(struct bpf_map_desc) * i + + offsetof(struct bpf_map_desc, map_fd), 4, + blob_fd_array_off(gen, i)); emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0)); emit(gen, BPF_EXIT_INSN()); pr_debug("gen: finish %d\n", gen->error); @@ -381,11 +432,11 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, } void bpf_gen__map_create(struct bpf_gen *gen, - struct bpf_create_map_attr *map_attr, int map_idx) + struct bpf_create_map_params *map_attr, int map_idx) { int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id); bool close_inner_map_fd = false; - int map_create_attr; + int map_create_attr, idx; union bpf_attr attr; memset(&attr, 0, attr_size); @@ -393,6 +444,7 @@ void bpf_gen__map_create(struct bpf_gen *gen, attr.key_size = map_attr->key_size; attr.value_size = map_attr->value_size; attr.map_flags = map_attr->map_flags; + attr.map_extra = map_attr->map_extra; memcpy(attr.map_name, map_attr->name, min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1)); attr.numa_node = 
map_attr->numa_node; @@ -462,9 +514,11 @@ void bpf_gen__map_create(struct bpf_gen *gen, gen->error = -EDOM; /* internal bug */ return; } else { - emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, - stack_off(map_fd[map_idx]))); - gen->nr_maps++; + /* add_map_fd does gen->nr_maps++ */ + idx = add_map_fd(gen); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_fd_array_off(gen, idx))); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0)); } if (close_inner_map_fd) emit_sys_close_stack(gen, stack_off(inner_map_fd)); @@ -506,8 +560,8 @@ static void emit_find_attach_target(struct bpf_gen *gen) */ } -void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, - int insn_idx) +void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, + bool is_typeless, int kind, int insn_idx) { struct ksym_relo_desc *relo; @@ -519,38 +573,292 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, gen->relos = relo; relo += gen->relo_cnt; relo->name = name; + relo->is_weak = is_weak; + relo->is_typeless = is_typeless; relo->kind = kind; relo->insn_idx = insn_idx; gen->relo_cnt++; } -static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) +/* returns existing ksym_desc with ref incremented, or inserts a new one */ +static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo) { - int name, insn, len = strlen(relo->name) + 1; + struct ksym_desc *kdesc; - pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx); - name = add_data(gen, relo->name, len); + for (int i = 0; i < gen->nr_ksyms; i++) { + if (!strcmp(gen->ksyms[i].name, relo->name)) { + gen->ksyms[i].ref++; + return &gen->ksyms[i]; + } + } + kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc)); + if (!kdesc) { + gen->error = -ENOMEM; + return NULL; + } + gen->ksyms = kdesc; + kdesc = &gen->ksyms[gen->nr_ksyms++]; + kdesc->name = relo->name; + kdesc->kind = relo->kind; + kdesc->ref = 1; + kdesc->off = 0; + kdesc->insn = 0; + return kdesc; +} + +/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} + * Returns result in BPF_REG_7 + */ +static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo) +{ + int name_off, len = strlen(relo->name) + 1; + name_off = add_data(gen, relo->name, len); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, - 0, 0, 0, name)); + 0, 0, 0, name_off)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind)); emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind)); emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind); - emit_check_err(gen); +} + +/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} + * Returns result in BPF_REG_7 + * Returns u64 symbol addr in BPF_REG_9 + */ +static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo) +{ + int name_off, len = strlen(relo->name) + 1, res_off; + + name_off = add_data(gen, relo->name, len); + res_off = add_data(gen, NULL, 8); /* res is u64 */ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, name_off)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, res_off)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4)); + emit(gen, 
BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name)); + emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); + debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind); +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + * + * We need to reuse BTF fd for same symbol otherwise each relocation takes a new + * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols, + * this would mean a new BTF fd index for each entry. By pairing symbol name + * with index, we get the insn->imm, insn->off pairing that kernel uses for + * kfunc_tab, which becomes the effective limit even though all of them may + * share same index in fd_array (such that kfunc_btf_tab has 1 element). + */ +static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + int btf_fd_idx; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing bpf_insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2, + kdesc->insn + offsetof(struct bpf_insn, off)); + goto log; + } + /* remember insn offset, so we can copy BTF ID and FD later */ + kdesc->insn = insn; + emit_bpf_find_by_name_kind(gen, relo); + if (!relo->is_weak) + emit_check_err(gen); + /* get index in fd_array to store BTF FD at */ + btf_fd_idx = add_kfunc_btf_fd(gen); + if (btf_fd_idx > INT16_MAX) { + pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n", + btf_fd_idx, relo->name); + gen->error = -E2BIG; + return; + } + kdesc->off = btf_fd_idx; + /* set a default value for imm */ + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); + /* skip success case store if ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1)); /* store btf_id into insn[insn_idx].imm */ - insn = insns + sizeof(struct bpf_insn) * relo->insn_idx + - offsetof(struct bpf_insn, imm); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* load fd_array slot pointer */ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, - 0, 0, 0, insn)); - emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0)); - if (relo->kind == BTF_KIND_VAR) { - /* store btf_obj_fd into insn[insn_idx + 1].imm */ - emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); - emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, - sizeof(struct bpf_insn))); + 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx))); + /* skip store of BTF fd if ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3)); + /* store BTF fd in slot */ + emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0)); + /* set a default value for off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip insn->off store if ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2)); + /* skip if vmlinux BTF */ + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1)); + /* store index into insn[insn_idx].off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx)); +log: + if (!gen->log_level) + return; + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, + offsetof(struct bpf_insn, imm))); + emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, + offsetof(struct bpf_insn, off))); + 
debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d", + relo->name, kdesc->ref); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_fd_array_off(gen, kdesc->off))); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0)); + debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd", + relo->name, kdesc->ref); +} + +static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo, + int ref) +{ + if (!gen->log_level) + return; + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, + offsetof(struct bpf_insn, imm))); + emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) + + offsetof(struct bpf_insn, imm))); + debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d", + relo->is_typeless, relo->is_weak, relo->name, ref); + emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); + debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg", + relo->is_typeless, relo->is_weak, relo->name, ref); +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + */ +static void emit_relo_ksym_typeless(struct bpf_gen *gen, + struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing ldimm64 insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); + goto log; + } + /* remember insn offset, so we can copy ksym addr later */ + kdesc->insn = insn; + /* skip typeless ksym_desc in fd closing loop in cleanup_relos */ + kdesc->typeless = true; + emit_bpf_kallsyms_lookup_name(gen, relo); + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1)); + emit_check_err(gen); + /* store lower half of addr into insn[insn_idx].imm */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm))); + /* store upper half of addr into insn[insn_idx + 1].imm */ + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); +log: + emit_ksym_relo_log(gen, relo, kdesc->ref); +} + +static __u32 src_reg_mask(void) +{ +#if defined(__LITTLE_ENDIAN_BITFIELD) + return 0x0f; /* src_reg,dst_reg,... */ +#elif defined(__BIG_ENDIAN_BITFIELD) + return 0xf0; /* dst_reg,src_reg,... 
*/ +#else +#error "Unsupported bit endianness, cannot proceed" +#endif +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + */ +static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + __u32 reg_mask; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing ldimm64 insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm))); + /* jump over src_reg adjustment if imm is not 0 */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3)); + goto clear_src_reg; + } + /* remember insn offset, so we can copy BTF ID and FD later */ + kdesc->insn = insn; + emit_bpf_find_by_name_kind(gen, relo); + if (!relo->is_weak) + emit_check_err(gen); + /* set default values as 0 */ + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0)); + /* skip success case stores if ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4)); + /* store btf_id into insn[insn_idx].imm */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* store btf_obj_fd into insn[insn_idx + 1].imm */ + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); +clear_src_reg: + /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ + reg_mask = src_reg_mask(); + emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); + emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask)); + emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code))); + + emit_ksym_relo_log(gen, relo, kdesc->ref); +} + +static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) +{ + int insn; + + pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx); + insn = insns + sizeof(struct bpf_insn) * relo->insn_idx; + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn)); + switch (relo->kind) { + case BTF_KIND_VAR: + if (relo->is_typeless) + emit_relo_ksym_typeless(gen, relo, insn); + else + emit_relo_ksym_btf(gen, relo, insn); + break; + case BTF_KIND_FUNC: + emit_relo_kfunc_btf(gen, relo, insn); + break; + default: + pr_warn("Unknown relocation kind '%d'\n", relo->kind); + gen->error = -EDOM; + return; } } @@ -566,14 +874,23 @@ static void cleanup_relos(struct bpf_gen *gen, int insns) { int i, insn; - for (i = 0; i < gen->relo_cnt; i++) { - if (gen->relos[i].kind != BTF_KIND_VAR) - continue; - /* close fd recorded in insn[insn_idx + 1].imm */ - insn = insns + - sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) + - offsetof(struct bpf_insn, imm); - emit_sys_close_blob(gen, insn); + for (i = 0; i < gen->nr_ksyms; i++) { + /* only close fds for typed ksyms and kfuncs */ + if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) { + /* close fd recorded in insn[insn_idx + 1].imm */ + insn = gen->ksyms[i].insn; + insn += 
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm); + emit_sys_close_blob(gen, insn); + } else if (gen->ksyms[i].kind == BTF_KIND_FUNC) { + emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off)); + if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ) + gen->nr_fd_array--; + } + } + if (gen->nr_ksyms) { + free(gen->ksyms); + gen->nr_ksyms = 0; + gen->ksyms = NULL; } if (gen->relo_cnt) { free(gen->relos); @@ -632,9 +949,8 @@ void bpf_gen__prog_load(struct bpf_gen *gen, /* populate union bpf_attr with a pointer to line_info */ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info); - /* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */ - emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array), - stack_off(map_fd[0])); + /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */ + emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array); /* populate union bpf_attr with user provided log details */ move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4, @@ -701,8 +1017,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user)); map_update_attr = add_data(gen, &attr, attr_size); - move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4, - stack_off(map_fd[map_idx])); + move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, + blob_fd_array_off(gen, map_idx)); emit_rel_store(gen, attr_field(map_update_attr, key), key); emit_rel_store(gen, attr_field(map_update_attr, value), value); /* emit MAP_UPDATE_ELEM command */ @@ -720,8 +1036,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) memset(&attr, 0, attr_size); pr_debug("gen: map_freeze: idx %d\n", map_idx); map_freeze_attr = add_data(gen, &attr, attr_size); - move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4, - stack_off(map_fd[map_idx])); + move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4, + blob_fd_array_off(gen, map_idx)); /* emit MAP_FREEZE command */ emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size); debug_ret(gen, "map_freeze"); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e4f83c304ec924..a1bea1953df673 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -195,6 +195,8 @@ enum kern_feature_id { FEAT_BTF_FLOAT, /* BPF perf link support */ FEAT_PERF_LINK, + /* BTF_KIND_DECL_TAG support */ + FEAT_BTF_DECL_TAG, __FEAT_CNT, }; @@ -218,18 +220,40 @@ struct reloc_desc { struct bpf_sec_def; -typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec, - struct bpf_program *prog); +typedef int (*init_fn_t)(struct bpf_program *prog, long cookie); +typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie); +typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie); + +/* stored as sec_def->cookie for all libbpf-supported SEC()s */ +enum sec_def_flags { + SEC_NONE = 0, + /* expected_attach_type is optional, if kernel doesn't support that */ + SEC_EXP_ATTACH_OPT = 1, + /* legacy, only used by libbpf_get_type_names() and + * libbpf_attach_type_by_name(), not used by libbpf itself at all. + * This used to be associated with cgroup (and few other) BPF programs + * that were attachable through BPF_PROG_ATTACH command. Pretty + * meaningless nowadays, though. 
+ */ + SEC_ATTACHABLE = 2, + SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT, + /* attachment target is specified through BTF ID in either kernel or + * other BPF program's BTF object */ + SEC_ATTACH_BTF = 4, + /* BPF program type allows sleeping/blocking in kernel */ + SEC_SLEEPABLE = 8, + /* allow non-strict prefix matching */ + SEC_SLOPPY_PFX = 16, +}; struct bpf_sec_def { const char *sec; - size_t len; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; - bool is_exp_attach_type_optional; - bool is_attachable; - bool is_attach_btf; - bool is_sleepable; + long cookie; + + init_fn_t init_fn; + preload_fn_t preload_fn; attach_fn_t attach_fn; }; @@ -261,7 +285,7 @@ struct bpf_program { size_t sub_insn_off; char *name; - /* sec_name with / replaced by _; makes recursive pinning + /* name with / replaced by _; makes recursive pinning * in bpf_object__pin_programs easier */ char *pin_name; @@ -346,15 +370,14 @@ enum libbpf_map_type { LIBBPF_MAP_KCONFIG, }; -static const char * const libbpf_type_to_btf_name[] = { - [LIBBPF_MAP_DATA] = DATA_SEC, - [LIBBPF_MAP_BSS] = BSS_SEC, - [LIBBPF_MAP_RODATA] = RODATA_SEC, - [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC, -}; - struct bpf_map { char *name; + /* real_name is defined for special internal maps (.rodata*, + * .data*, .bss, .kconfig) and preserves their original ELF section + * name. This is important to be be able to find corresponding BTF + * DATASEC information. + */ + char *real_name; int fd; int sec_idx; size_t sec_offset; @@ -377,6 +400,7 @@ struct bpf_map { char *pin_path; bool pinned; bool reused; + __u64 map_extra; }; enum extern_type { @@ -419,6 +443,11 @@ struct extern_desc { /* local btf_id of the ksym extern's type. */ __u32 type_id; + /* BTF fd index to be patched in for insn->off, this is + * 0 for vmlinux BTF, index in obj->fd_array for module + * BTF + */ + __s16 btf_fd_idx; } ksym; }; }; @@ -430,6 +459,41 @@ struct module_btf { char *name; __u32 id; int fd; + int fd_array_idx; +}; + +enum sec_type { + SEC_UNUSED = 0, + SEC_RELO, + SEC_BSS, + SEC_DATA, + SEC_RODATA, +}; + +struct elf_sec_desc { + enum sec_type sec_type; + Elf64_Shdr *shdr; + Elf_Data *data; +}; + +struct elf_state { + int fd; + const void *obj_buf; + size_t obj_buf_sz; + Elf *elf; + Elf64_Ehdr *ehdr; + Elf_Data *symbols; + Elf_Data *st_ops_data; + size_t shstrndx; /* section index for section name strings */ + size_t strtabidx; + struct elf_sec_desc *secs; + int sec_cnt; + int maps_shndx; + int btf_maps_shndx; + __u32 btf_maps_sec_btf_id; + int text_shndx; + int symbols_shndx; + int st_ops_shndx; }; struct bpf_object { @@ -447,47 +511,17 @@ struct bpf_object { struct extern_desc *externs; int nr_extern; int kconfig_map_idx; - int rodata_map_idx; bool loaded; bool has_subcalls; + bool has_rodata; struct bpf_gen *gen_loader; + /* Information when doing ELF related work. Only valid if efile.elf is not NULL */ + struct elf_state efile; /* - * Information when doing elf related work. Only valid if fd - * is valid. 
- */ - struct { - int fd; - const void *obj_buf; - size_t obj_buf_sz; - Elf *elf; - GElf_Ehdr ehdr; - Elf_Data *symbols; - Elf_Data *data; - Elf_Data *rodata; - Elf_Data *bss; - Elf_Data *st_ops_data; - size_t shstrndx; /* section index for section name strings */ - size_t strtabidx; - struct { - GElf_Shdr shdr; - Elf_Data *data; - } *reloc_sects; - int nr_reloc_sects; - int maps_shndx; - int btf_maps_shndx; - __u32 btf_maps_sec_btf_id; - int text_shndx; - int symbols_shndx; - int data_shndx; - int rodata_shndx; - int bss_shndx; - int st_ops_shndx; - } efile; - /* - * All loaded bpf_object is linked in a list, which is + * All loaded bpf_object are linked in a list, which is * hidden to caller. bpf_objects__ handlers deal with * all objects. */ @@ -515,17 +549,22 @@ struct bpf_object { void *priv; bpf_object_clear_priv_t clear_priv; + int *fd_array; + size_t fd_array_cap; + size_t fd_array_cnt; + char path[]; }; -#define obj_elf_valid(o) ((o)->efile.elf) static const char *elf_sym_str(const struct bpf_object *obj, size_t off); static const char *elf_sec_str(const struct bpf_object *obj, size_t off); static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); -static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr); +static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); +static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); +static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); void bpf_program__unload(struct bpf_program *prog) { @@ -580,7 +619,16 @@ static char *__bpf_program__pin_name(struct bpf_program *prog) { char *name, *p; - name = p = strdup(prog->sec_name); + if (libbpf_mode & LIBBPF_STRICT_SEC_NAME) + name = strdup(prog->name); + else + name = strdup(prog->sec_name); + + if (!name) + return NULL; + + p = name; + while ((p = strchr(p, '/'))) *p = '_'; @@ -667,25 +715,25 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms; int nr_progs, err, i; const char *name; - GElf_Sym sym; + Elf64_Sym *sym; progs = obj->programs; nr_progs = obj->nr_programs; - nr_syms = symbols->d_size / sizeof(GElf_Sym); + nr_syms = symbols->d_size / sizeof(Elf64_Sym); sec_off = 0; for (i = 0; i < nr_syms; i++) { - if (!gelf_getsym(symbols, i, &sym)) - continue; - if (sym.st_shndx != sec_idx) + sym = elf_sym_by_idx(obj, i); + + if (sym->st_shndx != sec_idx) continue; - if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) + if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) continue; - prog_sz = sym.st_size; - sec_off = sym.st_value; + prog_sz = sym->st_size; + sec_off = sym->st_value; - name = elf_sym_str(obj, sym.st_name); + name = elf_sym_str(obj, sym->st_name); if (!name) { pr_warn("sec '%s': failed to get symbol name for offset %zu\n", sec_name, sec_off); @@ -698,7 +746,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, return -LIBBPF_ERRNO__FORMAT; } - if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) { + if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); return -ENOTSUP; } @@ -731,9 +779,9 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data 
*sec_data, * as static to enable more permissive BPF verification mode * with more outside context available to BPF verifier */ - if (GELF_ST_BIND(sym.st_info) != STB_LOCAL - && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN - || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL)) + if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL + && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN + || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)) prog->mark_btf_static = true; nr_progs++; @@ -1101,6 +1149,7 @@ static struct bpf_object *bpf_object__new(const char *path, size_t obj_buf_sz, const char *obj_name) { + bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST); struct bpf_object *obj; char *end; @@ -1134,24 +1183,21 @@ static struct bpf_object *bpf_object__new(const char *path, obj->efile.obj_buf_sz = obj_buf_sz; obj->efile.maps_shndx = -1; obj->efile.btf_maps_shndx = -1; - obj->efile.data_shndx = -1; - obj->efile.rodata_shndx = -1; - obj->efile.bss_shndx = -1; obj->efile.st_ops_shndx = -1; obj->kconfig_map_idx = -1; - obj->rodata_map_idx = -1; obj->kern_version = get_kernel_version(); obj->loaded = false; INIT_LIST_HEAD(&obj->list); - list_add(&obj->list, &bpf_objects_list); + if (!strict) + list_add(&obj->list, &bpf_objects_list); return obj; } static void bpf_object__elf_finish(struct bpf_object *obj) { - if (!obj_elf_valid(obj)) + if (!obj->efile.elf) return; if (obj->efile.elf) { @@ -1159,13 +1205,10 @@ static void bpf_object__elf_finish(struct bpf_object *obj) obj->efile.elf = NULL; } obj->efile.symbols = NULL; - obj->efile.data = NULL; - obj->efile.rodata = NULL; - obj->efile.bss = NULL; obj->efile.st_ops_data = NULL; - zfree(&obj->efile.reloc_sects); - obj->efile.nr_reloc_sects = 0; + zfree(&obj->efile.secs); + obj->efile.sec_cnt = 0; zclose(obj->efile.fd); obj->efile.obj_buf = NULL; obj->efile.obj_buf_sz = 0; @@ -1173,10 +1216,11 @@ static void bpf_object__elf_finish(struct bpf_object *obj) static int bpf_object__elf_init(struct bpf_object *obj) { + Elf64_Ehdr *ehdr; int err = 0; - GElf_Ehdr *ep; + Elf *elf; - if (obj_elf_valid(obj)) { + if (obj->efile.elf) { pr_warn("elf: init internal error\n"); return -LIBBPF_ERRNO__LIBELF; } @@ -1186,10 +1230,9 @@ static int bpf_object__elf_init(struct bpf_object *obj) * obj_buf should have been validated by * bpf_object__open_buffer(). 
*/ - obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, - obj->efile.obj_buf_sz); + elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); } else { - obj->efile.fd = open(obj->path, O_RDONLY); + obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE], *cp; @@ -1199,23 +1242,37 @@ static int bpf_object__elf_init(struct bpf_object *obj) return err; } - obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); + elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); } - if (!obj->efile.elf) { + if (!elf) { pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__LIBELF; goto errout; } - if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { + obj->efile.elf = elf; + + if (elf_kind(elf) != ELF_K_ELF) { + err = -LIBBPF_ERRNO__FORMAT; + pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); + goto errout; + } + + if (gelf_getclass(elf) != ELFCLASS64) { + err = -LIBBPF_ERRNO__FORMAT; + pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); + goto errout; + } + + obj->efile.ehdr = ehdr = elf64_getehdr(elf); + if (!obj->efile.ehdr) { pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } - ep = &obj->efile.ehdr; - if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { + if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { pr_warn("elf: failed to get section names section index for %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; @@ -1223,7 +1280,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) } /* Elf is corrupted/truncated, avoid calling elf_strptr. */ - if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { + if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) { pr_warn("elf: failed to get section names strings from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; @@ -1231,8 +1288,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) } /* Old LLVM set e_machine to EM_NONE */ - if (ep->e_type != ET_REL || - (ep->e_machine && ep->e_machine != EM_BPF)) { + if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) { pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; @@ -1246,11 +1302,11 @@ static int bpf_object__elf_init(struct bpf_object *obj) static int bpf_object__check_endianness(struct bpf_object *obj) { -#if __BYTE_ORDER == __LITTLE_ENDIAN - if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB) return 0; -#elif __BYTE_ORDER == __BIG_ENDIAN - if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB) return 0; #else # error "Unrecognized __BYTE_ORDER__" @@ -1290,41 +1346,27 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) return false; } -int bpf_object__section_size(const struct bpf_object *obj, const char *name, - __u32 *size) +static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) { int ret = -ENOENT; + Elf_Data *data; + Elf_Scn *scn; *size = 0; - if (!name) { + if (!name) return -EINVAL; - } else if (!strcmp(name, DATA_SEC)) { - if (obj->efile.data) - *size = obj->efile.data->d_size; - } else if (!strcmp(name, BSS_SEC)) { - if (obj->efile.bss) - *size = 
obj->efile.bss->d_size; - } else if (!strcmp(name, RODATA_SEC)) { - if (obj->efile.rodata) - *size = obj->efile.rodata->d_size; - } else if (!strcmp(name, STRUCT_OPS_SEC)) { - if (obj->efile.st_ops_data) - *size = obj->efile.st_ops_data->d_size; - } else { - Elf_Scn *scn = elf_sec_by_name(obj, name); - Elf_Data *data = elf_sec_data(obj, scn); - if (data) { - ret = 0; /* found it */ - *size = data->d_size; - } + scn = elf_sec_by_name(obj, name); + data = elf_sec_data(obj, scn); + if (data) { + ret = 0; /* found it */ + *size = data->d_size; } return *size ? 0 : ret; } -int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, - __u32 *off) +static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off) { Elf_Data *symbols = obj->efile.symbols; const char *sname; @@ -1333,23 +1375,20 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, if (!name || !off) return -EINVAL; - for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { - GElf_Sym sym; + for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) { + Elf64_Sym *sym = elf_sym_by_idx(obj, si); - if (!gelf_getsym(symbols, si, &sym)) - continue; - if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL || - GELF_ST_TYPE(sym.st_info) != STT_OBJECT) + if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL || + ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) continue; - sname = elf_sym_str(obj, sym.st_name); + sname = elf_sym_str(obj, sym->st_name); if (!sname) { - pr_warn("failed to get sym name string for var %s\n", - name); + pr_warn("failed to get sym name string for var %s\n", name); return -EIO; } if (strcmp(name, sname) == 0) { - *off = sym.st_value; + *off = sym->st_value; return 0; } } @@ -1401,17 +1440,55 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map) return map_sz; } -static char *internal_map_name(struct bpf_object *obj, - enum libbpf_map_type type) +static char *internal_map_name(struct bpf_object *obj, const char *real_name) { char map_name[BPF_OBJ_NAME_LEN], *p; - const char *sfx = libbpf_type_to_btf_name[type]; - int sfx_len = max((size_t)7, strlen(sfx)); - int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, - strlen(obj->name)); + int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); + + /* This is one of the more confusing parts of libbpf for various + * reasons, some of which are historical. The original idea for naming + * internal names was to include as much of BPF object name prefix as + * possible, so that it can be distinguished from similar internal + * maps of a different BPF object. + * As an example, let's say we have bpf_object named 'my_object_name' + * and internal map corresponding to '.rodata' ELF section. The final + * map name advertised to user and to the kernel will be + * 'my_objec.rodata', taking first 8 characters of object name and + * entire 7 characters of '.rodata'. + * Somewhat confusingly, if internal map ELF section name is shorter + * than 7 characters, e.g., '.bss', we still reserve 7 characters + * for the suffix, even though we only have 4 actual characters, and + * resulting map will be called 'my_objec.bss', not even using all 15 + * characters allowed by the kernel. Oh well, at least the truncated + * object name is somewhat consistent in this case. But if the map + * name is '.kconfig', we'll still have entirety of '.kconfig' added + * (8 chars) and thus will be left with only first 7 characters of the + * object name ('my_obje'). 
Happy guessing, user, that the final map + * name will be "my_obje.kconfig". + * Now, with libbpf starting to support arbitrarily named .rodata.* + * and .data.* data sections, it's possible that ELF section name is + * longer than allowed 15 chars, so we now need to be careful to take + * only up to 15 first characters of ELF name, taking no BPF object + * name characters at all. So '.rodata.abracadabra' will result in + * '.rodata.abracad' kernel and user-visible name. + * We need to keep this convoluted logic intact for .data, .bss and + * .rodata maps, but for new custom .data.custom and .rodata.custom + * maps we use their ELF names as is, not prepending bpf_object name + * in front. We still need to truncate them to 15 characters for the + * kernel. Full name can be recovered for such maps by using DATASEC + * BTF type associated with such map's value type, though. + */ + if (sfx_len >= BPF_OBJ_NAME_LEN) + sfx_len = BPF_OBJ_NAME_LEN - 1; + + /* if there are two or more dots in map name, it's a custom dot map */ + if (strchr(real_name + 1, '.') != NULL) + pfx_len = 0; + else + pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name)); snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, - sfx_len, libbpf_type_to_btf_name[type]); + sfx_len, real_name); /* sanitise map name to characters allowed by kernel */ for (p = map_name; *p && p < map_name + sizeof(map_name); p++) @@ -1423,7 +1500,7 @@ static char *internal_map_name(struct bpf_object *obj, static int bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, - int sec_idx, void *data, size_t data_sz) + const char *real_name, int sec_idx, void *data, size_t data_sz) { struct bpf_map_def *def; struct bpf_map *map; @@ -1436,9 +1513,11 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, map->libbpf_type = type; map->sec_idx = sec_idx; map->sec_offset = 0; - map->name = internal_map_name(obj, type); - if (!map->name) { - pr_warn("failed to alloc map name\n"); + map->real_name = strdup(real_name); + map->name = internal_map_name(obj, real_name); + if (!map->real_name || !map->name) { + zfree(&map->real_name); + zfree(&map->name); return -ENOMEM; } @@ -1461,6 +1540,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, map->mmaped = NULL; pr_warn("failed to alloc map '%s' content buffer: %d\n", map->name, err); + zfree(&map->real_name); zfree(&map->name); return err; } @@ -1474,34 +1554,43 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, static int bpf_object__init_global_data_maps(struct bpf_object *obj) { - int err; + struct elf_sec_desc *sec_desc; + const char *sec_name; + int err = 0, sec_idx; /* * Populate obj->maps with libbpf internal maps. 
*/ - if (obj->efile.data_shndx >= 0) { - err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, - obj->efile.data_shndx, - obj->efile.data->d_buf, - obj->efile.data->d_size); - if (err) - return err; - } - if (obj->efile.rodata_shndx >= 0) { - err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, - obj->efile.rodata_shndx, - obj->efile.rodata->d_buf, - obj->efile.rodata->d_size); - if (err) - return err; - - obj->rodata_map_idx = obj->nr_maps - 1; - } - if (obj->efile.bss_shndx >= 0) { - err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, - obj->efile.bss_shndx, - NULL, - obj->efile.bss->d_size); + for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { + sec_desc = &obj->efile.secs[sec_idx]; + + switch (sec_desc->sec_type) { + case SEC_DATA: + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, + sec_name, sec_idx, + sec_desc->data->d_buf, + sec_desc->data->d_size); + break; + case SEC_RODATA: + obj->has_rodata = true; + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, + sec_name, sec_idx, + sec_desc->data->d_buf, + sec_desc->data->d_size); + break; + case SEC_BSS: + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, + sec_name, sec_idx, + NULL, + sec_desc->data->d_size); + break; + default: + /* skip */ + break; + } if (err) return err; } @@ -1664,7 +1753,7 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj, void *ext_val; __u64 num; - if (strncmp(buf, "CONFIG_", 7)) + if (!str_has_pfx(buf, "CONFIG_")) return 0; sep = strchr(buf, '='); @@ -1798,7 +1887,7 @@ static int bpf_object__init_kconfig_map(struct bpf_object *obj) map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, - obj->efile.symbols_shndx, + ".kconfig", obj->efile.symbols_shndx, NULL, map_sz); if (err) return err; @@ -1836,13 +1925,13 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) * * TODO: Detect array of map and report error. */ - nr_syms = symbols->d_size / sizeof(GElf_Sym); + nr_syms = symbols->d_size / sizeof(Elf64_Sym); for (i = 0; i < nr_syms; i++) { - GElf_Sym sym; + Elf64_Sym *sym = elf_sym_by_idx(obj, i); - if (!gelf_getsym(symbols, i, &sym)) + if (sym->st_shndx != obj->efile.maps_shndx) continue; - if (sym.st_shndx != obj->efile.maps_shndx) + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) continue; nr_maps++; } @@ -1859,39 +1948,38 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) /* Fill obj->maps using data in "maps" section. 
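/* Illustrative sketch (not part of the patch): with
 * bpf_object__init_global_data_maps() above creating one internal map per
 * data section, globals may live in custom .data.* and .rodata.* sections,
 * each becoming its own internal map named after the ELF section (without
 * the object name prefix). Section and variable names are made up; assumes
 * clang -target bpf and <bpf/bpf_helpers.h>.
 */
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

int plain_global = 1;				/* default "<obj>.data" map */
int my_state SEC(".data.my_state") = 42;	/* ".data.my_state" map */
const int my_cfg SEC(".rodata.my_cfg") = 7;	/* ".rodata.my_cfg" map */

SEC("tp/syscalls/sys_enter_getpid")
int handle(void *ctx)
{
	/* accesses to these globals are relocated by libbpf to the
	 * corresponding internal maps
	 */
	return my_state + my_cfg + plain_global;
}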
*/ for (i = 0; i < nr_syms; i++) { - GElf_Sym sym; + Elf64_Sym *sym = elf_sym_by_idx(obj, i); const char *map_name; struct bpf_map_def *def; struct bpf_map *map; - if (!gelf_getsym(symbols, i, &sym)) + if (sym->st_shndx != obj->efile.maps_shndx) continue; - if (sym.st_shndx != obj->efile.maps_shndx) + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) continue; map = bpf_object__add_map(obj); if (IS_ERR(map)) return PTR_ERR(map); - map_name = elf_sym_str(obj, sym.st_name); + map_name = elf_sym_str(obj, sym->st_name); if (!map_name) { pr_warn("failed to get map #%d name sym string for obj %s\n", i, obj->path); return -LIBBPF_ERRNO__FORMAT; } - if (GELF_ST_TYPE(sym.st_info) == STT_SECTION - || GELF_ST_BIND(sym.st_info) == STB_LOCAL) { + if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { pr_warn("map '%s' (legacy): static maps are not supported\n", map_name); return -ENOTSUP; } map->libbpf_type = LIBBPF_MAP_UNSPEC; - map->sec_idx = sym.st_shndx; - map->sec_offset = sym.st_value; + map->sec_idx = sym->st_shndx; + map->sec_offset = sym->st_value; pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", map_name, map->sec_idx, map->sec_offset); - if (sym.st_value + map_def_sz > data->d_size) { + if (sym->st_value + map_def_sz > data->d_size) { pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", obj->path, map_name); return -EINVAL; @@ -1899,11 +1987,11 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) map->name = strdup(map_name); if (!map->name) { - pr_warn("failed to alloc map name\n"); + pr_warn("map '%s': failed to alloc map name\n", map_name); return -ENOMEM; } pr_debug("map %d is \"%s\"\n", i, map->name); - def = (struct bpf_map_def *)(data->d_buf + sym.st_value); + def = (struct bpf_map_def *)(data->d_buf + sym->st_value); /* * If the definition of the map in the object file fits in * bpf_map_def, copy it. 
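/* Illustrative sketch (not part of the patch): a BTF-defined map using the
 * new map_extra field parsed by parse_btf_map_def() below. This assumes a
 * kernel and UAPI providing BPF_MAP_TYPE_BLOOM_FILTER, where map_extra is
 * used for the number of hash functions; the map name is made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__type(value, __u32);
	__uint(max_entries, 1000);
	__uint(map_extra, 3);	/* number of hash functions */
} my_filter SEC(".maps");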
Any extra fields in our version @@ -1987,6 +2075,7 @@ static const char *__btf_kind_str(__u16 kind) case BTF_KIND_VAR: return "var"; case BTF_KIND_DATASEC: return "datasec"; case BTF_KIND_FLOAT: return "float"; + case BTF_KIND_DECL_TAG: return "decl_tag"; default: return "unknown"; } } @@ -2236,6 +2325,13 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, } map_def->pinning = val; map_def->parts |= MAP_DEF_PINNING; + } else if (strcmp(name, "map_extra") == 0) { + __u32 map_extra; + + if (!get_map_field_int(map_name, btf, m, &map_extra)) + return -EINVAL; + map_def->map_extra = map_extra; + map_def->parts |= MAP_DEF_MAP_EXTRA; } else { if (strict) { pr_warn("map '%s': unknown field '%s'.\n", map_name, name); @@ -2260,6 +2356,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def map->def.value_size = def->value_size; map->def.max_entries = def->max_entries; map->def.map_flags = def->map_flags; + map->map_extra = def->map_extra; map->numa_node = def->numa_node; map->btf_key_type_id = def->key_type_id; @@ -2283,7 +2380,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def if (def->parts & MAP_DEF_MAX_ENTRIES) pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); if (def->parts & MAP_DEF_MAP_FLAGS) - pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags); + pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); + if (def->parts & MAP_DEF_MAP_EXTRA) + pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, + (unsigned long long)def->map_extra); if (def->parts & MAP_DEF_PINNING) pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); if (def->parts & MAP_DEF_NUMA_NODE) @@ -2420,8 +2520,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, return -EINVAL; } - nr_types = btf__get_nr_types(obj->btf); - for (i = 1; i <= nr_types; i++) { + nr_types = btf__type_cnt(obj->btf); + for (i = 1; i < nr_types; i++) { t = btf__type_by_id(obj->btf, i); if (!btf_is_datasec(t)) continue; @@ -2472,12 +2572,13 @@ static int bpf_object__init_maps(struct bpf_object *obj, static bool section_have_execinstr(struct bpf_object *obj, int idx) { - GElf_Shdr sh; + Elf64_Shdr *sh; - if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh)) + sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); + if (!sh) return false; - return sh.sh_flags & SHF_EXECINSTR; + return sh->sh_flags & SHF_EXECINSTR; } static bool btf_needs_sanitization(struct bpf_object *obj) @@ -2486,8 +2587,9 @@ static bool btf_needs_sanitization(struct bpf_object *obj) bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); - return !has_func || !has_datasec || !has_func_global || !has_float; + return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag; } static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) @@ -2496,14 +2598,15 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); struct btf_type *t; int i, j, vlen; - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { t 
= (struct btf_type *)btf__type_by_id(btf, i); - if (!has_datasec && btf_is_var(t)) { - /* replace VAR with INT */ + if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { + /* replace VAR/DECL_TAG with INT */ t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); /* * using size = 1 is the safest choice, 4 will be too @@ -2610,6 +2713,104 @@ static int bpf_object__init_btf(struct bpf_object *obj, return 0; } +static int compare_vsi_off(const void *_a, const void *_b) +{ + const struct btf_var_secinfo *a = _a; + const struct btf_var_secinfo *b = _b; + + return a->offset - b->offset; +} + +static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, + struct btf_type *t) +{ + __u32 size = 0, off = 0, i, vars = btf_vlen(t); + const char *name = btf__name_by_offset(btf, t->name_off); + const struct btf_type *t_var; + struct btf_var_secinfo *vsi; + const struct btf_var *var; + int ret; + + if (!name) { + pr_debug("No name found in string section for DATASEC kind.\n"); + return -ENOENT; + } + + /* .extern datasec size and var offsets were set correctly during + * extern collection step, so just skip straight to sorting variables + */ + if (t->size) + goto sort_vars; + + ret = find_elf_sec_sz(obj, name, &size); + if (ret || !size || (t->size && t->size != size)) { + pr_debug("Invalid size for section %s: %u bytes\n", name, size); + return -ENOENT; + } + + t->size = size; + + for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { + t_var = btf__type_by_id(btf, vsi->type); + var = btf_var(t_var); + + if (!btf_is_var(t_var)) { + pr_debug("Non-VAR type seen in section %s\n", name); + return -EINVAL; + } + + if (var->linkage == BTF_VAR_STATIC) + continue; + + name = btf__name_by_offset(btf, t_var->name_off); + if (!name) { + pr_debug("No name found in string section for VAR kind\n"); + return -ENOENT; + } + + ret = find_elf_var_offset(obj, name, &off); + if (ret) { + pr_debug("No offset found in symbol table for VAR %s\n", + name); + return -ENOENT; + } + + vsi->offset = off; + } + +sort_vars: + qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); + return 0; +} + +static int btf_finalize_data(struct bpf_object *obj, struct btf *btf) +{ + int err = 0; + __u32 i, n = btf__type_cnt(btf); + + for (i = 1; i < n; i++) { + struct btf_type *t = btf_type_by_id(btf, i); + + /* Loader needs to fix up some of the things compiler + * couldn't get its hands on while emitting BTF. This + * is section size and global variable offset. We use + * the info from the ELF itself for this purpose. 
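/* Illustrative sketch (not part of the patch): inspecting the result of the
 * DATASEC fixup from user space. After bpf_object__open_file(), each data
 * section's DATASEC type carries the ELF-derived section size and the
 * per-variable offsets, sorted by offset. Only public btf.h/libbpf.h
 * accessors are used; the object path comes from argv.
 */
#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>

static void dump_datasecs(const struct btf *btf)
{
	__u32 i, j, n = btf__type_cnt(btf);

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const struct btf_var_secinfo *vsi;

		if (!btf_is_datasec(t))
			continue;

		printf("datasec '%s', size %u\n",
		       btf__name_by_offset(btf, t->name_off), t->size);
		vsi = btf_var_secinfos(t);
		for (j = 0; j < btf_vlen(t); j++, vsi++)
			printf("  var [%u] offset %u size %u\n",
			       vsi->type, vsi->offset, vsi->size);
	}
}

int main(int argc, char **argv)
{
	struct bpf_object *obj;

	if (argc < 2)
		return 1;
	obj = bpf_object__open_file(argv[1], NULL);
	if (libbpf_get_error(obj))
		return 1;
	dump_datasecs(bpf_object__btf(obj));
	bpf_object__close(obj);
	return 0;
}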
+ */ + if (btf_is_datasec(t)) { + err = btf_fixup_datasec(obj, btf, t); + if (err) + break; + } + } + + return libbpf_err(err); +} + +int btf__finalize_data(struct bpf_object *obj, struct btf *btf) +{ + return btf_finalize_data(obj, btf); +} + static int bpf_object__finalize_btf(struct bpf_object *obj) { int err; @@ -2617,7 +2818,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) if (!obj->btf) return 0; - err = btf__finalize_data(obj, obj->btf); + err = btf_finalize_data(obj, obj->btf); if (err) { pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); return err; @@ -2727,8 +2928,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) continue; - n = btf__get_nr_types(obj->btf); - for (j = 1; j <= n; j++) { + n = btf__type_cnt(obj->btf); + for (j = 1; j < n; j++) { t = btf_type_by_id(obj->btf, j); if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) continue; @@ -2748,7 +2949,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) __u32 sz; /* clone BTF to sanitize a copy and leave the original intact */ - raw_data = btf__get_raw_data(obj->btf, &sz); + raw_data = btf__raw_data(obj->btf, &sz); kern_btf = btf__new(raw_data, sz); err = libbpf_get_error(kern_btf); if (err) @@ -2761,7 +2962,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) if (obj->gen_loader) { __u32 raw_size = 0; - const void *raw_data = btf__get_raw_data(kern_btf, &raw_size); + const void *raw_data = btf__raw_data(kern_btf, &raw_size); if (!raw_data) return -ENOMEM; @@ -2853,32 +3054,36 @@ static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) return NULL; } -static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr) +static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) { + Elf64_Shdr *shdr; + if (!scn) - return -EINVAL; + return NULL; - if (gelf_getshdr(scn, hdr) != hdr) { + shdr = elf64_getshdr(scn); + if (!shdr) { pr_warn("elf: failed to get section(%zu) header from %s: %s\n", elf_ndxscn(scn), obj->path, elf_errmsg(-1)); - return -EINVAL; + return NULL; } - return 0; + return shdr; } static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) { const char *name; - GElf_Shdr sh; + Elf64_Shdr *sh; if (!scn) return NULL; - if (elf_sec_hdr(obj, scn, &sh)) + sh = elf_sec_hdr(obj, scn); + if (!sh) return NULL; - name = elf_sec_str(obj, sh.sh_name); + name = elf_sec_str(obj, sh->sh_name); if (!name) { pr_warn("elf: failed to get section(%zu) name from %s: %s\n", elf_ndxscn(scn), obj->path, elf_errmsg(-1)); @@ -2906,13 +3111,29 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) return data; } +static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) +{ + if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) + return NULL; + + return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; +} + +static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) +{ + if (idx >= data->d_size / sizeof(Elf64_Rel)) + return NULL; + + return (Elf64_Rel *)data->d_buf + idx; +} + static bool is_sec_name_dwarf(const char *name) { /* approximation, but the actual list is too long */ - return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; + return str_has_pfx(name, ".debug_"); } -static bool ignore_elf_section(GElf_Shdr *hdr, const char *name) +static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) { /* no special handling of .strtab */ if (hdr->sh_type == 
SHT_STRTAB) @@ -2931,7 +3152,7 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name) if (is_sec_name_dwarf(name)) return true; - if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { + if (str_has_pfx(name, ".rel")) { name += sizeof(".rel") - 1; /* DWARF section relocations */ if (is_sec_name_dwarf(name)) @@ -2960,6 +3181,7 @@ static int cmp_progs(const void *_a, const void *_b) static int bpf_object__elf_collect(struct bpf_object *obj) { + struct elf_sec_desc *sec_desc; Elf *elf = obj->efile.elf; Elf_Data *btf_ext_data = NULL; Elf_Data *btf_data = NULL; @@ -2967,17 +3189,27 @@ static int bpf_object__elf_collect(struct bpf_object *obj) const char *name; Elf_Data *data; Elf_Scn *scn; - GElf_Shdr sh; + Elf64_Shdr *sh; + + /* ELF section indices are 1-based, so allocate +1 element to keep + * indexing simple. Also include 0th invalid section into sec_cnt for + * simpler and more traditional iteration logic. + */ + obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum; + obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); + if (!obj->efile.secs) + return -ENOMEM; /* a bunch of ELF parsing functionality depends on processing symbols, * so do the first pass and find the symbol table */ scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { - if (elf_sec_hdr(obj, scn, &sh)) + sh = elf_sec_hdr(obj, scn); + if (!sh) return -LIBBPF_ERRNO__FORMAT; - if (sh.sh_type == SHT_SYMTAB) { + if (sh->sh_type == SHT_SYMTAB) { if (obj->efile.symbols) { pr_warn("elf: multiple symbol tables in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; @@ -2987,24 +3219,34 @@ static int bpf_object__elf_collect(struct bpf_object *obj) if (!data) return -LIBBPF_ERRNO__FORMAT; + idx = elf_ndxscn(scn); + obj->efile.symbols = data; - obj->efile.symbols_shndx = elf_ndxscn(scn); - obj->efile.strtabidx = sh.sh_link; + obj->efile.symbols_shndx = idx; + obj->efile.strtabidx = sh->sh_link; } } + if (!obj->efile.symbols) { + pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", + obj->path); + return -ENOENT; + } + scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { - idx++; + idx = elf_ndxscn(scn); + sec_desc = &obj->efile.secs[idx]; - if (elf_sec_hdr(obj, scn, &sh)) + sh = elf_sec_hdr(obj, scn); + if (!sh) return -LIBBPF_ERRNO__FORMAT; - name = elf_sec_str(obj, sh.sh_name); + name = elf_sec_str(obj, sh->sh_name); if (!name) return -LIBBPF_ERRNO__FORMAT; - if (ignore_elf_section(&sh, name)) + if (ignore_elf_section(sh, name)) continue; data = elf_sec_data(obj, scn); @@ -3013,8 +3255,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", idx, name, (unsigned long)data->d_size, - (int)sh.sh_link, (unsigned long)sh.sh_flags, - (int)sh.sh_type); + (int)sh->sh_link, (unsigned long)sh->sh_flags, + (int)sh->sh_type); if (strcmp(name, "license") == 0) { err = bpf_object__init_license(obj, data->d_buf, data->d_size); @@ -3032,21 +3274,25 @@ static int bpf_object__elf_collect(struct bpf_object *obj) btf_data = data; } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { btf_ext_data = data; - } else if (sh.sh_type == SHT_SYMTAB) { + } else if (sh->sh_type == SHT_SYMTAB) { /* already processed during the first pass above */ - } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { - if (sh.sh_flags & SHF_EXECINSTR) { + } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { + if (sh->sh_flags & SHF_EXECINSTR) { if (strcmp(name, ".text") == 0) obj->efile.text_shndx = idx; err = 
bpf_object__add_programs(obj, data, name, idx); if (err) return err; - } else if (strcmp(name, DATA_SEC) == 0) { - obj->efile.data = data; - obj->efile.data_shndx = idx; - } else if (strcmp(name, RODATA_SEC) == 0) { - obj->efile.rodata = data; - obj->efile.rodata_shndx = idx; + } else if (strcmp(name, DATA_SEC) == 0 || + str_has_pfx(name, DATA_SEC ".")) { + sec_desc->sec_type = SEC_DATA; + sec_desc->shdr = sh; + sec_desc->data = data; + } else if (strcmp(name, RODATA_SEC) == 0 || + str_has_pfx(name, RODATA_SEC ".")) { + sec_desc->sec_type = SEC_RODATA; + sec_desc->shdr = sh; + sec_desc->data = data; } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { obj->efile.st_ops_data = data; obj->efile.st_ops_shndx = idx; @@ -3054,37 +3300,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj) pr_info("elf: skipping unrecognized data section(%d) %s\n", idx, name); } - } else if (sh.sh_type == SHT_REL) { - int nr_sects = obj->efile.nr_reloc_sects; - void *sects = obj->efile.reloc_sects; - int sec = sh.sh_info; /* points to other section */ + } else if (sh->sh_type == SHT_REL) { + int targ_sec_idx = sh->sh_info; /* points to other section */ /* Only do relo for section with exec instructions */ - if (!section_have_execinstr(obj, sec) && + if (!section_have_execinstr(obj, targ_sec_idx) && strcmp(name, ".rel" STRUCT_OPS_SEC) && strcmp(name, ".rel" MAPS_ELF_SEC)) { pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", - idx, name, sec, - elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: ""); + idx, name, targ_sec_idx, + elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: ""); continue; } - sects = libbpf_reallocarray(sects, nr_sects + 1, - sizeof(*obj->efile.reloc_sects)); - if (!sects) - return -ENOMEM; - - obj->efile.reloc_sects = sects; - obj->efile.nr_reloc_sects++; - - obj->efile.reloc_sects[nr_sects].shdr = sh; - obj->efile.reloc_sects[nr_sects].data = data; - } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { - obj->efile.bss = data; - obj->efile.bss_shndx = idx; + sec_desc->sec_type = SEC_RELO; + sec_desc->shdr = sh; + sec_desc->data = data; + } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { + sec_desc->sec_type = SEC_BSS; + sec_desc->shdr = sh; + sec_desc->data = data; } else { pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, - (size_t)sh.sh_size); + (size_t)sh->sh_size); } } @@ -3100,19 +3338,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj) return bpf_object__init_btf(obj, btf_data, btf_ext_data); } -static bool sym_is_extern(const GElf_Sym *sym) +static bool sym_is_extern(const Elf64_Sym *sym) { - int bind = GELF_ST_BIND(sym->st_info); + int bind = ELF64_ST_BIND(sym->st_info); /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ return sym->st_shndx == SHN_UNDEF && (bind == STB_GLOBAL || bind == STB_WEAK) && - GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; + ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; } -static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx) +static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) { - int bind = GELF_ST_BIND(sym->st_info); - int type = GELF_ST_TYPE(sym->st_info); + int bind = ELF64_ST_BIND(sym->st_info); + int type = ELF64_ST_TYPE(sym->st_info); /* in .text section */ if (sym->st_shndx != text_shndx) @@ -3135,8 +3373,8 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name) if (!btf) return -ESRCH; - n = btf__get_nr_types(btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { t = 
btf__type_by_id(btf, i); if (!btf_is_var(t) && !btf_is_func(t)) @@ -3167,8 +3405,8 @@ static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { if (!btf) return -ESRCH; - n = btf__get_nr_types(btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (!btf_is_datasec(t)) @@ -3252,8 +3490,8 @@ static int find_int_btf_id(const struct btf *btf) const struct btf_type *t; int i, n; - n = btf__get_nr_types(btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (btf_is_int(t) && btf_int_bits(t) == 32) @@ -3310,30 +3548,31 @@ static int bpf_object__collect_externs(struct bpf_object *obj) int i, n, off, dummy_var_btf_id; const char *ext_name, *sec_name; Elf_Scn *scn; - GElf_Shdr sh; + Elf64_Shdr *sh; if (!obj->efile.symbols) return 0; scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); - if (elf_sec_hdr(obj, scn, &sh)) + sh = elf_sec_hdr(obj, scn); + if (!sh) return -LIBBPF_ERRNO__FORMAT; dummy_var_btf_id = add_dummy_ksym_var(obj->btf); if (dummy_var_btf_id < 0) return dummy_var_btf_id; - n = sh.sh_size / sh.sh_entsize; + n = sh->sh_size / sh->sh_entsize; pr_debug("looking for externs among %d symbols...\n", n); for (i = 0; i < n; i++) { - GElf_Sym sym; + Elf64_Sym *sym = elf_sym_by_idx(obj, i); - if (!gelf_getsym(obj->efile.symbols, i, &sym)) + if (!sym) return -LIBBPF_ERRNO__FORMAT; - if (!sym_is_extern(&sym)) + if (!sym_is_extern(sym)) continue; - ext_name = elf_sym_str(obj, sym.st_name); + ext_name = elf_sym_str(obj, sym->st_name); if (!ext_name || !ext_name[0]) continue; @@ -3355,7 +3594,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) t = btf__type_by_id(obj->btf, ext->btf_id); ext->name = btf__name_by_offset(obj->btf, t->name_off); ext->sym_idx = i; - ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; + ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); if (ext->sec_btf_id <= 0) { @@ -3393,11 +3632,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj) return -ENOTSUP; } } else if (strcmp(sec_name, KSYMS_SEC) == 0) { - if (btf_is_func(t) && ext->is_weak) { - pr_warn("extern weak function %s is unsupported\n", - ext->name); - return -ENOTSUP; - } ksym_sec = sec; ext->type = EXT_KSYM; skip_mods_and_typedefs(obj->btf, t->type, @@ -3565,9 +3799,14 @@ bpf_object__find_program_by_name(const struct bpf_object *obj, static bool bpf_object__shndx_is_data(const struct bpf_object *obj, int shndx) { - return shndx == obj->efile.data_shndx || - shndx == obj->efile.bss_shndx || - shndx == obj->efile.rodata_shndx; + switch (obj->efile.secs[shndx].sec_type) { + case SEC_BSS: + case SEC_DATA: + case SEC_RODATA: + return true; + default: + return false; + } } static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, @@ -3580,22 +3819,25 @@ static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, static enum libbpf_map_type bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) { - if (shndx == obj->efile.data_shndx) - return LIBBPF_MAP_DATA; - else if (shndx == obj->efile.bss_shndx) + if (shndx == obj->efile.symbols_shndx) + return LIBBPF_MAP_KCONFIG; + + switch (obj->efile.secs[shndx].sec_type) { + case SEC_BSS: return LIBBPF_MAP_BSS; - else if (shndx == obj->efile.rodata_shndx) + case SEC_DATA: + return LIBBPF_MAP_DATA; + case SEC_RODATA: return LIBBPF_MAP_RODATA; - else if (shndx == obj->efile.symbols_shndx) - return 
LIBBPF_MAP_KCONFIG; - else + default: return LIBBPF_MAP_UNSPEC; + } } static int bpf_program__record_reloc(struct bpf_program *prog, struct reloc_desc *reloc_desc, __u32 insn_idx, const char *sym_name, - const GElf_Sym *sym, const GElf_Rel *rel) + const Elf64_Sym *sym, const Elf64_Rel *rel) { struct bpf_insn *insn = &prog->insns[insn_idx]; size_t map_idx, nr_maps = prog->obj->nr_maps; @@ -3612,7 +3854,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, } if (sym_is_extern(sym)) { - int sym_idx = GELF_R_SYM(rel->r_info); + int sym_idx = ELF64_R_SYM(rel->r_info); int i, n = obj->nr_extern; struct extern_desc *ext; @@ -3725,7 +3967,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, } for (map_idx = 0; map_idx < nr_maps; map_idx++) { map = &obj->maps[map_idx]; - if (map->libbpf_type != type) + if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) continue; pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", prog->name, map_idx, map->name, map->sec_idx, @@ -3777,9 +4019,8 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, } static int -bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data) +bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { - Elf_Data *symbols = obj->efile.symbols; const char *relo_sec_name, *sec_name; size_t sec_idx = shdr->sh_info; struct bpf_program *prog; @@ -3789,8 +4030,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data __u32 insn_idx; Elf_Scn *scn; Elf_Data *scn_data; - GElf_Sym sym; - GElf_Rel rel; + Elf64_Sym *sym; + Elf64_Rel *rel; scn = elf_sec_by_idx(obj, sec_idx); scn_data = elf_sec_data(obj, scn); @@ -3805,33 +4046,36 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { - if (!gelf_getrel(data, i, &rel)) { + rel = elf_rel_by_idx(data, i); + if (!rel) { pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); return -LIBBPF_ERRNO__FORMAT; } - if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { + + sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + if (!sym) { pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n", - relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); + relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); return -LIBBPF_ERRNO__FORMAT; } - if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) { + if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", - relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i); + relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); return -LIBBPF_ERRNO__FORMAT; } - insn_idx = rel.r_offset / BPF_INSN_SZ; + insn_idx = rel->r_offset / BPF_INSN_SZ; /* relocations against static functions are recorded as * relocations against the section that contains a function; * in such case, symbol will be STT_SECTION and sym.st_name * will point to empty string (0), so fetch section name * instead */ - if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0) - sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx)); + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) + sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); else - sym_name = elf_sym_str(obj, sym.st_name); + sym_name = elf_sym_str(obj, sym->st_name); sym_name = sym_name ?: "sec_insn_off; err = 
bpf_program__record_reloc(prog, &relos[prog->nr_reloc], - insn_idx, sym_name, &sym, &rel); + insn_idx, sym_name, sym, rel); if (err) return err; @@ -3885,8 +4129,7 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) * LLVM annotates global data differently in BTF, that is, * only as '.data', '.bss' or '.rodata'. */ - ret = btf__find_by_name(obj->btf, - libbpf_type_to_btf_name[map->libbpf_type]); + ret = btf__find_by_name(obj->btf, map->real_name); } if (ret < 0) return ret; @@ -3979,6 +4222,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) map->btf_key_type_id = info.btf_key_type_id; map->btf_value_type_id = info.btf_value_type_id; map->reused = true; + map->map_extra = info.map_extra; return 0; @@ -4207,6 +4451,23 @@ static int probe_kern_btf_float(void) strs, sizeof(strs))); } +static int probe_kern_btf_decl_tag(void) +{ + static const char strs[] = "\0tag"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* VAR x */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), + BTF_VAR_STATIC, + /* attr */ + BTF_TYPE_DECL_TAG_ENC(1, 2, -1), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + static int probe_kern_array_mmap(void) { struct bpf_create_map_attr attr = { @@ -4423,6 +4684,9 @@ static struct kern_feature_desc { [FEAT_PERF_LINK] = { "BPF perf link support", probe_perf_link, }, + [FEAT_BTF_DECL_TAG] = { + "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, + }, }; static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) @@ -4473,7 +4737,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) map_info.key_size == map->def.key_size && map_info.value_size == map->def.value_size && map_info.max_entries == map->def.max_entries && - map_info.map_flags == map->def.map_flags); + map_info.map_flags == map->def.map_flags && + map_info.map_extra == map->map_extra); } static int @@ -4556,7 +4821,7 @@ static void bpf_map__destroy(struct bpf_map *map); static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { - struct bpf_create_map_attr create_attr; + struct bpf_create_map_params create_attr; struct bpf_map_def *def = &map->def; int err = 0; @@ -4570,6 +4835,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.key_size = def->key_size; create_attr.value_size = def->value_size; create_attr.numa_node = map->numa_node; + create_attr.map_extra = map->map_extra; if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { int nr_cpus; @@ -4613,6 +4879,30 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.inner_map_fd = map->inner_map_fd; } + switch (def->type) { + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case BPF_MAP_TYPE_STACK_TRACE: + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: + case BPF_MAP_TYPE_CPUMAP: + case BPF_MAP_TYPE_XSKMAP: + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_SOCKHASH: + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + case BPF_MAP_TYPE_RINGBUF: + create_attr.btf_fd = 0; + create_attr.btf_key_type_id = 0; + create_attr.btf_value_type_id = 0; + map->btf_key_type_id = 0; + map->btf_value_type_id = 0; + default: + break; + } + if (obj->gen_loader) { bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? 
-1 : map - obj->maps); /* Pretend to have valid FD to pass various fd >= 0 checks. @@ -4620,7 +4910,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b */ map->fd = 0; } else { - map->fd = bpf_create_map_xattr(&create_attr); + map->fd = libbpf__bpf_create_map_xattr(&create_attr); } if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { @@ -4635,7 +4925,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; - map->fd = bpf_create_map_xattr(&create_attr); + map->fd = libbpf__bpf_create_map_xattr(&create_attr); } err = map->fd < 0 ? -errno : 0; @@ -4812,8 +5102,8 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, size_t targ_essent_len; int n, i; - n = btf__get_nr_types(targ_btf); - for (i = targ_start_id; i <= n; i++) { + n = btf__type_cnt(targ_btf); + for (i = targ_start_id; i < n; i++) { t = btf__type_by_id(targ_btf, i); if (btf_kind(t) != btf_kind(local_cand->t)) continue; @@ -4988,7 +5278,7 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l err = bpf_core_add_cands(&local_cand, local_essent_len, obj->btf_modules[i].btf, obj->btf_modules[i].name, - btf__get_nr_types(obj->btf_vmlinux) + 1, + btf__type_cnt(obj->btf_vmlinux), cands); if (err) goto err_out; @@ -5132,7 +5422,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog, * relocated, so it's enough to just subtract in-section offset */ insn_idx = insn_idx - prog->sec_insn_off; - if (insn_idx > prog->insns_cnt) + if (insn_idx >= prog->insns_cnt) return -EINVAL; insn = &prog->insns[insn_idx]; @@ -5326,7 +5616,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) case RELO_EXTERN_FUNC: ext = &obj->externs[relo->sym_off]; insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; - insn[0].imm = ext->ksym.kernel_btf_id; + if (ext->is_set) { + insn[0].imm = ext->ksym.kernel_btf_id; + insn[0].off = ext->ksym.btf_fd_idx; + } else { /* unresolved weak kfunc */ + insn[0].imm = 0; + insn[0].off = 0; + } break; case RELO_SUBPROG_ADDR: if (insn[0].src_reg != BPF_PSEUDO_FUNC) { @@ -5851,10 +6147,10 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) } static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, - GElf_Shdr *shdr, Elf_Data *data); + Elf64_Shdr *shdr, Elf_Data *data); static int bpf_object__collect_map_relos(struct bpf_object *obj, - GElf_Shdr *shdr, Elf_Data *data) + Elf64_Shdr *shdr, Elf_Data *data) { const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); int i, j, nrels, new_sz; @@ -5863,10 +6159,9 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, struct bpf_map *map = NULL, *targ_map; const struct btf_member *member; const char *name, *mname; - Elf_Data *symbols; unsigned int moff; - GElf_Sym sym; - GElf_Rel rel; + Elf64_Sym *sym; + Elf64_Rel *rel; void *tmp; if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) @@ -5875,28 +6170,30 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, if (!sec) return -EINVAL; - symbols = obj->efile.symbols; nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { - if (!gelf_getrel(data, i, &rel)) { + rel = elf_rel_by_idx(data, i); + if (!rel) { pr_warn(".maps relo #%d: failed to get ELF relo\n", i); return -LIBBPF_ERRNO__FORMAT; } - if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { + + sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + if (!sym) { pr_warn(".maps 
relo #%d: symbol %zx not found\n", - i, (size_t)GELF_R_SYM(rel.r_info)); + i, (size_t)ELF64_R_SYM(rel->r_info)); return -LIBBPF_ERRNO__FORMAT; } - name = elf_sym_str(obj, sym.st_name) ?: ""; - if (sym.st_shndx != obj->efile.btf_maps_shndx) { + name = elf_sym_str(obj, sym->st_name) ?: ""; + if (sym->st_shndx != obj->efile.btf_maps_shndx) { pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", i, name); return -LIBBPF_ERRNO__RELOC; } - pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n", - i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value, - (size_t)rel.r_offset, sym.st_name, name); + pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", + i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, + (size_t)rel->r_offset, sym->st_name, name); for (j = 0; j < obj->nr_maps; j++) { map = &obj->maps[j]; @@ -5904,13 +6201,13 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, continue; vi = btf_var_secinfos(sec) + map->btf_var_idx; - if (vi->offset <= rel.r_offset && - rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) + if (vi->offset <= rel->r_offset && + rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) break; } if (j == obj->nr_maps) { - pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n", - i, name, (size_t)rel.r_offset); + pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", + i, name, (size_t)rel->r_offset); return -EINVAL; } @@ -5937,10 +6234,10 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -EINVAL; moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; - if (rel.r_offset - vi->offset < moff) + if (rel->r_offset - vi->offset < moff) return -EINVAL; - moff = rel.r_offset - vi->offset - moff; + moff = rel->r_offset - vi->offset - moff; /* here we use BPF pointer size, which is always 64 bit, as we * are parsing ELF that was built for BPF target */ @@ -5985,10 +6282,18 @@ static int bpf_object__collect_relos(struct bpf_object *obj) { int i, err; - for (i = 0; i < obj->efile.nr_reloc_sects; i++) { - GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; - Elf_Data *data = obj->efile.reloc_sects[i].data; - int idx = shdr->sh_info; + for (i = 0; i < obj->efile.sec_cnt; i++) { + struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; + Elf64_Shdr *shdr; + Elf_Data *data; + int idx; + + if (sec_desc->sec_type != SEC_RELO) + continue; + + shdr = sec_desc->shdr; + data = sec_desc->data; + idx = shdr->sh_info; if (shdr->sh_type != SHT_REL) { pr_warn("internal error at %d\n", __LINE__); @@ -6064,15 +6369,58 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program return 0; } +static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, + int *btf_obj_fd, int *btf_type_id); + +/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */ +static int libbpf_preload_prog(struct bpf_program *prog, + struct bpf_prog_load_params *attr, long cookie) +{ + enum sec_def_flags def = cookie; + + /* old kernels might not support specifying expected_attach_type */ + if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) + attr->expected_attach_type = 0; + + if (def & SEC_SLEEPABLE) + attr->prog_flags |= BPF_F_SLEEPABLE; + + if ((prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_LSM || + prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { + int btf_obj_fd = 0, btf_type_id = 0, err; + const char *attach_name; + + attach_name = strchr(prog->sec_name, '/') 
+ 1; + err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); + if (err) + return err; + + /* cache resolved BTF FD and BTF type ID in the prog */ + prog->attach_btf_obj_fd = btf_obj_fd; + prog->attach_btf_id = btf_type_id; + + /* but by now libbpf common logic is not utilizing + * prog->atach_btf_obj_fd/prog->attach_btf_id anymore because + * this callback is called after attrs were populated by + * libbpf, so this callback has to update attr explicitly here + */ + attr->attach_btf_obj_fd = btf_obj_fd; + attr->attach_btf_id = btf_type_id; + } + return 0; +} + static int load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, char *license, __u32 kern_version, int *pfd) { struct bpf_prog_load_params load_attr = {}; + struct bpf_object *obj = prog->obj; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; char *log_buf = NULL; - int btf_fd, ret; + int btf_fd, ret, err; if (prog->type == BPF_PROG_TYPE_UNSPEC) { /* @@ -6088,29 +6436,22 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, return -EINVAL; load_attr.prog_type = prog->type; - /* old kernels might not support specifying expected_attach_type */ - if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def && - prog->sec_def->is_exp_attach_type_optional) - load_attr.expected_attach_type = 0; - else - load_attr.expected_attach_type = prog->expected_attach_type; - if (kernel_supports(prog->obj, FEAT_PROG_NAME)) + load_attr.expected_attach_type = prog->expected_attach_type; + if (kernel_supports(obj, FEAT_PROG_NAME)) load_attr.name = prog->name; load_attr.insns = insns; load_attr.insn_cnt = insns_cnt; load_attr.license = license; load_attr.attach_btf_id = prog->attach_btf_id; - if (prog->attach_prog_fd) - load_attr.attach_prog_fd = prog->attach_prog_fd; - else - load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; + load_attr.attach_prog_fd = prog->attach_prog_fd; + load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; load_attr.attach_btf_id = prog->attach_btf_id; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; /* specify func_info/line_info only if kernel supports them */ - btf_fd = bpf_object__btf_fd(prog->obj); - if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) { + btf_fd = bpf_object__btf_fd(obj); + if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { load_attr.prog_btf_fd = btf_fd; load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; @@ -6121,10 +6462,21 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, } load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; + load_attr.fd_array = obj->fd_array; - if (prog->obj->gen_loader) { - bpf_gen__prog_load(prog->obj->gen_loader, &load_attr, - prog - prog->obj->programs); + /* adjust load_attr if sec_def provides custom preload callback */ + if (prog->sec_def && prog->sec_def->preload_fn) { + err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie); + if (err < 0) { + pr_warn("prog '%s': failed to prepare load attributes: %d\n", + prog->name, err); + return err; + } + } + + if (obj->gen_loader) { + bpf_gen__prog_load(obj->gen_loader, &load_attr, + prog - obj->programs); *pfd = -1; return 0; } @@ -6145,16 +6497,21 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, if (log_buf && load_attr.log_level) pr_debug("verifier log:\n%s", log_buf); - if (prog->obj->rodata_map_idx >= 0 && - 
kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) { - struct bpf_map *rodata_map = - &prog->obj->maps[prog->obj->rodata_map_idx]; + if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { + struct bpf_map *map; + int i; + + for (i = 0; i < obj->nr_maps; i++) { + map = &prog->obj->maps[i]; + if (map->libbpf_type != LIBBPF_MAP_RODATA) + continue; - if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("prog '%s': failed to bind .rodata map: %s\n", - prog->name, cp); - /* Don't fail hard if can't bind rodata. */ + if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("prog '%s': failed to bind .rodata map: %s\n", + prog->name, cp); + /* Don't fail hard if can't bind rodata. */ + } } } @@ -6218,16 +6575,13 @@ static int bpf_program__record_externs(struct bpf_program *prog) case RELO_EXTERN_VAR: if (ext->type != EXT_KSYM) continue; - if (!ext->ksym.type_id) { - pr_warn("typeless ksym %s is not supported yet\n", - ext->name); - return -ENOTSUP; - } - bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR, - relo->insn_idx); + bpf_gen__record_extern(obj->gen_loader, ext->name, + ext->is_weak, !ext->ksym.type_id, + BTF_KIND_VAR, relo->insn_idx); break; case RELO_EXTERN_FUNC: - bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC, + bpf_gen__record_extern(obj->gen_loader, ext->name, + ext->is_weak, false, BTF_KIND_FUNC, relo->insn_idx); break; default: @@ -6237,8 +6591,6 @@ static int bpf_program__record_externs(struct bpf_program *prog) return 0; } -static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id); - int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) { int err = 0, fd, i; @@ -6248,19 +6600,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) return libbpf_err(-EINVAL); } - if ((prog->type == BPF_PROG_TYPE_TRACING || - prog->type == BPF_PROG_TYPE_LSM || - prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { - int btf_obj_fd = 0, btf_type_id = 0; - - err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id); - if (err) - return libbpf_err(err); - - prog->attach_btf_obj_fd = btf_obj_fd; - prog->attach_btf_id = btf_type_id; - } - if (prog->instances.nr < 0 || !prog->instances.fds) { if (prog->preprocessor) { pr_warn("Internal error: can't load program '%s'\n", @@ -6328,8 +6667,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) out: if (err) pr_warn("failed to load program '%s'\n", prog->name); - zfree(&prog->insns); - prog->insns_cnt = 0; return libbpf_err(err); } @@ -6367,14 +6704,53 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) static const struct bpf_sec_def *find_sec_def(const char *sec_name); -static struct bpf_object * -__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, - const struct bpf_object_open_opts *opts) +static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) { - const char *obj_name, *kconfig, *btf_tmp_path; struct bpf_program *prog; - struct bpf_object *obj; - char tmp_name[64]; + int err; + + bpf_object__for_each_program(prog, obj) { + prog->sec_def = find_sec_def(prog->sec_name); + if (!prog->sec_def) { + /* couldn't guess, but user might manually specify */ + pr_debug("prog '%s': unrecognized ELF section name '%s'\n", + prog->name, prog->sec_name); + continue; + } + + 
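/* Illustrative sketch (not part of the patch): the "couldn't guess, but user
 * might manually specify" path above, seen from the caller's side. A program
 * placed in a section name libbpf does not recognize can be given its type
 * explicitly after open and before load. The object path, and the assumption
 * that every program in it is a tracepoint, are made up for the example.
 */
#include <bpf/libbpf.h>

static int open_and_load_custom(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("my_prog.bpf.o", NULL);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		/* e.g. programs placed in SEC("my_custom_section") */
		bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
	}

	return bpf_object__load(obj);
}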
bpf_program__set_type(prog, prog->sec_def->prog_type); + bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type); + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || + prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) + prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); +#pragma GCC diagnostic pop + + /* sec_def can have custom callback which should be called + * after bpf_program is initialized to adjust its properties + */ + if (prog->sec_def->init_fn) { + err = prog->sec_def->init_fn(prog, prog->sec_def->cookie); + if (err < 0) { + pr_warn("prog '%s': failed to initialize: %d\n", + prog->name, err); + return err; + } + } + } + + return 0; +} + +static struct bpf_object * +__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) +{ + const char *obj_name, *kconfig, *btf_tmp_path; + struct bpf_object *obj; + char tmp_name[64]; int err; if (elf_version(EV_CURRENT) == EV_NONE) { @@ -6430,30 +6806,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, err = err ? : bpf_object__collect_externs(obj); err = err ? : bpf_object__finalize_btf(obj); err = err ? : bpf_object__init_maps(obj, opts); + err = err ? : bpf_object_init_progs(obj, opts); err = err ? : bpf_object__collect_relos(obj); if (err) goto out; - bpf_object__elf_finish(obj); - - bpf_object__for_each_program(prog, obj) { - prog->sec_def = find_sec_def(prog->sec_name); - if (!prog->sec_def) { - /* couldn't guess, but user might manually specify */ - pr_debug("prog '%s': unrecognized ELF section name '%s'\n", - prog->name, prog->sec_name); - continue; - } - - if (prog->sec_def->is_sleepable) - prog->prog_flags |= BPF_F_SLEEPABLE; - bpf_program__set_type(prog, prog->sec_def->prog_type); - bpf_program__set_expected_attach_type(prog, - prog->sec_def->expected_attach_type); - if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || - prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) - prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); - } + bpf_object__elf_finish(obj); return obj; out: @@ -6529,7 +6887,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts)); } -int bpf_object__unload(struct bpf_object *obj) +static int bpf_object_unload(struct bpf_object *obj) { size_t i; @@ -6548,6 +6906,8 @@ int bpf_object__unload(struct bpf_object *obj) return 0; } +int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload"))); + static int bpf_object__sanitize_maps(struct bpf_object *obj) { struct bpf_map *m; @@ -6621,13 +6981,14 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, __u16 kind, struct btf **res_btf, - int *res_btf_fd) + struct module_btf **res_mod_btf) { - int i, id, btf_fd, err; + struct module_btf *mod_btf; struct btf *btf; + int i, id, err; btf = obj->btf_vmlinux; - btf_fd = 0; + mod_btf = NULL; id = btf__find_by_name_kind(btf, ksym_name, kind); if (id == -ENOENT) { @@ -6636,10 +6997,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, return err; for (i = 0; i < obj->btf_module_cnt; i++) { - btf = obj->btf_modules[i].btf; - /* we assume module BTF FD is always >0 */ - btf_fd = obj->btf_modules[i].fd; - id = btf__find_by_name_kind(btf, ksym_name, kind); + /* we assume module_btf's BTF FD 
is always >0 */ + mod_btf = &obj->btf_modules[i]; + btf = mod_btf->btf; + id = btf__find_by_name_kind_own(btf, ksym_name, kind); if (id != -ENOENT) break; } @@ -6648,7 +7009,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, return -ESRCH; *res_btf = btf; - *res_btf_fd = btf_fd; + *res_mod_btf = mod_btf; return id; } @@ -6657,14 +7018,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, { const struct btf_type *targ_var, *targ_type; __u32 targ_type_id, local_type_id; + struct module_btf *mod_btf = NULL; const char *targ_var_name; - int id, btf_fd = 0, err; struct btf *btf = NULL; + int id, err; - id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd); - if (id == -ESRCH && ext->is_weak) { - return 0; - } else if (id < 0) { + id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); + if (id < 0) { + if (id == -ESRCH && ext->is_weak) + return 0; pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", ext->name); return id; @@ -6696,7 +7058,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, } ext->is_set = true; - ext->ksym.kernel_btf_obj_fd = btf_fd; + ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; ext->ksym.kernel_btf_id = id; pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", ext->name, id, btf_kind_str(targ_var), targ_var_name); @@ -6708,26 +7070,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, struct extern_desc *ext) { int local_func_proto_id, kfunc_proto_id, kfunc_id; + struct module_btf *mod_btf = NULL; const struct btf_type *kern_func; struct btf *kern_btf = NULL; - int ret, kern_btf_fd = 0; + int ret; local_func_proto_id = ext->ksym.type_id; - kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, - &kern_btf, &kern_btf_fd); + kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf); if (kfunc_id < 0) { - pr_warn("extern (func ksym) '%s': not found in kernel BTF\n", + if (kfunc_id == -ESRCH && ext->is_weak) + return 0; + pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", ext->name); return kfunc_id; } - if (kern_btf != obj->btf_vmlinux) { - pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n", - ext->name); - return -ENOTSUP; - } - kern_func = btf__type_by_id(kern_btf, kfunc_id); kfunc_proto_id = kern_func->type; @@ -6739,9 +7097,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, return -EINVAL; } + /* set index for module BTF fd in fd_array, if unset */ + if (mod_btf && !mod_btf->fd_array_idx) { + /* insn->off is s16 */ + if (obj->fd_array_cnt == INT16_MAX) { + pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", + ext->name, mod_btf->fd_array_idx); + return -E2BIG; + } + /* Cannot use index 0 for module BTF fd */ + if (!obj->fd_array_cnt) + obj->fd_array_cnt = 1; + + ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), + obj->fd_array_cnt + 1); + if (ret) + return ret; + mod_btf->fd_array_idx = obj->fd_array_cnt; + /* we assume module BTF FD is always >0 */ + obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; + } + ext->is_set = true; - ext->ksym.kernel_btf_obj_fd = kern_btf_fd; ext->ksym.kernel_btf_id = kfunc_id; + ext->ksym.btf_fd_idx = mod_btf ? 
mod_btf->fd_array_idx : 0; pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", ext->name, kfunc_id); @@ -6807,8 +7186,7 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, if (err) return err; pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); - } else if (ext->type == EXT_KCFG && - strncmp(ext->name, "CONFIG_", 7) == 0) { + } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) { need_config = true; } else if (ext->type == EXT_KSYM) { if (ext->ksym.type_id) @@ -6902,6 +7280,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) err = bpf_gen__finish(obj->gen_loader); } + /* clean up fd_array */ + zfree(&obj->fd_array); + /* clean up module BTFs */ for (i = 0; i < obj->btf_module_cnt; i++) { close(obj->btf_modules[i].fd); @@ -6926,7 +7307,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) if (obj->maps[i].pinned && !obj->maps[i].reused) bpf_map__unpin(&obj->maps[i], NULL); - bpf_object__unload(obj); + bpf_object_unload(obj); pr_warn("failed to load object '%s'\n", obj->path); return libbpf_err(err); } @@ -6992,8 +7373,7 @@ static int check_path(const char *path) return err; } -int bpf_program__pin_instance(struct bpf_program *prog, const char *path, - int instance) +static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance) { char *cp, errmsg[STRERR_BUFSIZE]; int err; @@ -7028,8 +7408,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, return 0; } -int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, - int instance) +static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance) { int err; @@ -7057,6 +7436,12 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, return 0; } +__attribute__((alias("bpf_program_pin_instance"))) +int bpf_object__pin_instance(struct bpf_program *prog, const char *path, int instance); + +__attribute__((alias("bpf_program_unpin_instance"))) +int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance); + int bpf_program__pin(struct bpf_program *prog, const char *path) { int i, err; @@ -7081,7 +7466,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) if (prog->instances.nr == 1) { /* don't create subdirs when pinning single instance */ - return bpf_program__pin_instance(prog, path, 0); + return bpf_program_pin_instance(prog, path, 0); } for (i = 0; i < prog->instances.nr; i++) { @@ -7097,7 +7482,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) goto err_unpin; } - err = bpf_program__pin_instance(prog, buf, i); + err = bpf_program_pin_instance(prog, buf, i); if (err) goto err_unpin; } @@ -7115,7 +7500,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) else if (len >= PATH_MAX) continue; - bpf_program__unpin_instance(prog, buf, i); + bpf_program_unpin_instance(prog, buf, i); } rmdir(path); @@ -7143,7 +7528,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path) if (prog->instances.nr == 1) { /* don't create subdirs when pinning single instance */ - return bpf_program__unpin_instance(prog, path, 0); + return bpf_program_unpin_instance(prog, path, 0); } for (i = 0; i < prog->instances.nr; i++) { @@ -7156,7 +7541,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path) else if (len >= PATH_MAX) return libbpf_err(-ENAMETOOLONG); - err = bpf_program__unpin_instance(prog, buf, i); + err = bpf_program_unpin_instance(prog, buf, i); if (err) return 
err; } @@ -7517,6 +7902,7 @@ static void bpf_map__destroy(struct bpf_map *map) } zfree(&map->name); + zfree(&map->real_name); zfree(&map->pin_path); if (map->fd >= 0) @@ -7535,7 +7921,7 @@ void bpf_object__close(struct bpf_object *obj) bpf_gen__free(obj->gen_loader); bpf_object__elf_finish(obj); - bpf_object__unload(obj); + bpf_object_unload(obj); btf__free(obj->btf); btf_ext__free(obj->btf_ext); @@ -7564,6 +7950,10 @@ struct bpf_object * bpf_object__next(struct bpf_object *prev) { struct bpf_object *next; + bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST); + + if (strict) + return NULL; if (!prev) next = list_first_entry(&bpf_objects_list, @@ -7669,6 +8059,12 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, struct bpf_program * bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) +{ + return bpf_object__next_program(obj, prev); +} + +struct bpf_program * +bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) { struct bpf_program *prog = prev; @@ -7681,6 +8077,12 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) struct bpf_program * bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj) +{ + return bpf_object__prev_program(obj, next); +} + +struct bpf_program * +bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) { struct bpf_program *prog = next; @@ -7762,6 +8164,16 @@ size_t bpf_program__size(const struct bpf_program *prog) return prog->insns_cnt * BPF_INSN_SZ; } +const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) +{ + return prog->insns; +} + +size_t bpf_program__insn_cnt(const struct bpf_program *prog) +{ + return prog->insns_cnt; +} + int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, bpf_program_prep_t prep) { @@ -7869,223 +8281,145 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, prog->expected_attach_type = type; } -#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \ - attachable, attach_btf) \ - { \ - .sec = string, \ - .len = sizeof(string) - 1, \ - .prog_type = ptype, \ - .expected_attach_type = eatype, \ - .is_exp_attach_type_optional = eatype_optional, \ - .is_attachable = attachable, \ - .is_attach_btf = attach_btf, \ - } - -/* Programs that can NOT be attached. */ -#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0) - -/* Programs that can be attached. */ -#define BPF_APROG_SEC(string, ptype, atype) \ - BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0) - -/* Programs that must specify expected attach type at load time. */ -#define BPF_EAPROG_SEC(string, ptype, eatype) \ - BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0) - -/* Programs that use BTF to identify attach point */ -#define BPF_PROG_BTF(string, ptype, eatype) \ - BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1) - -/* Programs that can be attached but attach type can't be identified by section - * name. Kept for backward compatibility. - */ -#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) - -#define SEC_DEF(sec_pfx, ptype, ...) { \ +#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ .sec = sec_pfx, \ - .len = sizeof(sec_pfx) - 1, \ .prog_type = BPF_PROG_TYPE_##ptype, \ + .expected_attach_type = atype, \ + .cookie = (long)(flags), \ + .preload_fn = libbpf_preload_prog, \ __VA_ARGS__ \ } -static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, - struct bpf_program *prog); -static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, - struct bpf_program *prog); -static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, - struct bpf_program *prog); -static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, - struct bpf_program *prog); -static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, - struct bpf_program *prog); -static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, - struct bpf_program *prog); +static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie); +static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie); +static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie); +static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie); +static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie); +static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie); static const struct bpf_sec_def section_defs[] = { - BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), - BPF_EAPROG_SEC("sk_reuseport/migrate", BPF_PROG_TYPE_SK_REUSEPORT, - BPF_SK_REUSEPORT_SELECT_OR_MIGRATE), - BPF_EAPROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT, - BPF_SK_REUSEPORT_SELECT), - SEC_DEF("kprobe/", KPROBE, - .attach_fn = attach_kprobe), - BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE), - SEC_DEF("kretprobe/", KPROBE, - .attach_fn = attach_kprobe), - BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE), - BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), - BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), - SEC_DEF("tracepoint/", TRACEPOINT, - .attach_fn = attach_tp), - SEC_DEF("tp/", TRACEPOINT, - .attach_fn = attach_tp), - SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, - .attach_fn = attach_raw_tp), - SEC_DEF("raw_tp/", RAW_TRACEPOINT, - .attach_fn = attach_raw_tp), - SEC_DEF("tp_btf/", TRACING, - .expected_attach_type = BPF_TRACE_RAW_TP, - .is_attach_btf = true, - .attach_fn = attach_trace), - SEC_DEF("fentry/", TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .is_attach_btf = true, - .attach_fn = attach_trace), - SEC_DEF("fmod_ret/", TRACING, - .expected_attach_type = BPF_MODIFY_RETURN, - .is_attach_btf = true, - .attach_fn = attach_trace), - SEC_DEF("fexit/", TRACING, - .expected_attach_type = BPF_TRACE_FEXIT, - .is_attach_btf = true, - .attach_fn = attach_trace), - SEC_DEF("fentry.s/", TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .is_attach_btf = true, - .is_sleepable = true, - .attach_fn = attach_trace), - SEC_DEF("fmod_ret.s/", TRACING, - .expected_attach_type = BPF_MODIFY_RETURN, - .is_attach_btf = true, - .is_sleepable = true, - .attach_fn = attach_trace), - SEC_DEF("fexit.s/", TRACING, - .expected_attach_type = BPF_TRACE_FEXIT, - .is_attach_btf = true, - .is_sleepable = true, - .attach_fn = attach_trace), - SEC_DEF("freplace/", EXT, - .is_attach_btf = true, - .attach_fn = attach_trace), - SEC_DEF("lsm/", LSM, - .is_attach_btf = true, - .expected_attach_type = BPF_LSM_MAC, - .attach_fn = attach_lsm), - SEC_DEF("lsm.s/", LSM, - .is_attach_btf = true, - .is_sleepable = true, - .expected_attach_type = BPF_LSM_MAC, - .attach_fn = attach_lsm), - SEC_DEF("iter/", TRACING, - 
.expected_attach_type = BPF_TRACE_ITER, - .is_attach_btf = true, - .attach_fn = attach_iter), - SEC_DEF("syscall", SYSCALL, - .is_sleepable = true), - BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, - BPF_XDP_DEVMAP), - BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, - BPF_XDP_CPUMAP), - BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP, - BPF_XDP), - BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), - BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), - BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), - BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), - BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), - BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB, - BPF_CGROUP_INET_INGRESS), - BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, - BPF_CGROUP_INET_EGRESS), - BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), - BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, - BPF_CGROUP_INET_SOCK_CREATE), - BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, - BPF_CGROUP_INET_SOCK_RELEASE), - BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, - BPF_CGROUP_INET_SOCK_CREATE), - BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, - BPF_CGROUP_INET4_POST_BIND), - BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK, - BPF_CGROUP_INET6_POST_BIND), - BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE, - BPF_CGROUP_DEVICE), - BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS, - BPF_CGROUP_SOCK_OPS), - BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB, - BPF_SK_SKB_STREAM_PARSER), - BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB, - BPF_SK_SKB_STREAM_VERDICT), - BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB), - BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG, - BPF_SK_MSG_VERDICT), - BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2, - BPF_LIRC_MODE2), - BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR, - BPF_FLOW_DISSECTOR), - BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET4_BIND), - BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET6_BIND), - BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET4_CONNECT), - BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET6_CONNECT), - BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_UDP4_SENDMSG), - BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_UDP6_SENDMSG), - BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_UDP4_RECVMSG), - BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_UDP6_RECVMSG), - BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET4_GETPEERNAME), - BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET6_GETPEERNAME), - BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET4_GETSOCKNAME), - BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - BPF_CGROUP_INET6_GETSOCKNAME), - BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, - BPF_CGROUP_SYSCTL), - BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, - BPF_CGROUP_GETSOCKOPT), - BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, - BPF_CGROUP_SETSOCKOPT), - BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), - BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, - BPF_SK_LOOKUP), + 
SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), + SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE), + SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), + SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE), + SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), + SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp), + SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp), + SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), + SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), + SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), + SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), + SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + 
SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), + SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX), }; -#undef BPF_PROG_SEC_IMPL -#undef BPF_PROG_SEC -#undef BPF_APROG_SEC -#undef BPF_EAPROG_SEC -#undef BPF_APROG_COMPAT -#undef SEC_DEF - #define MAX_TYPE_NAME_SIZE 32 static const struct bpf_sec_def *find_sec_def(const char *sec_name) { - int i, n = ARRAY_SIZE(section_defs); + const struct bpf_sec_def *sec_def; + enum sec_def_flags sec_flags; + int i, n = ARRAY_SIZE(section_defs), len; + bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME; for (i = 0; i < n; i++) { - if (strncmp(sec_name, - section_defs[i].sec, section_defs[i].len)) + sec_def = §ion_defs[i]; + sec_flags = sec_def->cookie; + len = strlen(sec_def->sec); + + /* "type/" always has to have proper SEC("type/extras") form */ + if (sec_def->sec[len - 1] == '/') { + if (str_has_pfx(sec_name, sec_def->sec)) + return sec_def; + continue; + } + + /* "type+" means it can be either exact SEC("type") or + * well-formed SEC("type/extras") with proper '/' separator + */ + if (sec_def->sec[len - 1] == '+') { + len--; + /* not even a prefix */ + if (strncmp(sec_name, sec_def->sec, len) != 0) + continue; + /* exact match or has '/' separator */ + if (sec_name[len] == '\0' || sec_name[len] == '/') + return sec_def; continue; - return §ion_defs[i]; + } + + /* SEC_SLOPPY_PFX definitions are allowed to be 
just prefix + * matches, unless strict section name mode + * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the + * match has to be exact. + */ + if ((sec_flags & SEC_SLOPPY_PFX) && !strict) { + if (str_has_pfx(sec_name, sec_def->sec)) + return sec_def; + continue; + } + + /* Definitions not marked SEC_SLOPPY_PFX (e.g., + * SEC("syscall")) are exact matches in both modes. + */ + if (strcmp(sec_name, sec_def->sec) == 0) + return sec_def; } return NULL; } @@ -8102,8 +8436,15 @@ static char *libbpf_get_type_names(bool attach_type) buf[0] = '\0'; /* Forge string buf with all available names */ for (i = 0; i < ARRAY_SIZE(section_defs); i++) { - if (attach_type && !section_defs[i].is_attachable) - continue; + const struct bpf_sec_def *sec_def = §ion_defs[i]; + + if (attach_type) { + if (sec_def->preload_fn != libbpf_preload_prog) + continue; + + if (!(sec_def->cookie & SEC_ATTACHABLE)) + continue; + } if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { free(buf); @@ -8162,7 +8503,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, /* Collect the reloc from ELF and populate the st_ops->progs[] */ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, - GElf_Shdr *shdr, Elf_Data *data) + Elf64_Shdr *shdr, Elf_Data *data) { const struct btf_member *member; struct bpf_struct_ops *st_ops; @@ -8170,58 +8511,58 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, unsigned int shdr_idx; const struct btf *btf; struct bpf_map *map; - Elf_Data *symbols; unsigned int moff, insn_idx; const char *name; __u32 member_idx; - GElf_Sym sym; - GElf_Rel rel; + Elf64_Sym *sym; + Elf64_Rel *rel; int i, nrels; - symbols = obj->efile.symbols; btf = obj->btf; nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { - if (!gelf_getrel(data, i, &rel)) { + rel = elf_rel_by_idx(data, i); + if (!rel) { pr_warn("struct_ops reloc: failed to get %d reloc\n", i); return -LIBBPF_ERRNO__FORMAT; } - if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { + sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + if (!sym) { pr_warn("struct_ops reloc: symbol %zx not found\n", - (size_t)GELF_R_SYM(rel.r_info)); + (size_t)ELF64_R_SYM(rel->r_info)); return -LIBBPF_ERRNO__FORMAT; } - name = elf_sym_str(obj, sym.st_name) ?: ""; - map = find_struct_ops_map_by_offset(obj, rel.r_offset); + name = elf_sym_str(obj, sym->st_name) ?: ""; + map = find_struct_ops_map_by_offset(obj, rel->r_offset); if (!map) { - pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n", - (size_t)rel.r_offset); + pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", + (size_t)rel->r_offset); return -EINVAL; } - moff = rel.r_offset - map->sec_offset; - shdr_idx = sym.st_shndx; + moff = rel->r_offset - map->sec_offset; + shdr_idx = sym->st_shndx; st_ops = map->st_ops; - pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", + pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", map->name, - (long long)(rel.r_info >> 32), - (long long)sym.st_value, - shdr_idx, (size_t)rel.r_offset, - map->sec_offset, sym.st_name, name); + (long long)(rel->r_info >> 32), + (long long)sym->st_value, + shdr_idx, (size_t)rel->r_offset, + map->sec_offset, sym->st_name, name); if (shdr_idx >= SHN_LORESERVE) { - pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", - map->name, (size_t)rel.r_offset, shdr_idx); 
+ pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", + map->name, (size_t)rel->r_offset, shdr_idx); return -LIBBPF_ERRNO__RELOC; } - if (sym.st_value % BPF_INSN_SZ) { + if (sym->st_value % BPF_INSN_SZ) { pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", - map->name, (unsigned long long)sym.st_value); + map->name, (unsigned long long)sym->st_value); return -LIBBPF_ERRNO__FORMAT; } - insn_idx = sym.st_value / BPF_INSN_SZ; + insn_idx = sym->st_value / BPF_INSN_SZ; member = find_member_by_offset(st_ops->type, moff * 8); if (!member) { @@ -8245,35 +8586,37 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, return -EINVAL; } - if (prog->type == BPF_PROG_TYPE_UNSPEC) { - const struct bpf_sec_def *sec_def; - - sec_def = find_sec_def(prog->sec_name); - if (sec_def && - sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { - /* for pr_warn */ - prog->type = sec_def->prog_type; - goto invalid_prog; - } + /* prevent the use of BPF prog with invalid type */ + if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { + pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", + map->name, prog->name); + return -EINVAL; + } - prog->type = BPF_PROG_TYPE_STRUCT_OPS; + /* if we haven't yet processed this BPF program, record proper + * attach_btf_id and member_idx + */ + if (!prog->attach_btf_id) { prog->attach_btf_id = st_ops->type_id; prog->expected_attach_type = member_idx; - } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || - prog->attach_btf_id != st_ops->type_id || - prog->expected_attach_type != member_idx) { - goto invalid_prog; } + + /* struct_ops BPF prog can be re-used between multiple + * .struct_ops as long as it's the same struct_ops struct + * definition and the same function pointer field + */ + if (prog->attach_btf_id != st_ops->type_id || + prog->expected_attach_type != member_idx) { + pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", + map->name, prog->name, prog->sec_name, prog->type, + prog->attach_btf_id, prog->expected_attach_type, name); + return -EINVAL; + } + st_ops->progs[member_idx] = prog; } return 0; - -invalid_prog: - pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", - map->name, prog->name, prog->sec_name, prog->type, - prog->attach_btf_id, prog->expected_attach_type, name); - return -EINVAL; } #define BTF_TRACE_PREFIX "btf_trace_" @@ -8353,28 +8696,27 @@ int libbpf_find_vmlinux_btf_id(const char *name, static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) { - struct bpf_prog_info_linear *info_linear; - struct bpf_prog_info *info; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); struct btf *btf; int err; - info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0); - err = libbpf_get_error(info_linear); + err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); if (err) { - pr_warn("failed get_prog_info_linear for FD %d\n", - attach_prog_fd); + pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n", + attach_prog_fd, err); return err; } err = -EINVAL; - info = &info_linear->info; - if (!info->btf_id) { + if (!info.btf_id) { pr_warn("The target program doesn't have BTF\n"); goto out; } - btf = btf__load_from_kernel_by_id(info->btf_id); - if (libbpf_get_error(btf)) { - pr_warn("Failed to get BTF of the program\n"); + btf = btf__load_from_kernel_by_id(info.btf_id); + err = 
libbpf_get_error(btf); + if (err) { + pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); goto out; } err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); @@ -8384,7 +8726,6 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) goto out; } out: - free(info_linear); return err; } @@ -8425,32 +8766,12 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, return -ESRCH; } -static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id) +static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, + int *btf_obj_fd, int *btf_type_id) { enum bpf_attach_type attach_type = prog->expected_attach_type; __u32 attach_prog_fd = prog->attach_prog_fd; - const char *name = prog->sec_name, *attach_name; - const struct bpf_sec_def *sec = NULL; - int i, err = 0; - - if (!name) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(section_defs); i++) { - if (!section_defs[i].is_attach_btf) - continue; - if (strncmp(name, section_defs[i].sec, section_defs[i].len)) - continue; - - sec = §ion_defs[i]; - break; - } - - if (!sec) { - pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name); - return -ESRCH; - } - attach_name = name + sec->len; + int err = 0; /* BPF program's BTF ID */ if (attach_prog_fd) { @@ -8484,27 +8805,30 @@ int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { char *type_names; - int i; + const struct bpf_sec_def *sec_def; if (!name) return libbpf_err(-EINVAL); - for (i = 0; i < ARRAY_SIZE(section_defs); i++) { - if (strncmp(name, section_defs[i].sec, section_defs[i].len)) - continue; - if (!section_defs[i].is_attachable) - return libbpf_err(-EINVAL); - *attach_type = section_defs[i].expected_attach_type; - return 0; - } - pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); - type_names = libbpf_get_type_names(true); - if (type_names != NULL) { - pr_debug("attachable section(type) names are:%s\n", type_names); - free(type_names); + sec_def = find_sec_def(name); + if (!sec_def) { + pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); + type_names = libbpf_get_type_names(true); + if (type_names != NULL) { + pr_debug("attachable section(type) names are:%s\n", type_names); + free(type_names); + } + + return libbpf_err(-EINVAL); } - return libbpf_err(-EINVAL); + if (sec_def->preload_fn != libbpf_preload_prog) + return libbpf_err(-EINVAL); + if (!(sec_def->cookie & SEC_ATTACHABLE)) + return libbpf_err(-EINVAL); + + *attach_type = sec_def->expected_attach_type; + return 0; } int bpf_map__fd(const struct bpf_map *map) @@ -8517,9 +8841,30 @@ const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) return map ? &map->def : libbpf_err_ptr(-EINVAL); } +static bool map_uses_real_name(const struct bpf_map *map) +{ + /* Since libbpf started to support custom .data.* and .rodata.* maps, + * their user-visible name differs from kernel-visible name. Users see + * such map's corresponding ELF section name as a map name. + * This check distinguishes .data/.rodata from .data.* and .rodata.* + * maps to know which name has to be returned to the user. + */ + if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) + return true; + if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) + return true; + return false; +} + const char *bpf_map__name(const struct bpf_map *map) { - return map ? 
map->name : NULL; + if (!map) + return NULL; + + if (map_uses_real_name(map)) + return map->real_name; + + return map->name; } enum bpf_map_type bpf_map__type(const struct bpf_map *map) @@ -8548,6 +8893,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) return 0; } +__u64 bpf_map__map_extra(const struct bpf_map *map) +{ + return map->map_extra; +} + +int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->map_extra = map_extra; + return 0; +} + __u32 bpf_map__numa_node(const struct bpf_map *map) { return map->numa_node; @@ -8701,6 +9059,12 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) struct bpf_map * bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj) +{ + return bpf_object__next_map(obj, prev); +} + +struct bpf_map * +bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) { if (prev == NULL) return obj->maps; @@ -8710,6 +9074,12 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj) struct bpf_map * bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj) +{ + return bpf_object__prev_map(obj, next); +} + +struct bpf_map * +bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) { if (next == NULL) { if (!obj->nr_maps) @@ -8726,7 +9096,22 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) struct bpf_map *pos; bpf_object__for_each_map(pos, obj) { - if (pos->name && !strcmp(pos->name, name)) + /* if it's a special internal map name (which always starts + * with dot) then check if that special name matches the + * real map name (ELF section name) + */ + if (name[0] == '.') { + if (pos->real_name && strcmp(pos->real_name, name) == 0) + return pos; + continue; + } + /* otherwise map name has to be an exact match */ + if (map_uses_real_name(pos)) { + if (strcmp(pos->real_name, name) == 0) + return pos; + continue; + } + if (strcmp(pos->name, name) == 0) return pos; } return errno = ENOENT, NULL; @@ -8991,8 +9376,15 @@ int bpf_link__unpin(struct bpf_link *link) struct bpf_link_perf { struct bpf_link link; int perf_event_fd; + /* legacy kprobe support: keep track of probe identifier and type */ + char *legacy_probe_name; + bool legacy_is_kprobe; + bool legacy_is_retprobe; }; +static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); +static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); + static int bpf_link_perf_detach(struct bpf_link *link) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); @@ -9005,17 +9397,29 @@ static int bpf_link_perf_detach(struct bpf_link *link) close(perf_link->perf_event_fd); close(link->fd); - return libbpf_err(err); + /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ + if (perf_link->legacy_probe_name) { + if (perf_link->legacy_is_kprobe) { + err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, + perf_link->legacy_is_retprobe); + } else { + err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, + perf_link->legacy_is_retprobe); + } + } + + return err; } static void bpf_link_perf_dealloc(struct bpf_link *link) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + free(perf_link->legacy_probe_name); free(perf_link); } -struct bpf_link *bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd, +struct bpf_link *bpf_program__attach_perf_event_opts(const struct 
bpf_program *prog, int pfd, const struct bpf_perf_event_opts *opts) { char errmsg[STRERR_BUFSIZE]; @@ -9090,7 +9494,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(struct bpf_program *prog, i return libbpf_err_ptr(err); } -struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd) +struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) { return bpf_program__attach_perf_event_opts(prog, pfd, NULL); } @@ -9207,16 +9611,110 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, return pfd; } +static int append_to_file(const char *file, const char *fmt, ...) +{ + int fd, n, err = 0; + va_list ap; + + fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); + if (fd < 0) + return -errno; + + va_start(ap, fmt); + n = vdprintf(fd, fmt, ap); + va_end(ap); + + if (n < 0) + err = -errno; + + close(fd); + return err; +} + +static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, + const char *kfunc_name, size_t offset) +{ + snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), kfunc_name, offset); +} + +static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, + const char *kfunc_name, size_t offset) +{ + const char *file = "/sys/kernel/debug/tracing/kprobe_events"; + + return append_to_file(file, "%c:%s/%s %s+0x%zx", + retprobe ? 'r' : 'p', + retprobe ? "kretprobes" : "kprobes", + probe_name, kfunc_name, offset); +} + +static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) +{ + const char *file = "/sys/kernel/debug/tracing/kprobe_events"; + + return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name); +} + +static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) +{ + char file[256]; + + snprintf(file, sizeof(file), + "/sys/kernel/debug/tracing/events/%s/%s/id", + retprobe ? "kretprobes" : "kprobes", probe_name); + + return parse_uint_from_file(file, "%d\n"); +} + +static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, + const char *kfunc_name, size_t offset, int pid) +{ + struct perf_event_attr attr = {}; + char errmsg[STRERR_BUFSIZE]; + int type, pfd, err; + + err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); + if (err < 0) { + pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", + kfunc_name, offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + type = determine_kprobe_perf_type_legacy(probe_name, retprobe); + if (type < 0) { + pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", + kfunc_name, offset, + libbpf_strerror_r(type, errmsg, sizeof(errmsg))); + return type; + } + attr.size = sizeof(attr); + attr.config = type; + attr.type = PERF_TYPE_TRACEPOINT; + + pfd = syscall(__NR_perf_event_open, &attr, + pid < 0 ? -1 : pid, /* pid */ + pid == -1 ? 
0 : -1, /* cpu */ + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("legacy kprobe perf_event_open() failed: %s\n", + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + return pfd; +} + struct bpf_link * -bpf_program__attach_kprobe_opts(struct bpf_program *prog, +bpf_program__attach_kprobe_opts(const struct bpf_program *prog, const char *func_name, const struct bpf_kprobe_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); char errmsg[STRERR_BUFSIZE]; + char *legacy_probe = NULL; struct bpf_link *link; - unsigned long offset; - bool retprobe; + size_t offset; + bool retprobe, legacy; int pfd, err; if (!OPTS_VALID(opts, bpf_kprobe_opts)) @@ -9226,27 +9724,57 @@ bpf_program__attach_kprobe_opts(struct bpf_program *prog, offset = OPTS_GET(opts, offset, 0); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); - pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, - offset, -1 /* pid */, 0 /* ref_ctr_off */); + legacy = determine_kprobe_perf_type() < 0; + if (!legacy) { + pfd = perf_event_open_probe(false /* uprobe */, retprobe, + func_name, offset, + -1 /* pid */, 0 /* ref_ctr_off */); + } else { + char probe_name[256]; + + gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), + func_name, offset); + + legacy_probe = strdup(func_name); + if (!legacy_probe) + return libbpf_err_ptr(-ENOMEM); + + pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, + offset, -1 /* pid */); + } if (pfd < 0) { - pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n", - prog->name, retprobe ? "kretprobe" : "kprobe", func_name, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); - return libbpf_err_ptr(pfd); + err = -errno; + pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", + prog->name, retprobe ? "kretprobe" : "kprobe", + func_name, offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { close(pfd); - pr_warn("prog '%s': failed to attach to %s '%s': %s\n", - prog->name, retprobe ? "kretprobe" : "kprobe", func_name, + pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", + prog->name, retprobe ? 
"kretprobe" : "kprobe", + func_name, offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); - return libbpf_err_ptr(err); + goto err_out; + } + if (legacy) { + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + + perf_link->legacy_probe_name = legacy_probe; + perf_link->legacy_is_kprobe = true; + perf_link->legacy_is_retprobe = retprobe; } + return link; +err_out: + free(legacy_probe); + return libbpf_err_ptr(err); } -struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, const char *func_name) { @@ -9257,8 +9785,7 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, return bpf_program__attach_kprobe_opts(prog, func_name, &opts); } -static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); unsigned long offset = 0; @@ -9267,8 +9794,11 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, char *func; int n, err; - func_name = prog->sec_name + sec->len; - opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0; + opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); + if (opts.retprobe) + func_name = prog->sec_name + sizeof("kretprobe/") - 1; + else + func_name = prog->sec_name + sizeof("kprobe/") - 1; n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); if (n < 1) { @@ -9289,17 +9819,96 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, return link; } +static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, + const char *binary_path, uint64_t offset) +{ + int i; + + snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); + + /* sanitize binary_path in the probe name */ + for (i = 0; buf[i]; i++) { + if (!isalnum(buf[i])) + buf[i] = '_'; + } +} + +static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, + const char *binary_path, size_t offset) +{ + const char *file = "/sys/kernel/debug/tracing/uprobe_events"; + + return append_to_file(file, "%c:%s/%s %s:0x%zx", + retprobe ? 'r' : 'p', + retprobe ? "uretprobes" : "uprobes", + probe_name, binary_path, offset); +} + +static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) +{ + const char *file = "/sys/kernel/debug/tracing/uprobe_events"; + + return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name); +} + +static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) +{ + char file[512]; + + snprintf(file, sizeof(file), + "/sys/kernel/debug/tracing/events/%s/%s/id", + retprobe ? 
"uretprobes" : "uprobes", probe_name); + + return parse_uint_from_file(file, "%d\n"); +} + +static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, + const char *binary_path, size_t offset, int pid) +{ + struct perf_event_attr attr; + int type, pfd, err; + + err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); + if (err < 0) { + pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", + binary_path, (size_t)offset, err); + return err; + } + type = determine_uprobe_perf_type_legacy(probe_name, retprobe); + if (type < 0) { + pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", + binary_path, offset, err); + return type; + } + + memset(&attr, 0, sizeof(attr)); + attr.size = sizeof(attr); + attr.config = type; + attr.type = PERF_TYPE_TRACEPOINT; + + pfd = syscall(__NR_perf_event_open, &attr, + pid < 0 ? -1 : pid, /* pid */ + pid == -1 ? 0 : -1, /* cpu */ + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); + return err; + } + return pfd; +} + LIBBPF_API struct bpf_link * -bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, +bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, const struct bpf_uprobe_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); - char errmsg[STRERR_BUFSIZE]; + char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; struct bpf_link *link; size_t ref_ctr_off; int pfd, err; - bool retprobe; + bool retprobe, legacy; if (!OPTS_VALID(opts, bpf_uprobe_opts)) return libbpf_err_ptr(-EINVAL); @@ -9308,15 +9917,35 @@ bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); - pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, - func_offset, pid, ref_ctr_off); + legacy = determine_uprobe_perf_type() < 0; + if (!legacy) { + pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, + func_offset, pid, ref_ctr_off); + } else { + char probe_name[512]; + + if (ref_ctr_off) + return libbpf_err_ptr(-EINVAL); + + gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), + binary_path, func_offset); + + legacy_probe = strdup(probe_name); + if (!legacy_probe) + return libbpf_err_ptr(-ENOMEM); + + pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, + binary_path, func_offset, pid); + } if (pfd < 0) { + err = -errno; pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", prog->name, retprobe ? "uretprobe" : "uprobe", binary_path, func_offset, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); - return libbpf_err_ptr(pfd); + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; } + link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { @@ -9325,12 +9954,23 @@ bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, prog->name, retprobe ? 
"uretprobe" : "uprobe", binary_path, func_offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); - return libbpf_err_ptr(err); + goto err_out; + } + if (legacy) { + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + + perf_link->legacy_probe_name = legacy_probe; + perf_link->legacy_is_kprobe = false; + perf_link->legacy_is_retprobe = retprobe; } return link; +err_out: + free(legacy_probe); + return libbpf_err_ptr(err); + } -struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, size_t func_offset) @@ -9390,7 +10030,7 @@ static int perf_event_open_tracepoint(const char *tp_category, return pfd; } -struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, const char *tp_category, const char *tp_name, const struct bpf_tracepoint_opts *opts) @@ -9424,15 +10064,14 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog, return link; } -struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, const char *tp_category, const char *tp_name) { return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); } -static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie) { char *sec_name, *tp_cat, *tp_name; struct bpf_link *link; @@ -9441,8 +10080,11 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, if (!sec_name) return libbpf_err_ptr(-ENOMEM); - /* extract "tp//" */ - tp_cat = sec_name + sec->len; + /* extract "tp//" or "tracepoint//" */ + if (str_has_pfx(prog->sec_name, "tp/")) + tp_cat = sec_name + sizeof("tp/") - 1; + else + tp_cat = sec_name + sizeof("tracepoint/") - 1; tp_name = strchr(tp_cat, '/'); if (!tp_name) { free(sec_name); @@ -9456,7 +10098,7 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, return link; } -struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, const char *tp_name) { char errmsg[STRERR_BUFSIZE]; @@ -9486,16 +10128,34 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, return link; } -static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie) { - const char *tp_name = prog->sec_name + sec->len; + static const char *const prefixes[] = { + "raw_tp/", + "raw_tracepoint/", + "raw_tp.w/", + "raw_tracepoint.w/", + }; + size_t i; + const char *tp_name = NULL; + + for (i = 0; i < ARRAY_SIZE(prefixes); i++) { + if (str_has_pfx(prog->sec_name, prefixes[i])) { + tp_name = prog->sec_name + strlen(prefixes[i]); + break; + } + } + if (!tp_name) { + pr_warn("prog '%s': invalid section name '%s'\n", + prog->name, prog->sec_name); + return libbpf_err_ptr(-EINVAL); + } return bpf_program__attach_raw_tracepoint(prog, tp_name); } /* Common logic for all BPF program types that attach to a btf_id */ -static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog) +static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog) { char 
errmsg[STRERR_BUFSIZE]; struct bpf_link *link; @@ -9524,30 +10184,28 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog) return (struct bpf_link *)link; } -struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) +struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) { return bpf_program__attach_btf_id(prog); } -struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog) +struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) { return bpf_program__attach_btf_id(prog); } -static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie) { return bpf_program__attach_trace(prog); } -static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie) { return bpf_program__attach_lsm(prog); } static struct bpf_link * -bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, +bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id, const char *target_name) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, @@ -9583,24 +10241,24 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, } struct bpf_link * -bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd) +bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) { return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup"); } struct bpf_link * -bpf_program__attach_netns(struct bpf_program *prog, int netns_fd) +bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) { return bpf_program__attach_fd(prog, netns_fd, 0, "netns"); } -struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex) +struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) { /* target_fd/target_ifindex use the same field in LINK_CREATE */ return bpf_program__attach_fd(prog, ifindex, 0, "xdp"); } -struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog, +struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, int target_fd, const char *attach_func_name) { @@ -9633,7 +10291,7 @@ struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog, } struct bpf_link * -bpf_program__attach_iter(struct bpf_program *prog, +bpf_program__attach_iter(const struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); @@ -9672,21 +10330,17 @@ bpf_program__attach_iter(struct bpf_program *prog, return link; } -static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, - struct bpf_program *prog) +static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie) { return bpf_program__attach_iter(prog, NULL); } -struct bpf_link *bpf_program__attach(struct bpf_program *prog) +struct bpf_link *bpf_program__attach(const struct bpf_program *prog) { - const struct bpf_sec_def *sec_def; - - sec_def = find_sec_def(prog->sec_name); - if (!sec_def || !sec_def->attach_fn) + if (!prog->sec_def || !prog->sec_def->attach_fn) return libbpf_err_ptr(-ESRCH); - return sec_def->attach_fn(sec_def, prog); + return prog->sec_def->attach_fn(prog, prog->sec_def->cookie); } static int bpf_link__detach_struct_ops(struct bpf_link *link) @@ -9699,7 +10353,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link) return 
0; } -struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) +struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) { struct bpf_struct_ops *st_ops; struct bpf_link *link; @@ -10512,18 +11166,29 @@ int bpf_program__set_attach_target(struct bpf_program *prog, { int btf_obj_fd = 0, btf_id = 0, err; - if (!prog || attach_prog_fd < 0 || !attach_func_name) + if (!prog || attach_prog_fd < 0) return libbpf_err(-EINVAL); if (prog->obj->loaded) return libbpf_err(-EINVAL); + if (attach_prog_fd && !attach_func_name) { + /* remember attach_prog_fd and let bpf_program__load() find + * BTF ID during the program load + */ + prog->attach_prog_fd = attach_prog_fd; + return 0; + } + if (attach_prog_fd) { btf_id = libbpf_find_prog_btf_id(attach_func_name, attach_prog_fd); if (btf_id < 0) return libbpf_err(btf_id); } else { + if (!attach_func_name) + return libbpf_err(-EINVAL); + /* load btf_vmlinux, if not yet */ err = bpf_object__load_vmlinux_btf(prog->obj, true); if (err) @@ -10596,7 +11261,7 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) int fd, err = 0, len; char buf[128]; - fd = open(fcpu, O_RDONLY); + fd = open(fcpu, O_RDONLY | O_CLOEXEC); if (fd < 0) { err = -errno; pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); @@ -10765,16 +11430,15 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) for (i = 0; i < s->prog_cnt; i++) { struct bpf_program *prog = *s->progs[i].prog; struct bpf_link **link = s->progs[i].link; - const struct bpf_sec_def *sec_def; if (!prog->load) continue; - sec_def = find_sec_def(prog->sec_name); - if (!sec_def || !sec_def->attach_fn) + /* auto-attaching not supported for this program */ + if (!prog->sec_def || !prog->sec_def->attach_fn) continue; - *link = sec_def->attach_fn(sec_def, prog); + *link = bpf_program__attach(prog); err = libbpf_get_error(*link); if (err) { pr_warn("failed to auto-attach program '%s': %d\n", diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index f177d897c5f716..9de0f299706b14 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -83,12 +83,15 @@ struct bpf_object_open_opts { * Non-relocatable instructions are replaced with invalid ones to * prevent accidental errors. * */ + LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect") bool relaxed_core_relocs; /* maps that set the 'pinning' attribute in their definition will have * their pin_path attribute set to a file in this directory, and be * auto-pinned to that path on load; defaults to "/sys/fs/bpf". */ const char *pin_root_path; + + LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program") __u32 attach_prog_fd; /* Additional kernel config content that augments and overrides * system Kconfig for CONFIG_xxx externs. 
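As a quick usage sketch of the renamed iteration APIs (bpf_object__next_program()/bpf_object__next_map(), reached through the for-each macros) and the new bpf_program__insn_cnt() accessor introduced by this patch; this is only an illustration, and the object file name "example.bpf.o" is a placeholder:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	struct bpf_object *obj;

	obj = bpf_object__open("example.bpf.o");	/* placeholder object file */
	if (libbpf_get_error(obj))
		return 1;

	/* bpf_object__for_each_program() now expands to bpf_object__next_program() */
	bpf_object__for_each_program(prog, obj)
		printf("prog %s: %zu insns\n", bpf_program__name(prog),
		       bpf_program__insn_cnt(prog));

	/* bpf_object__for_each_map() now expands to bpf_object__next_map() */
	bpf_object__for_each_map(map, obj)
		printf("map %s%s\n", bpf_map__name(map),
		       bpf_map__is_internal(map) ? " (internal)" : "");

	bpf_object__close(obj);
	return 0;
}
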
@@ -147,6 +150,7 @@ struct bpf_object_load_attr { /* Load/unload object into/from kernel */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); +LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead") LIBBPF_API int bpf_object__unload(struct bpf_object *obj); LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); @@ -164,7 +168,8 @@ LIBBPF_API struct bpf_program * bpf_object__find_program_by_name(const struct bpf_object *obj, const char *name); -LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead") +struct bpf_object *bpf_object__next(struct bpf_object *prev); #define bpf_object__for_each_safe(pos, tmp) \ for ((pos) = bpf_object__next(NULL), \ (tmp) = bpf_object__next(pos); \ @@ -186,16 +191,22 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, /* Accessors of bpf_program */ struct bpf_program; -LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog, - const struct bpf_object *obj); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead") +struct bpf_program *bpf_program__next(struct bpf_program *prog, + const struct bpf_object *obj); +LIBBPF_API struct bpf_program * +bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog); -#define bpf_object__for_each_program(pos, obj) \ - for ((pos) = bpf_program__next(NULL, (obj)); \ - (pos) != NULL; \ - (pos) = bpf_program__next((pos), (obj))) +#define bpf_object__for_each_program(pos, obj) \ + for ((pos) = bpf_object__next_program((obj), NULL); \ + (pos) != NULL; \ + (pos) = bpf_object__next_program((obj), (pos))) -LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog, - const struct bpf_object *obj); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead") +struct bpf_program *bpf_program__prev(struct bpf_program *prog, + const struct bpf_object *obj); +LIBBPF_API struct bpf_program * +bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog); typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *); @@ -214,14 +225,51 @@ LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); /* returns program size in bytes */ +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead") LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); +struct bpf_insn; + +/** + * @brief **bpf_program__insns()** gives read-only access to BPF program's + * underlying BPF instructions. + * @param prog BPF program for which to return instructions + * @return a pointer to an array of BPF instructions that belong to the + * specified BPF program + * + * Returned pointer is always valid and not NULL. Number of `struct bpf_insn` + * pointed to can be fetched using **bpf_program__insn_cnt()** API. + * + * Keep in mind, libbpf can modify and append/delete BPF program's + * instructions as it processes BPF object file and prepares everything for + * uploading into the kernel. So depending on the point in BPF object + * lifetime, **bpf_program__insns()** can return different sets of + * instructions. 
As an example, during BPF object load phase BPF program + * instructions will be CO-RE-relocated, BPF subprograms instructions will be + * appended, ldimm64 instructions will have FDs embedded, etc. So instructions + * returned before **bpf_object__load()** and after it might be quite + * different. + */ +LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog); +/** + * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s + * that form specified BPF program. + * @param prog BPF program for which to return number of BPF instructions + * + * See **bpf_program__insns()** documentation for notes on how libbpf can + * change instructions and their count during different phases of + * **bpf_object** lifetime. + */ +LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog); + LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_version); LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance); +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance); @@ -243,7 +291,7 @@ LIBBPF_API int bpf_link__detach(struct bpf_link *link); LIBBPF_API int bpf_link__destroy(struct bpf_link *link); LIBBPF_API struct bpf_link * -bpf_program__attach(struct bpf_program *prog); +bpf_program__attach(const struct bpf_program *prog); struct bpf_perf_event_opts { /* size of this struct, for forward/backward compatiblity */ @@ -254,10 +302,10 @@ struct bpf_perf_event_opts { #define bpf_perf_event_opts__last_field bpf_cookie LIBBPF_API struct bpf_link * -bpf_program__attach_perf_event(struct bpf_program *prog, int pfd); +bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd); LIBBPF_API struct bpf_link * -bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd, +bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, const struct bpf_perf_event_opts *opts); struct bpf_kprobe_opts { @@ -266,7 +314,7 @@ struct bpf_kprobe_opts { /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; /* function's offset to install kprobe to */ - unsigned long offset; + size_t offset; /* kprobe is return probe */ bool retprobe; size_t :0; @@ -274,10 +322,10 @@ struct bpf_kprobe_opts { #define bpf_kprobe_opts__last_field retprobe LIBBPF_API struct bpf_link * -bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe, +bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, const char *func_name); LIBBPF_API struct bpf_link * -bpf_program__attach_kprobe_opts(struct bpf_program *prog, +bpf_program__attach_kprobe_opts(const struct bpf_program *prog, const char *func_name, const struct bpf_kprobe_opts *opts); @@ -297,11 +345,11 @@ struct bpf_uprobe_opts { #define bpf_uprobe_opts__last_field retprobe LIBBPF_API struct bpf_link * -bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe, +bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, size_t func_offset); LIBBPF_API struct bpf_link * -bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, +bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t 
func_offset, const struct bpf_uprobe_opts *opts); @@ -314,35 +362,35 @@ struct bpf_tracepoint_opts { #define bpf_tracepoint_opts__last_field bpf_cookie LIBBPF_API struct bpf_link * -bpf_program__attach_tracepoint(struct bpf_program *prog, +bpf_program__attach_tracepoint(const struct bpf_program *prog, const char *tp_category, const char *tp_name); LIBBPF_API struct bpf_link * -bpf_program__attach_tracepoint_opts(struct bpf_program *prog, +bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, const char *tp_category, const char *tp_name, const struct bpf_tracepoint_opts *opts); LIBBPF_API struct bpf_link * -bpf_program__attach_raw_tracepoint(struct bpf_program *prog, +bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, const char *tp_name); LIBBPF_API struct bpf_link * -bpf_program__attach_trace(struct bpf_program *prog); +bpf_program__attach_trace(const struct bpf_program *prog); LIBBPF_API struct bpf_link * -bpf_program__attach_lsm(struct bpf_program *prog); +bpf_program__attach_lsm(const struct bpf_program *prog); LIBBPF_API struct bpf_link * -bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); +bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd); LIBBPF_API struct bpf_link * -bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); +bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd); LIBBPF_API struct bpf_link * -bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); +bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex); LIBBPF_API struct bpf_link * -bpf_program__attach_freplace(struct bpf_program *prog, +bpf_program__attach_freplace(const struct bpf_program *prog, int target_fd, const char *attach_func_name); struct bpf_map; -LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); +LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); struct bpf_iter_attach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ @@ -352,11 +400,9 @@ struct bpf_iter_attach_opts { #define bpf_iter_attach_opts__last_field link_info_len LIBBPF_API struct bpf_link * -bpf_program__attach_iter(struct bpf_program *prog, +bpf_program__attach_iter(const struct bpf_program *prog, const struct bpf_iter_attach_opts *opts); -struct bpf_insn; - /* * Libbpf allows callers to adjust BPF programs before being loaded * into kernel. One program in an object file can be transformed into @@ -385,7 +431,7 @@ struct bpf_insn; * one instance. In this case bpf_program__fd(prog) is equal to * bpf_program__nth_fd(prog, 0). */ - +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions") struct bpf_prog_prep_result { /* * If not NULL, load new instruction array. @@ -414,9 +460,11 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n, struct bpf_insn *insns, int insns_cnt, struct bpf_prog_prep_result *res); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions") LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance, bpf_program_prep_t prep); +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n); /* @@ -478,9 +526,13 @@ struct bpf_map_def { unsigned int map_flags; }; -/* - * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel, - * so no need to worry about a name clash. 
+/** + * @brief **bpf_object__find_map_by_name()** returns BPF map of + * the given name, if it exists within the passed BPF object + * @param obj BPF object + * @param name name of the BPF map + * @return BPF map instance, if such map exists within the BPF object; + * or NULL otherwise. */ LIBBPF_API struct bpf_map * bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name); @@ -495,18 +547,28 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name); LIBBPF_API struct bpf_map * bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead") +struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); LIBBPF_API struct bpf_map * -bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); +bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map); + #define bpf_object__for_each_map(pos, obj) \ - for ((pos) = bpf_map__next(NULL, (obj)); \ + for ((pos) = bpf_object__next_map((obj), NULL); \ (pos) != NULL; \ - (pos) = bpf_map__next((pos), (obj))) + (pos) = bpf_object__next_map((obj), (pos))) #define bpf_map__for_each bpf_object__for_each_map +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead") +struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); LIBBPF_API struct bpf_map * -bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); +bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); -/* get/set map FD */ +/** + * @brief **bpf_map__fd()** gets the file descriptor of the passed + * BPF map + * @param map the BPF map instance + * @return the file descriptor; or -EINVAL in case of an error + */ LIBBPF_API int bpf_map__fd(const struct bpf_map *map); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); /* get map definition */ @@ -538,6 +600,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); /* get/set map if_index */ LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); +/* get/set map map_extra flags */ +LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, @@ -547,6 +612,14 @@ LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size); LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); + +/** + * @brief **bpf_map__is_internal()** tells the caller whether or not the + * passed map is a special map created by libbpf automatically for things like + * global variables, __ksym externs, Kconfig values, etc + * @param map the bpf_map + * @return true, if the map is an internal map; false, otherwise + */ LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); @@ -558,6 +631,38 @@ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map); +/** + 
* @brief **libbpf_get_error()** extracts the error code from the passed + * pointer + * @param ptr pointer returned from libbpf API function + * @return error code; or 0 if no error occurred + * + * Many libbpf API functions which return pointers have logic to encode error + * codes as pointers, and do not return NULL. Meaning **libbpf_get_error()** + * should be used on the return value from these functions immediately after + * calling the API function, with no intervening calls that could clobber the + * `errno` variable. Consult the individual function's documentation to verify + * whether this logic applies. + * + * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` + * is enabled, NULL is returned on error instead. + * + * If ptr is NULL, then errno should already be set by the failing + * API, because libbpf never returns NULL on success and it now always + * sets errno on error. + * + * Example usage: + * + * struct perf_buffer *pb; + * + * pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts); + * err = libbpf_get_error(pb); + * if (err) { + * pb = NULL; + * fprintf(stderr, "failed to open perf buffer: %d\n", err); + * goto cleanup; + * } + */ LIBBPF_API long libbpf_get_error(const void *ptr); struct bpf_prog_load_attr { @@ -822,9 +927,10 @@ bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear); LIBBPF_API void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); -/* - * A helper function to get the number of possible CPUs before looking up - * per-CPU maps. Negative errno is returned on failure. +/** + * @brief **libbpf_num_possible_cpus()** is a helper function to get the + * number of possible CPUs that the host kernel supports and expects. 
+ * @return number of possible CPUs; or error code on failure * * Example usage: * @@ -834,7 +940,6 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); * } * long values[ncpus]; * bpf_map_lookup_elem(per_cpu_map_fd, key, values); - * */ LIBBPF_API int libbpf_num_possible_cpus(void); @@ -854,7 +959,7 @@ struct bpf_object_skeleton { size_t sz; /* size of this struct, for forward/backward compatibility */ const char *name; - void *data; + const void *data; size_t data_sz; struct bpf_object **obj; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index bbc53bb25f68f0..43580eb477405f 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -386,3 +386,19 @@ LIBBPF_0.5.0 { btf_dump__dump_type_data; libbpf_set_strict_mode; } LIBBPF_0.4.0; + +LIBBPF_0.6.0 { + global: + bpf_map__map_extra; + bpf_map__set_map_extra; + bpf_object__next_map; + bpf_object__next_program; + bpf_object__prev_map; + bpf_object__prev_program; + bpf_program__insn_cnt; + bpf_program__insns; + btf__add_btf; + btf__add_decl_tag; + btf__raw_data; + btf__type_cnt; +} LIBBPF_0.5.0; diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h index 947d8bd8a7bb63..aaa1efbf6f5109 100644 --- a/tools/lib/bpf/libbpf_common.h +++ b/tools/lib/bpf/libbpf_common.h @@ -10,6 +10,7 @@ #define __LIBBPF_LIBBPF_COMMON_H #include +#include "libbpf_version.h" #ifndef LIBBPF_API #define LIBBPF_API __attribute__((visibility("default"))) @@ -17,6 +18,29 @@ #define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg))) +/* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */ +#define LIBBPF_DEPRECATED_SINCE(major, minor, msg) \ + __LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor \ + (LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg)) + +#define __LIBBPF_CURRENT_VERSION_GEQ(major, minor) \ + (LIBBPF_MAJOR_VERSION > (major) || \ + (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor))) + +/* Add checks for other versions below when planning deprecation of API symbols + * with the LIBBPF_DEPRECATED_SINCE macro. 
+ */ +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6) +#define __LIBBPF_MARK_DEPRECATED_0_6(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_6(X) +#endif +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7) +#define __LIBBPF_MARK_DEPRECATED_0_7(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_7(X) +#endif + /* Helper macro to declare and initialize libbpf options struct * * This dance with uninitialized declaration, followed by memset to zero, diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 533b0211f40ac6..aeb79e3a8ff994 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "libbpf_legacy.h" #include "relo_core.h" @@ -52,8 +54,8 @@ #endif /* Older libelf all end up in this expression, for both 32 and 64 bit */ -#ifndef GELF_ST_VISIBILITY -#define GELF_ST_VISIBILITY(o) ((o) & 0x03) +#ifndef ELF64_ST_VISIBILITY +#define ELF64_ST_VISIBILITY(o) ((o) & 0x03) #endif #define BTF_INFO_ENC(kind, kind_flag, vlen) \ @@ -69,6 +71,8 @@ #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) +#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) @@ -87,20 +91,40 @@ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) #endif +/* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is + * a string literal known at compilation time or char * pointer known only at + * runtime. + */ +#define str_has_pfx(str, pfx) \ + (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0) + /* Symbol versioning is different between static and shared library. * Properly versioned symbols are needed for shared library, but * only the symbol of the new version is needed for static library. + * Starting with GNU C 10, use symver attribute instead of .symver assembler + * directive, which works better with GCC LTO builds. 
*/ -#ifdef SHARED -# define COMPAT_VERSION(internal_name, api_name, version) \ +#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10 + +#define DEFAULT_VERSION(internal_name, api_name, version) \ + __attribute__((symver(#api_name "@@" #version))) +#define COMPAT_VERSION(internal_name, api_name, version) \ + __attribute__((symver(#api_name "@" #version))) + +#elif defined(SHARED) + +#define COMPAT_VERSION(internal_name, api_name, version) \ asm(".symver " #internal_name "," #api_name "@" #version); -# define DEFAULT_VERSION(internal_name, api_name, version) \ +#define DEFAULT_VERSION(internal_name, api_name, version) \ asm(".symver " #internal_name "," #api_name "@@" #version); -#else -# define COMPAT_VERSION(internal_name, api_name, version) -# define DEFAULT_VERSION(internal_name, api_name, version) \ + +#else /* !SHARED */ + +#define COMPAT_VERSION(internal_name, api_name, version) +#define DEFAULT_VERSION(internal_name, api_name, version) \ extern typeof(internal_name) api_name \ __attribute__((alias(#internal_name))); + #endif extern void libbpf_print(enum libbpf_print_level level, @@ -171,8 +195,9 @@ enum map_def_parts { MAP_DEF_NUMA_NODE = 0x080, MAP_DEF_PINNING = 0x100, MAP_DEF_INNER_MAP = 0x200, + MAP_DEF_MAP_EXTRA = 0x400, - MAP_DEF_ALL = 0x3ff, /* combination of all above */ + MAP_DEF_ALL = 0x7ff, /* combination of all above */ }; struct btf_map_def { @@ -186,6 +211,7 @@ struct btf_map_def { __u32 map_flags; __u32 numa_node; __u32 pinning; + __u64 map_extra; }; int parse_btf_map_def(const char *map_name, struct btf *btf, @@ -276,14 +302,32 @@ struct bpf_prog_load_params { __u32 log_level; char *log_buf; size_t log_buf_sz; + int *fd_array; }; int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr); -int bpf_object__section_size(const struct bpf_object *obj, const char *name, - __u32 *size); -int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, - __u32 *off); +struct bpf_create_map_params { + const char *name; + enum bpf_map_type map_type; + __u32 map_flags; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 numa_node; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 map_ifindex; + union { + __u32 inner_map_fd; + __u32 btf_vmlinux_value_type_id; + }; + __u64 map_extra; +}; + +int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr); + struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, const char **prefix, int *kind); @@ -386,6 +430,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx); int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); +__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, + __u32 kind); extern enum libbpf_strict_mode libbpf_mode; @@ -447,4 +493,26 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn) return insn->code == (BPF_LD | BPF_IMM | BPF_DW); } +/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2 + * Takes ownership of the fd passed in, and closes it if calling + * fcntl(fd, F_DUPFD_CLOEXEC, 3). 
+ */ +static inline int ensure_good_fd(int fd) +{ + int old_fd = fd, saved_errno; + + if (fd < 0) + return fd; + if (fd < 3) { + fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); + saved_errno = errno; + close(old_fd); + if (fd < 0) { + pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno); + errno = saved_errno; + } + } + return fd; +} + #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index df0d03dcffabc9..5ba5c9beccfa92 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -46,6 +46,24 @@ enum libbpf_strict_mode { */ LIBBPF_STRICT_DIRECT_ERRS = 0x02, + /* + * Enforce strict BPF program section (SEC()) names. + * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were + * allowed, with LIBBPF_STRICT_SEC_NAME this will become + * unrecognized by libbpf and would have to be just SEC("xdp") and + * SEC("perf_event"). + * + * Note, in this mode the program pin path will be based on the + * function name instead of section name. + */ + LIBBPF_STRICT_SEC_NAME = 0x04, + /* + * Disable the global 'bpf_objects_list'. Maintaining this list adds + * a race condition to bpf_object__open() and bpf_object__close(). + * Clients can maintain it on their own if it is valuable for them. + */ + LIBBPF_STRICT_NO_OBJECT_LIST = 0x08, + __LIBBPF_STRICT_LAST, }; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index cd8c703dde7187..68f2dbf364aa0c 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -33,7 +33,7 @@ static int get_vendor_id(int ifindex) snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname); - fd = open(path, O_RDONLY); + fd = open(path, O_RDONLY | O_CLOEXEC); if (fd < 0) return -1; diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h new file mode 100644 index 00000000000000..dd56d76f291cde --- /dev/null +++ b/tools/lib/bpf/libbpf_version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (C) 2021 Facebook */ +#ifndef __LIBBPF_VERSION_H +#define __LIBBPF_VERSION_H + +#define LIBBPF_MAJOR_VERSION 0 +#define LIBBPF_MINOR_VERSION 6 + +#endif /* __LIBBPF_VERSION_H */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 2df880cefdaeea..f677dccdeae44b 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include "libbpf.h" #include "btf.h" @@ -302,7 +301,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) if (!linker->filename) return -ENOMEM; - linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644); + linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); if (linker->fd < 0) { err = -errno; pr_warn("failed to create '%s': %d\n", file, err); @@ -324,12 +323,12 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) linker->elf_hdr->e_machine = EM_BPF; linker->elf_hdr->e_type = ET_REL; -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB; #else -#error "Unknown __BYTE_ORDER" +#error "Unknown __BYTE_ORDER__" #endif /* STRTAB */ @@ -539,12 +538,12 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts, struct src_obj 
*obj) { -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ const int host_endianness = ELFDATA2LSB; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ const int host_endianness = ELFDATA2MSB; #else -#error "Unknown __BYTE_ORDER" +#error "Unknown __BYTE_ORDER__" #endif int err = 0; Elf_Scn *scn; @@ -557,7 +556,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, obj->filename = filename; - obj->fd = open(filename, O_RDONLY); + obj->fd = open(filename, O_RDONLY | O_CLOEXEC); if (obj->fd < 0) { err = -errno; pr_warn("failed to open file '%s': %d\n", filename, err); @@ -921,7 +920,7 @@ static int check_btf_type_id(__u32 *type_id, void *ctx) { struct btf *btf = ctx; - if (*type_id > btf__get_nr_types(btf)) + if (*type_id >= btf__type_cnt(btf)) return -EINVAL; return 0; @@ -948,8 +947,8 @@ static int linker_sanity_check_btf(struct src_obj *obj) if (!obj->btf) return 0; - n = btf__get_nr_types(obj->btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { t = btf_type_by_id(obj->btf, i); err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf); @@ -1659,8 +1658,8 @@ static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sy return -EINVAL; } - n = btf__get_nr_types(obj->btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { t = btf__type_by_id(obj->btf, i); /* some global and extern FUNCs and VARs might not be associated with any @@ -2131,8 +2130,8 @@ static int linker_fixup_btf(struct src_obj *obj) if (!obj->btf) return 0; - n = btf__get_nr_types(obj->btf); - for (i = 1; i <= n; i++) { + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { struct btf_var_secinfo *vi; struct btf_type *t; @@ -2235,14 +2234,14 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) if (!obj->btf) return 0; - start_id = btf__get_nr_types(linker->btf) + 1; - n = btf__get_nr_types(obj->btf); + start_id = btf__type_cnt(linker->btf); + n = btf__type_cnt(obj->btf); obj->btf_type_map = calloc(n + 1, sizeof(int)); if (!obj->btf_type_map) return -ENOMEM; - for (i = 1; i <= n; i++) { + for (i = 1; i < n; i++) { struct glob_sym *glob_sym = NULL; t = btf__type_by_id(obj->btf, i); @@ -2297,8 +2296,8 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) } /* remap all the types except DATASECs */ - n = btf__get_nr_types(linker->btf); - for (i = start_id; i <= n; i++) { + n = btf__type_cnt(linker->btf); + for (i = start_id; i < n; i++) { struct btf_type *dst_t = btf_type_by_id(linker->btf, i); if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map)) @@ -2657,7 +2656,7 @@ static int finalize_btf(struct bpf_linker *linker) __u32 raw_sz; /* bail out if no BTF data was produced */ - if (btf__get_nr_types(linker->btf) == 0) + if (btf__type_cnt(linker->btf) == 1) return 0; for (i = 1; i < linker->sec_cnt; i++) { @@ -2694,7 +2693,7 @@ static int finalize_btf(struct bpf_linker *linker) } /* Emit .BTF section */ - raw_data = btf__get_raw_data(linker->btf, &raw_sz); + raw_data = btf__raw_data(linker->btf, &raw_sz); if (!raw_data) return -ENOMEM; diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index 4016ed492d0c20..b5b8956a1be85d 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -662,7 +662,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, *validate = true; /* signedness is never ambiguous */ break; case 
BPF_FIELD_LSHIFT_U64: -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ *val = 64 - (bit_off + bit_sz - byte_off * 8); #else *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index b22b50c1b173e8..9cf66702fa8ddb 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -105,10 +105,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr)); if (err < 0 || (int)attr.test.retval < 0) { opts->errstr = "failed to execute loader prog"; - if (err < 0) + if (err < 0) { err = -errno; - else + } else { err = (int)attr.test.retval; + errno = -err; + } goto out; } err = 0; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index e9b619aa0cdf3b..81f8fbc85e70d7 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -281,6 +281,7 @@ static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, return err; } +DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, @@ -299,7 +300,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, if (!umem) return -ENOMEM; - umem->fd = socket(AF_XDP, SOCK_RAW, 0); + umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0); if (umem->fd < 0) { err = -errno; goto out_umem_alloc; @@ -345,6 +346,7 @@ struct xsk_umem_config_v1 { __u32 frame_headroom; }; +COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, @@ -358,8 +360,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp, &config); } -COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) -DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) static enum xsk_prog get_xsk_prog(void) { @@ -549,7 +549,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk) struct ifreq ifr = {}; int fd, err, ret; - fd = socket(AF_LOCAL, SOCK_DGRAM, 0); + fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0); if (fd < 0) return -errno; @@ -1046,7 +1046,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, } if (umem->refcount++ > 0) { - xsk->fd = socket(AF_XDP, SOCK_RAW, 0); + xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0); if (xsk->fd < 0) { err = -errno; goto out_xsk_alloc; diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h index 01c12dca9c100f..64e9c57fd792d4 100644 --- a/tools/lib/bpf/xsk.h +++ b/tools/lib/bpf/xsk.h @@ -23,6 +23,12 @@ extern "C" { #endif +/* This whole API has been deprecated and moved to libxdp that can be found at + * https://github.com/xdp-project/xdp-tools. The APIs are exactly the same so + * it should just be linking with libxdp instead of libbpf for this set of + * functionality. If not, please submit a bug report on the aforementioned page. + */ + /* Load-Acquire Store-Release barriers used by the XDP socket * library. The following macros should *NOT* be considered part of * the xsk.h API, and is subject to change anytime. 
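A rough usage sketch of the migration that the xsk.h deprecation comment above describes: callers keep the exact same xsk calls and only switch the header and library that provide them (libxdp ships the same API under xdp/xsk.h). The create_umem() helper and the NUM_FRAMES constant below are illustrative names, not part of libbpf or libxdp:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>   /* would become <xdp/xsk.h> when building against libxdp */

#define NUM_FRAMES 4096 /* application-chosen frame count */

static int create_umem(struct xsk_umem **umem, struct xsk_ring_prod *fill,
		       struct xsk_ring_cons *comp)
{
	size_t len = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	if (posix_memalign(&bufs, getpagesize(), len))
		return -ENOMEM;

	/* same signature whether linked against libbpf (deprecated here) or libxdp */
	return xsk_umem__create(umem, bufs, len, fill, comp, NULL);
}
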
@@ -245,8 +251,10 @@ static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr) return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr); } -LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem); -LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__fd(const struct xsk_umem *umem); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__fd(const struct xsk_socket *xsk); #define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 #define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 @@ -263,10 +271,10 @@ struct xsk_umem_config { __u32 flags; }; -LIBBPF_API int xsk_setup_xdp_prog(int ifindex, - int *xsks_map_fd); -LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk, - int xsks_map_fd); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd); /* Flags for the libbpf_flags field. */ #define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) @@ -280,40 +288,46 @@ struct xsk_socket_config { }; /* Set config to NULL to get the default configuration. */ -LIBBPF_API int xsk_umem__create(struct xsk_umem **umem, - void *umem_area, __u64 size, - struct xsk_ring_prod *fill, - struct xsk_ring_cons *comp, - const struct xsk_umem_config *config); -LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem, - void *umem_area, __u64 size, - struct xsk_ring_prod *fill, - struct xsk_ring_cons *comp, - const struct xsk_umem_config *config); -LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem, - void *umem_area, __u64 size, - struct xsk_ring_prod *fill, - struct xsk_ring_cons *comp, - const struct xsk_umem_config *config); -LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, - const char *ifname, __u32 queue_id, - struct xsk_umem *umem, - struct xsk_ring_cons *rx, - struct xsk_ring_prod *tx, - const struct xsk_socket_config *config); -LIBBPF_API int -xsk_socket__create_shared(struct xsk_socket **xsk_ptr, - const char *ifname, - __u32 queue_id, struct xsk_umem *umem, - struct xsk_ring_cons *rx, - struct xsk_ring_prod *tx, - struct xsk_ring_prod *fill, - struct xsk_ring_cons *comp, - const struct xsk_socket_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create_v0_0_2(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create_v0_0_4(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__create(struct xsk_socket **xsk, + const char *ifname, __u32 queue_id, + struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod 
*tx, + const struct xsk_socket_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, + const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_socket_config *config); /* Returns 0 for success and -EBUSY if the umem is still in use. */ -LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem); -LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__delete(struct xsk_umem *umem); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +void xsk_socket__delete(struct xsk_socket *xsk); #ifdef __cplusplus } /* extern "C" */ diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c index 1a7112a87736ab..388847bab6d905 100644 --- a/tools/perf/util/bpf-event.c +++ b/tools/perf/util/bpf-event.c @@ -110,7 +110,7 @@ static int perf_env__fetch_btf(struct perf_env *env, u32 data_size; const void *data; - data = btf__get_raw_data(btf, &data_size); + data = btf__raw_data(btf, &data_size); node = malloc(data_size + sizeof(struct btf_node)); if (!node) diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch index b10b7a27c33fd1..0c6c7f45688784 100644 --- a/tools/scripts/Makefile.arch +++ b/tools/scripts/Makefile.arch @@ -4,7 +4,8 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ -e /arm64/!s/arm.*/arm/ -e s/sa110/arm/ \ -e s/s390x/s390/ -e s/parisc64/parisc/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ - -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ + -e s/riscv.*/riscv/) ifndef ARCH ARCH := $(HOSTARCH) diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 433f8bef261e29..1dad8d617da81e 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -9,8 +9,9 @@ test_tag FEATURE-DUMP.libbpf fixdep test_dev_cgroup -/test_progs* -!test_progs.h +/test_progs +/test_progs-no_alu32 +/test_progs-bpf_gcc test_verifier_log feature test_sock diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 799b88152e9e8d..54b0a41a377508 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -122,12 +122,15 @@ BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a ifneq ($(CROSS_COMPILE),) HOST_BUILD_DIR := $(BUILD_DIR)/host HOST_SCRATCH_DIR := $(OUTPUT)/host-tools +HOST_INCLUDE_DIR := $(HOST_SCRATCH_DIR)/include else HOST_BUILD_DIR := $(BUILD_DIR) HOST_SCRATCH_DIR := $(SCRATCH_DIR) +HOST_INCLUDE_DIR := $(INCLUDE_DIR) endif HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids +RUNQSLOWER_OUTPUT := $(BUILD_DIR)/runqslower/ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ @@ -152,7 +155,7 @@ $(notdir $(TEST_GEN_PROGS) \ # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ $(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \ - $(INCLUDE_DIR)) + $(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR)) $(MAKE_DIRS): $(call msg,MKDIR,,$@) $(Q)mkdir -p $@ @@ -181,11 +184,13 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) DEFAULT_BPFTOOL := 
$(HOST_SCRATCH_DIR)/sbin/bpftool -$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) - $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ - OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \ - BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ - cp $(SCRATCH_DIR)/runqslower $@ +$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ + OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \ + BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \ + BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \ + BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ + cp $(RUNQSLOWER_OUTPUT)runqslower $@ TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) @@ -209,7 +214,9 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ CC=$(HOSTCC) LD=$(HOSTLD) \ EXTRA_CFLAGS='-g -O0' \ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ - prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install + LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ + LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ + prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin all: docs @@ -225,7 +232,7 @@ docs-clean: $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ../../../include/uapi/linux/bpf.h \ - | $(INCLUDE_DIR) $(BUILD_DIR)/libbpf + | $(BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ EXTRA_CFLAGS='-g -O0' \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers @@ -233,7 +240,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ifneq ($(BPFOBJ),$(HOST_BPFOBJ)) $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ ../../../include/uapi/linux/bpf.h \ - | $(INCLUDE_DIR) $(HOST_BUILD_DIR)/libbpf + | $(HOST_BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \ EXTRA_CFLAGS='-g -O0' \ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \ @@ -258,6 +265,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ $(TOOLSDIR)/lib/str_error_r.c $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) \ + LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) # Get Clang's default includes on this system, as opposed to those seen by @@ -269,7 +277,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ define get_sys_includes $(shell $(1) -v -E - &1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ -$(shell $(1) -dM -E - $$@ + $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o)) @@ -411,10 +421,9 @@ ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),) $(TRUNNER_TESTS_DIR)-tests-hdr := y $(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c $$(call msg,TEST-HDR,$(TRUNNER_BINARY),$$@) - $$(shell ( cd $(TRUNNER_TESTS_DIR); \ - echo '/* Generated header, do not edit */'; \ - ls *.c 2> /dev/null | \ - sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \ + $$(shell (echo '/* Generated header, do not edit */'; \ + sed -n -E 's/^void (serial_)?test_([a-zA-Z0-9_]+)\((void)?\).*/DEFINE_TEST(\2)/p' \ + $(TRUNNER_TESTS_DIR)/*.c | sort ; \ ) > $$@) endif @@ -453,7 +462,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ - $(Q)$(RESOLVE_BTFIDS) --no-fail --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ + 
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ endef @@ -513,20 +522,22 @@ $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) $(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ # Benchmark runner -$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h +$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ) $(call msg,CC,,$@) - $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@ + $(Q)$(CC) $(CFLAGS) -O2 -c $(filter %.c,$^) $(LDLIBS) -o $@ $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \ $(OUTPUT)/perfbuf_bench.skel.h -$(OUTPUT)/bench.o: bench.h testing_helpers.h +$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h +$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ $(OUTPUT)/bench_count.o \ $(OUTPUT)/bench_rename.o \ $(OUTPUT)/bench_trigger.o \ - $(OUTPUT)/bench_ringbufs.o + $(OUTPUT)/bench_ringbufs.o \ + $(OUTPUT)/bench_bloom_filter_map.o $(call msg,BINARY,,$@) $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index 9b17f2867488aa..5e287e445f7562 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -201,6 +201,20 @@ Without it, the error from compiling bpf selftests looks like: __ https://reviews.llvm.org/D93563 +btf_tag test and Clang version +============================== + +The btf_tag selftest require LLVM support to recognize the btf_decl_tag attribute. +It was introduced in `Clang 14`__. + +Without it, the btf_tag selftest will be skipped and you will observe: + +.. code-block:: console + + # btf_tag:SKIP + +__ https://reviews.llvm.org/D111588 + Clang dependencies for static linking tests =========================================== @@ -228,3 +242,16 @@ To fix this issue, user newer libbpf. .. Links .. _clang reloc patch: https://reviews.llvm.org/D102712 .. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst + +Clang dependencies for the u32 spill test (xdpwall) +=================================================== +The xdpwall selftest requires a change in `Clang 14`__. + +Without it, the xdpwall selftest will fail and the error message +from running test_progs will look like: + +.. code-block:: console + + test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007 + +__ https://reviews.llvm.org/D109073 diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 6ea15b93a2f8a1..cc4722f693e9a9 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -51,6 +51,35 @@ void setup_libbpf() fprintf(stderr, "failed to increase RLIMIT_MEMLOCK: %d", err); } +void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns) +{ + long total = res->false_hits + res->hits + res->drops; + + printf("Iter %3d (%7.3lfus): ", + iter, (delta_ns - 1000000000) / 1000.0); + + printf("%ld false hits of %ld total operations. 
Percentage = %2.2f %%\n", + res->false_hits, total, ((float)res->false_hits / total) * 100); +} + +void false_hits_report_final(struct bench_res res[], int res_cnt) +{ + long total_hits = 0, total_drops = 0, total_false_hits = 0, total_ops = 0; + int i; + + for (i = 0; i < res_cnt; i++) { + total_hits += res[i].hits; + total_false_hits += res[i].false_hits; + total_drops += res[i].drops; + } + total_ops = total_hits + total_false_hits + total_drops; + + printf("Summary: %ld false hits of %ld total operations. ", + total_false_hits, total_ops); + printf("Percentage = %2.2f %%\n", + ((float)total_false_hits / total_ops) * 100); +} + void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns) { double hits_per_sec, drops_per_sec; @@ -63,20 +92,22 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns) printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0); - printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s\n", - hits_per_sec, hits_per_prod, drops_per_sec); + printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s, total operations %8.3lfM/s\n", + hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec); } void hits_drops_report_final(struct bench_res res[], int res_cnt) { int i; - double hits_mean = 0.0, drops_mean = 0.0; - double hits_stddev = 0.0, drops_stddev = 0.0; + double hits_mean = 0.0, drops_mean = 0.0, total_ops_mean = 0.0; + double hits_stddev = 0.0, drops_stddev = 0.0, total_ops_stddev = 0.0; + double total_ops; for (i = 0; i < res_cnt; i++) { hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); drops_mean += res[i].drops / 1000000.0 / (0.0 + res_cnt); } + total_ops_mean = hits_mean + drops_mean; if (res_cnt > 1) { for (i = 0; i < res_cnt; i++) { @@ -86,14 +117,21 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt) drops_stddev += (drops_mean - res[i].drops / 1000000.0) * (drops_mean - res[i].drops / 1000000.0) / (res_cnt - 1.0); + total_ops = res[i].hits + res[i].drops; + total_ops_stddev += (total_ops_mean - total_ops / 1000000.0) * + (total_ops_mean - total_ops / 1000000.0) / + (res_cnt - 1.0); } hits_stddev = sqrt(hits_stddev); drops_stddev = sqrt(drops_stddev); + total_ops_stddev = sqrt(total_ops_stddev); } printf("Summary: hits %8.3lf \u00B1 %5.3lfM/s (%7.3lfM/prod), ", hits_mean, hits_stddev, hits_mean / env.producer_cnt); - printf("drops %8.3lf \u00B1 %5.3lfM/s\n", + printf("drops %8.3lf \u00B1 %5.3lfM/s, ", drops_mean, drops_stddev); + printf("total operations %8.3lf \u00B1 %5.3lfM/s\n", + total_ops_mean, total_ops_stddev); } const char *argp_program_version = "benchmark"; @@ -132,9 +170,11 @@ static const struct argp_option opts[] = { }; extern struct argp bench_ringbufs_argp; +extern struct argp bench_bloom_map_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, + { &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 }, {}, }; @@ -323,6 +363,11 @@ extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; extern const struct bench bench_pb_libbpf; extern const struct bench bench_pb_custom; +extern const struct bench bench_bloom_lookup; +extern const struct bench bench_bloom_update; +extern const struct bench bench_bloom_false_positive; +extern const struct bench bench_hashmap_without_bloom; +extern const struct bench bench_hashmap_with_bloom; static const struct bench *benchs[] = { &bench_count_global, @@ -344,6 +389,11 @@ static const struct bench *benchs[] = { 
&bench_rb_custom, &bench_pb_libbpf, &bench_pb_custom, + &bench_bloom_lookup, + &bench_bloom_update, + &bench_bloom_false_positive, + &bench_hashmap_without_bloom, + &bench_hashmap_with_bloom, }; static void setup_benchmark() diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index c1f48a473b022d..624c6b11501f9a 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -33,6 +33,7 @@ struct env { struct bench_res { long hits; long drops; + long false_hits; }; struct bench { @@ -56,6 +57,8 @@ extern const struct bench *bench; void setup_libbpf(); void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns); void hits_drops_report_final(struct bench_res res[], int res_cnt); +void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns); +void false_hits_report_final(struct bench_res res[], int res_cnt); static inline __u64 get_time_ns() { struct timespec t; diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c new file mode 100644 index 00000000000000..6eeeed2913e627 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include +#include "bench.h" +#include "bloom_filter_bench.skel.h" +#include "bpf_util.h" + +static struct ctx { + bool use_array_map; + bool use_hashmap; + bool hashmap_use_bloom; + bool count_false_hits; + + struct bloom_filter_bench *skel; + + int bloom_fd; + int hashmap_fd; + int array_map_fd; + + pthread_mutex_t map_done_mtx; + pthread_cond_t map_done_cv; + bool map_done; + bool map_prepare_err; + + __u32 next_map_idx; +} ctx = { + .map_done_mtx = PTHREAD_MUTEX_INITIALIZER, + .map_done_cv = PTHREAD_COND_INITIALIZER, +}; + +struct stat { + __u32 stats[3]; +}; + +static struct { + __u32 nr_entries; + __u8 nr_hash_funcs; + __u8 value_size; +} args = { + .nr_entries = 1000, + .nr_hash_funcs = 3, + .value_size = 8, +}; + +enum { + ARG_NR_ENTRIES = 3000, + ARG_NR_HASH_FUNCS = 3001, + ARG_VALUE_SIZE = 3002, +}; + +static const struct argp_option opts[] = { + { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0, + "Set number of expected unique entries in the bloom filter"}, + { "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0, + "Set number of hash functions in the bloom filter"}, + { "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0, + "Set value size (in bytes) of bloom filter entries"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_NR_ENTRIES: + args.nr_entries = strtol(arg, NULL, 10); + if (args.nr_entries == 0) { + fprintf(stderr, "Invalid nr_entries count."); + argp_usage(state); + } + break; + case ARG_NR_HASH_FUNCS: + args.nr_hash_funcs = strtol(arg, NULL, 10); + if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) { + fprintf(stderr, + "The bloom filter must use 1 to 15 hash functions."); + argp_usage(state); + } + break; + case ARG_VALUE_SIZE: + args.value_size = strtol(arg, NULL, 10); + if (args.value_size < 2 || args.value_size > 256) { + fprintf(stderr, + "Invalid value size. 
Must be between 2 and 256 bytes"); + argp_usage(state); + } + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_bloom_map_argp = { + .options = opts, + .parser = parse_arg, +}; + +static void validate(void) +{ + if (env.consumer_cnt != 1) { + fprintf(stderr, + "The bloom filter benchmarks do not support multi-consumer use\n"); + exit(1); + } +} + +static inline void trigger_bpf_program(void) +{ + syscall(__NR_getpgid); +} + +static void *producer(void *input) +{ + while (true) + trigger_bpf_program(); + + return NULL; +} + +static void *map_prepare_thread(void *arg) +{ + __u32 val_size, i; + void *val = NULL; + int err; + + val_size = args.value_size; + val = malloc(val_size); + if (!val) { + ctx.map_prepare_err = true; + goto done; + } + + while (true) { + i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED); + if (i > args.nr_entries) + break; + +again: + /* Populate hashmap, bloom filter map, and array map with the same + * random values + */ + err = syscall(__NR_getrandom, val, val_size, 0); + if (err != val_size) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to get random value: %d\n", -errno); + break; + } + + if (ctx.use_hashmap) { + err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST); + if (err) { + if (err != -EEXIST) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to add elem to hashmap: %d\n", + -errno); + break; + } + goto again; + } + } + + i--; + + if (ctx.use_array_map) { + err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0); + if (err) { + ctx.map_prepare_err = true; + fprintf(stderr, "failed to add elem to array map: %d\n", -errno); + break; + } + } + + if (ctx.use_hashmap && !ctx.hashmap_use_bloom) + continue; + + err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0); + if (err) { + ctx.map_prepare_err = true; + fprintf(stderr, + "failed to add elem to bloom filter map: %d\n", -errno); + break; + } + } +done: + pthread_mutex_lock(&ctx.map_done_mtx); + ctx.map_done = true; + pthread_cond_signal(&ctx.map_done_cv); + pthread_mutex_unlock(&ctx.map_done_mtx); + + if (val) + free(val); + + return NULL; +} + +static void populate_maps(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + pthread_t map_thread; + int i, err, nr_rand_bytes; + + ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map); + ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap); + ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map); + + for (i = 0; i < nr_cpus; i++) { + err = pthread_create(&map_thread, NULL, map_prepare_thread, + NULL); + if (err) { + fprintf(stderr, "failed to create pthread: %d\n", -errno); + exit(1); + } + } + + pthread_mutex_lock(&ctx.map_done_mtx); + while (!ctx.map_done) + pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx); + pthread_mutex_unlock(&ctx.map_done_mtx); + + if (ctx.map_prepare_err) + exit(1); + + nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals, + ctx.skel->rodata->nr_rand_bytes, 0); + if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) { + fprintf(stderr, "failed to get random bytes\n"); + exit(1); + } +} + +static void check_args(void) +{ + if (args.value_size < 8) { + __u64 nr_unique_entries = 1ULL << (args.value_size * 8); + + if (args.nr_entries > nr_unique_entries) { + fprintf(stderr, + "Not enough unique values for the nr_entries requested\n"); + exit(1); + } + } +} + +static struct bloom_filter_bench *setup_skeleton(void) +{ + struct bloom_filter_bench *skel; + + check_args(); + + setup_libbpf(); + + 
skel = bloom_filter_bench__open(); + if (!skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom; + skel->rodata->count_false_hits = ctx.count_false_hits; + + /* Resize number of entries */ + bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries); + + bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries); + + bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries); + + /* Set value size */ + bpf_map__set_value_size(skel->maps.array_map, args.value_size); + + bpf_map__set_value_size(skel->maps.bloom_map, args.value_size); + + bpf_map__set_value_size(skel->maps.hashmap, args.value_size); + + /* For the hashmap, we use the value as the key as well */ + bpf_map__set_key_size(skel->maps.hashmap, args.value_size); + + skel->bss->value_size = args.value_size; + + /* Set number of hash functions */ + bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs); + + if (bloom_filter_bench__load(skel)) { + fprintf(stderr, "failed to load skeleton\n"); + exit(1); + } + + return skel; +} + +static void bloom_lookup_setup(void) +{ + struct bpf_link *link; + + ctx.use_array_map = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void bloom_update_setup(void) +{ + struct bpf_link *link; + + ctx.use_array_map = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_update); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void false_positive_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + ctx.hashmap_use_bloom = true; + ctx.count_false_hits = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void hashmap_with_bloom_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + ctx.hashmap_use_bloom = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void hashmap_no_bloom_setup(void) +{ + struct bpf_link *link; + + ctx.use_hashmap = true; + + ctx.skel = setup_skeleton(); + + populate_maps(); + + link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static void measure(struct bench_res *res) +{ + unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0; + static unsigned long last_hits, last_drops, last_false_hits; + unsigned int nr_cpus = bpf_num_possible_cpus(); + int hit_key, drop_key, false_hit_key; + int i; + + hit_key = ctx.skel->rodata->hit_key; + drop_key = ctx.skel->rodata->drop_key; + false_hit_key = ctx.skel->rodata->false_hit_key; + + if (ctx.skel->bss->error != 0) { + fprintf(stderr, "error (%d) when searching the bloom filter\n", + ctx.skel->bss->error); + exit(1); + } + + for (i = 0; i < nr_cpus; i++) { + struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i]; + + total_hits += s->stats[hit_key]; + total_drops += s->stats[drop_key]; + total_false_hits += s->stats[false_hit_key]; + } + + res->hits = total_hits - last_hits; + res->drops = total_drops - 
last_drops; + res->false_hits = total_false_hits - last_false_hits; + + last_hits = total_hits; + last_drops = total_drops; + last_false_hits = total_false_hits; +} + +static void *consumer(void *input) +{ + return NULL; +} + +const struct bench bench_bloom_lookup = { + .name = "bloom-lookup", + .validate = validate, + .setup = bloom_lookup_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_bloom_update = { + .name = "bloom-update", + .validate = validate, + .setup = bloom_update_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_bloom_false_positive = { + .name = "bloom-false-positive", + .validate = validate, + .setup = false_positive_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = false_hits_report_progress, + .report_final = false_hits_report_final, +}; + +const struct bench bench_hashmap_without_bloom = { + .name = "hashmap-without-bloom", + .validate = validate, + .setup = hashmap_no_bloom_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_hashmap_with_bloom = { + .name = "hashmap-with-bloom", + .validate = validate, + .setup = hashmap_with_bloom_setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh new file mode 100755 index 00000000000000..8ffd385ab2f42c --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source ./benchs/run_common.sh + +set -eufo pipefail + +header "Bloom filter map" +for v in 2 4 8 16 40; do +for t in 1 4 8 12 16; do +for h in {1..10}; do +subtitle "value_size: $v bytes, # threads: $t, # hashes: $h" + for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do + printf "%'d entries -\n" $e + printf "\t" + summarize "Lookups, total operations: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-lookup)" + printf "\t" + summarize "Updates, total operations: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-update)" + printf "\t" + summarize_percentage "False positive rate: " \ + "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-false-positive)" + done + printf "\n" +done +done +done + +header "Hashmap without bloom filter vs. 
hashmap with bloom filter (throughput, 8 threads)" +for v in 2 4 8 16 40; do +for h in {1..10}; do +subtitle "value_size: $v, # hashes: $h" + for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do + printf "%'d entries -\n" $e + printf "\t" + summarize_total "Hashmap without bloom filter: " \ + "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-without-bloom)" + printf "\t" + summarize_total "Hashmap with bloom filter: " \ + "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-with-bloom)" + done + printf "\n" +done +done diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh index af4aa04caba6a6..ada028aa900732 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh @@ -1,34 +1,8 @@ #!/bin/bash -set -eufo pipefail - -RUN_BENCH="sudo ./bench -w3 -d10 -a" - -function hits() -{ - echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" -} - -function drops() -{ - echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" -} +source ./benchs/run_common.sh -function header() -{ - local len=${#1} - - printf "\n%s\n" "$1" - for i in $(seq 1 $len); do printf '='; done - printf '\n' -} - -function summarize() -{ - bench="$1" - summary=$(echo $2 | tail -n1) - printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)" -} +set -eufo pipefail header "Single-producer, parallel producer" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh new file mode 100644 index 00000000000000..9a16be78b18099 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_common.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +RUN_BENCH="sudo ./bench -w3 -d10 -a" + +function header() +{ + local len=${#1} + + printf "\n%s\n" "$1" + for i in $(seq 1 $len); do printf '='; done + printf '\n' +} + +function subtitle() +{ + local len=${#1} + printf "\t%s\n" "$1" +} + +function hits() +{ + echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + +function drops() +{ + echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + +function percentage() +{ + echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/" +} + +function total() +{ + echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" +} + +function summarize() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)" +} + +function summarize_percentage() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s%%\n" "$bench" "$(percentage $summary)" +} + +function summarize_total() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s\n" "$bench" "$(total $summary)" +} diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 89c6d58e5dd68f..11ee801e75e7e0 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -34,6 +34,21 @@ DECLARE_TRACE(bpf_testmod_test_write_bare, TP_ARGS(task, ctx) ); +#undef BPF_TESTMOD_DECLARE_TRACE +#ifdef DECLARE_TRACE_WRITABLE +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, 
size) \ + DECLARE_TRACE_WRITABLE(call, PARAMS(proto), PARAMS(args), size) +#else +#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#endif + +BPF_TESTMOD_DECLARE_TRACE(bpf_testmod_test_writable_bare, + TP_PROTO(struct bpf_testmod_test_writable_ctx *ctx), + TP_ARGS(ctx), + sizeof(struct bpf_testmod_test_writable_ctx) +); + #endif /* _BPF_TESTMOD_EVENTS_H */ #undef TRACE_INCLUDE_PATH diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 141d8da687d21f..5d52ea2768df44 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ +#include +#include #include #include #include @@ -13,6 +15,24 @@ DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; +noinline void +bpf_testmod_test_mod_kfunc(int i) +{ + *(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i; +} + +noinline int bpf_testmod_loop_test(int n) +{ + int i, sum = 0; + + /* the primary goal of this test is to test LBR. Create a lot of + * branches in the function, so we can catch it easily. + */ + for (i = 0; i < n; i++) + sum += i; + return sum; +} + noinline ssize_t bpf_testmod_test_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -24,7 +44,21 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, .len = len, }; - trace_bpf_testmod_test_read(current, &ctx); + /* This is always true. Use the check to make sure the compiler + * doesn't remove bpf_testmod_loop_test. + */ + if (bpf_testmod_loop_test(101) > 100) + trace_bpf_testmod_test_read(current, &ctx); + + /* Magic number to enable writable tp */ + if (len == 64) { + struct bpf_testmod_test_writable_ctx writable = { + .val = 1024, + }; + trace_bpf_testmod_test_writable_bare(&writable); + if (writable.early_ret) + return snprintf(buf, len, "%d\n", writable.val); + } return -EIO; /* always fail */ } @@ -55,13 +89,26 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; +BTF_SET_START(bpf_testmod_kfunc_ids) +BTF_ID(func, bpf_testmod_test_mod_kfunc) +BTF_SET_END(bpf_testmod_kfunc_ids) + +static DEFINE_KFUNC_BTF_ID_SET(&bpf_testmod_kfunc_ids, bpf_testmod_kfunc_btf_set); + static int bpf_testmod_init(void) { - return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + int ret; + + ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + if (ret) + return ret; + register_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); + return 0; } static void bpf_testmod_exit(void) { + unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); } @@ -71,4 +118,3 @@ module_exit(bpf_testmod_exit); MODULE_AUTHOR("Andrii Nakryiko"); MODULE_DESCRIPTION("BPF selftests module"); MODULE_LICENSE("Dual BSD/GPL"); - diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index b3892dc401113e..0d71e2607832e5 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -17,4 +17,9 @@ struct bpf_testmod_test_write_ctx { size_t len; }; +struct bpf_testmod_test_writable_ctx { + bool early_ret; + int val; +}; + #endif /* _BPF_TESTMOD_H */ diff --git 
a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index b692e6ead9b55e..b5b6b013a245e4 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -24,11 +24,12 @@ static const char * const btf_kind_str_mapping[] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; static const char *btf_kind_str(__u16 kind) { - if (kind > BTF_KIND_DATASEC) + if (kind > BTF_KIND_DECL_TAG) return "UNKNOWN"; return btf_kind_str_mapping[kind]; } @@ -177,6 +178,10 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id) case BTF_KIND_FLOAT: fprintf(out, " size=%u", t->size); break; + case BTF_KIND_DECL_TAG: + fprintf(out, " type_id=%u component_idx=%d", + t->type, btf_decl_tag(t)->component_idx); + break; default: break; } @@ -210,7 +215,7 @@ int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[]) int i; bool ok = true; - ASSERT_EQ(btf__get_nr_types(btf), nr_types, "btf_nr_types"); + ASSERT_EQ(btf__type_cnt(btf) - 1, nr_types, "btf_nr_types"); for (i = 1; i <= nr_types; i++) { if (!ASSERT_STREQ(btf_type_raw_dump(btf, i), exp_types[i - 1], "raw_dump")) @@ -249,7 +254,7 @@ const char *btf_type_c_dump(const struct btf *btf) return NULL; } - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); if (err) { fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err); diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index f3daa44a82660a..9d59c3990ca8d4 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -33,10 +33,9 @@ #define CGROUP_MOUNT_DFLT "/sys/fs/cgroup" #define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls" #define CGROUP_WORK_DIR "/cgroup-test-work-dir" - #define format_cgroup_path(buf, path) \ - snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \ - CGROUP_WORK_DIR, path) + snprintf(buf, sizeof(buf), "%s%s%d%s", CGROUP_MOUNT_PATH, \ + CGROUP_WORK_DIR, getpid(), path) #define format_classid_path(buf) \ snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \ diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 629da3854b3e7a..fcc9cb91b21112 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -26,4 +26,4 @@ int join_classid(void); int setup_classid_environment(void); void cleanup_classid_environment(void); -#endif /* __CGROUP_HELPERS_H */ +#endif /* __CGROUP_HELPERS_H */ \ No newline at end of file diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c index 3fd83b9dc1bfe2..87fd1aa323a925 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.c +++ b/tools/testing/selftests/bpf/flow_dissector_load.c @@ -17,7 +17,7 @@ const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector"; const char *cfg_map_name = "jmp_table"; bool cfg_attach = true; -char *cfg_section_name; +char *cfg_prog_name; char *cfg_path_name; static void load_and_attach_program(void) @@ -25,7 +25,11 @@ static void load_and_attach_program(void) int prog_fd, ret; struct bpf_object *obj; - ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name, + ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + if (ret) + error(1, 0, "failed to enable libbpf strict mode: %d", ret); + + ret = 
bpf_flow_load(&obj, cfg_path_name, cfg_prog_name, cfg_map_name, NULL, &prog_fd, NULL); if (ret) error(1, 0, "bpf_flow_load %s", cfg_path_name); @@ -75,15 +79,15 @@ static void parse_opts(int argc, char **argv) break; case 'p': if (cfg_path_name) - error(1, 0, "only one prog name can be given"); + error(1, 0, "only one path can be given"); cfg_path_name = optarg; break; case 's': - if (cfg_section_name) - error(1, 0, "only one section can be given"); + if (cfg_prog_name) + error(1, 0, "only one prog can be given"); - cfg_section_name = optarg; + cfg_prog_name = optarg; break; } } @@ -94,7 +98,7 @@ static void parse_opts(int argc, char **argv) if (cfg_attach && !cfg_path_name) error(1, 0, "must provide a path to the BPF program"); - if (cfg_attach && !cfg_section_name) + if (cfg_attach && !cfg_prog_name) error(1, 0, "must provide a section name"); } diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h index 7290401ec1727e..9d0acc2fc6cc0f 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.h +++ b/tools/testing/selftests/bpf/flow_dissector_load.h @@ -7,7 +7,7 @@ static inline int bpf_flow_load(struct bpf_object **obj, const char *path, - const char *section_name, + const char *prog_name, const char *map_name, const char *keys_map_name, int *prog_fd, @@ -23,13 +23,7 @@ static inline int bpf_flow_load(struct bpf_object **obj, if (ret) return ret; - main_prog = NULL; - bpf_object__for_each_program(prog, *obj) { - if (strcmp(section_name, bpf_program__section_name(prog)) == 0) { - main_prog = prog; - break; - } - } + main_prog = bpf_object__find_program_by_name(*obj, prog_name); if (!main_prog) return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index ba0e1efe5a45fe..0f9525293881b3 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -4,13 +4,13 @@ #include "atomics.lskel.h" -static void test_add(struct atomics *skel) +static void test_add(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__add__attach(skel); + link_fd = atomics_lskel__add__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(add)")) return; @@ -36,13 +36,13 @@ static void test_add(struct atomics *skel) close(link_fd); } -static void test_sub(struct atomics *skel) +static void test_sub(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__sub__attach(skel); + link_fd = atomics_lskel__sub__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(sub)")) return; @@ -69,13 +69,13 @@ static void test_sub(struct atomics *skel) close(link_fd); } -static void test_and(struct atomics *skel) +static void test_and(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__and__attach(skel); + link_fd = atomics_lskel__and__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(and)")) return; @@ -97,13 +97,13 @@ static void test_and(struct atomics *skel) close(link_fd); } -static void test_or(struct atomics *skel) +static void test_or(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__or__attach(skel); + link_fd = atomics_lskel__or__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(or)")) return; @@ -126,13 +126,13 @@ static void test_or(struct atomics *skel) close(link_fd); } -static void test_xor(struct atomics *skel) +static void 
test_xor(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__xor__attach(skel); + link_fd = atomics_lskel__xor__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(xor)")) return; @@ -154,13 +154,13 @@ static void test_xor(struct atomics *skel) close(link_fd); } -static void test_cmpxchg(struct atomics *skel) +static void test_cmpxchg(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__cmpxchg__attach(skel); + link_fd = atomics_lskel__cmpxchg__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)")) return; @@ -183,13 +183,13 @@ static void test_cmpxchg(struct atomics *skel) close(link_fd); } -static void test_xchg(struct atomics *skel) +static void test_xchg(struct atomics_lskel *skel) { int err, prog_fd; __u32 duration = 0, retval; int link_fd; - link_fd = atomics__xchg__attach(skel); + link_fd = atomics_lskel__xchg__attach(skel); if (!ASSERT_GT(link_fd, 0, "attach(xchg)")) return; @@ -212,10 +212,10 @@ static void test_xchg(struct atomics *skel) void test_atomics(void) { - struct atomics *skel; + struct atomics_lskel *skel; __u32 duration = 0; - skel = atomics__open_and_load(); + skel = atomics_lskel__open_and_load(); if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) return; @@ -225,6 +225,7 @@ void test_atomics(void) test__skip(); goto cleanup; } + skel->bss->pid = getpid(); if (test__start_subtest("add")) test_add(skel); @@ -242,5 +243,5 @@ void test_atomics(void) test_xchg(skel); cleanup: - atomics__destroy(skel); + atomics_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index bf307bb9e446d0..d0bd51eb23c89b 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -5,6 +5,11 @@ /* this is how USDT semaphore is actually defined, except volatile modifier */ volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes"))); +/* attach point */ +static void method(void) { + return ; +} + void test_attach_probe(void) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); @@ -14,12 +19,26 @@ void test_attach_probe(void) struct test_attach_probe* skel; size_t uprobe_offset; ssize_t base_addr, ref_ctr_offset; + bool legacy; + + /* Check if new-style kprobe/uprobe API is supported. + * Kernels that support new FD-based kprobe and uprobe BPF attachment + * through perf_event_open() syscall expose + * /sys/bus/event_source/devices/kprobe/type and + * /sys/bus/event_source/devices/uprobe/type files, respectively. They + * contain magic numbers that are passed as "type" field of + * perf_event_attr. Lack of such file in the system indicates legacy + * kernel with old-style kprobe/uprobe attach interface through + * creating per-probe event through tracefs. For such cases + * ref_ctr_offset feature is not supported, so we don't test it. 
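+	 * As a rough illustration only (not part of the test itself): on
+	 * kernels with the FD-based API,
+	 *   cat /sys/bus/event_source/devices/kprobe/type
+	 * prints the dynamic PMU type number, while on legacy kernels the
+	 * file simply does not exist.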
+ */ + legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; base_addr = get_base_addr(); if (CHECK(base_addr < 0, "get_base_addr", "failed to find base addr: %zd", base_addr)) return; - uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + uprobe_offset = get_uprobe_offset(&method, base_addr); ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) @@ -45,10 +64,11 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_kretprobe = kretprobe_link; - ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); + if (!legacy) + ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); uprobe_opts.retprobe = false; - uprobe_opts.ref_ctr_offset = ref_ctr_offset; + uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset; uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */, "/proc/self/exe", @@ -58,11 +78,12 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_uprobe = uprobe_link; - ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); + if (!legacy) + ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ uprobe_opts.retprobe = true; - uprobe_opts.ref_ctr_offset = ref_ctr_offset; + uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset; uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */, "/proc/self/exe", @@ -82,7 +103,7 @@ void test_attach_probe(void) goto cleanup; /* trigger & validate uprobe & uretprobe */ - get_base_addr(); + method(); if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res", "wrong uprobe res: %d\n", skel->bss->uprobe_res)) diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c new file mode 100644 index 00000000000000..be73e3de666805 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include "bloom_filter_map.skel.h" + +static void test_fail_cases(void) +{ + __u32 value; + int fd, err; + + /* Invalid key size */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 4, sizeof(value), 100, 0); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid key size")) + close(fd); + + /* Invalid value size */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, 0, 100, 0); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid value size 0")) + close(fd); + + /* Invalid max entries size */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 0, 0); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid max entries size")) + close(fd); + + /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, + BPF_F_NO_PREALLOC); + if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid flags")) + close(fd); + + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, 0); + if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter")) + return; + + /* Test invalid flags */ + err = bpf_map_update_elem(fd, NULL, &value, -1); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_EXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_F_LOCK); + ASSERT_EQ(err, -EINVAL, 
"bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_NOEXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, 10000); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + close(fd); +} + +static void test_success_cases(void) +{ + char value[11]; + int fd, err; + + /* Create a map */ + fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, + BPF_F_ZERO_SEED | BPF_F_NUMA_NODE); + if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter success case")) + return; + + /* Add a value to the bloom filter */ + err = bpf_map_update_elem(fd, NULL, &value, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem bloom filter success case")) + goto done; + + /* Lookup a value in the bloom filter */ + err = bpf_map_lookup_elem(fd, NULL, &value); + ASSERT_OK(err, "bpf_map_update_elem bloom filter success case"); + +done: + close(fd); +} + +static void check_bloom(struct bloom_filter_map *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.check_bloom); + if (!ASSERT_OK_PTR(link, "link")) + return; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); +} + +static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals, + __u32 nr_rand_vals) +{ + int outer_map_fd, inner_map_fd, err, i, key = 0; + struct bpf_link *link; + + /* Create a bloom filter map that will be used as the inner map */ + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(*rand_vals), + nr_rand_vals, 0); + if (!ASSERT_GE(inner_map_fd, 0, "bpf_create_map bloom filter inner map")) + return; + + for (i = 0; i < nr_rand_vals; i++) { + err = bpf_map_update_elem(inner_map_fd, NULL, rand_vals + i, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to inner_map_fd")) + goto done; + } + + /* Add the bloom filter map to the outer map */ + outer_map_fd = bpf_map__fd(skel->maps.outer_map); + err = bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY); + if (!ASSERT_OK(err, "Add bloom filter map to outer map")) + goto done; + + /* Attach the bloom_filter_inner_map prog */ + link = bpf_program__attach(skel->progs.inner_map); + if (!ASSERT_OK_PTR(link, "link")) + goto delete_inner_map; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); + +delete_inner_map: + /* Ensure the inner bloom filter map can be deleted */ + err = bpf_map_delete_elem(outer_map_fd, &key); + ASSERT_OK(err, "Delete inner bloom filter map"); + +done: + close(inner_map_fd); +} + +static int setup_progs(struct bloom_filter_map **out_skel, __u32 **out_rand_vals, + __u32 *out_nr_rand_vals) +{ + struct bloom_filter_map *skel; + int random_data_fd, bloom_fd; + __u32 *rand_vals = NULL; + __u32 map_size, val; + int err, i; + + /* Set up a bloom filter map skeleton */ + skel = bloom_filter_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bloom_filter_map__open_and_load")) + return -EINVAL; + + /* Set up rand_vals */ + map_size = bpf_map__max_entries(skel->maps.map_random_data); + rand_vals = malloc(sizeof(*rand_vals) * map_size); + if (!rand_vals) { + err = -ENOMEM; + goto error; + } + + /* Generate random values and populate both skeletons */ + random_data_fd = bpf_map__fd(skel->maps.map_random_data); + bloom_fd = bpf_map__fd(skel->maps.map_bloom); + for (i = 0; i < map_size; i++) { + val = rand(); + + err = bpf_map_update_elem(random_data_fd, &i, &val, BPF_ANY); + if 
(!ASSERT_OK(err, "Add random value to map_random_data")) + goto error; + + err = bpf_map_update_elem(bloom_fd, NULL, &val, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to map_bloom")) + goto error; + + rand_vals[i] = val; + } + + *out_skel = skel; + *out_rand_vals = rand_vals; + *out_nr_rand_vals = map_size; + + return 0; + +error: + bloom_filter_map__destroy(skel); + if (rand_vals) + free(rand_vals); + return err; +} + +void test_bloom_filter_map(void) +{ + __u32 *rand_vals, nr_rand_vals; + struct bloom_filter_map *skel; + int err; + + test_fail_cases(); + test_success_cases(); + + err = setup_progs(&skel, &rand_vals, &nr_rand_vals); + if (err) + return; + + test_inner_map(skel, rand_vals, nr_rand_vals); + free(rand_vals); + + check_bloom(skel); + + bloom_filter_map__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 77ac24b191d4ca..9454331aaf85fe 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -589,7 +589,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) static void test_bpf_hash_map(void) { - __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; + __u32 expected_key_a = 0, expected_key_b = 0; DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); struct bpf_iter_bpf_hash_map *skel; int err, i, len, map_fd, iter_fd; @@ -638,7 +638,6 @@ static void test_bpf_hash_map(void) val = i + 4; expected_key_a += key.a; expected_key_b += key.b; - expected_key_c += key.c; expected_val += val; err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); @@ -685,7 +684,7 @@ static void test_bpf_hash_map(void) static void test_bpf_percpu_hash_map(void) { - __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; + __u32 expected_key_a = 0, expected_key_b = 0; DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); struct bpf_iter_bpf_percpu_hash_map *skel; int err, i, j, len, map_fd, iter_fd; @@ -722,7 +721,6 @@ static void test_bpf_percpu_hash_map(void) key.c = i + 3; expected_key_a += key.a; expected_key_b += key.b; - expected_key_c += key.c; for (j = 0; j < bpf_num_possible_cpus(); j++) { *(__u32 *)(val + j * 8) = i + j; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c index 85babb0487b314..b52ff8ce34db82 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -179,7 +179,7 @@ static void do_bpf_iter_setsockopt(struct bpf_iter_setsockopt *iter_skel, free_fds(est_fds, nr_est); } -void test_bpf_iter_setsockopt(void) +void serial_test_bpf_iter_setsockopt(void) { struct bpf_iter_setsockopt *iter_skel = NULL; struct bpf_cubic *cubic_skel = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index 284d5921c3458b..eb8eeebe69359e 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -3,7 +3,7 @@ #define nr_iters 2 -void test_bpf_obj_id(void) +void serial_test_bpf_obj_id(void) { const __u64 array_magic_value = 0xfaceb00c; const __u32 array_key = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 3d002c245d2b74..27f5d8ea7964d9 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ 
b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -39,82 +39,171 @@ struct scale_test_def { bool fails; }; -void test_bpf_verif_scale(void) -{ - struct scale_test_def tests[] = { - { "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ }, - - { "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS }, - { "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS }, - { "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS }, - - { "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* full unroll by llvm */ - { "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* partial unroll. llvm will unroll loop ~150 times. - * C loop count -> 600. - * Asm loop count -> 4. - * 16k insns in loop body. - * Total of 5 such loops. Total program size ~82k insns. - */ - { "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* no unroll at all. - * C loop count -> 600. - * ASM loop count -> 600. - * ~110 insns in loop body. - * Total of 5 such loops. Total program size ~1500 insns. - */ - { "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - { "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "loop4.o", BPF_PROG_TYPE_SCHED_CLS }, - { "loop5.o", BPF_PROG_TYPE_SCHED_CLS }, - { "loop6.o", BPF_PROG_TYPE_KPROBE }, - - /* partial unroll. 19k insn in a loop. - * Total program size 20.8k insn. - * ~350k processed_insns - */ - { "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* no unroll, tiny loops */ - { "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - { "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - /* non-inlined subprogs */ - { "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, - - { "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, - { "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, - - { "test_xdp_loop.o", BPF_PROG_TYPE_XDP }, - { "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL }, - }; +static void scale_test(const char *file, + enum bpf_prog_type attach_type, + bool should_fail) +{ libbpf_print_fn_t old_print_fn = NULL; - int err, i; + int err; if (env.verifier_stats) { test__force_log(); old_print_fn = libbpf_set_print(libbpf_debug_print); } - for (i = 0; i < ARRAY_SIZE(tests); i++) { - const struct scale_test_def *test = &tests[i]; - - if (!test__start_subtest(test->file)) - continue; - - err = check_load(test->file, test->attach_type); - CHECK_FAIL(err && !test->fails); - } + err = check_load(file, attach_type); + if (should_fail) + ASSERT_ERR(err, "expect_error"); + else + ASSERT_OK(err, "expect_success"); if (env.verifier_stats) libbpf_set_print(old_print_fn); } + +void test_verif_scale1() +{ + scale_test("test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale2() +{ + scale_test("test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale3() +{ + scale_test("test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_pyperf_global() +{ + scale_test("pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf_subprogs() +{ + scale_test("pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf50() +{ + /* full unroll by llvm */ + scale_test("pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf100() +{ + /* full unroll by llvm */ + scale_test("pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT, 
false); +} + +void test_verif_scale_pyperf180() +{ + /* full unroll by llvm */ + scale_test("pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600() +{ + /* partial unroll. llvm will unroll loop ~150 times. + * C loop count -> 600. + * Asm loop count -> 4. + * 16k insns in loop body. + * Total of 5 such loops. Total program size ~82k insns. + */ + scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600_nounroll() +{ + /* no unroll at all. + * C loop count -> 600. + * ASM loop count -> 600. + * ~110 insns in loop body. + * Total of 5 such loops. Total program size ~1500 insns. + */ + scale_test("pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop1() +{ + scale_test("loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop2() +{ + scale_test("loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop3_fail() +{ + scale_test("loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */); +} + +void test_verif_scale_loop4() +{ + scale_test("loop4.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop5() +{ + scale_test("loop5.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop6() +{ + scale_test("loop6.o", BPF_PROG_TYPE_KPROBE, false); +} + +void test_verif_scale_strobemeta() +{ + /* partial unroll. 19k insn in a loop. + * Total program size 20.8k insn. + * ~350k processed_insns + */ + scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll1() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll2() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_subprogs() +{ + /* non-inlined subprogs */ + scale_test("strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_sysctl_loop1() +{ + scale_test("test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_sysctl_loop2() +{ + scale_test("test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_xdp_loop() +{ + scale_test("test_xdp_loop.o", BPF_PROG_TYPE_XDP, false); +} + +void test_verif_scale_seg6_loop() +{ + scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); +} + +void test_verif_twfw() +{ + scale_test("twfw.o", BPF_PROG_TYPE_CGROUP_SKB, false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 649f87382c8d87..ac596cb06e405f 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -39,8 +39,8 @@ static bool always_log; #define BTF_END_RAW 0xdeadbeef #define NAME_TBD 0xdeadb33f -#define NAME_NTH(N) (0xffff0000 | N) -#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000) +#define NAME_NTH(N) (0xfffe0000 | N) +#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000) #define GET_NAME_NTH_IDX(X) (X & 0x0000ffff) #define MAX_NR_RAW_U32 1024 @@ -3661,6 +3661,285 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid type_size", }, +{ + .descr = "decl_tag test #1, struct/member, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + 
BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #2, union/member, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #3, variable, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0global\0tag1\0tag2"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #4, func/parameter, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #5, invalid value", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_DECL_TAG_ENC(0, 2, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid value", +}, +{ + .descr = "decl_tag test #6, invalid target type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_DECL_TAG_ENC(NAME_TBD, 1, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type", +}, +{ + .descr = "decl_tag test #7, invalid vlen", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 1), 2), (0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, 
+ .btf_load_err = true, + .err_str = "vlen != 0", +}, +{ + .descr = "decl_tag test #8, invalid kflag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), 2), (-1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, +{ + .descr = "decl_tag test #9, var, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #10, struct member, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #11, func parameter, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #12, < -1 component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, -2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #13, typedef, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #14, typedef, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + 
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, + }; /* struct btf_raw_test raw_tests[] */ static const char *get_next_str(const char *start, const char *end) @@ -4268,7 +4547,7 @@ static void do_test_file(unsigned int test_num) if (CHECK(err, "obj: %d", err)) return; - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); if (CHECK(!prog, "Cannot find bpf_prog")) { err = -1; goto done; @@ -6421,27 +6700,33 @@ const struct btf_dedup_test dedup_tests[] = { BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */ BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */ BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */ - BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), /* float d; */ + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */ /* ptr -> [3] struct s */ BTF_PTR_ENC(3), /* [4] */ /* ptr -> [6] const int */ BTF_PTR_ENC(6), /* [5] */ /* const -> [1] int */ BTF_CONST_ENC(1), /* [6] */ + /* tag -> [3] struct s */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + /* tag -> [3] struct s, member 1 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ /* full copy of the above */ - BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */ - BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */ - BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [9] */ - BTF_MEMBER_ENC(NAME_NTH(3), 10, 0), - BTF_MEMBER_ENC(NAME_NTH(4), 11, 64), - BTF_MEMBER_ENC(NAME_NTH(5), 8, 128), - BTF_MEMBER_ENC(NAME_NTH(6), 7, 640), - BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), - BTF_PTR_ENC(9), /* [10] */ - BTF_PTR_ENC(12), /* [11] */ - BTF_CONST_ENC(7), /* [12] */ - BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [13] */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */ + BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */ + BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */ + BTF_MEMBER_ENC(NAME_NTH(3), 12, 0), + BTF_MEMBER_ENC(NAME_NTH(4), 13, 64), + BTF_MEMBER_ENC(NAME_NTH(5), 10, 128), + BTF_MEMBER_ENC(NAME_NTH(6), 9, 640), + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), + BTF_PTR_ENC(11), /* [12] */ + BTF_PTR_ENC(14), /* [13] */ + BTF_CONST_ENC(9), /* [14] */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */ BTF_END_RAW, }, BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"), @@ -6458,14 +6743,16 @@ const struct btf_dedup_test dedup_tests[] = { BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */ BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */ BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */ - BTF_MEMBER_ENC(NAME_NTH(4), 7, 672), /* float d; */ + BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */ /* ptr -> [3] struct s */ BTF_PTR_ENC(3), /* [4] */ /* ptr -> [6] const int */ BTF_PTR_ENC(6), /* [5] */ /* const -> [1] int */ BTF_CONST_ENC(1), /* [6] */ - BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */ BTF_END_RAW, }, BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"), @@ -6590,9 +6877,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ + 
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), }, .expect = { .raw_types = { @@ -6616,9 +6906,12 @@ const struct btf_dedup_test dedup_tests[] = { BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), }, .opts = { .dont_resolve_fwds = false, @@ -6767,6 +7060,185 @@ const struct btf_dedup_test dedup_tests[] = { .dedup_table_size = 1 }, }, +{ + .descr = "dedup: func/func_arg/var tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ + /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ + /* tag -> t */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */ + /* tag -> func */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */ + /* tag -> func arg a1 */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), + }, + .opts = { + .dont_resolve_fwds = false, + }, +}, +{ + .descr = "dedup: func/func_param tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ + /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */ + /* tag -> f: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */ + /* tag -> f/a2: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */ + /* tag -> f: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */ + /* tag -> f/a2: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), + }, + .expect = { + 
.raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), + }, + .opts = { + .dont_resolve_fwds = false, + }, +}, +{ + .descr = "dedup: struct/struct_member tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + /* tag -> t: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + /* tag -> t/m2: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + /* tag -> t: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */ + /* tag -> t/m2: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + }, + .opts = { + .dont_resolve_fwds = false, + }, +}, +{ + .descr = "dedup: typedef tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [3] */ + /* tag -> t: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [5] */ + /* tag -> t: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, + .opts = { + .dont_resolve_fwds = false, + }, +}, }; @@ -6801,6 +7273,8 @@ static int btf_type_size(const struct btf_type *t) return base_size + sizeof(struct btf_var); case BTF_KIND_DATASEC: return base_size + vlen * sizeof(struct btf_var_secinfo); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); default: fprintf(stderr, "Unsupported 
BTF_KIND:%u\n", kind); return -EINVAL; @@ -6871,8 +7345,8 @@ static void do_test_dedup(unsigned int test_num) goto done; } - test_btf_data = btf__get_raw_data(test_btf, &test_btf_size); - expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size); + test_btf_data = btf__raw_data(test_btf, &test_btf_size); + expect_btf_data = btf__raw_data(expect_btf, &expect_btf_size); if (CHECK(test_btf_size != expect_btf_size, "test_btf_size:%u != expect_btf_size:%u", test_btf_size, expect_btf_size)) { @@ -6926,8 +7400,8 @@ static void do_test_dedup(unsigned int test_num) expect_str_cur += expect_len + 1; } - test_nr_types = btf__get_nr_types(test_btf); - expect_nr_types = btf__get_nr_types(expect_btf); + test_nr_types = btf__type_cnt(test_btf); + expect_nr_types = btf__type_cnt(expect_btf); if (CHECK(test_nr_types != expect_nr_types, "test_nr_types:%u != expect_nr_types:%u", test_nr_types, expect_nr_types)) { @@ -6935,7 +7409,7 @@ static void do_test_dedup(unsigned int test_num) goto done; } - for (i = 1; i <= test_nr_types; i++) { + for (i = 1; i < test_nr_types; i++) { const struct btf_type *test_type, *expect_type; int test_size, expect_size; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 52ccf0cf35e1ce..aa76360d8f4990 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -27,7 +27,7 @@ static struct btf_dump_test_case { static int btf_dump_all_types(const struct btf *btf, const struct btf_dump_opts *opts) { - size_t type_cnt = btf__get_nr_types(btf); + size_t type_cnt = btf__type_cnt(btf); struct btf_dump *d; int err = 0, id; @@ -36,7 +36,7 @@ static int btf_dump_all_types(const struct btf *btf, if (err) return err; - for (id = 1; id <= type_cnt; id++) { + for (id = 1; id < type_cnt; id++) { err = btf_dump__dump_type(d, id); if (err) goto done; @@ -133,7 +133,7 @@ static char *dump_buf; static size_t dump_buf_sz; static FILE *dump_buf_file; -void test_btf_dump_incremental(void) +static void test_btf_dump_incremental(void) { struct btf *btf = NULL; struct btf_dump *d = NULL; @@ -171,7 +171,7 @@ void test_btf_dump_incremental(void) err = btf__add_field(btf, "x", 2, 0, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } @@ -210,7 +210,7 @@ void test_btf_dump_incremental(void) err = btf__add_field(btf, "s", 3, 32, 0); ASSERT_OK(err, "field_ok"); - for (i = 1; i <= btf__get_nr_types(btf); i++) { + for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } @@ -358,12 +358,27 @@ static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d, TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1); #ifdef __SIZEOF_INT128__ - TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128, BTF_F_COMPACT, - "(__int128)0xffffffffffffffff", - 0xffffffffffffffff); - ASSERT_OK(btf_dump_data(btf, d, "__int128", NULL, 0, &i, 16, str, - "(__int128)0xfffffffffffffffffffffffffffffffe"), - "dump __int128"); + /* gcc encode unsigned __int128 type with name "__int128 unsigned" in dwarf, + * and clang encode it with name "unsigned __int128" in dwarf. + * Do an availability test for either variant before doing actual test. 
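+	 * (Illustrative only, assuming a kernel that exposes its own BTF:
+	 *   bpftool btf dump file /sys/kernel/btf/vmlinux | grep __int128
+	 * shows which of the two spellings the toolchain actually emitted.)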
+ */ + if (btf__find_by_name(btf, "unsigned __int128") > 0) { + TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT, + "(unsigned __int128)0xffffffffffffffff", + 0xffffffffffffffff); + ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str, + "(unsigned __int128)0xfffffffffffffffffffffffffffffffe"), + "dump unsigned __int128"); + } else if (btf__find_by_name(btf, "__int128 unsigned") > 0) { + TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT, + "(__int128 unsigned)0xffffffffffffffff", + 0xffffffffffffffff); + ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str, + "(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"), + "dump unsigned __int128"); + } else { + ASSERT_TRUE(false, "unsigned_int128_not_found"); + } #endif } @@ -763,8 +778,10 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, char *str) { +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT, "int cpu_number = (int)100", 100); +#endif TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT, "static int cpu_profile_flip = (int)2", 2); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c index 8ab5d3e358dd2e..8afbf3d0b89a22 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -7,12 +7,12 @@ #include void test_btf_endian() { -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ enum btf_endianness endian = BTF_LITTLE_ENDIAN; -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ enum btf_endianness endian = BTF_BIG_ENDIAN; #else -#error "Unrecognized __BYTE_ORDER" +#error "Unrecognized __BYTE_ORDER__" #endif enum btf_endianness swap_endian = 1 - endian; struct btf *btf = NULL, *swap_btf = NULL; @@ -32,7 +32,7 @@ void test_btf_endian() { ASSERT_EQ(btf__endianness(btf), swap_endian, "endian"); /* Get raw BTF data in non-native endianness... 
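	 * (Note: btf__raw_data() returns a pointer into libbpf-owned memory and
	 * fills in the size; the caller must not free or modify the buffer.)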
*/ - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) goto err_out; @@ -42,9 +42,9 @@ void test_btf_endian() { goto err_out; ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); - ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); - swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz); + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) goto err_out; @@ -58,7 +58,7 @@ void test_btf_endian() { /* swap it back to native endianness */ btf__set_endianness(swap_btf, endian); - swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz); + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) goto err_out; @@ -75,7 +75,7 @@ void test_btf_endian() { swap_btf = NULL; btf__set_endianness(btf, swap_endian); - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) goto err_out; @@ -85,7 +85,7 @@ void test_btf_endian() { goto err_out; ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); - ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); /* the type should appear as if it was stored in native endianness */ t = btf__type_by_id(swap_btf, var_id); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c index ca7c2a91610aa1..b1ffe61f2aa9f4 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -72,7 +72,7 @@ void test_btf_split() { d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf); if (!ASSERT_OK_PTR(d, "btf_dump__new")) goto cleanup; - for (i = 1; i <= btf__get_nr_types(btf2); i++) { + for (i = 1; i < btf__type_cnt(btf2); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c new file mode 100644 index 00000000000000..91821f42714dad --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include "tag.skel.h" + +void test_btf_tag(void) +{ + struct tag *skel; + + skel = tag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "btf_tag")) + return; + + if (skel->rodata->skip_tests) { + printf("%s:SKIP: btf_tag attribute not supported", __func__); + test__skip(); + } + + tag__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c index 022c7d89d6f462..b912eeb0b6b48b 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_write.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -4,19 +4,15 @@ #include #include "btf_helpers.h" -void test_btf_write() { +static void gen_btf(struct btf *btf) +{ const struct btf_var_secinfo *vi; const struct btf_type *t; const struct btf_member *m; const struct btf_enum *v; const struct btf_param *p; - struct btf *btf; int id, err, str_off; - btf = btf__new_empty(); - if (!ASSERT_OK_PTR(btf, "new_empty")) - return; - str_off = btf__find_str(btf, "int"); ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off"); @@ -281,5 +277,159 @@ void 
test_btf_write() { "[17] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=1 offset=4 size=8", "raw_dump"); + /* DECL_TAG */ + id = btf__add_decl_tag(btf, "tag1", 16, -1); + ASSERT_EQ(id, 18, "tag_id"); + t = btf__type_by_id(btf, 18); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); + ASSERT_EQ(t->type, 16, "tag_type"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, -1, "tag_component_idx"); + ASSERT_STREQ(btf_type_raw_dump(btf, 18), + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "raw_dump"); + + id = btf__add_decl_tag(btf, "tag2", 14, 1); + ASSERT_EQ(id, 19, "tag_id"); + t = btf__type_by_id(btf, 19); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); + ASSERT_EQ(t->type, 14, "tag_type"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx"); + ASSERT_STREQ(btf_type_raw_dump(btf, 19), + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); +} + +static void test_btf_add() +{ + struct btf *btf; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "new_empty")) + return; + + gen_btf(btf); + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1"); + btf__free(btf); } + +static void test_btf_add_btf() +{ + struct btf *btf1 = NULL, *btf2 = NULL; + int id; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "btf1")) + return; + + btf2 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf2, "btf2")) + goto cleanup; + + gen_btf(btf1); + gen_btf(btf2); + + id = btf__add_btf(btf1, btf2); + if (!ASSERT_EQ(id, 20, "id")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 
vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", + + /* types appended from the second BTF */ + "[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[21] PTR '(anon)' type_id=20", + "[22] CONST '(anon)' type_id=24", + "[23] VOLATILE '(anon)' type_id=22", + "[24] RESTRICT '(anon)' type_id=23", + "[25] ARRAY '(anon)' type_id=21 index_type_id=20 nr_elems=10", + "[26] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=20 bits_offset=0\n" + "\t'f2' type_id=20 bits_offset=32 bitfield_size=16", + "[27] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=20 bits_offset=0 bitfield_size=16", + "[28] ENUM 'e1' size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[29] FWD 'struct_fwd' fwd_kind=struct", + "[30] FWD 'union_fwd' fwd_kind=union", + "[31] ENUM 'enum_fwd' size=4 vlen=0", + "[32] TYPEDEF 'typedef1' type_id=20", + "[33] FUNC 'func1' type_id=34 linkage=global", + "[34] FUNC_PROTO '(anon)' ret_type_id=20 vlen=2\n" + "\t'p1' type_id=20\n" + "\t'p2' type_id=21", + "[35] VAR 'var1' type_id=20, linkage=global-alloc", + "[36] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=20 offset=4 size=8", + "[37] DECL_TAG 'tag1' type_id=35 component_idx=-1", + "[38] DECL_TAG 'tag2' type_id=33 component_idx=1"); + +cleanup: + btf__free(btf1); + btf__free(btf2); +} + +void test_btf_write() +{ + if (test__start_subtest("btf_add")) + test_btf_add(); + if (test__start_subtest("btf_add_btf")) + test_btf_add_btf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index 876be0ecb654f7..621c5722219182 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -363,7 +363,7 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd) cg_storage_multi_shared__destroy(obj); } -void test_cg_storage_multi(void) +void serial_test_cg_storage_multi(void) { int parent_cgroup_fd = -1, child_cgroup_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 70e94e783070bd..5de485c7370fd9 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -21,7 +21,7 @@ static int prog_load(void) bpf_log_buf, BPF_LOG_BUF_SIZE); } -void test_cgroup_attach_autodetach(void) +void serial_test_cgroup_attach_autodetach(void) { __u32 duration = 0, prog_cnt = 4, attach_flags; int allow_prog[2] = {-1}; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index 20bb8831dda67b..731bea84d8edd0 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -74,7 +74,7 @@ static int prog_load_cnt(int verdict, int val) return ret; } -void test_cgroup_attach_multi(void) +void serial_test_cgroup_attach_multi(void) { __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 
9e96f8d87fea49..10d3c33821a7b4 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -23,7 +23,7 @@ static int prog_load(int verdict) bpf_log_buf, BPF_LOG_BUF_SIZE); } -void test_cgroup_attach_override(void) +void serial_test_cgroup_attach_override(void) { int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1; __u32 duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c index 9091524131d656..9e6e6aad347c7b 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -24,7 +24,7 @@ int ping_and_check(int exp_calls, int exp_alt_calls) return 0; } -void test_cgroup_link(void) +void serial_test_cgroup_link(void) { struct { const char *path; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index ab3b9bc5e6d1cc..9026b42914d31b 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -46,7 +46,7 @@ void test_cgroup_v1v2(void) { struct network_helper_opts opts = {}; int server_fd, client_fd, cgroup_fd; - static const int port = 60123; + static const int port = 60120; /* Step 1: Check base connectivity works without any BPF. */ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index 012068f33a0a81..f73e6e36b74ddb 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -195,7 +195,7 @@ static void test_check_mtu_tc(__u32 mtu, __u32 ifindex) test_check_mtu__destroy(skel); } -void test_check_mtu(void) +void serial_test_check_mtu(void) { __u32 mtu_lo; diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 3d4b2a358d476d..1dfe14ff6aa45d 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -112,7 +112,7 @@ void test_core_autosize(void) if (!ASSERT_OK_PTR(f, "btf_fdopen")) goto cleanup; - raw_data = btf__get_raw_data(btf, &raw_sz); + raw_data = btf__raw_data(btf, &raw_sz); if (!ASSERT_OK_PTR(raw_data, "raw_data")) goto cleanup; written = fwrite(raw_data, 1, raw_sz, f); @@ -163,7 +163,7 @@ void test_core_autosize(void) usleep(1); - bss_map = bpf_object__find_map_by_name(skel->obj, "test_cor.bss"); + bss_map = bpf_object__find_map_by_name(skel->obj, ".bss"); if (!ASSERT_OK_PTR(bss_map, "bss_map_find")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 4739b15b2a979a..55ec85ba737547 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -30,7 +30,7 @@ static int duration = 0; .output_len = sizeof(struct core_reloc_module_output), \ .prog_sec_name = sec_name, \ .raw_tp_name = tp_name, \ - .trigger = trigger_module_test_read, \ + .trigger = __trigger_module_test_read, \ .needs_testmod = true, \ } @@ -249,8 +249,7 @@ static int duration = 0; #define SIZE_CASE_COMMON(name) \ .case_name = #name, \ .bpf_obj_file = "test_core_reloc_size.o", \ - .btf_src_file = "btf__core_reloc_" #name ".o", \ - .relaxed_core_relocs = true + 
.btf_src_file = "btf__core_reloc_" #name ".o" #define SIZE_OUTPUT_DATA(type) \ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ @@ -382,7 +381,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test) exp->local_anon_void_ptr = -1; exp->local_anon_arr = -1; - for (i = 1; i <= btf__get_nr_types(local_btf); i++) + for (i = 1; i < btf__type_cnt(local_btf); i++) { t = btf__type_by_id(local_btf, i); /* we are interested only in anonymous types */ @@ -475,19 +474,11 @@ static int setup_type_id_case_failure(struct core_reloc_test_case *test) return 0; } -static int trigger_module_test_read(const struct core_reloc_test_case *test) +static int __trigger_module_test_read(const struct core_reloc_test_case *test) { struct core_reloc_module_output *exp = (void *)test->output; - int fd, err; - - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); - err = -errno; - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) - return err; - - read(fd, NULL, exp->len); /* request expected number of bytes */ - close(fd); + trigger_module_test_read(exp->len); return 0; } @@ -876,7 +867,7 @@ void test_core_reloc(void) goto cleanup; } - data_map = bpf_object__find_map_by_name(obj, "test_cor.bss"); + data_map = bpf_object__find_map_by_name(obj, ".bss"); if (CHECK(!data_map, "find_data_map", "data map not found\n")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c new file mode 100644 index 00000000000000..cbaa44ffb8c6e4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. Huawei Technologies Co., Ltd */ +#include +#include "dummy_st_ops.skel.h" + +/* Need to keep consistent with definition in include/linux/bpf.h */ +struct bpf_dummy_ops_state { + int val; +}; + +static void test_dummy_st_ops_attach(void) +{ + struct dummy_st_ops *skel; + struct bpf_link *link; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.dummy_1); + ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ret_value(void) +{ + __u64 args[1] = {0}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ptr_arg(void) +{ + int exp_retval = 0xbeef; + struct bpf_dummy_ops_state in_state = { + .val = exp_retval, + }; + __u64 args[1] = {(unsigned long)&in_state}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); + ASSERT_EQ(attr.retval, exp_retval, "test_ret"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_multiple_args(void) +{ + __u64 args[5] = {0, -100, 
0x8a5f, 'c', 0x1234567887654321ULL}; + struct bpf_prog_test_run_attr attr = { + .ctx_size_in = sizeof(args), + .ctx_in = args, + }; + struct dummy_st_ops *skel; + int fd, err; + size_t i; + char name[8]; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_2); + attr.prog_fd = fd; + err = bpf_prog_test_run_xattr(&attr); + ASSERT_OK(err, "test_run"); + for (i = 0; i < ARRAY_SIZE(args); i++) { + snprintf(name, sizeof(name), "arg %zu", i); + ASSERT_EQ(skel->bss->test_2_args[i], args[i], name); + } + + dummy_st_ops__destroy(skel); +} + +void test_dummy_st_ops(void) +{ + if (test__start_subtest("dummy_st_ops_attach")) + test_dummy_st_ops_attach(); + if (test__start_subtest("dummy_init_ret_value")) + test_dummy_init_ret_value(); + if (test__start_subtest("dummy_init_ptr_arg")) + test_dummy_init_ptr_arg(); + if (test__start_subtest("dummy_multiple_args")) + test_dummy_multiple_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c index 91154c2ba256d6..4374ac8a8a915a 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -6,23 +6,23 @@ void test_fentry_fexit(void) { - struct fentry_test *fentry_skel = NULL; - struct fexit_test *fexit_skel = NULL; + struct fentry_test_lskel *fentry_skel = NULL; + struct fexit_test_lskel *fexit_skel = NULL; __u64 *fentry_res, *fexit_res; __u32 duration = 0, retval; int err, prog_fd, i; - fentry_skel = fentry_test__open_and_load(); + fentry_skel = fentry_test_lskel__open_and_load(); if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) goto close_prog; - fexit_skel = fexit_test__open_and_load(); + fexit_skel = fexit_test_lskel__open_and_load(); if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) goto close_prog; - err = fentry_test__attach(fentry_skel); + err = fentry_test_lskel__attach(fentry_skel); if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) goto close_prog; - err = fexit_test__attach(fexit_skel); + err = fexit_test_lskel__attach(fexit_skel); if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto close_prog; @@ -44,6 +44,6 @@ void test_fentry_fexit(void) } close_prog: - fentry_test__destroy(fentry_skel); - fexit_test__destroy(fexit_skel); + fentry_test_lskel__destroy(fentry_skel); + fexit_test_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 174c89e7456e5c..12921b3850d208 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -3,19 +3,19 @@ #include #include "fentry_test.lskel.h" -static int fentry_test(struct fentry_test *fentry_skel) +static int fentry_test(struct fentry_test_lskel *fentry_skel) { int err, prog_fd, i; __u32 duration = 0, retval; int link_fd; __u64 *result; - err = fentry_test__attach(fentry_skel); + err = fentry_test_lskel__attach(fentry_skel); if (!ASSERT_OK(err, "fentry_attach")) return err; /* Check that already linked program can't be attached again. 
*/ - link_fd = fentry_test__test1__attach(fentry_skel); + link_fd = fentry_test_lskel__test1__attach(fentry_skel); if (!ASSERT_LT(link_fd, 0, "fentry_attach_link")) return -1; @@ -31,7 +31,7 @@ static int fentry_test(struct fentry_test *fentry_skel) return -1; } - fentry_test__detach(fentry_skel); + fentry_test_lskel__detach(fentry_skel); /* zero results for re-attach test */ memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss)); @@ -40,10 +40,10 @@ static int fentry_test(struct fentry_test *fentry_skel) void test_fentry_test(void) { - struct fentry_test *fentry_skel = NULL; + struct fentry_test_lskel *fentry_skel = NULL; int err; - fentry_skel = fentry_test__open_and_load(); + fentry_skel = fentry_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) goto cleanup; @@ -55,5 +55,5 @@ void test_fentry_test(void) ASSERT_OK(err, "fentry_second_attach"); cleanup: - fentry_test__destroy(fentry_skel); + fentry_test_lskel__destroy(fentry_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index 73b4c76e6b869b..9cff14a23bb720 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -60,7 +60,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, struct bpf_object *obj = NULL, *tgt_obj; __u32 retval, tgt_prog_id, info_len; struct bpf_prog_info prog_info = {}; - struct bpf_program **prog = NULL; + struct bpf_program **prog = NULL, *p; struct bpf_link **link = NULL; int err, tgt_fd, i; struct btf *btf; @@ -69,9 +69,6 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, &tgt_obj, &tgt_fd); if (!ASSERT_OK(err, "tgt_prog_load")) return; - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, - .attach_prog_fd = tgt_fd, - ); info_len = sizeof(prog_info); err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len); @@ -89,10 +86,15 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, if (!ASSERT_OK_PTR(prog, "prog_ptr")) goto close_prog; - obj = bpf_object__open_file(obj_file, &opts); + obj = bpf_object__open_file(obj_file, NULL); if (!ASSERT_OK_PTR(obj, "obj_open")) goto close_prog; + bpf_object__for_each_program(p, obj) { + err = bpf_program__set_attach_target(p, tgt_fd, NULL); + ASSERT_OK(err, "set_attach_target"); + } + err = bpf_object__load(obj); if (!ASSERT_OK(err, "obj_load")) goto close_prog; @@ -270,7 +272,7 @@ static void test_fmod_ret_freplace(void) struct bpf_link *freplace_link = NULL; struct bpf_program *prog; __u32 duration = 0; - int err, pkt_fd; + int err, pkt_fd, attach_prog_fd; err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC, &pkt_obj, &pkt_fd); @@ -278,26 +280,32 @@ static void test_fmod_ret_freplace(void) if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", tgt_name, err, errno)) return; - opts.attach_prog_fd = pkt_fd; - freplace_obj = bpf_object__open_file(freplace_name, &opts); + freplace_obj = bpf_object__open_file(freplace_name, NULL); if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open")) goto out; + prog = bpf_object__next_program(freplace_obj, NULL); + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); + ASSERT_OK(err, "freplace__set_attach_target"); + err = bpf_object__load(freplace_obj); if (CHECK(err, "freplace_obj_load", "err %d\n", err)) goto out; - prog = bpf_program__next(NULL, freplace_obj); freplace_link = bpf_program__attach_trace(prog); if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace")) goto out; - opts.attach_prog_fd = 
bpf_program__fd(prog); - fmod_obj = bpf_object__open_file(fmod_ret_name, &opts); + fmod_obj = bpf_object__open_file(fmod_ret_name, NULL); if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open")) goto out; + attach_prog_fd = bpf_program__fd(prog); + prog = bpf_object__next_program(fmod_obj, NULL); + err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL); + ASSERT_OK(err, "fmod_ret_set_attach_target"); + err = bpf_object__load(fmod_obj); if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n")) goto out; @@ -322,14 +330,14 @@ static void test_func_sockmap_update(void) } static void test_obj_load_failure_common(const char *obj_file, - const char *target_obj_file) - + const char *target_obj_file) { /* * standalone test that asserts failure to load freplace prog * because of invalid return code. */ struct bpf_object *obj = NULL, *pkt_obj; + struct bpf_program *prog; int err, pkt_fd; __u32 duration = 0; @@ -339,14 +347,15 @@ static void test_obj_load_failure_common(const char *obj_file, if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", target_obj_file, err, errno)) return; - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, - .attach_prog_fd = pkt_fd, - ); - obj = bpf_object__open_file(obj_file, &opts); + obj = bpf_object__open_file(obj_file, NULL); if (!ASSERT_OK_PTR(obj, "obj_open")) goto close_prog; + prog = bpf_object__next_program(obj, NULL); + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); + ASSERT_OK(err, "set_attach_target"); + /* It should fail to load the program */ err = bpf_object__load(obj); if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err)) @@ -371,7 +380,8 @@ static void test_func_map_prog_compatibility(void) "./test_attach_probe.o"); } -void test_fexit_bpf2bpf(void) +/* NOTE: affect other tests, must run in serial mode */ +void serial_test_fexit_bpf2bpf(void) { if (test__start_subtest("target_no_callees")) test_target_no_callees(); diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c index 4e7f4b42ea298d..f949647dbbc21c 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c @@ -10,7 +10,7 @@ static int do_sleep(void *skel) { - struct fexit_sleep *fexit_skel = skel; + struct fexit_sleep_lskel *fexit_skel = skel; struct timespec ts1 = { .tv_nsec = 1 }; struct timespec ts2 = { .tv_sec = 10 }; @@ -25,16 +25,16 @@ static char child_stack[STACK_SIZE]; void test_fexit_sleep(void) { - struct fexit_sleep *fexit_skel = NULL; + struct fexit_sleep_lskel *fexit_skel = NULL; int wstatus, duration = 0; pid_t cpid; int err, fexit_cnt; - fexit_skel = fexit_sleep__open_and_load(); + fexit_skel = fexit_sleep_lskel__open_and_load(); if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) goto cleanup; - err = fexit_sleep__attach(fexit_skel); + err = fexit_sleep_lskel__attach(fexit_skel); if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto cleanup; @@ -60,7 +60,7 @@ void test_fexit_sleep(void) */ close(fexit_skel->progs.nanosleep_fentry.prog_fd); close(fexit_skel->progs.nanosleep_fexit.prog_fd); - fexit_sleep__detach(fexit_skel); + fexit_sleep_lskel__detach(fexit_skel); /* kill the thread to unwind sys_nanosleep stack through the trampoline */ kill(cpid, 9); @@ -78,5 +78,5 @@ void test_fexit_sleep(void) goto cleanup; cleanup: - fexit_sleep__destroy(fexit_skel); + fexit_sleep_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c 
b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index af3dba72670178..d4887d8bb39686 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -3,19 +3,19 @@ #include #include "fexit_test.lskel.h" -static int fexit_test(struct fexit_test *fexit_skel) +static int fexit_test(struct fexit_test_lskel *fexit_skel) { int err, prog_fd, i; __u32 duration = 0, retval; int link_fd; __u64 *result; - err = fexit_test__attach(fexit_skel); + err = fexit_test_lskel__attach(fexit_skel); if (!ASSERT_OK(err, "fexit_attach")) return err; /* Check that already linked program can't be attached again. */ - link_fd = fexit_test__test1__attach(fexit_skel); + link_fd = fexit_test_lskel__test1__attach(fexit_skel); if (!ASSERT_LT(link_fd, 0, "fexit_attach_link")) return -1; @@ -31,7 +31,7 @@ static int fexit_test(struct fexit_test *fexit_skel) return -1; } - fexit_test__detach(fexit_skel); + fexit_test_lskel__detach(fexit_skel); /* zero results for re-attach test */ memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss)); @@ -40,10 +40,10 @@ static int fexit_test(struct fexit_test *fexit_skel) void test_fexit_test(void) { - struct fexit_test *fexit_skel = NULL; + struct fexit_test_lskel *fexit_skel = NULL; int err; - fexit_skel = fexit_test__open_and_load(); + fexit_skel = fexit_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) goto cleanup; @@ -55,5 +55,5 @@ void test_fexit_test(void) ASSERT_OK(err, "fexit_second_attach"); cleanup: - fexit_test__destroy(fexit_skel); + fexit_test_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 225714f71ac6e1..ac54e3f91d42fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -458,9 +458,9 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) return -1; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i); + snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (!prog) return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 0e8a4d2f023d04..6093728497c709 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -2,7 +2,7 @@ #include #include -void test_flow_dissector_load_bytes(void) +void serial_test_flow_dissector_load_bytes(void) { struct bpf_flow_keys flow_keys; __u32 duration = 0, retval, size; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c index 3931ede5c53404..f0c6c226aba8c7 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -628,7 +628,7 @@ static void run_tests(int netns) } } -void test_flow_dissector_reattach(void) +void serial_test_flow_dissector_reattach(void) { int err, new_net, saved_net; diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c new file mode 100644 index 
00000000000000..3948da12a5284d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include "get_branch_snapshot.skel.h" + +static int *pfd_array; +static int cpu_cnt; + +static bool is_hypervisor(void) +{ + char *line = NULL; + bool ret = false; + size_t len; + FILE *fp; + + fp = fopen("/proc/cpuinfo", "r"); + if (!fp) + return false; + + while (getline(&line, &len, fp) != -1) { + if (!strncmp(line, "flags", 5)) { + if (strstr(line, "hypervisor") != NULL) + ret = true; + break; + } + } + + free(line); + fclose(fp); + return ret; +} + +static int create_perf_events(void) +{ + struct perf_event_attr attr = {0}; + int cpu; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_RAW; + attr.config = 0x1b00; + attr.sample_type = PERF_SAMPLE_BRANCH_STACK; + attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL | + PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; + + cpu_cnt = libbpf_num_possible_cpus(); + pfd_array = malloc(sizeof(int) * cpu_cnt); + if (!pfd_array) { + cpu_cnt = 0; + return 1; + } + + for (cpu = 0; cpu < cpu_cnt; cpu++) { + pfd_array[cpu] = syscall(__NR_perf_event_open, &attr, + -1, cpu, -1, PERF_FLAG_FD_CLOEXEC); + if (pfd_array[cpu] < 0) + break; + } + + return cpu == 0; +} + +static void close_perf_events(void) +{ + int cpu, fd; + + for (cpu = 0; cpu < cpu_cnt; cpu++) { + fd = pfd_array[cpu]; + if (fd < 0) + break; + close(fd); + } + free(pfd_array); +} + +void serial_test_get_branch_snapshot(void) +{ + struct get_branch_snapshot *skel = NULL; + int err; + + /* Skip the test before we fix LBR snapshot for hypervisor. */ + if (is_hypervisor()) { + test__skip(); + return; + } + + if (create_perf_events()) { + test__skip(); /* system doesn't support LBR */ + goto cleanup; + } + + skel = get_branch_snapshot__open_and_load(); + if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load")) + goto cleanup; + + err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low); + if (!ASSERT_OK(err, "kallsyms_find")) + goto cleanup; + + /* Just a guess for the end of this function, as module functions + * in /proc/kallsyms could come in any order. + */ + skel->bss->address_high = skel->bss->address_low + 128; + + err = get_branch_snapshot__attach(skel); + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) + goto cleanup; + + trigger_module_test_read(100); + + if (skel->bss->total_entries < 16) { + /* too few entries for the hit/waste test */ + test__skip(); + goto cleanup; + } + + ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); + + /* Given we stop LBR in software, we will waste a few entries. + * But we should try to waste as few as possible entries. We are at + * about 7 on x86_64 systems. + * Add a check for < 10 so that we get heads-up when something + * changes and wastes too many entries. 
+ */ + ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries"); + +cleanup: + get_branch_snapshot__destroy(skel); + close_perf_events(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 9efa7e50eab27f..afd8639f9a947d 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -103,11 +103,18 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration) static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) { int err = -ENOMEM, map_fd, zero = 0; - struct bpf_map *map; + struct bpf_map *map, *map2; __u8 *buff; map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); - if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) + if (!ASSERT_OK_PTR(map, "map")) + return; + if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal")) + return; + + /* ensure we can lookup internal maps by their ELF names */ + map2 = bpf_object__find_map_by_name(obj, ".rodata"); + if (!ASSERT_EQ(map, map2, "same_maps")) return; map_fd = bpf_map__fd(map); diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c index ee46b11f1f9a20..1db86eab101b30 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -16,7 +16,7 @@ void test_global_data_init(void) if (CHECK_FAIL(err)) return; - map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); + map = bpf_object__find_map_by_name(obj, ".rodata"); if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index ddfb6bf9715243..01e51d16c8b8d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -48,7 +48,8 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) *(bool *)ctx = true; } -void test_kfree_skb(void) +/* TODO: fix kernel panic caused by this test in parallel mode */ +void serial_test_kfree_skb(void) { struct __sk_buff skb = {}; struct bpf_prog_test_run_attr tattr = { @@ -92,7 +93,7 @@ void test_kfree_skb(void) if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) goto close_prog; - global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss"); + global_data = bpf_object__find_map_by_name(obj2, ".bss"); if (CHECK(!global_data, "find global data", "not found\n")) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 9611f2bc50dfa0..5c9c0176991b72 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -7,10 +7,10 @@ static void test_main(void) { - struct kfunc_call_test *skel; + struct kfunc_call_test_lskel *skel; int prog_fd, retval, err; - skel = kfunc_call_test__open_and_load(); + skel = kfunc_call_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return; @@ -26,7 +26,7 @@ static void test_main(void) ASSERT_OK(err, "bpf_prog_test_run(test2)"); ASSERT_EQ(retval, 3, "test2-retval"); - kfunc_call_test__destroy(skel); + kfunc_call_test_lskel__destroy(skel); } static void test_subprog(void) diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c index cf3acfa5a91d52..79f6bd1e50d608 
100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -7,6 +7,7 @@ #include "test_ksyms_btf.skel.h" #include "test_ksyms_btf_null_check.skel.h" #include "test_ksyms_weak.skel.h" +#include "test_ksyms_weak.lskel.h" static int duration; @@ -89,11 +90,11 @@ static void test_weak_syms(void) int err; skel = test_ksyms_weak__open_and_load(); - if (CHECK(!skel, "test_ksyms_weak__open_and_load", "failed\n")) + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load")) return; err = test_ksyms_weak__attach(skel); - if (CHECK(err, "test_ksyms_weak__attach", "skeleton attach failed: %d\n", err)) + if (!ASSERT_OK(err, "test_ksyms_weak__attach")) goto cleanup; /* trigger tracepoint */ @@ -109,6 +110,33 @@ static void test_weak_syms(void) test_ksyms_weak__destroy(skel); } +static void test_weak_syms_lskel(void) +{ + struct test_ksyms_weak_lskel *skel; + struct test_ksyms_weak_lskel__data *data; + int err; + + skel = test_ksyms_weak_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load")) + return; + + err = test_ksyms_weak_lskel__attach(skel); + if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym"); + ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym"); + ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym"); + ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym"); + +cleanup: + test_ksyms_weak_lskel__destroy(skel); +} + void test_ksyms_btf(void) { int percpu_datasec; @@ -136,4 +164,7 @@ void test_ksyms_btf(void) if (test__start_subtest("weak_ksyms")) test_weak_syms(); + + if (test__start_subtest("weak_ksyms_lskel")) + test_weak_syms_lskel(); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c index 2cd5cded543fa6..d490ad80eccb48 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -2,30 +2,61 @@ /* Copyright (c) 2021 Facebook */ #include -#include -#include +#include #include "test_ksyms_module.lskel.h" +#include "test_ksyms_module.skel.h" -static int duration; - -void test_ksyms_module(void) +void test_ksyms_module_lskel(void) { - struct test_ksyms_module* skel; + struct test_ksyms_module_lskel *skel; + int retval; int err; - skel = test_ksyms_module__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!env.has_testmod) { + test__skip(); return; + } - err = test_ksyms_module__attach(skel); - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + skel = test_ksyms_module_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load")) + return; + err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, (__u32 *)&retval, NULL); + if (!ASSERT_OK(err, "bpf_prog_test_run")) goto cleanup; + ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); +cleanup: + test_ksyms_module_lskel__destroy(skel); +} - usleep(1); +void test_ksyms_module_libbpf(void) +{ + struct test_ksyms_module *skel; + int retval, err; - ASSERT_EQ(skel->bss->triggered, true, "triggered"); - ASSERT_EQ(skel->bss->out_mod_ksym_global, 123, "global_ksym_val"); + if (!env.has_testmod) { + test__skip(); + return; + } + skel = 
test_ksyms_module__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) + return; + err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, + sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); cleanup: test_ksyms_module__destroy(skel); } + +void test_ksyms_module(void) +{ + if (test__start_subtest("lskel")) + test_ksyms_module_lskel(); + if (test__start_subtest("libbpf")) + test_ksyms_module_libbpf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c index 59adb4715394fa..7589c03fd26be7 100644 --- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c @@ -541,7 +541,7 @@ static void run_test(struct migrate_reuseport_test_case *test_case, } } -void test_migrate_reuseport(void) +void serial_test_migrate_reuseport(void) { struct test_migrate_reuseport *skel; int i; diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index 97fec70c600be0..b772fe30ce9bf4 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -53,7 +53,8 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret) modify_return__destroy(skel); } -void test_modify_return(void) +/* TODO: conflict with get_func_ip_test */ +void serial_test_modify_return(void) { run_test(0 /* input_retval */, 1 /* want_side_effect */, diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index d85a69b7ce4491..6d0e50dcf47cc9 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -2,46 +2,33 @@ /* Copyright (c) 2020 Facebook */ #include +#include #include "test_module_attach.skel.h" static int duration; -static int trigger_module_test_read(int read_sz) +static int trigger_module_test_writable(int *val) { int fd, err; + char buf[65]; + ssize_t rd; - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); + fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY); err = -errno; - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) + if (!ASSERT_GE(fd, 0, "testmode_file_open")) return err; - read(fd, NULL, read_sz); - close(fd); - - return 0; -} - -static int trigger_module_test_write(int write_sz) -{ - int fd, err; - char *buf = malloc(write_sz); - - if (!buf) - return -ENOMEM; - - memset(buf, 'a', write_sz); - buf[write_sz-1] = '\0'; - - fd = open("/sys/kernel/bpf_testmod", O_WRONLY); + rd = read(fd, buf, sizeof(buf) - 1); err = -errno; - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) { - free(buf); + if (!ASSERT_GT(rd, 0, "testmod_file_rd_val")) { + close(fd); return err; } - write(fd, buf, write_sz); + buf[rd] = '\0'; + *val = strtol(buf, NULL, 0); close(fd); - free(buf); + return 0; } @@ -58,6 +45,7 @@ void test_module_attach(void) struct test_module_attach__bss *bss; struct bpf_link *link; int err; + int writable_val = 0; skel = test_module_attach__open(); if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -90,6 +78,14 @@ void test_module_attach(void) ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet"); ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret"); + 
bss->raw_tp_writable_bare_early_ret = true; + bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4; + ASSERT_OK(trigger_module_test_writable(&writable_val), + "trigger_writable"); + ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in"); + ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val, + "writable_test_out"); + test_module_attach__detach(skel); /* attach fentry/fexit and make sure it get's module reference */ diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index 2535788e135f08..24d493482ffc75 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -78,7 +78,8 @@ static void test_ns_current_pid_tgid_new_ns(void) return; } -void test_ns_current_pid_tgid(void) +/* TODO: use a different tracepoint */ +void serial_test_ns_current_pid_tgid(void) { if (test__start_subtest("ns_current_pid_tgid_root_ns")) test_current_pid_tgid(NULL); diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 6490e9673002ff..4e32f3586a75c7 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -43,9 +43,10 @@ int trigger_on_cpu(int cpu) return 0; } -void test_perf_buffer(void) +void serial_test_perf_buffer(void) { - int err, on_len, nr_on_cpus = 0, nr_cpus, i; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, j; + int zero = 0, my_pid = getpid(); struct perf_buffer_opts pb_opts = {}; struct test_perf_buffer *skel; cpu_set_t cpu_seen; @@ -71,6 +72,10 @@ void test_perf_buffer(void) if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; + err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0); + if (!ASSERT_OK(err, "my_pid_update")) + goto out_close; + /* attach probe */ err = test_perf_buffer__attach(skel); if (CHECK(err, "attach_kprobe", "err %d\n", err)) @@ -107,19 +112,19 @@ void test_perf_buffer(void) "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen))) goto out_free_pb; - if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt", - "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus)) + if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt", + "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus)) goto out_close; - for (i = 0; i < nr_cpus; i++) { + for (i = 0, j = 0; i < nr_cpus; i++) { if (i >= on_len || !online[i]) continue; - fd = perf_buffer__buffer_fd(pb, i); + fd = perf_buffer__buffer_fd(pb, j); CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd); last_fd = fd; - err = perf_buffer__consume_buffer(pb, i); + err = perf_buffer__consume_buffer(pb, j); if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err)) goto out_close; @@ -127,12 +132,13 @@ void test_perf_buffer(void) if (trigger_on_cpu(i)) goto out_close; - err = perf_buffer__consume_buffer(pb, i); - if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err)) + err = perf_buffer__consume_buffer(pb, j); + if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err)) goto out_close; if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i)) goto out_close; + j++; } out_free_pb: diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index b1abd0c4660738..ede07344f264ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ 
b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -23,7 +23,8 @@ static void burn_cpu(void) ++j; } -void test_perf_link(void) +/* TODO: often fails in concurrent mode */ +void serial_test_perf_link(void) { struct test_perf_link *skel = NULL; struct perf_event_attr attr; diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c index 95bd1209735878..abf890d066eb4b 100644 --- a/tools/testing/selftests/bpf/prog_tests/probe_user.c +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -void test_probe_user(void) +/* TODO: corrupts other tests uses connect() */ +void serial_test_probe_user(void) { - const char *prog_name = "kprobe/__sys_connect"; + const char *prog_name = "handle_sys_connect"; const char *obj_file = "./test_probe_user.o"; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); int err, results_map_fd, sock_fd, duration = 0; @@ -18,7 +19,7 @@ void test_probe_user(void) if (!ASSERT_OK_PTR(obj, "obj_open_file")) return; - kprobe_prog = bpf_object__find_program_by_title(obj, prog_name); + kprobe_prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK(!kprobe_prog, "find_probe", "prog '%s' not found\n", prog_name)) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c index 5c45424cac5f81..ddefa1192e5d51 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -3,7 +3,8 @@ #include #include -void test_raw_tp_writable_test_run(void) +/* NOTE: conflict with other tests. */ +void serial_test_raw_tp_writable_test_run(void) { __u32 duration = 0; char error[4096]; diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c index 5f9eaa3ab58475..fd5d2ddfb06276 100644 --- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -37,7 +37,7 @@ void test_rdonly_maps(void) if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) goto cleanup; - bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss"); + bss_map = bpf_object__find_map_by_name(obj, ".bss"); if (CHECK(!bss_map, "find_bss_map", "failed\n")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c index 0e378d63fe1839..f3af2627b59971 100644 --- a/tools/testing/selftests/bpf/prog_tests/recursion.c +++ b/tools/testing/selftests/bpf/prog_tests/recursion.c @@ -20,18 +20,18 @@ void test_recursion(void) goto out; ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2"); ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1"); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2"); - err = 
bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_lookup), + err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete), &prog_info, &prog_info_len); if (!ASSERT_OK(err, "get_prog_info")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c index 4e91f4d6466c68..873323fb18ba97 100644 --- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c +++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -1,6 +1,21 @@ // SPDX-License-Identifier: GPL-2.0 #include +static void toggle_object_autoload_progs(const struct bpf_object *obj, + const char *name_load) +{ + struct bpf_program *prog; + + bpf_object__for_each_program(prog, obj) { + const char *name = bpf_program__name(prog); + + if (!strcmp(name_load, name)) + bpf_program__set_autoload(prog, true); + else + bpf_program__set_autoload(prog, false); + } +} + void test_reference_tracking(void) { const char *file = "test_sk_lookup_kern.o"; @@ -9,44 +24,49 @@ void test_reference_tracking(void) .object_name = obj_name, .relaxed_maps = true, ); - struct bpf_object *obj; + struct bpf_object *obj_iter, *obj = NULL; struct bpf_program *prog; __u32 duration = 0; int err = 0; - obj = bpf_object__open_file(file, &open_opts); - if (!ASSERT_OK_PTR(obj, "obj_open_file")) + obj_iter = bpf_object__open_file(file, &open_opts); + if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file")) return; - if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name", + if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name", "wrong obj name '%s', expected '%s'\n", - bpf_object__name(obj), obj_name)) + bpf_object__name(obj_iter), obj_name)) goto cleanup; - bpf_object__for_each_program(prog, obj) { - const char *title; + bpf_object__for_each_program(prog, obj_iter) { + const char *name; - /* Ignore .text sections */ - title = bpf_program__section_name(prog); - if (strstr(title, ".text") != NULL) + name = bpf_program__name(prog); + if (!test__start_subtest(name)) continue; - if (!test__start_subtest(title)) - continue; + obj = bpf_object__open_file(file, &open_opts); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + goto cleanup; + toggle_object_autoload_progs(obj, name); /* Expect verifier failure if test name has 'err' */ - if (strstr(title, "err_") != NULL) { + if (strncmp(name, "err_", sizeof("err_") - 1) == 0) { libbpf_print_fn_t old_print_fn; old_print_fn = libbpf_set_print(NULL); - err = !bpf_program__load(prog, "GPL", 0); + err = !bpf_object__load(obj); libbpf_set_print(old_print_fn); } else { - err = bpf_program__load(prog, "GPL", 0); + err = bpf_object__load(obj); } - CHECK(err, title, "\n"); + ASSERT_OK(err, name); + + bpf_object__close(obj); + obj = NULL; } cleanup: bpf_object__close(obj); + bpf_object__close(obj_iter); } diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index f62361306f6d73..f4a13d9dd5c8bf 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -106,9 +106,9 @@ static int resolve_symbols(void) "Failed to load BTF from btf_data.o\n")) return -1; - nr = btf__get_nr_types(btf); + nr = btf__type_cnt(btf); - for (type_id = 1; type_id <= nr; type_id++) { + for (type_id = 1; type_id < nr; type_id++) { if (__resolve_symbol(btf, type_id)) break; } @@ -117,14 +117,14 @@ static int resolve_symbols(void) return 0; } -int test_resolve_btfids(void) +void test_resolve_btfids(void) { 
__u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; unsigned int i, j; int ret = 0; if (resolve_symbols()) - return -1; + return; /* Check BTF_ID_LIST(test_list_local) and * BTF_ID_LIST_GLOBAL(test_list_global) IDs @@ -138,7 +138,7 @@ int test_resolve_btfids(void) test_symbols[i].name, test_list[i], test_symbols[i].id); if (ret) - return ret; + return; } } @@ -161,9 +161,7 @@ int test_resolve_btfids(void) if (i > 0) { if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check")) - return -1; + return; } } - - return ret; } diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index 4706cee8436071..9a80fe8a6427ce 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -58,7 +58,7 @@ static int process_sample(void *ctx, void *data, size_t len) } } -static struct test_ringbuf *skel; +static struct test_ringbuf_lskel *skel; static struct ring_buffer *ringbuf; static void trigger_samples() @@ -90,13 +90,13 @@ void test_ringbuf(void) int page_size = getpagesize(); void *mmap_ptr, *tmp_ptr; - skel = test_ringbuf__open(); + skel = test_ringbuf_lskel__open(); if (CHECK(!skel, "skel_open", "skeleton open failed\n")) return; skel->maps.ringbuf.max_entries = page_size; - err = test_ringbuf__load(skel); + err = test_ringbuf_lskel__load(skel); if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) goto cleanup; @@ -154,7 +154,7 @@ void test_ringbuf(void) if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) goto cleanup; - err = test_ringbuf__attach(skel); + err = test_ringbuf_lskel__attach(skel); if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) goto cleanup; @@ -292,8 +292,8 @@ void test_ringbuf(void) CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", 1L, skel->bss->discarded); - test_ringbuf__detach(skel); + test_ringbuf_lskel__detach(skel); cleanup: ring_buffer__free(ringbuf); - test_ringbuf__destroy(skel); + test_ringbuf_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 4efd337d6a3c47..3cfc910ab3c1bd 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -114,7 +114,7 @@ static int prepare_bpf_obj(void) err = bpf_object__load(obj); RET_ERR(err, "load bpf_object", "err:%d\n", err); - prog = bpf_program__next(NULL, obj); + prog = bpf_object__next_program(obj, NULL); RET_ERR(!prog, "get first bpf_program", "!prog\n"); select_by_skb_data_prog = bpf_program__fd(prog); RET_ERR(select_by_skb_data_prog < 0, "get prog fd", @@ -858,7 +858,7 @@ void test_map_type(enum bpf_map_type mt) cleanup(); } -void test_select_reuseport(void) +void serial_test_select_reuseport(void) { saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); if (saved_tcp_fo < 0) diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c index 189a34a7addbde..15dacfcfaa6dd3 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c @@ -25,7 +25,8 @@ static void *worker(void *p) return NULL; } -void test_send_signal_sched_switch(void) +/* NOTE: cause events loss */ +void serial_test_send_signal_sched_switch(void) { struct test_send_signal_kern *skel; pthread_t 
threads[THREAD_COUNT]; diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c index dfcbddcbe4d3f9..fdfdcff6cbef00 100644 --- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -42,7 +42,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type) signal(SIGALRM, SIG_DFL); } -void test_signal_pending(enum bpf_prog_type prog_type) +void test_signal_pending(void) { test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER); test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR); diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 3a469099f30d89..1d272e05188eb3 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -48,7 +48,7 @@ configure_stack(void) return false; sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", "direct-action object-file ./test_sk_assign.o", - "section classifier/sk_assign_test", + "section tc", (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose"); if (CHECK(system(tc_cmd), "BPF load failed;", "run with -vv for more info\n")) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index aee41547e7f454..6db07401bc4937 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -598,7 +598,7 @@ static void query_lookup_prog(struct test_sk_lookup *skel) static void run_lookup_prog(const struct test *t) { - int server_fds[MAX_SERVERS] = { -1 }; + int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 }; int client_fd, reuse_conn_fd = -1; struct bpf_link *lookup_link; int i, err; @@ -1053,7 +1053,7 @@ static void run_sk_assign(struct test_sk_lookup *skel, struct bpf_program *lookup_prog, const char *remote_ip, const char *local_ip) { - int server_fds[MAX_SERVERS] = { -1 }; + int server_fds[] = { [0 ... 
MAX_SERVERS - 1] = -1 }; struct bpf_sk_lookup ctx; __u64 server_cookie; int i, err; diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c index 2b392590e8ca48..547ae53cde7464 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c @@ -105,7 +105,7 @@ static void do_test(void) close(listen_fd); } -void test_sk_storage_tracing(void) +void serial_test_sk_storage_tracing(void) { struct test_sk_storage_trace_itself *skel_itself; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index fafeddaad6a998..c437e6ba8fe202 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -11,12 +11,14 @@ void test_skb_ctx(void) .cb[3] = 4, .cb[4] = 5, .priority = 6, + .ingress_ifindex = 11, .ifindex = 1, .tstamp = 7, .wire_len = 100, .gso_segs = 8, .mark = 9, .gso_size = 10, + .hwtstamp = 11, }; struct bpf_prog_test_run_attr tattr = { .data_in = &pkt_v4, @@ -97,6 +99,10 @@ void test_skb_ctx(void) "ctx_out_ifindex", "skb->ifindex == %d, expected %d\n", skb.ifindex, 1); + CHECK_ATTR(skb.ingress_ifindex != 11, + "ctx_out_ingress_ifindex", + "skb->ingress_ifindex == %d, expected %d\n", + skb.ingress_ifindex, 11); CHECK_ATTR(skb.tstamp != 8, "ctx_out_tstamp", "skb->tstamp == %lld, expected %d\n", diff --git a/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c new file mode 100644 index 00000000000000..3eefdfed1db933 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include +#include +#include "test_skc_to_unix_sock.skel.h" + +static const char *sock_path = "@skc_to_unix_sock"; + +void test_skc_to_unix_sock(void) +{ + struct test_skc_to_unix_sock *skel; + struct sockaddr_un sockaddr; + int err, sockfd = 0; + + skel = test_skc_to_unix_sock__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_skc_to_unix_sock__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + err = test_skc_to_unix_sock__attach(skel); + if (!ASSERT_OK(err, "could not attach BPF object")) + goto cleanup; + + /* trigger unix_listen */ + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!ASSERT_GT(sockfd, 0, "socket failed")) + goto cleanup; + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_UNIX; + strncpy(sockaddr.sun_path, sock_path, strlen(sock_path)); + sockaddr.sun_path[0] = '\0'; + + err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); + if (!ASSERT_OK(err, "bind failed")) + goto cleanup; + + err = listen(sockfd, 1); + if (!ASSERT_OK(err, "listen failed")) + goto cleanup; + + ASSERT_EQ(strcmp(skel->bss->path, sock_path), 0, "bpf_skc_to_unix_sock failed"); + +cleanup: + if (sockfd) + close(sockfd); + test_skc_to_unix_sock__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index f6f130c99b8cb8..180afd632f4caa 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -16,8 +16,13 @@ void test_skeleton(void) struct test_skeleton* skel; struct test_skeleton__bss *bss; struct 
test_skeleton__data *data; + struct test_skeleton__data_dyn *data_dyn; struct test_skeleton__rodata *rodata; + struct test_skeleton__rodata_dyn *rodata_dyn; struct test_skeleton__kconfig *kcfg; + const void *elf_bytes; + size_t elf_bytes_sz = 0; + int i; skel = test_skeleton__open(); if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -28,7 +33,12 @@ void test_skeleton(void) bss = skel->bss; data = skel->data; + data_dyn = skel->data_dyn; rodata = skel->rodata; + rodata_dyn = skel->rodata_dyn; + + ASSERT_STREQ(bpf_map__name(skel->maps.rodata_dyn), ".rodata.dyn", "rodata_dyn_name"); + ASSERT_STREQ(bpf_map__name(skel->maps.data_dyn), ".data.dyn", "data_dyn_name"); /* validate values are pre-initialized correctly */ CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1); @@ -44,6 +54,12 @@ void test_skeleton(void) CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 0, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], -(i + 1), "in_dynarr"); + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 1, "out_dynarr"); + /* validate we can pre-setup global variables, even in .bss */ data->in1 = 10; data->in2 = 11; @@ -51,6 +67,10 @@ void test_skeleton(void) bss->in4 = 13; rodata->in.in6 = 14; + rodata_dyn->in_dynarr_sz = 4; + for (i = 0; i < 4; i++) + rodata_dyn->in_dynarr[i] = i + 10; + err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) goto cleanup; @@ -62,6 +82,10 @@ void test_skeleton(void) CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 4, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], i + 10, "in_dynarr"); + /* now set new values and attach to get them into outX variables */ data->in1 = 1; data->in2 = 2; @@ -71,6 +95,8 @@ void test_skeleton(void) bss->in5.b = 6; kcfg = skel->kconfig; + skel->data_read_mostly->read_mostly_var = 123; + err = test_skeleton__attach(skel); if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) goto cleanup; @@ -91,6 +117,15 @@ void test_skeleton(void) CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2", "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION); + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 10, "out_dynarr"); + + ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var"); + + elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz); + ASSERT_OK_PTR(elf_bytes, "elf_bytes"); + ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz"); + cleanup: test_skeleton__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index 8fd1b4b29a0e0b..394ebfc3bbf310 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -33,7 +33,7 @@ #define EXP_NO_BUF_RET 29 -void test_snprintf_positive(void) +static void test_snprintf_positive(void) { char exp_addr_out[] = EXP_ADDR_OUT; char exp_sym_out[] = EXP_SYM_OUT; @@ -103,7 +103,7 @@ static int load_single_snprintf(char *fmt) return ret; } -void test_snprintf_negative(void) +static void test_snprintf_negative(void) { ASSERT_OK(load_single_snprintf("valid %d"), "valid usage"); diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c 
b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c index 76e1f5fe18fac3..dd41b826be3090 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c @@ -6,7 +6,7 @@ /* Demonstrate that bpf_snprintf_btf succeeds and that various data types * are formatted correctly. */ -void test_snprintf_btf(void) +void serial_test_snprintf_btf(void) { struct netif_receive_skb *skel; struct netif_receive_skb__bss *bss; diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c index 577d619fb07ed9..fae40db4d81f57 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c @@ -329,7 +329,7 @@ static void test(void) close(listen_fd); } -void test_sock_fields(void) +void serial_test_sock_fields(void) { struct bpf_link *egress_link = NULL, *ingress_link = NULL; int parent_cg_fd = -1, child_cg_fd = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index d88bb65b74cc9c..2a9cb951bfd6c3 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -2002,7 +2002,7 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, test_udp_unix_redir(skel, map, family); } -void test_sockmap_listen(void) +void serial_test_sockmap_listen(void) { struct test_sockmap_listen *skel; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index 51fac975b3163f..bc34f7773444f9 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -2,7 +2,7 @@ #include #include "cgroup_helpers.h" -static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) { enum bpf_attach_type attach_type; enum bpf_prog_type prog_type; @@ -15,23 +15,23 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) return -1; } - prog = bpf_object__find_program_by_title(obj, title); + prog = bpf_object__find_program_by_name(obj, name); if (!prog) { - log_err("Failed to find %s BPF program", title); + log_err("Failed to find %s BPF program", name); return -1; } err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, attach_type, BPF_F_ALLOW_MULTI); if (err) { - log_err("Failed to attach %s BPF program", title); + log_err("Failed to attach %s BPF program", name); return -1; } return 0; } -static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title) +static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) { enum bpf_attach_type attach_type; enum bpf_prog_type prog_type; @@ -42,7 +42,7 @@ static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title) if (err) return -1; - prog = bpf_object__find_program_by_title(obj, title); + prog = bpf_object__find_program_by_name(obj, name); if (!prog) return -1; @@ -89,7 +89,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - child: 0x80 -> 0x90 */ - err = prog_attach(obj, cg_child, "cgroup/getsockopt/child"); + err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); if (err) goto detach; @@ -113,7 +113,7 @@ static int run_getsockopt_test(struct bpf_object 
*obj, int cg_parent, * - parent: 0x90 -> 0xA0 */ - err = prog_attach(obj, cg_parent, "cgroup/getsockopt/parent"); + err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); if (err) goto detach; @@ -157,7 +157,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - parent: unexpected 0x40, EPERM */ - err = prog_detach(obj, cg_child, "cgroup/getsockopt/child"); + err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); if (err) { log_err("Failed to detach child program"); goto detach; @@ -198,8 +198,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/getsockopt/child"); - prog_detach(obj, cg_parent, "cgroup/getsockopt/parent"); + prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); + prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); return err; } @@ -236,7 +236,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach child program and make sure it adds 0x10. */ - err = prog_attach(obj, cg_child, "cgroup/setsockopt"); + err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); if (err) goto detach; @@ -263,7 +263,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach parent program and make sure it adds another 0x10. */ - err = prog_attach(obj, cg_parent, "cgroup/setsockopt"); + err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); if (err) goto detach; @@ -289,8 +289,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/setsockopt"); - prog_detach(obj, cg_parent, "cgroup/setsockopt"); + prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); + prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); return err; } diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index b5940e6ca67cb7..9825f1f7bfcc20 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -21,7 +21,7 @@ static void test_tailcall_1(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -38,9 +38,9 @@ static void test_tailcall_1(void) goto out; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -70,9 +70,9 @@ static void test_tailcall_1(void) err, errno, retval); for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -92,9 +92,9 @@ static void test_tailcall_1(void) for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { j = bpf_map__def(prog_array)->max_entries - 1 - i; - snprintf(prog_name, sizeof(prog_name), "classifier/%i", j); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", j); - prog = bpf_object__find_program_by_title(obj, prog_name); + 
prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -159,7 +159,7 @@ static void test_tailcall_2(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -176,9 +176,9 @@ static void test_tailcall_2(void) goto out; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -219,10 +219,7 @@ static void test_tailcall_2(void) bpf_object__close(obj); } -/* test_tailcall_3 checks that the count value of the tail call limit - * enforcement matches with expectations. - */ -static void test_tailcall_3(void) +static void test_tailcall_count(const char *which) { int err, map_fd, prog_fd, main_fd, data_fd, i, val; struct bpf_map *prog_array, *data_map; @@ -231,12 +228,12 @@ static void test_tailcall_3(void) __u32 retval, duration; char buff[128] = {}; - err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -252,7 +249,7 @@ static void test_tailcall_3(void) if (CHECK_FAIL(map_fd < 0)) goto out; - prog = bpf_object__find_program_by_title(obj, "classifier/0"); + prog = bpf_object__find_program_by_name(obj, "classifier_0"); if (CHECK_FAIL(!prog)) goto out; @@ -296,6 +293,22 @@ static void test_tailcall_3(void) bpf_object__close(obj); } +/* test_tailcall_3 checks that the count value of the tail call limit + * enforcement matches with expectations. JIT uses direct jump. + */ +static void test_tailcall_3(void) +{ + test_tailcall_count("tailcall3.o"); +} + +/* test_tailcall_6 checks that the count value of the tail call limit + * enforcement matches with expectations. JIT uses indirect jump. + */ +static void test_tailcall_6(void) +{ + test_tailcall_count("tailcall6.o"); +} + /* test_tailcall_4 checks that the kernel properly selects indirect jump * for the case where the key is not known. Latter is passed via global * data to select different targets we can compare return value of. 
@@ -316,7 +329,7 @@ static void test_tailcall_4(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -341,9 +354,9 @@ static void test_tailcall_4(void) return; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -404,7 +417,7 @@ static void test_tailcall_5(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -429,9 +442,9 @@ static void test_tailcall_5(void) return; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -490,7 +503,7 @@ static void test_tailcall_bpf2bpf_1(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -508,9 +521,9 @@ static void test_tailcall_bpf2bpf_1(void) /* nop -> jmp */ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -574,7 +587,7 @@ static void test_tailcall_bpf2bpf_2(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -590,7 +603,7 @@ static void test_tailcall_bpf2bpf_2(void) if (CHECK_FAIL(map_fd < 0)) goto out; - prog = bpf_object__find_program_by_title(obj, "classifier/0"); + prog = bpf_object__find_program_by_name(obj, "classifier_0"); if (CHECK_FAIL(!prog)) goto out; @@ -652,7 +665,7 @@ static void test_tailcall_bpf2bpf_3(void) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -669,9 +682,9 @@ static void test_tailcall_bpf2bpf_3(void) goto out; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -749,7 +762,7 @@ static void test_tailcall_bpf2bpf_4(bool noise) if (CHECK_FAIL(err)) return; - prog = bpf_object__find_program_by_title(obj, "classifier"); + prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; @@ -766,9 +779,9 @@ static void test_tailcall_bpf2bpf_4(bool noise) goto out; for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + 
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); - prog = bpf_object__find_program_by_title(obj, prog_name); + prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; @@ -822,6 +835,8 @@ void test_tailcalls(void) test_tailcall_4(); if (test__start_subtest("tailcall_5")) test_tailcall_5(); + if (test__start_subtest("tailcall_6")) + test_tailcall_6(); if (test__start_subtest("tailcall_bpf2bpf_1")) test_tailcall_bpf2bpf_1(); if (test__start_subtest("tailcall_bpf2bpf_2")) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index e7201ba29ccd62..4b18b73df10b6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -176,6 +176,18 @@ static int netns_setup_namespaces(const char *verb) return 0; } +static void netns_setup_namespaces_nofail(const char *verb) +{ + const char * const *ns = namespaces; + char cmd[128]; + + while (*ns) { + snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns); + system(cmd); + ns++; + } +} + struct netns_setup_result { int ifindex_veth_src_fwd; int ifindex_veth_dst_fwd; @@ -633,7 +645,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) struct nstoken *nstoken = NULL; int err; int tunnel_pid = -1; - int src_fd, target_fd; + int src_fd, target_fd = -1; int ifindex; /* Start a L3 TUN/TAP tunnel between the src and dst namespaces. @@ -762,6 +774,8 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) static void *test_tc_redirect_run_tests(void *arg) { + netns_setup_namespaces_nofail("delete"); + RUN_TEST(tc_redirect_peer); RUN_TEST(tc_redirect_peer_l3); RUN_TEST(tc_redirect_neigh); @@ -769,7 +783,7 @@ static void *test_tc_redirect_run_tests(void *arg) return NULL; } -void test_tc_redirect(void) +void serial_test_tc_redirect(void) { pthread_t test_thread; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index d207e968e6b1be..265b4fe33ec335 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -109,7 +109,7 @@ static int run_test(int cgroup_fd, int server_fd) return -1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); map_fd = bpf_map__fd(map); err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index 0252f61d611a93..97d8a6f84f4ab1 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -43,7 +43,7 @@ static int process_sample(void *ctx, void *data, size_t len) void test_test_ima(void) { char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; - struct ring_buffer *ringbuf; + struct ring_buffer *ringbuf = NULL; const char *measured_dir; char cmd[256]; @@ -85,5 +85,6 @@ void test_test_ima(void) err = system(cmd); CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno); close_prog: + ring_buffer__free(ringbuf); ima__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 25f40e1b99670b..0f4e49e622cdff 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -39,7 +39,8 @@ static int timer(struct timer *timer_skel) 
return 0; } -void test_timer(void) +/* TODO: use pid filtering */ +void serial_test_timer(void) { struct timer *timer_skel = NULL; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c index ced8f6cf347cdf..949a0617869df5 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -52,7 +52,7 @@ static int timer_mim(struct timer_mim *timer_skel) return 0; } -void test_timer_mim(void) +void serial_test_timer_mim(void) { struct timer_mim_reject *timer_reject_skel = NULL; libbpf_print_fn_t old_print_fn = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index fb095e5cd9afd1..8652d0a46c87ab 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -void test_tp_attach_query(void) +void serial_test_tp_attach_query(void) { const int num_progs = 3; int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c index d39bc00feb45e6..cade7f12315f7e 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -8,35 +8,34 @@ #define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" #define SEARCHMSG "testing,testing" -void test_trace_printk(void) +void serial_test_trace_printk(void) { - int err, iter = 0, duration = 0, found = 0; - struct trace_printk__bss *bss; - struct trace_printk *skel; + struct trace_printk_lskel__bss *bss; + int err = 0, iter = 0, found = 0; + struct trace_printk_lskel *skel; char *buf = NULL; FILE *fp = NULL; size_t buflen; - skel = trace_printk__open(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + skel = trace_printk_lskel__open(); + if (!ASSERT_OK_PTR(skel, "trace_printk__open")) return; - ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string"); + ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]"); skel->rodata->fmt[0] = 't'; - err = trace_printk__load(skel); - if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + err = trace_printk_lskel__load(skel); + if (!ASSERT_OK(err, "trace_printk__load")) goto cleanup; bss = skel->bss; - err = trace_printk__attach(skel); - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + err = trace_printk_lskel__attach(skel); + if (!ASSERT_OK(err, "trace_printk__attach")) goto cleanup; fp = fopen(TRACEBUF, "r"); - if (CHECK(fp == NULL, "could not open trace buffer", - "error %d opening %s", errno, TRACEBUF)) + if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) goto cleanup; /* We do not want to wait forever if this test fails... 
*/ @@ -44,16 +43,12 @@ void test_trace_printk(void) /* wait for tracepoint to trigger */ usleep(1); - trace_printk__detach(skel); + trace_printk_lskel__detach(skel); - if (CHECK(bss->trace_printk_ran == 0, - "bpf_trace_printk never ran", - "ran == %d", bss->trace_printk_ran)) + if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran")) goto cleanup; - if (CHECK(bss->trace_printk_ret <= 0, - "bpf_trace_printk returned <= 0 value", - "got %d", bss->trace_printk_ret)) + if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret")) goto cleanup; /* verify our search string is in the trace buffer */ @@ -66,12 +61,11 @@ void test_trace_printk(void) break; } - if (CHECK(!found, "message from bpf_trace_printk not found", - "no instance of %s in %s", SEARCHMSG, TRACEBUF)) + if (!ASSERT_EQ(found, bss->trace_printk_ran, "found")) goto cleanup; cleanup: - trace_printk__destroy(skel); + trace_printk_lskel__destroy(skel); free(buf); if (fp) fclose(fp); diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c new file mode 100644 index 00000000000000..7a4e313e85584b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include + +#include "trace_vprintk.lskel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "1,2,3,4,5,6,7,8,9,10" + +void serial_test_trace_vprintk(void) +{ + struct trace_vprintk_lskel__bss *bss; + int err = 0, iter = 0, found = 0; + struct trace_vprintk_lskel *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_vprintk_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) + goto cleanup; + + bss = skel->bss; + + err = trace_vprintk_lskel__attach(skel); + if (!ASSERT_OK(err, "trace_vprintk__attach")) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) + goto cleanup; + + /* We do not want to wait forever if this test fails... 
*/ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_vprintk_lskel__detach(skel); + + if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran")) + goto cleanup; + + if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret")) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_vprintk_ran) + break; + if (++iter > 1000) + break; + } + + if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found")) + goto cleanup; + + if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret")) + goto cleanup; + +cleanup: + trace_vprintk_lskel__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index d7f5a931d7f384..fc146671b20a3a 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -41,7 +41,8 @@ static struct bpf_link *load(struct bpf_object *obj, const char *name) return bpf_program__attach_trace(prog); } -void test_trampoline_count(void) +/* TODO: use different target function to run in concurrent mode */ +void serial_test_trampoline_count(void) { const char *fentry_name = "fentry/__set_task_comm"; const char *fexit_name = "fexit/__set_task_comm"; diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c new file mode 100644 index 00000000000000..a47e7c0e1ffd8a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include + +#include "trace_vprintk.lskel.h" + +void test_verif_stats(void) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct trace_vprintk_lskel *skel; + struct bpf_prog_info info = {}; + int err; + + skel = trace_vprintk_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) + goto cleanup; + + err = bpf_obj_get_info_by_fd(skel->progs.sys_enter.prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto cleanup; + + if (!ASSERT_GT(info.verified_insns, 0, "verified_insns")) + goto cleanup; + +cleanup: + trace_vprintk_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index d5c98f2cb12fb1..f529e3c923ae71 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -2,7 +2,7 @@ #include #include -void test_xdp_adjust_tail_shrink(void) +static void test_xdp_adjust_tail_shrink(void) { const char *file = "./test_xdp_adjust_tail_shrink.o"; __u32 duration, retval, size, expect_sz; @@ -30,7 +30,7 @@ void test_xdp_adjust_tail_shrink(void) bpf_object__close(obj); } -void test_xdp_adjust_tail_grow(void) +static void test_xdp_adjust_tail_grow(void) { const char *file = "./test_xdp_adjust_tail_grow.o"; struct bpf_object *obj; @@ -58,7 +58,7 @@ void test_xdp_adjust_tail_grow(void) bpf_object__close(obj); } -void test_xdp_adjust_tail_grow2(void) +static void test_xdp_adjust_tail_grow2(void) { const char *file = "./test_xdp_adjust_tail_grow.o"; char buf[4096]; /* avoid segfault: large buf to hold grow results */ diff --git 
a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index 15ef3531483ef3..4c4057262cd8db 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -4,7 +4,7 @@ #define IFINDEX_LO 1 #define XDP_FLAGS_REPLACE (1U << 4) -void test_xdp_attach(void) +void serial_test_xdp_attach(void) { __u32 duration = 0, id1, id2, id0 = 0, len; struct bpf_object *obj1, *obj2, *obj3; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index ad3ba81b40480c..faa22b84f2eeb1 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -519,7 +519,7 @@ static struct bond_test_case bond_test_cases[] = { { "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, }, }; -void test_xdp_bonding(void) +void serial_test_xdp_bonding(void) { libbpf_print_fn_t old_print_fn; struct skeletons skeletons = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index 8755effd80b0be..fd812bd43600ab 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -7,7 +7,7 @@ #define IFINDEX_LO 1 -void test_xdp_cpumap_attach(void) +void serial_test_xdp_cpumap_attach(void) { struct test_xdp_with_cpumap_helpers *skel; struct bpf_prog_info info = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index c72af030ff1014..3079d5568f8fac 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -8,7 +8,7 @@ #define IFINDEX_LO 1 -void test_xdp_with_devmap_helpers(void) +static void test_xdp_with_devmap_helpers(void) { struct test_xdp_with_devmap_helpers *skel; struct bpf_prog_info info = {}; @@ -60,7 +60,7 @@ void test_xdp_with_devmap_helpers(void) test_xdp_with_devmap_helpers__destroy(skel); } -void test_neg_xdp_devmap_helpers(void) +static void test_neg_xdp_devmap_helpers(void) { struct test_xdp_devmap_helpers *skel; @@ -72,7 +72,7 @@ void test_neg_xdp_devmap_helpers(void) } -void test_xdp_devmap_attach(void) +void serial_test_xdp_devmap_attach(void) { if (test__start_subtest("DEVMAP with programs in entries")) test_xdp_with_devmap_helpers(); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c index d2d7a283d72f9d..4e2a4fd56f6759 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -4,7 +4,7 @@ #define IFINDEX_LO 1 -void test_xdp_info(void) +void serial_test_xdp_info(void) { __u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id; const char *file = "./xdp_dummy.o"; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c index 46eed0a33c23a1..983ab0b47d30aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -6,7 +6,7 @@ #define IFINDEX_LO 1 -void test_xdp_link(void) +void serial_test_xdp_link(void) { __u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err; DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1); diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c 
b/tools/testing/selftests/bpf/prog_tests/xdpwall.c new file mode 100644 index 00000000000000..f3927829a55a6c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "test_progs.h" +#include "xdpwall.skel.h" + +void test_xdpwall(void) +{ + struct xdpwall *skel; + + skel = xdpwall__open_and_load(); + ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?"); + + xdpwall__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c index c245345e41ca03..16e57313204afb 100644 --- a/tools/testing/selftests/bpf/progs/atomics.c +++ b/tools/testing/selftests/bpf/progs/atomics.c @@ -10,6 +10,8 @@ bool skip_tests __attribute((__section__(".data"))) = false; bool skip_tests = true; #endif +__u32 pid = 0; + __u64 add64_value = 1; __u64 add64_result = 0; __u32 add32_value = 1; @@ -21,6 +23,8 @@ __u64 add_noreturn_value = 1; SEC("fentry/bpf_fentry_test1") int BPF_PROG(add, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 add_stack_value = 1; @@ -45,6 +49,8 @@ __s64 sub_noreturn_value = 1; SEC("fentry/bpf_fentry_test1") int BPF_PROG(sub, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 sub_stack_value = 1; @@ -67,6 +73,8 @@ __u64 and_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(and, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32); @@ -86,6 +94,8 @@ __u64 or_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(or, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32); or32_result = __sync_fetch_and_or(&or32_value, 0x011); @@ -104,6 +114,8 @@ __u64 xor_noreturn_value = (0x110ull << 32); SEC("fentry/bpf_fentry_test1") int BPF_PROG(xor, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32); xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011); @@ -123,6 +135,8 @@ __u32 cmpxchg32_result_succeed = 0; SEC("fentry/bpf_fentry_test1") int BPF_PROG(cmpxchg, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3); cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2); @@ -142,6 +156,8 @@ __u32 xchg32_result = 0; SEC("fentry/bpf_fentry_test1") int BPF_PROG(xchg, int a) { + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; #ifdef ENABLE_ATOMICS_TESTS __u64 val64 = 2; __u32 val32 = 2; diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_bench.c b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c new file mode 100644 index 00000000000000..d9a88dd1ea6562 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct bpf_map; + +__u8 rand_vals[2500000]; +const __u32 nr_rand_bytes = 2500000; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(__u32)); + /* max 
entries and value_size will be set programmatically. + * They are configurable from the userspace bench program. + */ +} array_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_BLOOM_FILTER); + /* max entries, value_size, and # of hash functions will be set + * programmatically. They are configurable from the userspace + * bench program. + */ + __uint(map_extra, 3); +} bloom_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + /* max entries, key_size, and value_size, will be set + * programmatically. They are configurable from the userspace + * bench program. + */ +} hashmap SEC(".maps"); + +struct callback_ctx { + struct bpf_map *map; + bool update; +}; + +/* Tracks the number of hits, drops, and false hits */ +struct { + __u32 stats[3]; +} __attribute__((__aligned__(256))) percpu_stats[256]; + +const __u32 hit_key = 0; +const __u32 drop_key = 1; +const __u32 false_hit_key = 2; + +__u8 value_size; + +const volatile bool hashmap_use_bloom; +const volatile bool count_false_hits; + +int error = 0; + +static __always_inline void log_result(__u32 key) +{ + __u32 cpu = bpf_get_smp_processor_id(); + + percpu_stats[cpu & 255].stats[key]++; +} + +static __u64 +bloom_callback(struct bpf_map *map, __u32 *key, void *val, + struct callback_ctx *data) +{ + int err; + + if (data->update) + err = bpf_map_push_elem(data->map, val, 0); + else + err = bpf_map_peek_elem(data->map, val); + + if (err) { + error |= 1; + return 1; /* stop the iteration */ + } + + log_result(hit_key); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_lookup(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&bloom_map; + data.update = false; + + bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_update(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&bloom_map; + data.update = true; + + bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int bloom_hashmap_lookup(void *ctx) +{ + __u64 *result; + int i, err; + + __u32 index = bpf_get_prandom_u32(); + __u32 bitmask = (1ULL << 21) - 1; + + for (i = 0; i < 1024; i++, index += value_size) { + index = index & bitmask; + + if (hashmap_use_bloom) { + err = bpf_map_peek_elem(&bloom_map, + rand_vals + index); + if (err) { + if (err != -ENOENT) { + error |= 2; + return 0; + } + log_result(hit_key); + continue; + } + } + + result = bpf_map_lookup_elem(&hashmap, + rand_vals + index); + if (result) { + log_result(hit_key); + } else { + if (hashmap_use_bloom && count_false_hits) + log_result(false_hit_key); + log_result(drop_key); + } + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_map.c b/tools/testing/selftests/bpf/progs/bloom_filter_map.c new file mode 100644 index 00000000000000..1316f3db79d99c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bloom_filter_map.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct bpf_map; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1000); +} map_random_data SEC(".maps"); + +struct map_bloom_type { + __uint(type, BPF_MAP_TYPE_BLOOM_FILTER); + __type(value, __u32); + __uint(max_entries, 10000); + __uint(map_extra, 5); +} map_bloom SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __type(key, int); + 
__type(value, int); + __uint(max_entries, 1); + __array(values, struct map_bloom_type); +} outer_map SEC(".maps"); + +struct callback_ctx { + struct bpf_map *map; +}; + +int error = 0; + +static __u64 +check_elem(struct bpf_map *map, __u32 *key, __u32 *val, + struct callback_ctx *data) +{ + int err; + + err = bpf_map_peek_elem(data->map, val); + if (err) { + error |= 1; + return 1; /* stop the iteration */ + } + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int inner_map(void *ctx) +{ + struct bpf_map *inner_map; + struct callback_ctx data; + int key = 0; + + inner_map = bpf_map_lookup_elem(&outer_map, &key); + if (!inner_map) { + error |= 2; + return 0; + } + + data.map = inner_map; + bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0); + + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int check_bloom(void *ctx) +{ + struct callback_ctx data; + + data.map = (struct bpf_map *)&map_bloom; + bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c index f62df4d023f9f0..d9660e7200e2d1 100644 --- a/tools/testing/selftests/bpf/progs/bpf_cubic.c +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -169,11 +169,7 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk) ca->sample_cnt = 0; } -/* "struct_ops/" prefix is not a requirement - * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS - * as long as it is used in one of the func ptr - * under SEC(".struct_ops"). - */ +/* "struct_ops/" prefix is a requirement */ SEC("struct_ops/bpf_cubic_init") void BPF_PROG(bpf_cubic_init, struct sock *sk) { @@ -188,10 +184,8 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; } -/* No prefix in SEC will also work. - * The remaining tcp-cubic functions have an easier way. - */ -SEC("no-sec-prefix-bictcp_cwnd_event") +/* "struct_ops" prefix is a requirement */ +SEC("struct_ops/bpf_cubic_cwnd_event") void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_TX_START) { diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 95a5a0778ed719..f266c757b3df33 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -19,9 +19,8 @@ #include #include -int _version SEC("version") = 1; #define PROG(F) PROG_(F, _##F) -#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME +#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM /* These are the identifiers of the BPF programs that will be used in tail * calls. 
Name is limited to 16 characters, with the terminating character and diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c index 8f44767a75fa5f..e5560a6560309b 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c @@ -11,7 +11,7 @@ /* *struct bitfields_only_mixed_types { * int a: 3; - * long int b: 2; + * long b: 2; * _Bool c: 1; * enum { * A = 0, @@ -27,7 +27,7 @@ struct bitfields_only_mixed_types { int a: 3; - long int b: 2; + long b: 2; bool c: 1; /* it's really a _Bool type */ enum { A, /* A = 0, dumper is very explicit */ @@ -44,8 +44,8 @@ struct bitfields_only_mixed_types { * char: 4; * int a: 4; * short b; - * long int c; - * long int d: 8; + * long c; + * long d: 8; * int e; * int f; *}; @@ -71,7 +71,7 @@ struct bitfield_mixed_with_others { *struct bitfield_flushed { * int a: 4; * long: 60; - * long int b: 16; + * long b: 16; *}; * */ diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c index 1cef3bec1dc7f6..e304b6204bd9d7 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c @@ -29,7 +29,7 @@ struct non_packed_fields { struct nested_packed { char: 4; int a: 4; - long int b; + long b; struct { char c; int d; @@ -44,7 +44,7 @@ union union_is_never_packed { union union_does_not_need_packing { struct { - long int a; + long a; int b; } __attribute__((packed)); int c; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c index 35c512818a56b8..f2661c8d2d9003 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -9,7 +9,7 @@ /* ----- START-EXPECTED-OUTPUT ----- */ struct padded_implicitly { int a; - long int b; + long b; char c; }; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c index 8aaa24a003220b..1c7105fcae3c4c 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -189,7 +189,7 @@ struct struct_with_embedded_stuff { const char *d; } e; union { - volatile long int f; + volatile long f; void * restrict g; }; }; diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c index a25373002055e7..3f81ff92184cf1 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c @@ -20,7 +20,7 @@ struct { __u32 invocations = 0; -SEC("cgroup_skb/egress/1") +SEC("cgroup_skb/egress") int egress1(struct __sk_buff *skb) { struct cgroup_value *ptr_cg_storage = @@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb) return 1; } -SEC("cgroup_skb/egress/2") +SEC("cgroup_skb/egress") int egress2(struct __sk_buff *skb) { struct cgroup_value *ptr_cg_storage = diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c index a149f33bc53379..d662db27fe4a1e 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c +++ 
b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c @@ -20,7 +20,7 @@ struct { __u32 invocations = 0; -SEC("cgroup_skb/egress/1") +SEC("cgroup_skb/egress") int egress1(struct __sk_buff *skb) { struct cgroup_value *ptr_cg_storage = @@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb) return 1; } -SEC("cgroup_skb/egress/2") +SEC("cgroup_skb/egress") int egress2(struct __sk_buff *skb) { struct cgroup_value *ptr_cg_storage = diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c index 3f757e30d7a06c..88638315c582c2 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c @@ -14,7 +14,6 @@ #include #include -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; __u16 g_serv_port = 0; diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c index b565d997810adb..d3f4c5e4fb696e 100644 --- a/tools/testing/selftests/bpf/progs/connect4_dropper.c +++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c @@ -18,7 +18,7 @@ int connect_v4_dropper(struct bpf_sock_addr *ctx) { if (ctx->type != SOCK_STREAM) return VERDICT_PROCEED; - if (ctx->user_port == bpf_htons(60123)) + if (ctx->user_port == bpf_htons(60120)) return VERDICT_REJECT; return VERDICT_PROCEED; } diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index a943d394fd3a04..b241932911dbb3 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -31,8 +31,6 @@ #define IFNAMSIZ 16 #endif -int _version SEC("version") = 1; - __attribute__ ((noinline)) int do_bind(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c index 506d0f81a37569..40266d2c737c2e 100644 --- a/tools/testing/selftests/bpf/progs/connect6_prog.c +++ b/tools/testing/selftests/bpf/progs/connect6_prog.c @@ -24,8 +24,6 @@ #define DST_REWRITE_PORT6 6666 -int _version SEC("version") = 1; - SEC("cgroup/connect6") int connect_v6_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c index a979aaef2a7648..27a632dd382e00 100644 --- a/tools/testing/selftests/bpf/progs/connect_force_port4.c +++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c @@ -13,7 +13,6 @@ #include char _license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; struct svc_addr { __be32 addr; diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c index afc8f1c5a9d600..19cad93e612fc5 100644 --- a/tools/testing/selftests/bpf/progs/connect_force_port6.c +++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c @@ -12,7 +12,6 @@ #include char _license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; struct svc_addr { __be32 addr[4]; diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c index 8924e06bdef063..79b54a4fa2446f 100644 --- a/tools/testing/selftests/bpf/progs/dev_cgroup.c +++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c @@ -57,4 +57,3 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = LINUX_VERSION_CODE; diff 
--git a/tools/testing/selftests/bpf/progs/dummy_st_ops.c b/tools/testing/selftests/bpf/progs/dummy_st_ops.c new file mode 100644 index 00000000000000..ead87edb75e257 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. Huawei Technologies Co., Ltd */ +#include +#include +#include + +struct bpf_dummy_ops_state { + int val; +} __attribute__((preserve_access_index)); + +struct bpf_dummy_ops { + int (*test_1)(struct bpf_dummy_ops_state *state); + int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2, + char a3, unsigned long a4); +}; + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1, struct bpf_dummy_ops_state *state) +{ + int ret; + + if (!state) + return 0xf2f3f4f5; + + ret = state->val; + state->val = 0x5a; + return ret; +} + +__u64 test_2_args[5]; + +SEC("struct_ops/test_2") +int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2, + char a3, unsigned long a4) +{ + test_2_args[0] = (unsigned long)state; + test_2_args[1] = a1; + test_2_args[2] = a2; + test_2_args[3] = a3; + test_2_args[4] = a4; + return 0; +} + +SEC(".struct_ops") +struct bpf_dummy_ops dummy_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c index 03a672d76353a0..bca92c9bd29a17 100644 --- a/tools/testing/selftests/bpf/progs/fexit_sleep.c +++ b/tools/testing/selftests/bpf/progs/fexit_sleep.c @@ -13,7 +13,7 @@ int fexit_cnt = 0; SEC("fentry/__x64_sys_nanosleep") int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs) { - if ((int)bpf_get_current_pid_tgid() != pid) + if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; fentry_cnt++; @@ -23,7 +23,7 @@ int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs) SEC("fexit/__x64_sys_nanosleep") int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret) { - if ((int)bpf_get_current_pid_tgid() != pid) + if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; fexit_cnt++; diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c index 75e8e1069fe73a..df918b2469da04 100644 --- a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c +++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c @@ -47,7 +47,7 @@ check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val, u32 arraymap_output = 0; -SEC("classifier") +SEC("tc") int test_pkt_access(struct __sk_buff *skb) { struct callback_ctx data; diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c index 913dd91aafffc2..276994d5c0c710 100644 --- a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c +++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c @@ -78,7 +78,7 @@ int hashmap_output = 0; int hashmap_elems = 0; int percpu_map_elems = 0; -SEC("classifier") +SEC("tc") int test_pkt_access(struct __sk_buff *skb) { struct callback_ctx data; diff --git a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c new file mode 100644 index 00000000000000..a1b139888048c9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include +#include + +char 
_license[] SEC("license") = "GPL"; + +__u64 test1_hits = 0; +__u64 address_low = 0; +__u64 address_high = 0; +int wasted_entries = 0; +long total_entries = 0; + +#define ENTRY_CNT 32 +struct perf_branch_entry entries[ENTRY_CNT] = {}; + +static inline bool in_range(__u64 val) +{ + return (val >= address_low) && (val < address_high); +} + +SEC("fexit/bpf_testmod_loop_test") +int BPF_PROG(test1, int n, int ret) +{ + long i; + + total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0); + total_entries /= sizeof(struct perf_branch_entry); + + for (i = 0; i < ENTRY_CNT; i++) { + if (i >= total_entries) + break; + if (in_range(entries[i].from) && in_range(entries[i].to)) + test1_hits++; + else if (!test1_hits) + wasted_entries++; + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c index 6b42db2fe391a4..68587b1de34e26 100644 --- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c @@ -37,4 +37,3 @@ int trace(void *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 55e283050cab42..7236da72ce8055 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -9,8 +9,8 @@ char _license[] SEC("license") = "GPL"; struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } perf_buf_map SEC(".maps"); #define _(P) (__builtin_preserve_access_index(P)) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 470f8723e463d9..8a8cf59017aac0 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -8,7 +8,7 @@ extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, __u32 c, __u64 d) __ksym; -SEC("classifier") +SEC("tc") int kfunc_call_test2(struct __sk_buff *skb) { struct bpf_sock *sk = skb->sk; @@ -23,7 +23,7 @@ int kfunc_call_test2(struct __sk_buff *skb) return bpf_kfunc_call_test2((struct sock *)sk, 1, 2); } -SEC("classifier") +SEC("tc") int kfunc_call_test1(struct __sk_buff *skb) { struct bpf_sock *sk = skb->sk; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c index 5fbd9e232d44b0..c1fdecabeabfcb 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c @@ -33,7 +33,7 @@ int __noinline f1(struct __sk_buff *skb) return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4); } -SEC("classifier") +SEC("tc") int kfunc_call_test1(struct __sk_buff *skb) { return f1(skb); diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index d1d304c980f0c4..b1b711d9b214d5 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -683,5 +683,4 @@ int cg_skb(void *ctx) return 1; } -__u32 _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index 43649bce4c5406..f718b2c212dc84 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -68,4 +68,3 @@ int bpf_nextcnt(struct __sk_buff *skb) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c index 25467d13c356a6..b3fcb5274ee090 100644 --- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c +++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c @@ -11,8 +11,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH]; struct { __uint(type, BPF_MAP_TYPE_STACK_TRACE); __uint(max_entries, 16384); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(stack_trace_t)); + __type(key, __u32); + __type(value, stack_trace_t); } stackmap SEC(".maps"); struct { diff --git a/tools/testing/selftests/bpf/progs/recursion.c b/tools/testing/selftests/bpf/progs/recursion.c index 49f679375b9d63..3c2423bb19e28f 100644 --- a/tools/testing/selftests/bpf/progs/recursion.c +++ b/tools/testing/selftests/bpf/progs/recursion.c @@ -24,8 +24,8 @@ struct { int pass1 = 0; int pass2 = 0; -SEC("fentry/__htab_map_lookup_elem") -int BPF_PROG(on_lookup, struct bpf_map *map) +SEC("fentry/htab_map_delete_elem") +int BPF_PROG(on_delete, struct bpf_map *map) { int key = 0; @@ -35,10 +35,7 @@ int BPF_PROG(on_lookup, struct bpf_map *map) } if (map == (void *)&hash2) { pass2++; - /* htab_map_gen_lookup() will inline below call - * into direct call to __htab_map_lookup_elem() - */ - bpf_map_lookup_elem(&hash2, &key); + bpf_map_delete_elem(&hash2, &key); return 0; } diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c index ac5abc34cde835..ea75a44cb7fce5 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c @@ -18,8 +18,6 @@ #define DST_PORT 4040 #define DST_REWRITE_PORT4 4444 -int _version SEC("version") = 1; - SEC("cgroup/sendmsg4") int sendmsg_v4_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c index 24694b1a8d82b0..bf9b46b806f6af 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c @@ -22,8 +22,6 @@ #define DST_REWRITE_PORT6 6666 -int _version SEC("version") = 1; - SEC("cgroup/sendmsg6") int sendmsg_v6_prog(struct bpf_sock_addr *ctx) { diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c index 7f2eaa2f89f810..992b7861003a21 100644 --- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c +++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c @@ -25,7 +25,7 @@ static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb) return ip; } -SEC("classifier/cls") +SEC("tc") int main_prog(struct __sk_buff *skb) { struct iphdr *ip = NULL; diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c index ca283af80d4e9b..95d5b941bc1f8d 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c @@ -2,8 +2,6 @@ #include #include -int _version SEC("version") = 1; - SEC("sk_skb1") int bpf_prog1(struct __sk_buff *skb) { diff 
--git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c index eeaf6e75c9a26e..80632954c5a166 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - SEC("sk_msg1") int bpf_prog1(struct sk_msg_md *msg) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index 4797dc9850645f..e2468a6d01a59a 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -2,27 +2,25 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 20); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } sock_map_rx SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 20); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } sock_map_tx SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(max_entries, 20); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } sock_map_msg SEC(".maps"); struct { diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index c6d428a8d78574..9fb241b9729197 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -3,7 +3,6 @@ #include char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; #define SOL_CUSTOM 0xdeadbeef #define CUSTOM_INHERIT1 0 diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c index 9d8c212dde9f5f..177a59069dae6f 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_multi.c +++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c @@ -4,9 +4,8 @@ #include char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; -SEC("cgroup/getsockopt/child") +SEC("cgroup/getsockopt") int _getsockopt_child(struct bpf_sockopt *ctx) { __u8 *optval_end = ctx->optval_end; @@ -29,7 +28,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx) return 1; } -SEC("cgroup/getsockopt/parent") +SEC("cgroup/getsockopt") int _getsockopt_parent(struct bpf_sockopt *ctx) { __u8 *optval_end = ctx->optval_end; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 7de534f38c3f1f..60c93aee2f4ad0 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -358,7 +358,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, void *payload) { void *location; - uint32_t len; + uint64_t len; data->str_lens[idx] = 0; location = calc_location(&cfg->str_locs[idx], tls_base); @@ -390,7 +390,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, struct strobe_map_descr* descr = &data->map_descrs[idx]; struct strobe_map_raw map; void *location; - uint32_t len; + uint64_t len; int i; descr->tag_len = 0; /* presume no tag is set */ diff --git a/tools/testing/selftests/bpf/progs/tag.c b/tools/testing/selftests/bpf/progs/tag.c new file mode 100644 index 00000000000000..1792f4eda09597 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/tag.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include +#include + +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#if __has_attribute(btf_decl_tag) +#define __tag1 __attribute__((btf_decl_tag("tag1"))) +#define __tag2 __attribute__((btf_decl_tag("tag2"))) +volatile const bool skip_tests __tag1 __tag2 = false; +#else +#define __tag1 +#define __tag2 +volatile const bool skip_tests = true; +#endif + +struct key_t { + int a; + int b __tag1 __tag2; + int c; +} __tag1 __tag2; + +typedef struct { + int a; + int b; +} value_t __tag1 __tag2; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, value_t); +} hashmap1 SEC(".maps"); + + +static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2 +{ + struct key_t key; + value_t val = {}; + + key.a = key.b = key.c = x; + bpf_map_update_elem(&hashmap1, &key, &val, 0); + return 0; +} + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(sub, int x) +{ + return foo(x); +} diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c index 7115bcefbe8ab6..8159a0b4a69a7c 100644 --- a/tools/testing/selftests/bpf/progs/tailcall1.c +++ b/tools/testing/selftests/bpf/progs/tailcall1.c @@ -11,8 +11,8 @@ struct { } jmp_table SEC(".maps"); #define TAIL_FUNC(x) \ - SEC("classifier/" #x) \ - int bpf_func_##x(struct __sk_buff *skb) \ + SEC("tc") \ + int classifier_##x(struct __sk_buff *skb) \ { \ return x; \ } @@ -20,7 +20,7 @@ TAIL_FUNC(0) TAIL_FUNC(1) TAIL_FUNC(2) -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { /* Multiple locations to make sure we patch @@ -45,4 +45,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c index 0431e4fe7efd7e..a5ff53e6170202 100644 --- a/tools/testing/selftests/bpf/progs/tailcall2.c +++ b/tools/testing/selftests/bpf/progs/tailcall2.c @@ -10,41 +10,41 @@ struct { __uint(value_size, sizeof(__u32)); } jmp_table SEC(".maps"); -SEC("classifier/0") -int bpf_func_0(struct __sk_buff *skb) +SEC("tc") +int classifier_0(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 1); return 0; } -SEC("classifier/1") -int bpf_func_1(struct __sk_buff *skb) +SEC("tc") +int classifier_1(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 2); return 1; } -SEC("classifier/2") -int bpf_func_2(struct __sk_buff *skb) +SEC("tc") +int classifier_2(struct __sk_buff *skb) { return 2; } -SEC("classifier/3") -int bpf_func_3(struct __sk_buff *skb) +SEC("tc") +int classifier_3(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 4); return 3; } -SEC("classifier/4") -int bpf_func_4(struct __sk_buff *skb) +SEC("tc") +int classifier_4(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 3); return 4; } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 0); @@ -56,4 +56,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c index 910858fe078a8c..f60bcd7b8d4b5b 100644 --- a/tools/testing/selftests/bpf/progs/tailcall3.c +++ b/tools/testing/selftests/bpf/progs/tailcall3.c @@ -12,15 +12,15 @@ struct { int count = 
0; -SEC("classifier/0") -int bpf_func_0(struct __sk_buff *skb) +SEC("tc") +int classifier_0(struct __sk_buff *skb) { count++; bpf_tail_call_static(skb, &jmp_table, 0); return 1; } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 0); @@ -28,4 +28,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c index bd4be135c39d93..a56bbc2313ca10 100644 --- a/tools/testing/selftests/bpf/progs/tailcall4.c +++ b/tools/testing/selftests/bpf/progs/tailcall4.c @@ -13,8 +13,8 @@ struct { int selector = 0; #define TAIL_FUNC(x) \ - SEC("classifier/" #x) \ - int bpf_func_##x(struct __sk_buff *skb) \ + SEC("tc") \ + int classifier_##x(struct __sk_buff *skb) \ { \ return x; \ } @@ -22,7 +22,7 @@ TAIL_FUNC(0) TAIL_FUNC(1) TAIL_FUNC(2) -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { bpf_tail_call(skb, &jmp_table, selector); @@ -30,4 +30,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c index adf30a33064ef5..8d03496eb6ca0b 100644 --- a/tools/testing/selftests/bpf/progs/tailcall5.c +++ b/tools/testing/selftests/bpf/progs/tailcall5.c @@ -13,8 +13,8 @@ struct { int selector = 0; #define TAIL_FUNC(x) \ - SEC("classifier/" #x) \ - int bpf_func_##x(struct __sk_buff *skb) \ + SEC("tc") \ + int classifier_##x(struct __sk_buff *skb) \ { \ return x; \ } @@ -22,7 +22,7 @@ TAIL_FUNC(0) TAIL_FUNC(1) TAIL_FUNC(2) -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { int idx = 0; @@ -37,4 +37,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall6.c b/tools/testing/selftests/bpf/progs/tailcall6.c new file mode 100644 index 00000000000000..d77b8abd62f3bf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall6.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count, which; + +SEC("tc") +int classifier_0(struct __sk_buff *skb) +{ + count++; + if (__builtin_constant_p(which)) + __bpf_unreachable(); + bpf_tail_call(skb, &jmp_table, which); + return 1; +} + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + if (__builtin_constant_p(which)) + __bpf_unreachable(); + bpf_tail_call(skb, &jmp_table, which); + return 0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c index 0103f3dd9f02b3..8c91428deb90d7 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c @@ -10,8 +10,8 @@ struct { } jmp_table SEC(".maps"); #define TAIL_FUNC(x) \ - SEC("classifier/" #x) \ - int bpf_func_##x(struct __sk_buff *skb) \ + SEC("tc") \ + int classifier_##x(struct __sk_buff *skb) \ { \ return x; \ } @@ -26,7 +26,7 @@ int subprog_tail(struct __sk_buff *skb) return skb->len * 2; } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 1); @@ -35,4 +35,3 @@ int entry(struct __sk_buff 
*skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c index 3cc4c12817b505..ce97d141daee15 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c @@ -22,14 +22,14 @@ int subprog_tail(struct __sk_buff *skb) int count = 0; -SEC("classifier/0") -int bpf_func_0(struct __sk_buff *skb) +SEC("tc") +int classifier_0(struct __sk_buff *skb) { count++; return subprog_tail(skb); } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { bpf_tail_call_static(skb, &jmp_table, 0); @@ -38,4 +38,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c index 0d5482bea6c9b9..7fab39a3bb12aa 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c @@ -33,23 +33,23 @@ int subprog_tail(struct __sk_buff *skb) return skb->len * 2; } -SEC("classifier/0") -int bpf_func_0(struct __sk_buff *skb) +SEC("tc") +int classifier_0(struct __sk_buff *skb) { volatile char arr[128] = {}; return subprog_tail2(skb); } -SEC("classifier/1") -int bpf_func_1(struct __sk_buff *skb) +SEC("tc") +int classifier_1(struct __sk_buff *skb) { volatile char arr[128] = {}; return skb->len * 3; } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { volatile char arr[128] = {}; @@ -58,4 +58,3 @@ int entry(struct __sk_buff *skb) } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c index e89368a50b9795..b67e8022d50092 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c @@ -50,30 +50,29 @@ int subprog_tail(struct __sk_buff *skb) return skb->len; } -SEC("classifier/1") -int bpf_func_1(struct __sk_buff *skb) +SEC("tc") +int classifier_1(struct __sk_buff *skb) { return subprog_tail_2(skb); } -SEC("classifier/2") -int bpf_func_2(struct __sk_buff *skb) +SEC("tc") +int classifier_2(struct __sk_buff *skb) { count++; return subprog_tail_2(skb); } -SEC("classifier/0") -int bpf_func_0(struct __sk_buff *skb) +SEC("tc") +int classifier_0(struct __sk_buff *skb) { return subprog_tail_1(skb); } -SEC("classifier") +SEC("tc") int entry(struct __sk_buff *skb) { return subprog_tail(skb); } char __license[] SEC("license") = "GPL"; -int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c index 0cb3204ddb1829..0988d79f15877f 100644 --- a/tools/testing/selftests/bpf/progs/tcp_rtt.c +++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c @@ -3,7 +3,6 @@ #include char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; struct tcp_rtt_storage { __u32 invoked; diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c index 31538c9ed1939c..160ead6c67b200 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c @@ -4,8 +4,6 @@ #include #include "bpf_legacy.h" -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git 
a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c index c1e0c8c7c55ff4..c218cf8989a9e4 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c @@ -21,8 +21,8 @@ struct inner_map_sz2 { struct outer_arr { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 3); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); /* it's possible to use anonymous struct as inner map definition here */ __array(values, struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -61,8 +61,8 @@ struct inner_map_sz4 { struct outer_arr_dyn { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 3); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); __array(values, struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(map_flags, BPF_F_INNER_MAP); @@ -81,7 +81,7 @@ struct outer_arr_dyn { struct outer_hash { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, 5); - __uint(key_size, sizeof(int)); + __type(key, int); /* Here everything works flawlessly due to reuse of struct inner_map * and compiler will complain at the attempt to use non-inner_map * references below. This is great experience. @@ -111,8 +111,8 @@ struct sockarr_sz2 { struct outer_sockarr_sz1 { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 1); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); __array(values, struct sockarr_sz1); } outer_sockarr SEC(".maps") = { .values = { (void *)&sockarr_sz1 }, diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c index 6c5560162746b8..1884a5bd10f568 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c @@ -4,8 +4,6 @@ #include #include "bpf_legacy.h" -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c index 506da7fd2da237..15e0f9945fe469 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct ipv_counts { unsigned int v4; unsigned int v6; diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c index 9a6b85dd52d29f..e2bea4da194bd6 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c +++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c @@ -145,7 +145,7 @@ static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb) return TC_ACT_OK; } -SEC("classifier/ingress") +SEC("tc") int cls_ingress(struct __sk_buff *skb) { struct ipv6hdr *ip6h; diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c index 77e47b9e44469b..4faba88e45a5a2 100644 --- a/tools/testing/selftests/bpf/progs/test_cgroup_link.c +++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c @@ -6,14 +6,14 @@ int calls = 0; int alt_calls = 0; -SEC("cgroup_skb/egress1") +SEC("cgroup_skb/egress") int egress(struct __sk_buff *skb) { __sync_fetch_and_add(&calls, 1); return 1; } -SEC("cgroup_skb/egress2") +SEC("cgroup_skb/egress") 
int egress_alt(struct __sk_buff *skb) { __sync_fetch_and_add(&alt_calls, 1); diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c index 71184af57749a2..2ec1de11a3ae4a 100644 --- a/tools/testing/selftests/bpf/progs/test_check_mtu.c +++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c @@ -153,7 +153,7 @@ int xdp_input_len_exceed(struct xdp_md *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_use_helper(struct __sk_buff *ctx) { int retval = BPF_OK; /* Expected retval on successful test */ @@ -172,7 +172,7 @@ int tc_use_helper(struct __sk_buff *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_exceed_mtu(struct __sk_buff *ctx) { __u32 ifindex = GLOBAL_USER_IFINDEX; @@ -196,7 +196,7 @@ int tc_exceed_mtu(struct __sk_buff *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_exceed_mtu_da(struct __sk_buff *ctx) { /* SKB Direct-Access variant */ @@ -223,7 +223,7 @@ int tc_exceed_mtu_da(struct __sk_buff *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_minus_delta(struct __sk_buff *ctx) { int retval = BPF_OK; /* Expected retval on successful test */ @@ -245,7 +245,7 @@ int tc_minus_delta(struct __sk_buff *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_input_len(struct __sk_buff *ctx) { int retval = BPF_OK; /* Expected retval on successful test */ @@ -265,7 +265,7 @@ int tc_input_len(struct __sk_buff *ctx) return retval; } -SEC("classifier") +SEC("tc") int tc_input_len_exceed(struct __sk_buff *ctx) { int retval = BPF_DROP; /* Fail */ diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index e2a5acc4785c29..2833ad722cb752 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -928,7 +928,7 @@ static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics) } } -SEC("classifier/cls_redirect") +SEC("tc") int cls_redirect(struct __sk_buff *skb) { metrics_t *metrics = get_global_metrics(); diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c index 8b533db4a7a5ed..b2ded497572a1c 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c @@ -42,7 +42,16 @@ struct core_reloc_mods { core_reloc_mods_substruct_t h; }; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src) +#else +#define CORE_READ(dst, src) ({ \ + int __sz = sizeof(*(dst)) < sizeof(*(src)) ? 
sizeof(*(dst)) : \ + sizeof(*(src)); \ + bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \ + (const char *)(src) + sizeof(*(src)) - __sz); \ +}) +#endif SEC("raw_tracepoint/sys_enter") int test_core_mods(void *ctx) diff --git a/tools/testing/selftests/bpf/progs/test_enable_stats.c b/tools/testing/selftests/bpf/progs/test_enable_stats.c index 01a002ade52925..1705097d01d7b4 100644 --- a/tools/testing/selftests/bpf/progs/test_enable_stats.c +++ b/tools/testing/selftests/bpf/progs/test_enable_stats.c @@ -13,6 +13,6 @@ __u64 count = 0; SEC("raw_tracepoint/sys_enter") int test_enable_stats(void *ctx) { - count += 1; + __sync_fetch_and_add(&count, 1); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c index 1319be1c54ba41..719e314ef3e42e 100644 --- a/tools/testing/selftests/bpf/progs/test_global_data.c +++ b/tools/testing/selftests/bpf/progs/test_global_data.c @@ -68,7 +68,7 @@ static struct foo struct3 = { bpf_map_update_elem(&result_##map, &key, var, 0); \ } while (0) -SEC("classifier/static_data_load") +SEC("tc") int load_static_data(struct __sk_buff *skb) { static const __u64 bar = ~0; diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 880260f6d53661..7b42dad187b894 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -38,7 +38,7 @@ int f3(int val, struct __sk_buff *skb, int var) return skb->ifindex * val * var; } -SEC("classifier/test") +SEC("tc") int test_cls(struct __sk_buff *skb) { return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c index 86f0ecb304fcfe..01bf8275dfd640 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func3.c +++ b/tools/testing/selftests/bpf/progs/test_global_func3.c @@ -54,7 +54,7 @@ int f8(struct __sk_buff *skb) } #endif -SEC("classifier/test") +SEC("tc") int test_cls(struct __sk_buff *skb) { #ifndef NO_FN8 diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c index 260c25b827ef94..9248d03e0d06fb 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func5.c +++ b/tools/testing/selftests/bpf/progs/test_global_func5.c @@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb) return skb->ifindex * val; } -SEC("classifier/test") +SEC("tc") int test_cls(struct __sk_buff *skb) { return f1(skb) + f2(2, skb) + f3(3, skb); diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c index 69e19c64e10b6b..af8c78bdfb2575 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func6.c +++ b/tools/testing/selftests/bpf/progs/test_global_func6.c @@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb) return skb->ifindex * val; } -SEC("classifier/test") +SEC("tc") int test_cls(struct __sk_buff *skb) { return f1(skb) + f2(2, skb) + f3(3, skb); diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c index 309b3f6136bde6..6cb8e2f5254cf1 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func7.c +++ b/tools/testing/selftests/bpf/progs/test_global_func7.c @@ -10,7 +10,7 @@ void foo(struct __sk_buff *skb) skb->tc_index = 0; } -SEC("classifier/test") +SEC("tc") int test_cls(struct __sk_buff *skb) { 
foo(skb); diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_module.c b/tools/testing/selftests/bpf/progs/test_ksyms_module.c index d6a0b3086b9065..0650d918c096f4 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_module.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_module.c @@ -2,24 +2,48 @@ /* Copyright (c) 2021 Facebook */ #include "vmlinux.h" - #include +#define X_0(x) +#define X_1(x) x X_0(x) +#define X_2(x) x X_1(x) +#define X_3(x) x X_2(x) +#define X_4(x) x X_3(x) +#define X_5(x) x X_4(x) +#define X_6(x) x X_5(x) +#define X_7(x) x X_6(x) +#define X_8(x) x X_7(x) +#define X_9(x) x X_8(x) +#define X_10(x) x X_9(x) +#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y) + extern const int bpf_testmod_ksym_percpu __ksym; +extern void bpf_testmod_test_mod_kfunc(int i) __ksym; +extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak; -int out_mod_ksym_global = 0; -bool triggered = false; +int out_bpf_testmod_ksym = 0; +const volatile int x = 0; -SEC("raw_tp/sys_enter") -int handler(const void *ctx) +SEC("tc") +int load(struct __sk_buff *skb) { - int *val; - __u32 cpu; - - val = (int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu); - out_mod_ksym_global = *val; - triggered = true; + /* This will be kept by clang, but removed by verifier. Since it is + * marked as __weak, libbpf and gen_loader don't error out if BTF ID + * is not found for it, instead imm and off is set to 0 for it. + */ + if (x) + bpf_testmod_invalid_mod_kfunc(); + bpf_testmod_test_mod_kfunc(42); + out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu); + return 0; +} +SEC("tc") +int load_256(struct __sk_buff *skb) +{ + /* this will fail if kfunc doesn't reuse its own btf fd index */ + REPEAT_256(bpf_testmod_test_mod_kfunc(42);); + bpf_testmod_test_mod_kfunc(42); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 5f8379aadb2965..8eadbd4caf7a79 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -38,7 +38,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); if (rq) - out__existing_typed = rq->cpu; + out__existing_typed = 0; out__existing_typeless = (__u64)&bpf_prog_active; /* tests non-existent symbols. 
*/ diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c index 33493911d87a4f..04fee08863cb15 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb.c @@ -21,8 +21,6 @@ #include "test_iptunnel_common.h" #include -int _version SEC("version") = 1; - static inline __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> ((-shift) & 31)); diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c index 1cfeb940cf9fb2..f416032ba858b4 100644 --- a/tools/testing/selftests/bpf/progs/test_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c @@ -9,21 +9,19 @@ struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 1); __uint(map_flags, 0); - __uint(key_size, sizeof(__u32)); - /* must be sizeof(__u32) for map in map */ - __uint(value_size, sizeof(__u32)); + __type(key, __u32); + __type(value, __u32); } mim_array SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, 1); __uint(map_flags, 0); - __uint(key_size, sizeof(int)); - /* must be sizeof(__u32) for map in map */ - __uint(value_size, sizeof(__u32)); + __type(key, int); + __type(value, __u32); } mim_hash SEC(".maps"); -SEC("xdp_mimtest") +SEC("xdp") int xdp_mimtest0(struct xdp_md *ctx) { int value = 123; @@ -49,5 +47,4 @@ int xdp_mimtest0(struct xdp_md *ctx) return XDP_PASS; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c index 703c08e064423a..9c7d75cf0bd6b7 100644 --- a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c +++ b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c @@ -13,7 +13,7 @@ struct inner { struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 0); /* This will make map creation to fail */ - __uint(key_size, sizeof(__u32)); + __type(key, __u32); __array(values, struct inner); } mim SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c index 6077a025092cb3..2c121c5d66a7fa 100644 --- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c @@ -293,7 +293,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops) return check_active_hdr_in(skops); } -SEC("sockops/misc_estab") +SEC("sockops") int misc_estab(struct bpf_sock_ops *skops) { int true_val = 1; diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index bd37ceec55877a..b36857093f71fe 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -27,6 +27,20 @@ int BPF_PROG(handle_raw_tp_bare, return 0; } +int raw_tp_writable_bare_in_val = 0; +int raw_tp_writable_bare_early_ret = 0; +int raw_tp_writable_bare_out_val = 0; + +SEC("raw_tp.w/bpf_testmod_test_writable_bare") +int BPF_PROG(handle_raw_tp_writable_bare, + struct bpf_testmod_test_writable_ctx *writable) +{ + raw_tp_writable_bare_in_val = writable->val; + writable->early_ret = raw_tp_writable_bare_early_ret; + writable->val = raw_tp_writable_bare_out_val; + return 0; +} + __u32 tp_btf_read_sz = 0; SEC("tp_btf/bpf_testmod_test_read") diff --git 
a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c index fb22de7c365d4c..1249a945699fdb 100644 --- a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c +++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c @@ -7,15 +7,15 @@ struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __uint(max_entries, 1); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } array_1 SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __uint(max_entries, 1); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); __uint(map_flags, BPF_F_PRESERVE_ELEMS); } array_2 SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index 8207a2dc2f9da0..17d5b67744d55b 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -6,20 +6,36 @@ #include #include +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} my_pid_map SEC(".maps"); + struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } perf_buf_map SEC(".maps"); SEC("tp/raw_syscalls/sys_enter") int handle_sys_enter(void *ctx) { + int zero = 0, *my_pid, cur_pid; int cpu = bpf_get_smp_processor_id(); + my_pid = bpf_map_lookup_elem(&my_pid_map, &zero); + if (!my_pid) + return 1; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != *my_pid) + return 1; + bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU, &cpu, sizeof(cpu)); - return 0; + return 1; } char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c index 4ef2630292b261..0facea6cbbaed9 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning.c +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c index 5412e0c732c760..2a56db1094b875 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c +++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c @@ -3,8 +3,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 852051064507d0..0558544e1ff0fb 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -15,7 +15,6 @@ #include #define barrier() __asm__ __volatile__("": : :"memory") -int _version SEC("version") = 1; /* llvm will optimize both subprograms into exactly the same BPF assembly * @@ -97,7 +96,7 @@ int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off) return 0; } -SEC("classifier/test_pkt_access") +SEC("tc") int test_pkt_access(struct __sk_buff *skb) { void *data_end = (void *)(long)skb->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c 
b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c index 610c74ea9f648a..d1839366f3e1e6 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c @@ -7,8 +7,6 @@ #include #include -int _version SEC("version") = 1; - #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define TEST_FIELD(TYPE, FIELD, MASK) \ { \ @@ -27,7 +25,7 @@ int _version SEC("version") = 1; } #endif -SEC("classifier/test_pkt_md_access") +SEC("tc") int test_pkt_md_access(struct __sk_buff *skb) { TEST_FIELD(__u8, len, 0xFF); diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c index 89b3532ccc75e3..8812a90da4eb87 100644 --- a/tools/testing/selftests/bpf/progs/test_probe_user.c +++ b/tools/testing/selftests/bpf/progs/test_probe_user.c @@ -8,13 +8,37 @@ #include #include +#if defined(__TARGET_ARCH_x86) +#define SYSCALL_WRAPPER 1 +#define SYS_PREFIX "__x64_" +#elif defined(__TARGET_ARCH_s390) +#define SYSCALL_WRAPPER 1 +#define SYS_PREFIX "__s390x_" +#elif defined(__TARGET_ARCH_arm64) +#define SYSCALL_WRAPPER 1 +#define SYS_PREFIX "__arm64_" +#else +#define SYSCALL_WRAPPER 0 +#define SYS_PREFIX "" +#endif + static struct sockaddr_in old; -SEC("kprobe/__sys_connect") +SEC("kprobe/" SYS_PREFIX "sys_connect") int BPF_KPROBE(handle_sys_connect) { - void *ptr = (void *)PT_REGS_PARM2(ctx); +#if SYSCALL_WRAPPER == 1 + struct pt_regs *real_regs; +#endif struct sockaddr_in new; + void *ptr; + +#if SYSCALL_WRAPPER == 0 + ptr = (void *)PT_REGS_PARM2(ctx); +#else + real_regs = (struct pt_regs *)PT_REGS_PARM1(ctx); + bpf_probe_read_kernel(&ptr, sizeof(ptr), &PT_REGS_PARM2(real_regs)); +#endif bpf_probe_read_user(&old, sizeof(old), ptr); __builtin_memset(&new, 0xab, sizeof(new)); diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h index 4dd9806ad73b5b..0fcd3ff0e38a2a 100644 --- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h +++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h @@ -8,8 +8,6 @@ #include #include -int _version SEC("version") = 1; - struct { __uint(type, MAP_TYPE); __uint(max_entries, 32); diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 26e77dcc7e91da..7d56ed47cd4d86 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -15,8 +15,6 @@ #include #include "test_select_reuseport_common.h" -int _version SEC("version") = 1; - #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif @@ -24,8 +22,8 @@ int _version SEC("version") = 1; struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 1); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(__u32)); + __type(key, __u32); + __type(value, __u32); } outer_map SEC(".maps"); struct { diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 1ecd987005d2c8..02f79356d5eb7c 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -36,7 +36,6 @@ struct { .pinning = PIN_GLOBAL_NS, }; -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; /* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. 
*/ @@ -159,7 +158,7 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4) return ret; } -SEC("classifier/sk_assign_test") +SEC("tc") int bpf_sk_assign_test(struct __sk_buff *skb) { struct bpf_sock_tuple *tuple, ln = {0}; diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index ac6f7f205e25d2..19d2465d94425b 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -72,32 +72,32 @@ static const __u16 DST_PORT = 7007; /* Host byte order */ static const __u32 DST_IP4 = IP4(127, 0, 0, 1); static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001); -SEC("sk_lookup/lookup_pass") +SEC("sk_lookup") int lookup_pass(struct bpf_sk_lookup *ctx) { return SK_PASS; } -SEC("sk_lookup/lookup_drop") +SEC("sk_lookup") int lookup_drop(struct bpf_sk_lookup *ctx) { return SK_DROP; } -SEC("sk_reuseport/reuse_pass") +SEC("sk_reuseport") int reuseport_pass(struct sk_reuseport_md *ctx) { return SK_PASS; } -SEC("sk_reuseport/reuse_drop") +SEC("sk_reuseport") int reuseport_drop(struct sk_reuseport_md *ctx) { return SK_DROP; } /* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */ -SEC("sk_lookup/redir_port") +SEC("sk_lookup") int redir_port(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -116,7 +116,7 @@ int redir_port(struct bpf_sk_lookup *ctx) } /* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */ -SEC("sk_lookup/redir_ip4") +SEC("sk_lookup") int redir_ip4(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -139,7 +139,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx) } /* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */ -SEC("sk_lookup/redir_ip6") +SEC("sk_lookup") int redir_ip6(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -164,7 +164,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx) return err ? SK_DROP : SK_PASS; } -SEC("sk_lookup/select_sock_a") +SEC("sk_lookup") int select_sock_a(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -179,7 +179,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx) return err ? SK_DROP : SK_PASS; } -SEC("sk_lookup/select_sock_a_no_reuseport") +SEC("sk_lookup") int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -194,7 +194,7 @@ int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx) return err ? SK_DROP : SK_PASS; } -SEC("sk_reuseport/select_sock_b") +SEC("sk_reuseport") int select_sock_b(struct sk_reuseport_md *ctx) { __u32 key = KEY_SERVER_B; @@ -205,7 +205,7 @@ int select_sock_b(struct sk_reuseport_md *ctx) } /* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */ -SEC("sk_lookup/sk_assign_eexist") +SEC("sk_lookup") int sk_assign_eexist(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -238,7 +238,7 @@ int sk_assign_eexist(struct bpf_sk_lookup *ctx) } /* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */ -SEC("sk_lookup/sk_assign_replace_flag") +SEC("sk_lookup") int sk_assign_replace_flag(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -270,7 +270,7 @@ int sk_assign_replace_flag(struct bpf_sk_lookup *ctx) } /* Check that bpf_sk_assign(sk=NULL) is accepted. */ -SEC("sk_lookup/sk_assign_null") +SEC("sk_lookup") int sk_assign_null(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk = NULL; @@ -313,7 +313,7 @@ int sk_assign_null(struct bpf_sk_lookup *ctx) } /* Check that selected sk is accessible through context. 
*/ -SEC("sk_lookup/access_ctx_sk") +SEC("sk_lookup") int access_ctx_sk(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk1 = NULL, *sk2 = NULL; @@ -379,7 +379,7 @@ int access_ctx_sk(struct bpf_sk_lookup *ctx) * are not covered because they give bogus results, that is the * verifier ignores the offset. */ -SEC("sk_lookup/ctx_narrow_access") +SEC("sk_lookup") int ctx_narrow_access(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -553,7 +553,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx) } /* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */ -SEC("sk_lookup/sk_assign_esocknosupport") +SEC("sk_lookup") int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; @@ -578,28 +578,28 @@ int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx) return ret; } -SEC("sk_lookup/multi_prog_pass1") +SEC("sk_lookup") int multi_prog_pass1(struct bpf_sk_lookup *ctx) { bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); return SK_PASS; } -SEC("sk_lookup/multi_prog_pass2") +SEC("sk_lookup") int multi_prog_pass2(struct bpf_sk_lookup *ctx) { bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); return SK_PASS; } -SEC("sk_lookup/multi_prog_drop1") +SEC("sk_lookup") int multi_prog_drop1(struct bpf_sk_lookup *ctx) { bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); return SK_DROP; } -SEC("sk_lookup/multi_prog_drop2") +SEC("sk_lookup") int multi_prog_drop2(struct bpf_sk_lookup *ctx) { bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); @@ -623,7 +623,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) return SK_PASS; } -SEC("sk_lookup/multi_prog_redir1") +SEC("sk_lookup") int multi_prog_redir1(struct bpf_sk_lookup *ctx) { int ret; @@ -633,7 +633,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx) return SK_PASS; } -SEC("sk_lookup/multi_prog_redir2") +SEC("sk_lookup") int multi_prog_redir2(struct bpf_sk_lookup *ctx) { int ret; @@ -644,4 +644,3 @@ int multi_prog_redir2(struct bpf_sk_lookup *ctx) } char _license[] SEC("license") = "Dual BSD/GPL"; -__u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c index 8249075f088f24..40f161480a2f25 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c @@ -15,7 +15,6 @@ #include #include -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; /* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */ @@ -53,8 +52,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off, return result; } -SEC("classifier/sk_lookup_success") -int bpf_sk_lookup_test0(struct __sk_buff *skb) +SEC("tc") +int sk_lookup_success(struct __sk_buff *skb) { void *data_end = (void *)(long)skb->data_end; void *data = (void *)(long)skb->data; @@ -79,8 +78,8 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb) return sk ? 
TC_ACT_OK : TC_ACT_UNSPEC; } -SEC("classifier/sk_lookup_success_simple") -int bpf_sk_lookup_test1(struct __sk_buff *skb) +SEC("tc") +int sk_lookup_success_simple(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -91,8 +90,8 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb) return 0; } -SEC("classifier/err_use_after_free") -int bpf_sk_lookup_uaf(struct __sk_buff *skb) +SEC("tc") +int err_use_after_free(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -106,8 +105,8 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb) return family; } -SEC("classifier/err_modify_sk_pointer") -int bpf_sk_lookup_modptr(struct __sk_buff *skb) +SEC("tc") +int err_modify_sk_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -121,8 +120,8 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb) return 0; } -SEC("classifier/err_modify_sk_or_null_pointer") -int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) +SEC("tc") +int err_modify_sk_or_null_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -135,8 +134,8 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) return 0; } -SEC("classifier/err_no_release") -int bpf_sk_lookup_test2(struct __sk_buff *skb) +SEC("tc") +int err_no_release(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; @@ -144,8 +143,8 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb) return 0; } -SEC("classifier/err_release_twice") -int bpf_sk_lookup_test3(struct __sk_buff *skb) +SEC("tc") +int err_release_twice(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -156,8 +155,8 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb) return 0; } -SEC("classifier/err_release_unchecked") -int bpf_sk_lookup_test4(struct __sk_buff *skb) +SEC("tc") +int err_release_unchecked(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; @@ -173,8 +172,8 @@ void lookup_no_release(struct __sk_buff *skb) bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); } -SEC("classifier/err_no_release_subcall") -int bpf_sk_lookup_test5(struct __sk_buff *skb) +SEC("tc") +int err_no_release_subcall(struct __sk_buff *skb) { lookup_no_release(skb); return 0; diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c index 552f2090665c50..c304cd5b8cad78 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c @@ -42,6 +42,4 @@ int log_cgroup_id(struct __sk_buff *skb) return TC_ACT_OK; } -int _version SEC("version") = 1; - char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index b02ea589ce7eaa..1d61b36e6067e3 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -3,7 +3,6 @@ #include #include -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; SEC("skb_ctx") @@ -25,6 +24,12 @@ int process(struct __sk_buff *skb) return 1; if (skb->gso_size != 10) return 1; + if (skb->ingress_ifindex != 11) + return 1; + if (skb->ifindex != 1) + return 1; + if (skb->hwtstamp != 11) + return 1; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_skb_helpers.c b/tools/testing/selftests/bpf/progs/test_skb_helpers.c index bb3fbf1a29e34f..507215791c5bbe 100644 --- 
a/tools/testing/selftests/bpf/progs/test_skb_helpers.c +++ b/tools/testing/selftests/bpf/progs/test_skb_helpers.c @@ -14,7 +14,7 @@ struct { char _license[] SEC("license") = "GPL"; -SEC("classifier/test_skb_helpers") +SEC("tc") int test_skb_helpers(struct __sk_buff *skb) { struct task_struct *task; diff --git a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c new file mode 100644 index 00000000000000..a408ec95cba496 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include "vmlinux.h" +#include +#include +#include "bpf_tracing_net.h" + +const volatile pid_t my_pid = 0; +char path[256] = {}; + +SEC("fentry/unix_listen") +int BPF_PROG(unix_listen, struct socket *sock, int backlog) +{ + pid_t pid = bpf_get_current_pid_tgid() >> 32; + struct unix_sock *unix_sk; + int i, len; + + if (pid != my_pid) + return 0; + + unix_sk = (struct unix_sock *)bpf_skc_to_unix_sock(sock->sk); + if (!unix_sk) + return 0; + + if (!UNIX_ABSTRACT(unix_sk)) + return 0; + + len = unix_sk->addr->len - sizeof(short); + path[0] = '@'; + for (i = 1; i < len; i++) { + if (i >= sizeof(struct sockaddr_un)) + break; + + path[i] = unix_sk->addr->name->sun_path[i]; + } + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 441fa1c552c8d8..1b1187d2967bd4 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -5,6 +5,8 @@ #include #include +#define __read_mostly SEC(".data.read_mostly") + struct s { int a; long long b; @@ -40,9 +42,20 @@ int kern_ver = 0; struct s out5 = {}; + +const volatile int in_dynarr_sz SEC(".rodata.dyn"); +const volatile int in_dynarr[4] SEC(".rodata.dyn") = { -1, -2, -3, -4 }; + +int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 }; + +int read_mostly_var __read_mostly; +int out_mostly_var; + SEC("raw_tp/sys_enter") int handler(const void *ctx) { + int i; + out1 = in1; out2 = in2; out3 = in3; @@ -53,6 +66,11 @@ int handler(const void *ctx) bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; + for (i = 0; i < in_dynarr_sz; i++) + out_dynarr[i] = in_dynarr[i]; + + out_mostly_var = read_mostly_var; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 1858435de7aaf9..2966564b8497aa 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -361,5 +361,4 @@ int bpf_prog10(struct sk_msg_md *msg) return SK_DROP; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c index a1cc58b10c7c21..325c9f193432aa 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c @@ -56,7 +56,7 @@ int prog_stream_verdict(struct __sk_buff *skb) return verdict; } -SEC("sk_skb/skb_verdict") +SEC("sk_skb") int prog_skb_verdict(struct __sk_buff *skb) { unsigned int *count; @@ -116,5 +116,4 @@ int prog_reuseport(struct sk_reuseport_md *reuse) return verdict; } -int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c index 2d31f66e4f2361..3c69aa971738e6 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c @@ -9,7 +9,7 @@ struct { __type(value, __u64); } sock_map SEC(".maps"); -SEC("sk_skb/skb_verdict") +SEC("sk_skb") int prog_skb_verdict(struct __sk_buff *skb) { return SK_DROP; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c index 9d0c9f28cab2ed..6d64ea536e3d81 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_update.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c @@ -24,7 +24,7 @@ struct { __type(value, __u64); } dst_sock_hash SEC(".maps"); -SEC("classifier/copy_sock_map") +SEC("tc") int copy_sock_map(void *ctx) { struct bpf_sock *sk; diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c index 0cf0134631b4ba..36a707e7c7a7ab 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c @@ -28,8 +28,8 @@ struct { __uint(type, BPF_MAP_TYPE_STACK_TRACE); __uint(max_entries, 128); __uint(map_flags, BPF_F_STACK_BUILD_ID); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(stack_trace_t)); + __type(key, __u32); + __type(value, stack_trace_t); } stackmap SEC(".maps"); struct { @@ -73,4 +73,3 @@ int oncpu(struct random_urandom_args *args) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c index 00ed486726204b..a8233e7f173bcc 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c @@ -27,8 +27,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH]; struct { __uint(type, BPF_MAP_TYPE_STACK_TRACE); __uint(max_entries, 16384); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(stack_trace_t)); + __type(key, __u32); + __type(value, stack_trace_t); } stackmap SEC(".maps"); struct { diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c index 18a3a7ed924a89..d28ca8d1f3d070 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_bpf.c +++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c @@ -5,7 +5,7 @@ /* Dummy prog to test TC-BPF API */ -SEC("classifier") +SEC("tc") int cls(struct __sk_buff *skb) { return 0; diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c index 0c93d326a663f9..3e32ea375ab4ed 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c @@ -70,7 +70,7 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb, return v6_equal(ip6h->daddr, addr); } -SEC("classifier/chk_egress") +SEC("tc") int tc_chk(struct __sk_buff *skb) { void *data_end = ctx_ptr(skb->data_end); @@ -83,7 +83,7 @@ int tc_chk(struct __sk_buff *skb) return !raw[0] && !raw[1] && !raw[2] ? 
TC_ACT_SHOT : TC_ACT_OK; } -SEC("classifier/dst_ingress") +SEC("tc") int tc_dst(struct __sk_buff *skb) { __u8 zero[ETH_ALEN * 2]; @@ -108,7 +108,7 @@ int tc_dst(struct __sk_buff *skb) return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0); } -SEC("classifier/src_ingress") +SEC("tc") int tc_src(struct __sk_buff *skb) { __u8 zero[ETH_ALEN * 2]; diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c index f7ab69cf018e5a..ec4cce19362de4 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c @@ -75,7 +75,7 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb, return 0; } -SEC("classifier/chk_egress") +SEC("tc") int tc_chk(struct __sk_buff *skb) { void *data_end = ctx_ptr(skb->data_end); @@ -143,13 +143,13 @@ static __always_inline int tc_redir(struct __sk_buff *skb) /* these are identical, but keep them separate for compatibility with the * section names expected by test_tc_redirect.sh */ -SEC("classifier/dst_ingress") +SEC("tc") int tc_dst(struct __sk_buff *skb) { return tc_redir(skb); } -SEC("classifier/src_ingress") +SEC("tc") int tc_src(struct __sk_buff *skb) { return tc_redir(skb); diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c index fe818cd5f0109f..365eacb5dc3412 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_peer.c +++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c @@ -16,31 +16,31 @@ volatile const __u32 IFINDEX_DST; static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}; static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66}; -SEC("classifier/chk_egress") +SEC("tc") int tc_chk(struct __sk_buff *skb) { return TC_ACT_SHOT; } -SEC("classifier/dst_ingress") +SEC("tc") int tc_dst(struct __sk_buff *skb) { return bpf_redirect_peer(IFINDEX_SRC, 0); } -SEC("classifier/src_ingress") +SEC("tc") int tc_src(struct __sk_buff *skb) { return bpf_redirect_peer(IFINDEX_DST, 0); } -SEC("classifier/dst_ingress_l3") +SEC("tc") int tc_dst_l3(struct __sk_buff *skb) { return bpf_redirect(IFINDEX_SRC, 0); } -SEC("classifier/src_ingress_l3") +SEC("tc") int tc_src_l3(struct __sk_buff *skb) { __u16 proto = skb->protocol; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c index 47cbe2eeae431b..cd747cd93dbe81 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c @@ -148,7 +148,7 @@ static __always_inline void check_syncookie(void *ctx, void *data, bpf_sk_release(sk); } -SEC("clsact/check_syncookie") +SEC("tc") int check_syncookie_clsact(struct __sk_buff *skb) { check_syncookie(skb, (void *)(long)skb->data, @@ -156,7 +156,7 @@ int check_syncookie_clsact(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("xdp/check_syncookie") +SEC("xdp") int check_syncookie_xdp(struct xdp_md *ctx) { check_syncookie(ctx, (void *)(long)ctx->data, diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index adc83a54c35270..2c5c602c60114b 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -255,4 +255,3 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by 
tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c index 678bd0fad29edd..5f4e87ee949ad1 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c @@ -594,7 +594,7 @@ static int handle_parse_hdr(struct bpf_sock_ops *skops) return CG_OK; } -SEC("sockops/estab") +SEC("sockops") int estab(struct bpf_sock_ops *skops) { int true_val = 1; diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 94f50f7e94d64e..3ded052807576e 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -16,7 +16,6 @@ #include "test_tcpbpf.h" struct tcpbpf_globals global = {}; -int _version SEC("version") = 1; /** * SOL_TCP is defined in while diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c index ac63410bb54136..540181c115a85a 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c @@ -24,12 +24,10 @@ struct { struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __uint(max_entries, 2); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(__u32)); + __type(key, int); + __type(value, __u32); } perf_event_map SEC(".maps"); -int _version SEC("version") = 1; - SEC("sockops") int bpf_testcb(struct bpf_sock_ops *skops) { diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c index 4b825ee122cf8c..ce6974016f53fa 100644 --- a/tools/testing/selftests/bpf/progs/test_tracepoint.c +++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c @@ -23,4 +23,3 @@ int oncpu(struct sched_switch_args *ctx) } char _license[] SEC("license") = "GPL"; -__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index e7b67311743641..ef0dde83b85acd 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -26,8 +26,6 @@ bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \ } while (0) -int _version SEC("version") = 1; - struct geneve_opt { __be16 opt_class; __u8 type; diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index 31f9bce37491a6..d7a9a74b724539 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -20,8 +20,6 @@ #include #include "test_iptunnel_common.h" -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 256); @@ -210,7 +208,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp) return XDP_TX; } -SEC("xdp_tx_iptunnel") +SEC("xdp") int _xdp_tx_iptunnel(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index 3d66599eee2ec9..199c61b7d06286 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -2,7 +2,7 @@ #include #include -SEC("xdp_adjust_tail_grow") +SEC("xdp") 
int _xdp_adjust_tail_grow(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c index 22065a9cfb254e..b7448253d1359b 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c @@ -9,9 +9,7 @@ #include #include -int _version SEC("version") = 1; - -SEC("xdp_adjust_tail_shrink") +SEC("xdp") int _xdp_adjust_tail_shrink(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c index a038e827f850a0..58cf4345f5cc9c 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c @@ -36,8 +36,8 @@ struct meta { struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); + __type(key, int); + __type(value, int); } perf_buf_map SEC(".maps"); __u64 test_result_fentry = 0; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c index b360ba2bd44114..807bf895f42ca8 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c @@ -5,7 +5,7 @@ #include #include -SEC("xdp_dm_log") +SEC("xdp") int xdpdm_devlog(struct xdp_md *ctx) { char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_link.c b/tools/testing/selftests/bpf/progs/test_xdp_link.c index eb93ea95d1d8f1..ee7d6ac0f61513 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_link.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_link.c @@ -5,7 +5,7 @@ char LICENSE[] SEC("license") = "GPL"; -SEC("xdp/handler") +SEC("xdp") int xdp_handler(struct xdp_md *xdp) { return 0; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c index fcabcda30ba320..c98fb44156f04d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -16,8 +16,6 @@ #include #include "test_iptunnel_common.h" -int _version SEC("version") = 1; - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 256); @@ -206,7 +204,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp) return XDP_TX; } -SEC("xdp_tx_iptunnel") +SEC("xdp") int _xdp_tx_iptunnel(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 3a67921f62b52c..596c4e71bf3ac2 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -797,7 +797,7 @@ static int process_packet(void *data, __u64 off, void *data_end, return XDP_DROP; } -SEC("xdp-test-v4") +SEC("xdp") int balancer_ingress_v4(struct xdp_md *ctx) { void *data = (void *)(long)ctx->data; @@ -816,7 +816,7 @@ int balancer_ingress_v4(struct xdp_md *ctx) return XDP_DROP; } -SEC("xdp-test-v6") +SEC("xdp") int balancer_ingress_v6(struct xdp_md *ctx) { void *data = (void *)(long)ctx->data; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c index 
a5337cd9400beb..b778cad454852e 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c @@ -12,8 +12,6 @@ #include #include -int _version SEC("version") = 1; - SEC("redirect_to_111") int xdp_redirect_to_111(struct xdp_md *xdp) { diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c index 59ee4f182ff800..532025057711e2 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c @@ -12,13 +12,13 @@ struct { __uint(max_entries, 4); } cpu_map SEC(".maps"); -SEC("xdp_redir") +SEC("xdp") int xdp_redir_prog(struct xdp_md *ctx) { return bpf_redirect_map(&cpu_map, 1, 0); } -SEC("xdp_dummy") +SEC("xdp") int xdp_dummy_prog(struct xdp_md *ctx) { return XDP_PASS; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c index 0ac08649772228..1e6b9c38ea6d92 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c @@ -9,7 +9,7 @@ struct { __uint(max_entries, 4); } dm_ports SEC(".maps"); -SEC("xdp_redir") +SEC("xdp") int xdp_redir_prog(struct xdp_md *ctx) { return bpf_redirect_map(&dm_ports, 1, 0); @@ -18,7 +18,7 @@ int xdp_redir_prog(struct xdp_md *ctx) /* invalid program on DEVMAP entry; * SEC name means expected attach type not set */ -SEC("xdp_dummy") +SEC("xdp") int xdp_dummy_prog(struct xdp_md *ctx) { return XDP_PASS; diff --git a/tools/testing/selftests/bpf/progs/trace_vprintk.c b/tools/testing/selftests/bpf/progs/trace_vprintk.c new file mode 100644 index 00000000000000..d327241ba04754 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/trace_vprintk.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "vmlinux.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +int null_data_vprintk_ret = 0; +int trace_vprintk_ret = 0; +int trace_vprintk_ran = 0; + +SEC("fentry/__x64_sys_nanosleep") +int sys_enter(void *ctx) +{ + static const char one[] = "1"; + static const char three[] = "3"; + static const char five[] = "5"; + static const char seven[] = "7"; + static const char nine[] = "9"; + static const char f[] = "%pS\n"; + + /* runner doesn't search for \t, just ensure it compiles */ + bpf_printk("\t"); + + trace_vprintk_ret = __bpf_vprintk("%s,%d,%s,%d,%s,%d,%s,%d,%s,%d %d\n", + one, 2, three, 4, five, 6, seven, 8, nine, 10, ++trace_vprintk_ran); + + /* non-NULL fmt w/ NULL data should result in error */ + null_data_vprintk_ret = bpf_trace_vprintk(f, sizeof(f), NULL, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/twfw.c b/tools/testing/selftests/bpf/progs/twfw.c new file mode 100644 index 00000000000000..de1b18a62b467b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/twfw.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include + +#define TWFW_MAX_TIERS (64) +/* + * load is successful + * #define TWFW_MAX_TIERS (64u)$ + */ + +struct twfw_tier_value { + unsigned long mask[1]; +}; + +struct rule { + uint8_t seqnum; +}; + +struct rules_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, struct rule); + __uint(max_entries, 1); +}; + +struct tiers_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + 
__type(key, __u32); + __type(value, struct twfw_tier_value); + __uint(max_entries, 1); +}; + +struct rules_map rules SEC(".maps"); +struct tiers_map tiers SEC(".maps"); + +SEC("cgroup_skb/ingress") +int twfw_verifier(struct __sk_buff* skb) +{ + const uint32_t key = 0; + const struct twfw_tier_value* tier = bpf_map_lookup_elem(&tiers, &key); + if (!tier) + return 1; + + struct rule* rule = bpf_map_lookup_elem(&rules, &key); + if (!rule) + return 1; + + if (rule && rule->seqnum < TWFW_MAX_TIERS) { + /* rule->seqnum / 64 should always be 0 */ + unsigned long mask = tier->mask[rule->seqnum / 64]; + if (mask) + return 0; + } + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c index ea25e88819928d..d988b2e0cee840 100644 --- a/tools/testing/selftests/bpf/progs/xdp_dummy.c +++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c @@ -4,7 +4,7 @@ #include #include -SEC("xdp_dummy") +SEC("xdp") int xdp_dummy_prog(struct xdp_md *ctx) { return XDP_PASS; diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c index 880debcbcd65de..8395782b6e0a3c 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c @@ -34,7 +34,7 @@ struct { __uint(max_entries, 128); } mac_map SEC(".maps"); -SEC("xdp_redirect_map_multi") +SEC("xdp") int xdp_redirect_map_multi_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; @@ -63,7 +63,7 @@ int xdp_redirect_map_multi_prog(struct xdp_md *ctx) } /* The following 2 progs are for 2nd devmap prog testing */ -SEC("xdp_redirect_map_ingress") +SEC("xdp") int xdp_redirect_map_all_prog(struct xdp_md *ctx) { return bpf_redirect_map(&map_egress, 0, diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c index 6b9ca40bd1f4f8..4ad73847b8a5de 100644 --- a/tools/testing/selftests/bpf/progs/xdping_kern.c +++ b/tools/testing/selftests/bpf/progs/xdping_kern.c @@ -86,7 +86,7 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type) return XDP_TX; } -SEC("xdpclient") +SEC("xdp") int xdping_client(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; @@ -150,7 +150,7 @@ int xdping_client(struct xdp_md *ctx) return XDP_TX; } -SEC("xdpserver") +SEC("xdp") int xdping_server(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c new file mode 100644 index 00000000000000..7a891a0c3a39cf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdpwall.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum pkt_parse_err { + NO_ERR, + BAD_IP6_HDR, + BAD_IP4GUE_HDR, + BAD_IP6GUE_HDR, +}; + +enum pkt_flag { + TUNNEL = 0x1, + TCP_SYN = 0x2, + QUIC_INITIAL_FLAG = 0x4, + TCP_ACK = 0x8, + TCP_RST = 0x10 +}; + +struct v4_lpm_key { + __u32 prefixlen; + __u32 src; +}; + +struct v4_lpm_val { + struct v4_lpm_key key; + __u8 val; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16); + __type(key, struct in6_addr); + __type(value, bool); +} v6_addr_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16); + __type(key, __u32); + __type(value, bool); 
+} v4_addr_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(max_entries, 16); + __uint(key_size, sizeof(struct v4_lpm_key)); + __uint(value_size, sizeof(struct v4_lpm_val)); + __uint(map_flags, BPF_F_NO_PREALLOC); +} v4_lpm_val_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 16); + __type(key, int); + __type(value, __u8); +} tcp_port_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 16); + __type(key, int); + __type(value, __u16); +} udp_port_map SEC(".maps"); + +enum ip_type { V4 = 1, V6 = 2 }; + +struct fw_match_info { + __u8 v4_src_ip_match; + __u8 v6_src_ip_match; + __u8 v4_src_prefix_match; + __u8 v4_dst_prefix_match; + __u8 tcp_dp_match; + __u16 udp_sp_match; + __u16 udp_dp_match; + bool is_tcp; + bool is_tcp_syn; +}; + +struct pkt_info { + enum ip_type type; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } ip; + int sport; + int dport; + __u16 trans_hdr_offset; + __u8 proto; + __u8 flags; +}; + +static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end) +{ + struct ethhdr *eth = data; + + if (eth + 1 > data_end) + return NULL; + + return eth; +} + +static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr) +{ + __u8 *leaf; + + leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr); + + return leaf ? *leaf : 0; +} + +static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr) +{ + __u8 *leaf; + + leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr); + + return leaf ? *leaf : 0; +} + +static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr) +{ + struct v4_lpm_key v4_key = {}; + struct v4_lpm_val *lpm_val; + + v4_key.src = ipaddr; + v4_key.prefixlen = 32; + + lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key); + + return lpm_val ? 
lpm_val->val : 0; +} + + +static __always_inline void +filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info) +{ + if (info->type == V6) { + match_info->v6_src_ip_match = + filter_ipv6_addr(&info->ip.ipv6->saddr); + } else if (info->type == V4) { + match_info->v4_src_ip_match = + filter_ipv4_addr(info->ip.ipv4->saddr); + match_info->v4_src_prefix_match = + filter_ipv4_lpm(info->ip.ipv4->saddr); + match_info->v4_dst_prefix_match = + filter_ipv4_lpm(info->ip.ipv4->daddr); + } +} + +static __always_inline void * +get_transport_hdr(__u16 offset, void *data, void *data_end) +{ + if (offset > 255 || data + offset > data_end) + return NULL; + + return data + offset; +} + +static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp, + __u32 FLAG) +{ + return (tcp_flag_word(tcp) & + (TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG; +} + +static __always_inline void set_tcp_flags(struct pkt_info *info, + struct tcphdr *tcp) { + if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN)) + info->flags |= TCP_SYN; + else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK)) + info->flags |= TCP_ACK; + else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST)) + info->flags |= TCP_RST; +} + +static __always_inline bool +parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end) +{ + struct tcphdr *tcp = transport_hdr; + + if (tcp + 1 > data_end) + return false; + + info->sport = bpf_ntohs(tcp->source); + info->dport = bpf_ntohs(tcp->dest); + set_tcp_flags(info, tcp); + + return true; +} + +static __always_inline bool +parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end) +{ + struct udphdr *udp = transport_hdr; + + if (udp + 1 > data_end) + return false; + + info->sport = bpf_ntohs(udp->source); + info->dport = bpf_ntohs(udp->dest); + + return true; +} + +static __always_inline __u8 filter_tcp_port(int port) +{ + __u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port); + + return leaf ? *leaf : 0; +} + +static __always_inline __u16 filter_udp_port(int port) +{ + __u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port); + + return leaf ? 
*leaf : 0; +} + +static __always_inline bool +filter_transport_hdr(void *transport_hdr, void *data_end, + struct pkt_info *info, struct fw_match_info *match_info) +{ + if (info->proto == IPPROTO_TCP) { + if (!parse_tcp(info, transport_hdr, data_end)) + return false; + + match_info->is_tcp = true; + match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0; + + match_info->tcp_dp_match = filter_tcp_port(info->dport); + } else if (info->proto == IPPROTO_UDP) { + if (!parse_udp(info, transport_hdr, data_end)) + return false; + + match_info->udp_dp_match = filter_udp_port(info->dport); + match_info->udp_sp_match = filter_udp_port(info->sport); + } + + return true; +} + +static __always_inline __u8 +parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end) +{ + struct udphdr *udp = (struct udphdr *)(ip6h + 1); + void *encap_data = udp + 1; + + if (udp + 1 > data_end) + return BAD_IP6_HDR; + + if (udp->dest != bpf_htons(6666)) + return NO_ERR; + + info->flags |= TUNNEL; + + if (encap_data + 1 > data_end) + return BAD_IP6GUE_HDR; + + if (*(__u8 *)encap_data & 0x30) { + struct ipv6hdr *inner_ip6h = encap_data; + + if (inner_ip6h + 1 > data_end) + return BAD_IP6GUE_HDR; + + info->type = V6; + info->proto = inner_ip6h->nexthdr; + info->ip.ipv6 = inner_ip6h; + info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr); + } else { + struct iphdr *inner_ip4h = encap_data; + + if (inner_ip4h + 1 > data_end) + return BAD_IP6GUE_HDR; + + info->type = V4; + info->proto = inner_ip4h->protocol; + info->ip.ipv4 = inner_ip4h; + info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr); + } + + return NO_ERR; +} + +static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info, + void *data, void *data_end) +{ + struct ipv6hdr *ip6h = data + sizeof(struct ethhdr); + + if (ip6h + 1 > data_end) + return BAD_IP6_HDR; + + info->proto = ip6h->nexthdr; + info->ip.ipv6 = ip6h; + info->type = V6; + info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); + + if (info->proto == IPPROTO_UDP) + return parse_gue_v6(info, ip6h, data_end); + + return NO_ERR; +} + +SEC("xdp") +int edgewall(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)(ctx->data_end); + void *data = (void *)(long)(ctx->data); + struct fw_match_info match_info = {}; + struct pkt_info info = {}; + __u8 parse_err = NO_ERR; + void *transport_hdr; + struct ethhdr *eth; + bool filter_res; + __u32 proto; + + eth = parse_ethhdr(data, data_end); + if (!eth) + return XDP_DROP; + + proto = eth->h_proto; + if (proto != bpf_htons(ETH_P_IPV6)) + return XDP_DROP; + + if (parse_ipv6_gue(&info, data, data_end)) + return XDP_DROP; + + if (info.proto == IPPROTO_ICMPV6) + return XDP_PASS; + + if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP) + return XDP_DROP; + + filter_src_dst_ip(&info, &match_info); + + transport_hdr = get_transport_hdr(info.trans_hdr_offset, data, + data_end); + if (!transport_hdr) + return XDP_DROP; + + filter_res = filter_transport_hdr(transport_hdr, data_end, + &info, &match_info); + if (!filter_res) + return XDP_DROP; + + if (match_info.is_tcp && !match_info.is_tcp_syn) + return XDP_PASS; + + return XDP_DROP; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py index 4fed2dc25c0aa1..1c2408ee1f5d00 100644 --- a/tools/testing/selftests/bpf/test_bpftool.py +++ b/tools/testing/selftests/bpf/test_bpftool.py @@ -57,6 +57,11 @@ def wrapper(*args, **kwargs): return f(*args, iface, **kwargs) 
return wrapper +DMESG_EMITTING_HELPERS = [ + "bpf_probe_write_user", + "bpf_trace_printk", + "bpf_trace_vprintk", + ] class TestBpftool(unittest.TestCase): @classmethod @@ -67,10 +72,7 @@ def setUpClass(cls): @default_iface def test_feature_dev_json(self, iface): - unexpected_helpers = [ - "bpf_probe_write_user", - "bpf_trace_printk", - ] + unexpected_helpers = DMESG_EMITTING_HELPERS expected_keys = [ "syscall_config", "program_types", @@ -94,10 +96,7 @@ def test_feature_kernel(self): bpftool_json(["feature", "probe"]), bpftool_json(["feature"]), ] - unexpected_helpers = [ - "bpf_probe_write_user", - "bpf_trace_printk", - ] + unexpected_helpers = DMESG_EMITTING_HELPERS expected_keys = [ "syscall_config", "system_config", @@ -121,10 +120,7 @@ def test_feature_kernel_full(self): bpftool_json(["feature", "probe", "kernel", "full"]), bpftool_json(["feature", "probe", "full"]), ] - expected_helpers = [ - "bpf_probe_write_user", - "bpf_trace_printk", - ] + expected_helpers = DMESG_EMITTING_HELPERS for tc in test_cases: # Check if expected helpers are included at least once in any @@ -157,7 +153,7 @@ def test_feature_kernel_full_vs_not_full(self): not_full_set.add(helper) self.assertCountEqual(full_set - not_full_set, - {"bpf_probe_write_user", "bpf_trace_printk"}) + set(DMESG_EMITTING_HELPERS)) self.assertCountEqual(not_full_set - full_set, set()) def test_feature_macros(self): diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh index b03a875715920f..1453a53ed547d9 100755 --- a/tools/testing/selftests/bpf/test_bpftool_build.sh +++ b/tools/testing/selftests/bpf/test_bpftool_build.sh @@ -90,6 +90,10 @@ echo -e "... through kbuild\n" if [ -f ".config" ] ; then make_and_clean tools/bpf + ## "make tools/bpf" sets $(OUTPUT) to ...tools/bpf/runqslower for + ## runqslower, but the default (used for the "clean" target) is .output. + ## Let's make sure we clean runqslower's directory properly. + make -C tools/bpf/runqslower OUTPUT=${KDIR_ROOT_DIR}/tools/bpf/runqslower/ clean ## $OUTPUT is overwritten in kbuild Makefile, and thus cannot be passed ## down from toplevel Makefile to bpftool's Makefile. diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h index e2394eea4b7f83..32c7a57867da23 100644 --- a/tools/testing/selftests/bpf/test_btf.h +++ b/tools/testing/selftests/bpf/test_btf.h @@ -69,4 +69,7 @@ #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) +#define BTF_DECL_TAG_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) + #endif /* _TEST_BTF_H */ diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh index 174b72a64a4c57..dbd91221727d8f 100755 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ b/tools/testing/selftests/bpf/test_flow_dissector.sh @@ -26,22 +26,22 @@ if [[ -z $(ip netns identify $$) ]]; then type flow_dissector if ! 
unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Unexpected unsuccessful attach in namespace" >&2 err=1 fi - $bpftool prog attach pinned /sys/fs/bpf/flow/flow_dissector \ + $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \ flow_dissector if unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Unexpected successful attach in namespace" >&2 err=1 fi if ! $bpftool prog detach pinned \ - /sys/fs/bpf/flow/flow_dissector flow_dissector; then + /sys/fs/bpf/flow/_dissect flow_dissector; then echo "Failed to detach flow dissector" >&2 err=1 fi @@ -95,7 +95,7 @@ else fi # Attach BPF program -./flow_dissector_load -p bpf_flow.o -s flow_dissector +./flow_dissector_load -p bpf_flow.o -s _dissect # Setup tc qdisc add dev lo ingress diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index cc1cd240445d27..c65986bd9d0703 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -12,6 +12,11 @@ #include #include /* backtrace */ #include +#include /* get_nprocs */ +#include +#include +#include +#include /* Adapted from perf/util/string.c */ static bool glob_match(const char *str, const char *pat) @@ -45,9 +50,12 @@ struct prog_test_def { const char *test_name; int test_num; void (*run_test)(void); + void (*run_serial_test)(void); bool force_log; int error_cnt; int skip_cnt; + int sub_succ_cnt; + bool should_run; bool tested; bool need_cgroup_cleanup; @@ -97,6 +105,10 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) if (stdout == env.stdout) return; + /* worker always holds log */ + if (env.worker_id != -1) + return; + fflush(stdout); /* exports env.log_buf & env.log_cnt */ if (env.verbosity > VERBOSE_NONE || test->force_log || failed) { @@ -107,8 +119,6 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) fprintf(env.stdout, "\n"); } } - - fseeko(stdout, 0, SEEK_SET); /* rewind */ } static void skip_account(void) @@ -124,7 +134,8 @@ static void stdio_restore(void); /* A bunch of tests set custom affinity per-thread and/or per-process. Reset * it after each test/sub-test. */ -static void reset_affinity() { +static void reset_affinity(void) +{ cpu_set_t cpuset; int i, err; @@ -165,21 +176,21 @@ static void restore_netns(void) } } -void test__end_subtest() +void test__end_subtest(void) { struct prog_test_def *test = env.test; int sub_error_cnt = test->error_cnt - test->old_error_cnt; dump_test_log(test, sub_error_cnt); - fprintf(env.stdout, "#%d/%d %s/%s:%s\n", + fprintf(stdout, "#%d/%d %s/%s:%s\n", test->test_num, test->subtest_num, test->test_name, test->subtest_name, sub_error_cnt ? "FAIL" : (test->skip_cnt ? 
"SKIP" : "OK")); if (sub_error_cnt) - env.fail_cnt++; + test->error_cnt++; else if (test->skip_cnt == 0) - env.sub_succ_cnt++; + test->sub_succ_cnt++; skip_account(); free(test->subtest_name); @@ -217,7 +228,8 @@ bool test__start_subtest(const char *name) return true; } -void test__force_log() { +void test__force_log(void) +{ env.test->force_log = true; } @@ -370,7 +382,7 @@ int extract_build_id(char *build_id, size_t size) if (getline(&line, &len, fp) == -1) goto err; - fclose(fp); + pclose(fp); if (len > size) len = size; @@ -379,7 +391,7 @@ int extract_build_id(char *build_id, size_t size) free(line); return 0; err: - fclose(fp); + pclose(fp); return -1; } @@ -446,14 +458,17 @@ static int load_bpf_testmod(void) } /* extern declarations for test funcs */ -#define DEFINE_TEST(name) extern void test_##name(void); +#define DEFINE_TEST(name) \ + extern void test_##name(void) __weak; \ + extern void serial_test_##name(void) __weak; #include #undef DEFINE_TEST static struct prog_test_def prog_test_defs[] = { -#define DEFINE_TEST(name) { \ - .test_name = #name, \ - .run_test = &test_##name, \ +#define DEFINE_TEST(name) { \ + .test_name = #name, \ + .run_test = &test_##name, \ + .run_serial_test = &serial_test_##name, \ }, #include #undef DEFINE_TEST @@ -474,6 +489,8 @@ enum ARG_KEYS { ARG_LIST_TEST_NAMES = 'l', ARG_TEST_NAME_GLOB_ALLOWLIST = 'a', ARG_TEST_NAME_GLOB_DENYLIST = 'd', + ARG_NUM_WORKERS = 'j', + ARG_DEBUG = -1, }; static const struct argp_option opts[] = { @@ -495,6 +512,10 @@ static const struct argp_option opts[] = { "Run tests with name matching the pattern (supports '*' wildcard)." }, { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0, "Don't run tests with name matching the pattern (supports '*' wildcard)." }, + { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL, + "Number of workers to run in parallel, default to number of cpus." }, + { "debug", ARG_DEBUG, NULL, 0, + "print extra debug information for test_progs." 
}, {}, }; @@ -650,7 +671,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) fprintf(stderr, "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)", errno); - return -1; + return -EINVAL; } } @@ -661,6 +682,20 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case ARG_LIST_TEST_NAMES: env->list_test_names = true; break; + case ARG_NUM_WORKERS: + if (arg) { + env->workers = atoi(arg); + if (!env->workers) { + fprintf(stderr, "Invalid number of worker: %s.", arg); + return -EINVAL; + } + } else { + env->workers = get_nprocs(); + } + break; + case ARG_DEBUG: + env->debug = true; + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -678,7 +713,7 @@ static void stdio_hijack(void) env.stdout = stdout; env.stderr = stderr; - if (env.verbosity > VERBOSE_NONE) { + if (env.verbosity > VERBOSE_NONE && env.worker_id == -1) { /* nothing to do, output to stdout by default */ return; } @@ -704,10 +739,6 @@ static void stdio_restore(void) return; fclose(stdout); - free(env.log_buf); - - env.log_buf = NULL; - env.log_cnt = 0; stdout = env.stdout; stderr = env.stderr; @@ -743,6 +774,45 @@ int cd_flavor_subdir(const char *exec_name) return chdir(flavor); } +int trigger_module_test_read(int read_sz) +{ + int fd, err; + + fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY); + err = -errno; + if (!ASSERT_GE(fd, 0, "testmod_file_open")) + return err; + + read(fd, NULL, read_sz); + close(fd); + + return 0; +} + +int trigger_module_test_write(int write_sz) +{ + int fd, err; + char *buf = malloc(write_sz); + + if (!buf) + return -ENOMEM; + + memset(buf, 'a', write_sz); + buf[write_sz-1] = '\0'; + + fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY); + err = -errno; + if (!ASSERT_GE(fd, 0, "testmod_file_open")) { + free(buf); + return err; + } + + write(fd, buf, write_sz); + close(fd); + free(buf); + return 0; +} + #define MAX_BACKTRACE_SZ 128 void crash_handler(int signum) { @@ -755,11 +825,498 @@ void crash_handler(int signum) dump_test_log(env.test, true); if (env.stdout) stdio_restore(); - + if (env.worker_id != -1) + fprintf(stderr, "[%d]: ", env.worker_id); fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum); backtrace_symbols_fd(bt, sz, STDERR_FILENO); } +static void sigint_handler(int signum) +{ + int i; + + for (i = 0; i < env.workers; i++) + if (env.worker_socks[i] > 0) + close(env.worker_socks[i]); +} + +static int current_test_idx; +static pthread_mutex_t current_test_lock; +static pthread_mutex_t stdout_output_lock; + +struct test_result { + int error_cnt; + int skip_cnt; + int sub_succ_cnt; + + size_t log_cnt; + char *log_buf; +}; + +static struct test_result test_results[ARRAY_SIZE(prog_test_defs)]; + +static inline const char *str_msg(const struct msg *msg, char *buf) +{ + switch (msg->type) { + case MSG_DO_TEST: + sprintf(buf, "MSG_DO_TEST %d", msg->do_test.test_num); + break; + case MSG_TEST_DONE: + sprintf(buf, "MSG_TEST_DONE %d (log: %d)", + msg->test_done.test_num, + msg->test_done.have_log); + break; + case MSG_TEST_LOG: + sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)", + strlen(msg->test_log.log_buf), + msg->test_log.is_last); + break; + case MSG_EXIT: + sprintf(buf, "MSG_EXIT"); + break; + default: + sprintf(buf, "UNKNOWN"); + break; + } + + return buf; +} + +static int send_message(int sock, const struct msg *msg) +{ + char buf[256]; + + if (env.debug) + fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf)); + return send(sock, msg, sizeof(*msg), 0); +} + +static int recv_message(int sock, struct msg *msg) +{ + int ret; + char buf[256]; + + 
memset(msg, 0, sizeof(*msg)); + ret = recv(sock, msg, sizeof(*msg), 0); + if (ret >= 0) { + if (env.debug) + fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf)); + } + return ret; +} + +static void run_one_test(int test_num) +{ + struct prog_test_def *test = &prog_test_defs[test_num]; + + env.test = test; + + if (test->run_test) + test->run_test(); + else if (test->run_serial_test) + test->run_serial_test(); + + /* ensure last sub-test is finalized properly */ + if (test->subtest_name) + test__end_subtest(); + + test->tested = true; + + dump_test_log(test, test->error_cnt); + + reset_affinity(); + restore_netns(); + if (test->need_cgroup_cleanup) + cleanup_cgroup_environment(); +} + +struct dispatch_data { + int worker_id; + int sock_fd; +}; + +static void *dispatch_thread(void *ctx) +{ + struct dispatch_data *data = ctx; + int sock_fd; + FILE *log_fd = NULL; + + sock_fd = data->sock_fd; + + while (true) { + int test_to_run = -1; + struct prog_test_def *test; + struct test_result *result; + + /* grab a test */ + { + pthread_mutex_lock(¤t_test_lock); + + if (current_test_idx >= prog_test_cnt) { + pthread_mutex_unlock(¤t_test_lock); + goto done; + } + + test = &prog_test_defs[current_test_idx]; + test_to_run = current_test_idx; + current_test_idx++; + + pthread_mutex_unlock(¤t_test_lock); + } + + if (!test->should_run || test->run_serial_test) + continue; + + /* run test through worker */ + { + struct msg msg_do_test; + + msg_do_test.type = MSG_DO_TEST; + msg_do_test.do_test.test_num = test_to_run; + if (send_message(sock_fd, &msg_do_test) < 0) { + perror("Fail to send command"); + goto done; + } + env.worker_current_test[data->worker_id] = test_to_run; + } + + /* wait for test done */ + { + int err; + struct msg msg_test_done; + + err = recv_message(sock_fd, &msg_test_done); + if (err < 0) + goto error; + if (msg_test_done.type != MSG_TEST_DONE) + goto error; + if (test_to_run != msg_test_done.test_done.test_num) + goto error; + + test->tested = true; + result = &test_results[test_to_run]; + + result->error_cnt = msg_test_done.test_done.error_cnt; + result->skip_cnt = msg_test_done.test_done.skip_cnt; + result->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt; + + /* collect all logs */ + if (msg_test_done.test_done.have_log) { + log_fd = open_memstream(&result->log_buf, &result->log_cnt); + if (!log_fd) + goto error; + + while (true) { + struct msg msg_log; + + if (recv_message(sock_fd, &msg_log) < 0) + goto error; + if (msg_log.type != MSG_TEST_LOG) + goto error; + + fprintf(log_fd, "%s", msg_log.test_log.log_buf); + if (msg_log.test_log.is_last) + break; + } + fclose(log_fd); + log_fd = NULL; + } + /* output log */ + { + pthread_mutex_lock(&stdout_output_lock); + + if (result->log_cnt) { + result->log_buf[result->log_cnt] = '\0'; + fprintf(stdout, "%s", result->log_buf); + if (result->log_buf[result->log_cnt - 1] != '\n') + fprintf(stdout, "\n"); + } + + fprintf(stdout, "#%d %s:%s\n", + test->test_num, test->test_name, + result->error_cnt ? "FAIL" : (result->skip_cnt ? 
"SKIP" : "OK")); + + pthread_mutex_unlock(&stdout_output_lock); + } + + } /* wait for test done */ + } /* while (true) */ +error: + if (env.debug) + fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno)); + + if (log_fd) + fclose(log_fd); +done: + { + struct msg msg_exit; + + msg_exit.type = MSG_EXIT; + if (send_message(sock_fd, &msg_exit) < 0) { + if (env.debug) + fprintf(stderr, "[%d]: send_message msg_exit: %s.\n", + data->worker_id, strerror(errno)); + } + } + return NULL; +} + +static void print_all_error_logs(void) +{ + int i; + + if (env.fail_cnt) + fprintf(stdout, "\nAll error logs:\n"); + + /* print error logs again */ + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test; + struct test_result *result; + + test = &prog_test_defs[i]; + result = &test_results[i]; + + if (!test->tested || !result->error_cnt) + continue; + + fprintf(stdout, "\n#%d %s:%s\n", + test->test_num, test->test_name, + result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK")); + + if (result->log_cnt) { + result->log_buf[result->log_cnt] = '\0'; + fprintf(stdout, "%s", result->log_buf); + if (result->log_buf[result->log_cnt - 1] != '\n') + fprintf(stdout, "\n"); + } + } +} + +static int server_main(void) +{ + pthread_t *dispatcher_threads; + struct dispatch_data *data; + struct sigaction sigact_int = { + .sa_handler = sigint_handler, + .sa_flags = SA_RESETHAND, + }; + int i; + + sigaction(SIGINT, &sigact_int, NULL); + + dispatcher_threads = calloc(sizeof(pthread_t), env.workers); + data = calloc(sizeof(struct dispatch_data), env.workers); + + env.worker_current_test = calloc(sizeof(int), env.workers); + for (i = 0; i < env.workers; i++) { + int rc; + + data[i].worker_id = i; + data[i].sock_fd = env.worker_socks[i]; + rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]); + if (rc < 0) { + perror("Failed to launch dispatcher thread"); + exit(EXIT_ERR_SETUP_INFRA); + } + } + + /* wait for all dispatcher to finish */ + for (i = 0; i < env.workers; i++) { + while (true) { + int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL); + + if (!ret) { + break; + } else if (ret == EBUSY) { + if (env.debug) + fprintf(stderr, "Still waiting for thread %d (test %d).\n", + i, env.worker_current_test[i] + 1); + usleep(1000 * 1000); + continue; + } else { + fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret); + break; + } + } + } + free(dispatcher_threads); + free(env.worker_current_test); + free(data); + + /* run serial tests */ + save_netns(); + + for (int i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test = &prog_test_defs[i]; + struct test_result *result = &test_results[i]; + + if (!test->should_run || !test->run_serial_test) + continue; + + stdio_hijack(); + + run_one_test(i); + + stdio_restore(); + if (env.log_buf) { + result->log_cnt = env.log_cnt; + result->log_buf = strdup(env.log_buf); + + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + restore_netns(); + + fprintf(stdout, "#%d %s:%s\n", + test->test_num, test->test_name, + test->error_cnt ? "FAIL" : (test->skip_cnt ? 
"SKIP" : "OK")); + + result->error_cnt = test->error_cnt; + result->skip_cnt = test->skip_cnt; + result->sub_succ_cnt = test->sub_succ_cnt; + } + + /* generate summary */ + fflush(stderr); + fflush(stdout); + + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *current_test; + struct test_result *result; + + current_test = &prog_test_defs[i]; + result = &test_results[i]; + + if (!current_test->tested) + continue; + + env.succ_cnt += result->error_cnt ? 0 : 1; + env.skip_cnt += result->skip_cnt; + if (result->error_cnt) + env.fail_cnt++; + env.sub_succ_cnt += result->sub_succ_cnt; + } + + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", + env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); + + print_all_error_logs(); + + /* reap all workers */ + for (i = 0; i < env.workers; i++) { + int wstatus, pid; + + pid = waitpid(env.worker_pids[i], &wstatus, 0); + if (pid != env.worker_pids[i]) + perror("Unable to reap worker"); + } + + return 0; +} + +static int worker_main(int sock) +{ + save_netns(); + + while (true) { + /* receive command */ + struct msg msg; + + if (recv_message(sock, &msg) < 0) + goto out; + + switch (msg.type) { + case MSG_EXIT: + if (env.debug) + fprintf(stderr, "[%d]: worker exit.\n", + env.worker_id); + goto out; + case MSG_DO_TEST: { + int test_to_run; + struct prog_test_def *test; + struct msg msg_done; + + test_to_run = msg.do_test.test_num; + test = &prog_test_defs[test_to_run]; + + if (env.debug) + fprintf(stderr, "[%d]: #%d:%s running.\n", + env.worker_id, + test_to_run + 1, + test->test_name); + + stdio_hijack(); + + run_one_test(test_to_run); + + stdio_restore(); + + memset(&msg_done, 0, sizeof(msg_done)); + msg_done.type = MSG_TEST_DONE; + msg_done.test_done.test_num = test_to_run; + msg_done.test_done.error_cnt = test->error_cnt; + msg_done.test_done.skip_cnt = test->skip_cnt; + msg_done.test_done.sub_succ_cnt = test->sub_succ_cnt; + msg_done.test_done.have_log = false; + + if (env.verbosity > VERBOSE_NONE || test->force_log || test->error_cnt) { + if (env.log_cnt) + msg_done.test_done.have_log = true; + } + if (send_message(sock, &msg_done) < 0) { + perror("Fail to send message done"); + goto out; + } + + /* send logs */ + if (msg_done.test_done.have_log) { + char *src; + size_t slen; + + src = env.log_buf; + slen = env.log_cnt; + while (slen) { + struct msg msg_log; + char *dest; + size_t len; + + memset(&msg_log, 0, sizeof(msg_log)); + msg_log.type = MSG_TEST_LOG; + dest = msg_log.test_log.log_buf; + len = slen >= MAX_LOG_TRUNK_SIZE ? 
MAX_LOG_TRUNK_SIZE : slen; + memcpy(dest, src, len); + + src += len; + slen -= len; + if (!slen) + msg_log.test_log.is_last = true; + + assert(send_message(sock, &msg_log) >= 0); + } + } + if (env.log_buf) { + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + if (env.debug) + fprintf(stderr, "[%d]: #%d:%s done.\n", + env.worker_id, + test_to_run + 1, + test->test_name); + break; + } /* case MSG_DO_TEST */ + default: + if (env.debug) + fprintf(stderr, "[%d]: unknown message.\n", env.worker_id); + return -1; + } + } +out: + return 0; +} + int main(int argc, char **argv) { static const struct argp argp = { @@ -770,7 +1327,7 @@ int main(int argc, char **argv) struct sigaction sigact = { .sa_handler = crash_handler, .sa_flags = SA_RESETHAND, - }; + }; int err, i; sigaction(SIGSEGV, &sigact, NULL); @@ -798,21 +1355,84 @@ int main(int argc, char **argv) return -1; } - save_netns(); - stdio_hijack(); + env.stdout = stdout; + env.stderr = stderr; + env.has_testmod = true; if (!env.list_test_names && load_bpf_testmod()) { fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n"); env.has_testmod = false; } + + /* initializing tests */ for (i = 0; i < prog_test_cnt; i++) { struct prog_test_def *test = &prog_test_defs[i]; - env.test = test; test->test_num = i + 1; - - if (!should_run(&env.test_selector, + if (should_run(&env.test_selector, test->test_num, test->test_name)) + test->should_run = true; + else + test->should_run = false; + + if ((test->run_test == NULL && test->run_serial_test == NULL) || + (test->run_test != NULL && test->run_serial_test != NULL)) { + fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n", + test->test_num, test->test_name, test->test_name, test->test_name); + exit(EXIT_ERR_SETUP_INFRA); + } + } + + /* ignore workers if we are just listing */ + if (env.get_test_cnt || env.list_test_names) + env.workers = 0; + + /* launch workers if requested */ + env.worker_id = -1; /* main process */ + if (env.workers) { + env.worker_pids = calloc(sizeof(__pid_t), env.workers); + env.worker_socks = calloc(sizeof(int), env.workers); + if (env.debug) + fprintf(stdout, "Launching %d workers.\n", env.workers); + for (i = 0; i < env.workers; i++) { + int sv[2]; + pid_t pid; + + if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) { + perror("Fail to create worker socket"); + return -1; + } + pid = fork(); + if (pid < 0) { + perror("Failed to fork worker"); + return -1; + } else if (pid != 0) { /* main process */ + close(sv[1]); + env.worker_pids[i] = pid; + env.worker_socks[i] = sv[0]; + } else { /* inside each worker process */ + close(sv[0]); + env.worker_id = i; + return worker_main(sv[1]); + } + } + + if (env.worker_id == -1) { + server_main(); + goto out; + } + } + + /* The rest of the main process */ + + /* on single mode */ + save_netns(); + + for (i = 0; i < prog_test_cnt; i++) { + struct prog_test_def *test = &prog_test_defs[i]; + struct test_result *result; + + if (!test->should_run) continue; if (env.get_test_cnt) { @@ -826,33 +1446,35 @@ int main(int argc, char **argv) continue; } - test->run_test(); - /* ensure last sub-test is finalized properly */ - if (test->subtest_name) - test__end_subtest(); + stdio_hijack(); - test->tested = true; + run_one_test(i); - dump_test_log(test, test->error_cnt); + stdio_restore(); fprintf(env.stdout, "#%d %s:%s\n", test->test_num, test->test_name, test->error_cnt ? "FAIL" : (test->skip_cnt ? 
"SKIP" : "OK")); + result = &test_results[i]; + result->error_cnt = test->error_cnt; + if (env.log_buf) { + result->log_buf = strdup(env.log_buf); + result->log_cnt = env.log_cnt; + + free(env.log_buf); + env.log_buf = NULL; + env.log_cnt = 0; + } + if (test->error_cnt) env.fail_cnt++; else env.succ_cnt++; - skip_account(); - reset_affinity(); - restore_netns(); - if (test->need_cgroup_cleanup) - cleanup_cgroup_environment(); + skip_account(); + env.sub_succ_cnt += test->sub_succ_cnt; } - if (!env.list_test_names && env.has_testmod) - unload_bpf_testmod(); - stdio_restore(); if (env.get_test_cnt) { printf("%d\n", env.succ_cnt); @@ -865,14 +1487,18 @@ int main(int argc, char **argv) fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); + print_all_error_logs(); + + close(env.saved_netns_fd); out: + if (!env.list_test_names && env.has_testmod) + unload_bpf_testmod(); free_str_set(&env.test_selector.blacklist); free_str_set(&env.test_selector.whitelist); free(env.test_selector.num_set); free_str_set(&env.subtest_selector.blacklist); free_str_set(&env.subtest_selector.whitelist); free(env.subtest_selector.num_set); - close(env.saved_netns_fd); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) return EXIT_NO_TEST; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index c8c2bf878f67cc..93c1ff7055334f 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -62,6 +62,7 @@ struct test_env { struct test_selector test_selector; struct test_selector subtest_selector; bool verifier_stats; + bool debug; enum verbosity verbosity; bool jit_enabled; @@ -69,7 +70,8 @@ struct test_env { bool get_test_cnt; bool list_test_names; - struct prog_test_def *test; + struct prog_test_def *test; /* current running tests */ + FILE *stdout; FILE *stderr; char *log_buf; @@ -82,6 +84,38 @@ struct test_env { int skip_cnt; /* skipped tests */ int saved_netns_fd; + int workers; /* number of worker process */ + int worker_id; /* id number of current worker, main process is -1 */ + pid_t *worker_pids; /* array of worker pids */ + int *worker_socks; /* array of worker socks */ + int *worker_current_test; /* array of current running test for each worker */ +}; + +#define MAX_LOG_TRUNK_SIZE 8192 +enum msg_type { + MSG_DO_TEST = 0, + MSG_TEST_DONE = 1, + MSG_TEST_LOG = 2, + MSG_EXIT = 255, +}; +struct msg { + enum msg_type type; + union { + struct { + int test_num; + } do_test; + struct { + int test_num; + int sub_succ_cnt; + int error_cnt; + int skip_cnt; + bool have_log; + } test_done; + struct { + char log_buf[MAX_LOG_TRUNK_SIZE + 1]; + bool is_last; + } test_log; + }; }; extern struct test_env env; @@ -291,6 +325,8 @@ int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); int extract_build_id(char *build_id, size_t size); int kern_sync_rcu(void); +int trigger_module_test_read(int read_sz); +int trigger_module_test_write(int write_sz); #ifdef __x86_64__ #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep" @@ -299,3 +335,5 @@ int kern_sync_rcu(void); #else #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep" #endif + +#define BPF_TESTMOD_TEST_FILE "/sys/kernel/bpf_testmod" diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index eefd445b96fc72..1ba7e7346afb2b 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ 
b/tools/testing/selftests/bpf/test_sockmap.c @@ -139,6 +139,7 @@ struct sockmap_options { bool sendpage; bool data_test; bool drop_expected; + bool check_recved_len; int iov_count; int iov_length; int rate; @@ -556,8 +557,12 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, int err, i, flags = MSG_NOSIGNAL; bool drop = opt->drop_expected; bool data = opt->data_test; + int iov_alloc_length = iov_length; - err = msg_alloc_iov(&msg, iov_count, iov_length, data, tx); + if (!tx && opt->check_recved_len) + iov_alloc_length *= 2; + + err = msg_alloc_iov(&msg, iov_count, iov_alloc_length, data, tx); if (err) goto out_errno; if (peek_flag) { @@ -665,6 +670,13 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, s->bytes_recvd += recv; + if (opt->check_recved_len && s->bytes_recvd > total_bytes) { + errno = EMSGSIZE; + fprintf(stderr, "recv failed(), bytes_recvd:%zd, total_bytes:%f\n", + s->bytes_recvd, total_bytes); + goto out_errno; + } + if (data) { int chunk_sz = opt->sendpage ? iov_length * cnt : @@ -744,7 +756,8 @@ static int sendmsg_test(struct sockmap_options *opt) rxpid = fork(); if (rxpid == 0) { - iov_buf -= (txmsg_pop - txmsg_start_pop + 1); + if (txmsg_pop || txmsg_start_pop) + iov_buf -= (txmsg_pop - txmsg_start_pop + 1); if (opt->drop_expected || txmsg_ktls_skb_drop) _exit(0); @@ -1680,12 +1693,27 @@ static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt) { txmsg_pass = 1; skb_use_parser = 512; + if (ktls == 1) + skb_use_parser = 570; opt->iov_length = 256; opt->iov_count = 1; opt->rate = 2; test_exec(cgrp, opt); } +static void test_txmsg_ingress_parser2(int cgrp, struct sockmap_options *opt) +{ + if (ktls == 1) + return; + skb_use_parser = 10; + opt->iov_length = 20; + opt->iov_count = 1; + opt->rate = 1; + opt->check_recved_len = true; + test_exec(cgrp, opt); + opt->check_recved_len = false; +} + char *map_names[] = { "sock_map", "sock_map_txmsg", @@ -1784,7 +1812,8 @@ struct _test test[] = { {"txmsg test pull-data", test_txmsg_pull}, {"txmsg test pop-data", test_txmsg_pop}, {"txmsg test push/pop data", test_txmsg_push_pop}, - {"txmsg text ingress parser", test_txmsg_ingress_parser}, + {"txmsg test ingress parser", test_txmsg_ingress_parser}, + {"txmsg test ingress parser2", test_txmsg_ingress_parser2}, }; static int check_whitelist(struct _test *t, struct sockmap_options *opt) diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index a20a919244c0b3..a3bb6d399daa01 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -124,7 +124,7 @@ static struct sysctl_test tests[] = { .descr = "ctx:write sysctl:write read ok narrow", .insns = { /* u64 w = (u16)write & 1; */ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, offsetof(struct bpf_sysctl, write)), #else @@ -184,7 +184,7 @@ static struct sysctl_test tests[] = { .descr = "ctx:file_pos sysctl:read read ok narrow", .insns = { /* If (file_pos == X) */ -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, offsetof(struct bpf_sysctl, file_pos)), #else diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh index 9b3617d770a525..6413c147255460 100755 --- a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh +++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh 
@@ -76,8 +76,8 @@ DIR=$(dirname $0) TEST_IF=lo MAX_PING_TRIES=5 BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o" -CLSACT_SECTION="clsact/check_syncookie" -XDP_SECTION="xdp/check_syncookie" +CLSACT_SECTION="tc" +XDP_SECTION="xdp" BPF_PROG_ID=0 PROG="${DIR}/test_tcp_check_syncookie_user" diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index 1ccbe804e8e1cb..ca1372924023df 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -168,14 +168,15 @@ add_vxlan_tunnel() ip netns exec at_ns0 \ ip link set dev $DEV_NS address 52:54:00:d9:01:00 up ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 - ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00 + ip netns exec at_ns0 \ + ip neigh add 10.1.1.200 lladdr 52:54:00:d9:02:00 dev $DEV_NS ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF # root namespace ip link add dev $DEV type $TYPE external gbp dstport 4789 ip link set dev $DEV address 52:54:00:d9:02:00 up ip addr add dev $DEV 10.1.1.200/24 - arp -s 10.1.1.100 52:54:00:d9:01:00 + ip neigh add 10.1.1.100 lladdr 52:54:00:d9:01:00 dev $DEV } add_ip6vxlan_tunnel() diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3a9e332c5e360e..25afe423b3f06d 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -47,6 +47,10 @@ #include "test_btf.h" #include "../../../include/linux/filter.h" +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + #define MAX_INSNS BPF_MAXINSNS #define MAX_TEST_INSNS 1000000 #define MAX_FIXUPS 8 @@ -974,7 +978,7 @@ static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val, if (err) { switch (saved_errno) { - case 524/*ENOTSUPP*/: + case ENOTSUPP: printf("Did not run the program (not supported) "); return 0; case EPERM: @@ -1119,6 +1123,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv, goto close_fds; } + if (fd_prog < 0 && saved_errno == ENOTSUPP) { + printf("SKIP (program uses an unsupported feature)\n"); + skips++; + goto close_fds; + } + alignment_prevented_execution = 0; if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) { diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh index 637fcf4fe4e30a..d10cefd6eb0945 100755 --- a/tools/testing/selftests/bpf/test_xdp_meta.sh +++ b/tools/testing/selftests/bpf/test_xdp_meta.sh @@ -1,5 +1,8 @@ #!/bin/sh +# Kselftest framework requirement - SKIP code is 4. +readonly KSFT_SKIP=4 + cleanup() { if [ "$?" = "0" ]; then @@ -17,7 +20,7 @@ cleanup() ip link set dev lo xdp off 2>/dev/null > /dev/null if [ $? 
-ne 0 ];then echo "selftests: [SKIP] Could not run test without the ip xdp support" - exit 0 + exit $KSFT_SKIP fi set -e diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh index c033850886f441..57c8db9972a65f 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh @@ -52,8 +52,8 @@ test_xdp_redirect() return 0 fi - ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null - ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null + ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null + ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh index 1538373157e3c5..351955c2bdfd80 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh @@ -88,7 +88,7 @@ setup_ns() # Add a neigh entry for IPv4 ping test ip -n ns$i neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0 ip -n ns$i link set veth0 $mode obj \ - xdp_dummy.o sec xdp_dummy &> /dev/null || \ + xdp_dummy.o sec xdp &> /dev/null || \ { test_fail "Unable to load dummy xdp" && exit 1; } IFACES="$IFACES veth$i" veth_mac[$i]=$(ip link show veth$i | awk '/link\/ether/ {print $2}') diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh index 995278e684b6e4..a3a1eaee26ea60 100755 --- a/tools/testing/selftests/bpf/test_xdp_veth.sh +++ b/tools/testing/selftests/bpf/test_xdp_veth.sh @@ -107,9 +107,9 @@ ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0 ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1 ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2 -ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy +ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp -ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy +ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp trap cleanup EXIT diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh index bb8b0da91686f0..0cbc7604a2f814 100755 --- a/tools/testing/selftests/bpf/test_xdp_vlan.sh +++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 # Author: Jesper Dangaard Brouer +# Kselftest framework requirement - SKIP code is 4. +readonly KSFT_SKIP=4 + # Allow wrapper scripts to name test if [ -z "$TESTNAME" ]; then TESTNAME=xdp_vlan @@ -94,7 +97,7 @@ while true; do -h | --help ) usage; echo "selftests: $TESTNAME [SKIP] usage help info requested" - exit 0 + exit $KSFT_SKIP ;; * ) shift @@ -117,7 +120,7 @@ fi ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null if [ $? 
-ne 0 ]; then echo "selftests: $TESTNAME [SKIP] need ip xdp support" - exit 0 + exit $KSFT_SKIP fi # Interactive mode likely require us to cleanup netns diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index e7a19b04d4eaf8..7b7f918eda7768 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c index 1b1c798e924895..1b138cd2b187d9 100644 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ b/tools/testing/selftests/bpf/verifier/array_access.c @@ -186,7 +186,7 @@ }, .fixup_map_hash_48b = { 3 }, .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", + .errstr = "invalid access to map value, value_size=48 off=44 size=8", .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c index 6e52dfc644153f..c22dc83a41fdc4 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c +++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c @@ -119,3 +119,41 @@ }, .result = ACCEPT, }, +{ + "Dest pointer in r0 - fail", + .insns = { + /* val = 0; */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, 1); */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8), + /* if (r0 != 0) exit(1); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "Dest pointer in r0 - succeed", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r0 = atomic_cmpxchg(&val, r0, 0); */ + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c new file mode 100644 index 00000000000000..3bc9ff7a860b7f --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c @@ -0,0 +1,57 @@ +#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \ + { \ + "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \ + .insns = { \ + /* u64 val = operan1; */ \ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, operand1), \ + /* u64 old = atomic_fetch_add(&val, operand2); */ \ + BPF_MOV64_REG(dst_reg, BPF_REG_10), \ + BPF_MOV64_IMM(src_reg, operand2), \ + BPF_ATOMIC_OP(BPF_DW, op, \ + dst_reg, src_reg, -8), \ + /* if (old != operand1) exit(1); */ \ + BPF_JMP_IMM(BPF_JEQ, src_reg, operand1, 2), \ + BPF_MOV64_IMM(BPF_REG_0, 1), \ + BPF_EXIT_INSN(), \ + /* if (val != result) exit (2); */ \ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), \ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, expect, 2), \ + BPF_MOV64_IMM(BPF_REG_0, 2), \ + BPF_EXIT_INSN(), \ + /* exit(0); */ \ + BPF_MOV64_IMM(BPF_REG_0, 0), \ + BPF_EXIT_INSN(), \ + }, \ + .result = ACCEPT, \ + } 
+__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 1, BPF_ADD | BPF_FETCH, 2, 3), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XCHG, 0x011, 0x011), +__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XCHG, 0x011, 0x011), +#undef __ATOMIC_FETCH_OP_TEST diff --git a/tools/testing/selftests/bpf/verifier/atomic_invalid.c b/tools/testing/selftests/bpf/verifier/atomic_invalid.c new file mode 100644 index 00000000000000..39272720b2f6f4 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/atomic_invalid.c @@ -0,0 +1,25 @@ +#define __INVALID_ATOMIC_ACCESS_TEST(op) \ + { \ + "atomic " #op " access through non-pointer ", \ + .insns = { \ + BPF_MOV64_IMM(BPF_REG_0, 1), \ + BPF_MOV64_IMM(BPF_REG_1, 0), \ + BPF_ATOMIC_OP(BPF_DW, op, BPF_REG_1, BPF_REG_0, -8), \ + BPF_MOV64_IMM(BPF_REG_0, 0), \ + BPF_EXIT_INSN(), \ + }, \ + .result = REJECT, \ + .errstr = "R1 invalid mem access 'inv'" \ + } +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD), +__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_AND), +__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_OR), +__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | 
BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH), +__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG), +__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG), diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 336a749673d195..d7b74eb2833398 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -1,3 +1,26 @@ +{ + "calls: invalid kfunc call not eliminated", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = REJECT, + .errstr = "invalid kernel function call not eliminated in verifier pass", +}, +{ + "calls: invalid kfunc call unreachable", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, { "calls: basic sanity", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c index 2022c0f2cd759b..83cecfbd67394a 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_skb.c +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c @@ -502,7 +502,7 @@ "check skb->hash byte load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)), #else @@ -537,7 +537,7 @@ "check skb->hash byte load permitted 3", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3), #else @@ -646,7 +646,7 @@ "check skb->hash half load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)), #else @@ -661,7 +661,7 @@ "check skb->hash half load permitted 2", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 2), #else @@ -676,7 +676,7 @@ "check skb->hash half load not permitted, unaligned 1", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 1), #else @@ -693,7 +693,7 @@ "check skb->hash half load not permitted, unaligned 3", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3), #else @@ -951,7 +951,7 @@ "check skb->data half load not permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, data)), #else @@ -1057,6 +1057,66 @@ .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, +{ + "padding after gso_size is not accessible", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetofend(struct __sk_buff, gso_size)), + 
BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .result_unpriv = REJECT, + .errstr = "invalid bpf_context access off=180 size=4", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "read hwtstamp from CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hwtstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "read hwtstamp from CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, hwtstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "write hwtstamp from CGROUP_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, hwtstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .result_unpriv = REJECT, + .errstr = "invalid bpf_context access off=184 size=8", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "read hwtstamp from CLS", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hwtstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, { "check wire_len is not readable by sockets", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c index df215e0045664c..79021c30e51eb3 100644 --- a/tools/testing/selftests/bpf/verifier/jit.c +++ b/tools/testing/selftests/bpf/verifier/jit.c @@ -62,6 +62,11 @@ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), BPF_MOV64_IMM(BPF_REG_0, 1), BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 0xefefef), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), @@ -73,11 +78,69 @@ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), BPF_MOV64_IMM(BPF_REG_0, 1), BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0xefefef), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_2, 0x2ad4d4aaULL), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 0x2b), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), - BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_5, 0xeeff0d413122ULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_5, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_5, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jit: various div tests", + .insns = { + BPF_LD_IMM64(BPF_REG_2, 0xefeffeULL), + BPF_LD_IMM64(BPF_REG_0, 0xeeff0d413122ULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xeeff0d413122ULL), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_3, 0xfefeeeULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + 
BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_2, 0xaa93ULL), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_1, 0xbeefULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_LD_IMM64(BPF_REG_3, 0xbeefULL), + BPF_ALU64_REG(BPF_MOD, BPF_REG_1, BPF_REG_3), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_2, 0x5ee1dULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefeeeULL), + BPF_LD_IMM64(BPF_REG_3, 0x2bULL), + BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_3), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_REG(BPF_DIV, BPF_REG_1, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2), BPF_MOV64_IMM(BPF_REG_0, 1), BPF_EXIT_INSN(), BPF_MOV64_IMM(BPF_REG_0, 2), diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c index 2cab6a3966bb47..5c8944d0b09100 100644 --- a/tools/testing/selftests/bpf/verifier/lwt.c +++ b/tools/testing/selftests/bpf/verifier/lwt.c @@ -174,7 +174,7 @@ "check skb->tc_classid half load not permitted for lwt prog", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, tc_classid)), #else diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c index 471c1a5950d840..d8a9b1a1f9a259 100644 --- a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c +++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c @@ -2,7 +2,7 @@ "check bpf_perf_event_data->sample_period byte load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, sample_period)), #else @@ -18,7 +18,7 @@ "check bpf_perf_event_data->sample_period half load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, sample_period)), #else @@ -34,7 +34,7 @@ "check bpf_perf_event_data->sample_period word load permitted", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_perf_event_data, sample_period)), #else diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index 0b943897aaf6c1..c9991c3f3bd274 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -104,3 +104,164 @@ .result = ACCEPT, .retval = POINTER_VALUE, }, +{ + "Spill and refill a u32 const scalar. 
Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r4 = 20 */ + BPF_MOV32_IMM(BPF_REG_4, 20), + /* *(u32 *)(r10 -8) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* r4 = *(u32 *)(r10 -8) */ + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "Spill a u32 const, refill from another half of the uninit u32 from the stack", + .insns = { + /* r4 = 20 */ + BPF_MOV32_IMM(BPF_REG_4, 20), + /* *(u32 *)(r10 -8) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid read from stack off -4+0 size 4", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "Spill a u32 const scalar. Refill as u16. Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r4 = 20 */ + BPF_MOV32_IMM(BPF_REG_4, 20), + /* *(u32 *)(r10 -8) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* r4 = *(u16 *)(r10 -8) */ + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r4 = 20 */ + BPF_MOV32_IMM(BPF_REG_4, 20), + /* *(u32 *)(r10 -8) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* r4 = *(u16 *)(r10 -6) */ + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "Spill and refill a u32 const scalar at non 8byte aligned stack addr. 
Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r4 = 20 */ + BPF_MOV32_IMM(BPF_REG_4, 20), + /* *(u32 *)(r10 -8) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* *(u32 *)(r10 -4) = r4 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4), + /* r4 = *(u32 *)(r10 -4), */ + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "Spill and refill a umax=40 bounded scalar. Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, tstamp)), + BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), + /* r4 = (*u32 *)(r10 - 8) */ + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), + /* r2 += r4 R2=pkt R4=inv,umax=40 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4), + /* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20), + /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1), + /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 8889b3f552367d..027198768fad31 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -224,10 +224,10 @@ EOF -nodefaults \ -display none \ -serial mon:stdio \ - -cpu kvm64 \ + -cpu host \ -enable-kvm \ - -smp 4 \ - -m 2G \ + -smp 8 \ + -m 4G \ -drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \ -kernel "${kernel_bzimage}" \ -append "root=/dev/vda rw console=ttyS0,115200" diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c index 842d9155d36c5b..30f12637f4e4ab 100644 --- a/tools/testing/selftests/bpf/xdping.c +++ b/tools/testing/selftests/bpf/xdping.c @@ -178,9 +178,8 @@ int main(int argc, char **argv) return 1; } - main_prog = bpf_object__find_program_by_title(obj, - server ? "xdpserver" : - "xdpclient"); + main_prog = bpf_object__find_program_by_name(obj, + server ? 
"xdping_server" : "xdping_client"); if (main_prog) prog_fd = bpf_program__fd(main_prog); if (!main_prog || prog_fd < 0) { @@ -188,7 +187,7 @@ int main(int argc, char **argv) return 1; } - map = bpf_map__next(NULL, obj); + map = bpf_object__next_map(obj, NULL); if (map) map_fd = bpf_map__fd(map); if (!map || map_fd < 0) { diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c index f53ce2683f8d92..6c7cf8aadc7923 100644 --- a/tools/testing/selftests/bpf/xdpxceiver.c +++ b/tools/testing/selftests/bpf/xdpxceiver.c @@ -19,7 +19,7 @@ * Virtual Ethernet interfaces. * * For each mode, the following tests are run: - * a. nopoll - soft-irq processing + * a. nopoll - soft-irq processing in run-to-completion mode * b. poll - using poll() syscall * c. Socket Teardown * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy @@ -45,6 +45,10 @@ * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, * then remove xsk sockets from queue 0 on both veth interfaces and * finally run a traffic on queues ids 1 + * g. unaligned mode + * h. tests for invalid and corner case Tx descriptors so that the correct ones + * are discarded and let through, respectively. + * i. 2K frame size tests * * Total tests: 12 * @@ -112,13 +116,10 @@ static void __exit_with_error(int error, const char *file, const char *func, int #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) -#define print_ksft_result(void)\ - (ksft_test_result_pass("PASS: %s %s %s%s%s%s\n", configured_mode ? "DRV" : "SKB",\ - test_type == TEST_TYPE_POLL ? "POLL" : "NOPOLL",\ - test_type == TEST_TYPE_TEARDOWN ? "Socket Teardown" : "",\ - test_type == TEST_TYPE_BIDI ? "Bi-directional Sockets" : "",\ - test_type == TEST_TYPE_STATS ? "Stats" : "",\ - test_type == TEST_TYPE_BPF_RES ? "BPF RES" : "")) +#define mode_string(test) (test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? 
"SKB" : "DRV" + +#define print_ksft_result(test) \ + (ksft_test_result_pass("PASS: %s %s\n", mode_string(test), (test)->name)) static void memset32_htonl(void *dest, u32 val, u32 size) { @@ -235,80 +236,46 @@ static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); } -static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size, int idx) +static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) { struct xsk_umem_config cfg = { .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, - .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, - .frame_headroom = frame_headroom, + .frame_size = umem->frame_size, + .frame_headroom = umem->frame_headroom, .flags = XSK_UMEM__DEFAULT_FLAGS }; - struct xsk_umem_info *umem; int ret; - umem = calloc(1, sizeof(struct xsk_umem_info)); - if (!umem) - exit_with_error(errno); + if (umem->unaligned_mode) + cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, &cfg); if (ret) - exit_with_error(-ret); + return ret; umem->buffer = buffer; - - data->umem_arr[idx] = umem; -} - -static void xsk_populate_fill_ring(struct xsk_umem_info *umem) -{ - int ret, i; - u32 idx = 0; - - ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx); - if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) - exit_with_error(-ret); - for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = i * XSK_UMEM__DEFAULT_FRAME_SIZE; - xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS); + return 0; } -static int xsk_configure_socket(struct ifobject *ifobject, int idx) +static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, + struct ifobject *ifobject, u32 qid) { struct xsk_socket_config cfg; - struct xsk_socket_info *xsk; struct xsk_ring_cons *rxr; struct xsk_ring_prod *txr; - int ret; - - xsk = calloc(1, sizeof(struct xsk_socket_info)); - if (!xsk) - exit_with_error(errno); - xsk->umem = ifobject->umem; - cfg.rx_size = rxqsize; + xsk->umem = umem; + cfg.rx_size = xsk->rxqsize; cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; cfg.libbpf_flags = 0; - cfg.xdp_flags = xdp_flags; - cfg.bind_flags = xdp_bind_flags; - - if (test_type != TEST_TYPE_BIDI) { - rxr = (ifobject->fv.vector == rx) ? &xsk->rx : NULL; - txr = (ifobject->fv.vector == tx) ? &xsk->tx : NULL; - } else { - rxr = &xsk->rx; - txr = &xsk->tx; - } - - ret = xsk_socket__create(&xsk->xsk, ifobject->ifname, idx, - ifobject->umem->umem, rxr, txr, &cfg); - if (ret) - return 1; + cfg.xdp_flags = ifobject->xdp_flags; + cfg.bind_flags = ifobject->bind_flags; - ifobject->xsk_arr[idx] = xsk; - - return 0; + txr = ifobject->tx_on ? &xsk->tx : NULL; + rxr = ifobject->rx_on ? 
&xsk->rx : NULL; + return xsk_socket__create(&xsk->xsk, ifobject->ifname, qid, umem->umem, rxr, txr, &cfg); } static struct option long_options[] = { @@ -354,45 +321,44 @@ static int switch_namespace(const char *nsname) return nsfd; } -static int validate_interfaces(void) +static bool validate_interface(struct ifobject *ifobj) { - bool ret = true; - - for (int i = 0; i < MAX_INTERFACES; i++) { - if (!strcmp(ifdict[i]->ifname, "")) { - ret = false; - ksft_test_result_fail("ERROR: interfaces: -i , -i ,."); - } - } - return ret; + if (!strcmp(ifobj->ifname, "")) + return false; + return true; } -static void parse_command_line(int argc, char **argv) +static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, + char **argv) { - int option_index, interface_index = 0, c; + struct ifobject *ifobj; + u32 interface_nb = 0; + int option_index, c; opterr = 0; for (;;) { - c = getopt_long(argc, argv, "i:Dv", long_options, &option_index); + char *sptr, *token; + c = getopt_long(argc, argv, "i:Dv", long_options, &option_index); if (c == -1) break; switch (c) { case 'i': - if (interface_index == MAX_INTERFACES) + if (interface_nb == 0) + ifobj = ifobj_tx; + else if (interface_nb == 1) + ifobj = ifobj_rx; + else break; - char *sptr, *token; sptr = strndupa(optarg, strlen(optarg)); - memcpy(ifdict[interface_index]->ifname, - strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS); + memcpy(ifobj->ifname, strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS); token = strsep(&sptr, ","); if (token) - memcpy(ifdict[interface_index]->nsname, token, - MAX_INTERFACES_NAMESPACE_CHARS); - interface_index++; + memcpy(ifobj->nsname, token, MAX_INTERFACES_NAMESPACE_CHARS); + interface_nb++; break; case 'D': opt_pkt_dump = true; @@ -405,11 +371,85 @@ static void parse_command_line(int argc, char **argv) ksft_exit_xfail(); } } +} - if (!validate_interfaces()) { - usage(basename(argv[0])); - ksft_exit_xfail(); +static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, + struct ifobject *ifobj_rx) +{ + u32 i, j; + + for (i = 0; i < MAX_INTERFACES; i++) { + struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; + + ifobj->umem = &ifobj->umem_arr[0]; + ifobj->xsk = &ifobj->xsk_arr[0]; + ifobj->use_poll = false; + ifobj->pacing_on = true; + ifobj->pkt_stream = test->pkt_stream_default; + + if (i == 0) { + ifobj->rx_on = false; + ifobj->tx_on = true; + } else { + ifobj->rx_on = true; + ifobj->tx_on = false; + } + + for (j = 0; j < MAX_SOCKETS; j++) { + memset(&ifobj->umem_arr[j], 0, sizeof(ifobj->umem_arr[j])); + memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); + ifobj->umem_arr[j].num_frames = DEFAULT_UMEM_BUFFERS; + ifobj->umem_arr[j].frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; + } + } + + test->ifobj_tx = ifobj_tx; + test->ifobj_rx = ifobj_rx; + test->current_step = 0; + test->total_steps = 1; + test->nb_sockets = 1; +} + +static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, + struct ifobject *ifobj_rx, enum test_mode mode) +{ + struct pkt_stream *pkt_stream; + u32 i; + + pkt_stream = test->pkt_stream_default; + memset(test, 0, sizeof(*test)); + test->pkt_stream_default = pkt_stream; + + for (i = 0; i < MAX_INTERFACES; i++) { + struct ifobject *ifobj = i ? 
ifobj_rx : ifobj_tx; + + ifobj->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; + if (mode == TEST_MODE_SKB) + ifobj->xdp_flags |= XDP_FLAGS_SKB_MODE; + else + ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE; + + ifobj->bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; } + + __test_spec_init(test, ifobj_tx, ifobj_rx); +} + +static void test_spec_reset(struct test_spec *test) +{ + __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); +} + +static void test_spec_set_name(struct test_spec *test, const char *name) +{ + strncpy(test->name, name, MAX_TEST_NAME_SIZE); +} + +static void pkt_stream_reset(struct pkt_stream *pkt_stream) +{ + if (pkt_stream) + pkt_stream->rx_pkt_nb = 0; } static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) @@ -420,29 +460,104 @@ static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) return &pkt_stream->pkts[pkt_nb]; } -static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len) +static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream) +{ + while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { + if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) + return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; + pkt_stream->rx_pkt_nb++; + } + return NULL; +} + +static void pkt_stream_delete(struct pkt_stream *pkt_stream) +{ + free(pkt_stream->pkts); + free(pkt_stream); +} + +static void pkt_stream_restore_default(struct test_spec *test) +{ + if (test->ifobj_tx->pkt_stream != test->pkt_stream_default) { + pkt_stream_delete(test->ifobj_tx->pkt_stream); + test->ifobj_tx->pkt_stream = test->pkt_stream_default; + } + test->ifobj_rx->pkt_stream = test->pkt_stream_default; +} + +static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) { struct pkt_stream *pkt_stream; - u32 i; - pkt_stream = malloc(sizeof(*pkt_stream)); + pkt_stream = calloc(1, sizeof(*pkt_stream)); if (!pkt_stream) - exit_with_error(ENOMEM); + return NULL; pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); - if (!pkt_stream->pkts) + if (!pkt_stream->pkts) { + free(pkt_stream); + return NULL; + } + + pkt_stream->nb_pkts = nb_pkts; + return pkt_stream; +} + +static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) +{ + struct pkt_stream *pkt_stream; + u32 i; + + pkt_stream = __pkt_stream_alloc(nb_pkts); + if (!pkt_stream) exit_with_error(ENOMEM); pkt_stream->nb_pkts = nb_pkts; for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].addr = (i % num_frames) * XSK_UMEM__DEFAULT_FRAME_SIZE; + pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size; pkt_stream->pkts[i].len = pkt_len; pkt_stream->pkts[i].payload = i; + + if (pkt_len > umem->frame_size) + pkt_stream->pkts[i].valid = false; + else + pkt_stream->pkts[i].valid = true; } return pkt_stream; } +static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, + struct pkt_stream *pkt_stream) +{ + return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); +} + +static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) +{ + struct pkt_stream *pkt_stream; + + pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); + test->ifobj_tx->pkt_stream = pkt_stream; + test->ifobj_rx->pkt_stream = pkt_stream; +} + +static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) +{ + struct xsk_umem_info *umem = test->ifobj_tx->umem; + struct pkt_stream *pkt_stream; + u32 i; + + pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default); + for (i = 1; i < 
test->pkt_stream_default->nb_pkts; i += 2) { + pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + offset; + pkt_stream->pkts[i].len = pkt_len; + } + + test->ifobj_tx->pkt_stream = pkt_stream; + test->ifobj_rx->pkt_stream = pkt_stream; +} + static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) { struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); @@ -453,6 +568,8 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) if (!pkt) return NULL; + if (!pkt->valid || pkt->len < PKT_SIZE) + return pkt; data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); @@ -467,6 +584,26 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) return pkt; } +static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) +{ + struct pkt_stream *pkt_stream; + u32 i; + + pkt_stream = __pkt_stream_alloc(nb_pkts); + if (!pkt_stream) + exit_with_error(ENOMEM); + + test->ifobj_tx->pkt_stream = pkt_stream; + test->ifobj_rx->pkt_stream = pkt_stream; + + for (i = 0; i < nb_pkts; i++) { + pkt_stream->pkts[i].addr = pkts[i].addr; + pkt_stream->pkts[i].len = pkts[i].len; + pkt_stream->pkts[i].payload = i; + pkt_stream->pkts[i].valid = pkts[i].valid; + } +} + static void pkt_dump(void *pkt, u32 len) { char s[INET_ADDRSTRLEN]; @@ -504,9 +641,28 @@ static void pkt_dump(void *pkt, u32 len) fprintf(stdout, "---------------------------------------\n"); } -static bool is_pkt_valid(struct pkt *pkt, void *buffer, const struct xdp_desc *desc) +static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, + u64 pkt_stream_addr) { - void *data = xsk_umem__get_data(buffer, desc->addr); + u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; + u32 offset = addr % umem->frame_size, expected_offset = 0; + + if (!pkt_stream->use_addr_for_fill) + pkt_stream_addr = 0; + + expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; + + if (offset == expected_offset) + return true; + + ksft_test_result_fail("ERROR: [%s] expected [%u], got [%u]\n", __func__, expected_offset, + offset); + return false; +} + +static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) +{ + void *data = xsk_umem__get_data(buffer, addr); struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); if (!pkt) { @@ -514,19 +670,24 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, const struct xdp_desc *d return false; } + if (len < PKT_SIZE) { + /*Do not try to verify packets that are smaller than minimum size. 
*/ + return true; + } + + if (pkt->len != len) { + ksft_test_result_fail + ("ERROR: [%s] expected length [%d], got length [%d]\n", + __func__, pkt->len, len); + return false; + } + if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); - if (opt_pkt_dump && test_type != TEST_TYPE_STATS) + if (opt_pkt_dump) pkt_dump(data, PKT_SIZE); - if (pkt->len != desc->len) { - ksft_test_result_fail - ("ERROR: [%s] expected length [%d], got length [%d]\n", - __func__, pkt->len, desc->len); - return false; - } - if (pkt->payload != seqnum) { ksft_test_result_fail ("ERROR: [%s] expected seqnum [%d], got seqnum [%d]\n", @@ -558,14 +719,20 @@ static void complete_pkts(struct xsk_socket_info *xsk, int batch_size) unsigned int rcvd; u32 idx; - if (!xsk->outstanding_tx) - return; - if (xsk_ring_prod__needs_wakeup(&xsk->tx)) kick_tx(xsk); rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); if (rcvd) { + if (rcvd > xsk->outstanding_tx) { + u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); + + ksft_test_result_fail("ERROR: [%s] Too many packets completed\n", + __func__); + ksft_print_msg("Last completion address: %llx\n", addr); + return; + } + xsk_ring_cons__release(&xsk->umem->cq, rcvd); xsk->outstanding_tx -= rcvd; } @@ -574,15 +741,16 @@ static void complete_pkts(struct xsk_socket_info *xsk, int batch_size) static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *xsk, struct pollfd *fds) { - u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkt_count = 0; - struct pkt *pkt; + struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream); + struct xsk_umem_info *umem = xsk->umem; + u32 idx_rx = 0, idx_fq = 0, rcvd, i; + u32 total = 0; int ret; - pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); while (pkt) { rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); if (!rcvd) { - if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { + if (xsk_ring_prod__needs_wakeup(&umem->fq)) { ret = poll(fds, 1, POLL_TMOUT); if (ret < 0) exit_with_error(-ret); @@ -590,40 +758,58 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info * continue; } - ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); + ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); while (ret != rcvd) { if (ret < 0) exit_with_error(-ret); - if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { + if (xsk_ring_prod__needs_wakeup(&umem->fq)) { ret = poll(fds, 1, POLL_TMOUT); if (ret < 0) exit_with_error(-ret); } - ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); + ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); } for (i = 0; i < rcvd; i++) { const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); u64 addr = desc->addr, orig; + if (!pkt) { + ksft_test_result_fail("ERROR: [%s] Received too many packets.\n", + __func__); + ksft_print_msg("Last packet has addr: %llx len: %u\n", + addr, desc->len); + return; + } + orig = xsk_umem__extract_addr(addr); addr = xsk_umem__add_offset_to_addr(addr); - if (!is_pkt_valid(pkt, xsk->umem->buffer, desc)) + + if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len)) + return; + if (!is_offset_correct(umem, pkt_stream, addr, pkt->addr)) return; - *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; - pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); + *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; + pkt = pkt_stream_get_next_rx_pkt(pkt_stream); } - xsk_ring_prod__submit(&xsk->umem->fq, rcvd); + xsk_ring_prod__submit(&umem->fq, rcvd); 
xsk_ring_cons__release(&xsk->rx, rcvd); + + pthread_mutex_lock(&pacing_mutex); + pkts_in_flight -= rcvd; + total += rcvd; + if (pkts_in_flight < umem->num_frames) + pthread_cond_signal(&pacing_cond); + pthread_mutex_unlock(&pacing_mutex); } } static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb) { struct xsk_socket_info *xsk = ifobject->xsk; - u32 i, idx; + u32 i, idx, valid_pkts = 0; while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) complete_pkts(xsk, BATCH_SIZE); @@ -638,15 +824,23 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb) tx_desc->addr = pkt->addr; tx_desc->len = pkt->len; pkt_nb++; + if (pkt->valid) + valid_pkts++; } - xsk_ring_prod__submit(&xsk->tx, i); - if (stat_test_type != STAT_TEST_TX_INVALID) - xsk->outstanding_tx += i; - else if (xsk_ring_prod__needs_wakeup(&xsk->tx)) + pthread_mutex_lock(&pacing_mutex); + pkts_in_flight += valid_pkts; + if (ifobject->pacing_on && pkts_in_flight >= ifobject->umem->num_frames - BATCH_SIZE) { kick_tx(xsk); + pthread_cond_wait(&pacing_cond, &pacing_mutex); + } + pthread_mutex_unlock(&pacing_mutex); + + xsk_ring_prod__submit(&xsk->tx, i); + xsk->outstanding_tx += valid_pkts; complete_pkts(xsk, i); + usleep(10); return i; } @@ -658,29 +852,25 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk) static void send_pkts(struct ifobject *ifobject) { - struct pollfd fds[MAX_SOCKS] = { }; + struct pollfd fds = { }; u32 pkt_cnt = 0; - fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); - fds[0].events = POLLOUT; + fds.fd = xsk_socket__fd(ifobject->xsk->xsk); + fds.events = POLLOUT; while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { - u32 sent; - - if (test_type == TEST_TYPE_POLL) { + if (ifobject->use_poll) { int ret; - ret = poll(fds, 1, POLL_TMOUT); + ret = poll(&fds, 1, POLL_TMOUT); if (ret <= 0) continue; - if (!(fds[0].revents & POLLOUT)) + if (!(fds.revents & POLLOUT)) continue; } - sent = __send_pkts(ifobject, pkt_cnt); - pkt_cnt += sent; - usleep(10); + pkt_cnt += __send_pkts(ifobject, pkt_cnt); } wait_for_tx_completion(ifobject->xsk); @@ -698,7 +888,7 @@ static bool rx_stats_are_valid(struct ifobject *ifobject) optlen = sizeof(stats); err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); if (err) { - ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", + ksft_test_result_fail("ERROR Rx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", __func__, -err, strerror(-err)); return true; } @@ -739,7 +929,7 @@ static void tx_stats_validate(struct ifobject *ifobject) optlen = sizeof(stats); err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); if (err) { - ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", + ksft_test_result_fail("ERROR Tx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", __func__, -err, strerror(-err)); return; } @@ -751,71 +941,62 @@ static void tx_stats_validate(struct ifobject *ifobject) __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); } -static void thread_common_ops(struct ifobject *ifobject, void *bufs) +static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) { - u64 umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE; int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; - size_t mmap_sz = umem_sz; - int ctr = 0; - int ret; + u32 i; ifobject->ns_fd = switch_namespace(ifobject->nsname); - if (test_type == TEST_TYPE_BPF_RES) - mmap_sz *= 2; + if (ifobject->umem->unaligned_mode) + mmap_flags |= MAP_HUGETLB; - bufs = mmap(NULL, mmap_sz, PROT_READ | 
PROT_WRITE, mmap_flags, -1, 0); - if (bufs == MAP_FAILED) - exit_with_error(errno); + for (i = 0; i < test->nb_sockets; i++) { + u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; + u32 ctr = 0; + void *bufs; + int ret; - while (ctr++ < SOCK_RECONF_CTR) { - xsk_configure_umem(ifobject, bufs, umem_sz, 0); - ifobject->umem = ifobject->umem_arr[0]; - ret = xsk_configure_socket(ifobject, 0); - if (!ret) - break; + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); + if (bufs == MAP_FAILED) + exit_with_error(errno); - /* Retry Create Socket if it fails as xsk_socket__create() is asynchronous */ - usleep(USLEEP_MAX); - if (ctr >= SOCK_RECONF_CTR) + ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz); + if (ret) exit_with_error(-ret); - } - ifobject->umem = ifobject->umem_arr[0]; - ifobject->xsk = ifobject->xsk_arr[0]; + while (ctr++ < SOCK_RECONF_CTR) { + ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i], + ifobject, i); + if (!ret) + break; - if (test_type == TEST_TYPE_BPF_RES) { - xsk_configure_umem(ifobject, (u8 *)bufs + umem_sz, umem_sz, 1); - ifobject->umem = ifobject->umem_arr[1]; - ret = xsk_configure_socket(ifobject, 1); + /* Retry if it fails as xsk_socket__create() is asynchronous */ + if (ctr >= SOCK_RECONF_CTR) + exit_with_error(-ret); + usleep(USLEEP_MAX); + } } - ifobject->umem = ifobject->umem_arr[0]; - ifobject->xsk = ifobject->xsk_arr[0]; - print_verbose("Interface [%s] vector [%s]\n", - ifobject->ifname, ifobject->fv.vector == tx ? "Tx" : "Rx"); -} - -static bool testapp_is_test_two_stepped(void) -{ - return (test_type != TEST_TYPE_BIDI && test_type != TEST_TYPE_BPF_RES) || second_step; + ifobject->umem = &ifobject->umem_arr[0]; + ifobject->xsk = &ifobject->xsk_arr[0]; } static void testapp_cleanup_xsk_res(struct ifobject *ifobj) { - if (testapp_is_test_two_stepped()) { - xsk_socket__delete(ifobj->xsk->xsk); - (void)xsk_umem__delete(ifobj->umem->umem); - } + print_verbose("Destroying socket\n"); + xsk_socket__delete(ifobj->xsk->xsk); + munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size); + xsk_umem__delete(ifobj->umem->umem); } static void *worker_testapp_validate_tx(void *arg) { - struct ifobject *ifobject = (struct ifobject *)arg; - void *bufs = NULL; + struct test_spec *test = (struct test_spec *)arg; + struct ifobject *ifobject = test->ifobj_tx; - if (!second_step) - thread_common_ops(ifobject, bufs); + if (test->current_step == 1) + thread_common_ops(test, ifobject); print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, ifobject->ifname); @@ -824,24 +1005,55 @@ static void *worker_testapp_validate_tx(void *arg) if (stat_test_type == STAT_TEST_TX_INVALID) tx_stats_validate(ifobject); - testapp_cleanup_xsk_res(ifobject); + if (test->total_steps == test->current_step) + testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); } +static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) +{ + u32 idx = 0, i, buffers_to_fill; + int ret; + + if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) + buffers_to_fill = umem->num_frames; + else + buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; + + ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); + if (ret != buffers_to_fill) + exit_with_error(ENOSPC); + for (i = 0; i < buffers_to_fill; i++) { + u64 addr; + + if (pkt_stream->use_addr_for_fill) { + struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); + + if (!pkt) + break; + addr = pkt->addr; + } else 
{ + addr = i * umem->frame_size; + } + + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + } + xsk_ring_prod__submit(&umem->fq, buffers_to_fill); +} + static void *worker_testapp_validate_rx(void *arg) { - struct ifobject *ifobject = (struct ifobject *)arg; - struct pollfd fds[MAX_SOCKS] = { }; - void *bufs = NULL; + struct test_spec *test = (struct test_spec *)arg; + struct ifobject *ifobject = test->ifobj_rx; + struct pollfd fds = { }; - if (!second_step) - thread_common_ops(ifobject, bufs); + if (test->current_step == 1) + thread_common_ops(test, ifobject); - if (stat_test_type != STAT_TEST_RX_FILL_EMPTY) - xsk_populate_fill_ring(ifobject->umem); + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); - fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); - fds[0].events = POLLIN; + fds.fd = xsk_socket__fd(ifobject->xsk->xsk); + fds.events = POLLIN; pthread_barrier_wait(&barr); @@ -849,151 +1061,239 @@ static void *worker_testapp_validate_rx(void *arg) while (!rx_stats_are_valid(ifobject)) continue; else - receive_pkts(ifobject->pkt_stream, ifobject->xsk, fds); + receive_pkts(ifobject->pkt_stream, ifobject->xsk, &fds); - if (test_type == TEST_TYPE_TEARDOWN) - print_verbose("Destroying socket\n"); - - testapp_cleanup_xsk_res(ifobject); + if (test->total_steps == test->current_step) + testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); } -static void testapp_validate(void) +static void testapp_validate_traffic(struct test_spec *test) { - bool bidi = test_type == TEST_TYPE_BIDI; - bool bpf = test_type == TEST_TYPE_BPF_RES; - struct pkt_stream *pkt_stream; + struct ifobject *ifobj_tx = test->ifobj_tx; + struct ifobject *ifobj_rx = test->ifobj_rx; + pthread_t t0, t1; if (pthread_barrier_init(&barr, NULL, 2)) exit_with_error(errno); - if (stat_test_type == STAT_TEST_TX_INVALID) - pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE); - else - pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, PKT_SIZE); - ifdict_tx->pkt_stream = pkt_stream; - ifdict_rx->pkt_stream = pkt_stream; + test->current_step++; + pkt_stream_reset(ifobj_rx->pkt_stream); + pkts_in_flight = 0; /*Spawn RX thread */ - pthread_create(&t0, NULL, ifdict_rx->func_ptr, ifdict_rx); + pthread_create(&t0, NULL, ifobj_rx->func_ptr, test); pthread_barrier_wait(&barr); if (pthread_barrier_destroy(&barr)) exit_with_error(errno); /*Spawn TX thread */ - pthread_create(&t1, NULL, ifdict_tx->func_ptr, ifdict_tx); + pthread_create(&t1, NULL, ifobj_tx->func_ptr, test); pthread_join(t1, NULL); pthread_join(t0, NULL); - - if (!(test_type == TEST_TYPE_TEARDOWN) && !bidi && !bpf && !(test_type == TEST_TYPE_STATS)) - print_ksft_result(); } -static void testapp_teardown(void) +static void testapp_teardown(struct test_spec *test) { int i; + test_spec_set_name(test, "TEARDOWN"); for (i = 0; i < MAX_TEARDOWN_ITER; i++) { - print_verbose("Creating socket\n"); - testapp_validate(); + testapp_validate_traffic(test); + test_spec_reset(test); } - - print_ksft_result(); } -static void swap_vectors(struct ifobject *ifobj1, struct ifobject *ifobj2) +static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) { - void *(*tmp_func_ptr)(void *) = ifobj1->func_ptr; - enum fvector tmp_vector = ifobj1->fv.vector; + thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; + struct ifobject *tmp_ifobj = (*ifobj1); - ifobj1->func_ptr = ifobj2->func_ptr; - ifobj1->fv.vector = ifobj2->fv.vector; + (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; + (*ifobj2)->func_ptr = tmp_func_ptr; - ifobj2->func_ptr = tmp_func_ptr; - 
ifobj2->fv.vector = tmp_vector; - - ifdict_tx = ifobj1; - ifdict_rx = ifobj2; + *ifobj1 = *ifobj2; + *ifobj2 = tmp_ifobj; } -static void testapp_bidi(void) +static void testapp_bidi(struct test_spec *test) { - for (int i = 0; i < MAX_BIDI_ITER; i++) { - print_verbose("Creating socket\n"); - testapp_validate(); - if (!second_step) { - print_verbose("Switching Tx/Rx vectors\n"); - swap_vectors(ifdict[1], ifdict[0]); - } - second_step = true; - } + test_spec_set_name(test, "BIDIRECTIONAL"); + test->ifobj_tx->rx_on = true; + test->ifobj_rx->tx_on = true; + test->total_steps = 2; + testapp_validate_traffic(test); - swap_vectors(ifdict[0], ifdict[1]); + print_verbose("Switching Tx/Rx vectors\n"); + swap_directions(&test->ifobj_rx, &test->ifobj_tx); + testapp_validate_traffic(test); - print_ksft_result(); + swap_directions(&test->ifobj_rx, &test->ifobj_tx); } -static void swap_xsk_res(void) +static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) { - xsk_socket__delete(ifdict_tx->xsk->xsk); - xsk_umem__delete(ifdict_tx->umem->umem); - xsk_socket__delete(ifdict_rx->xsk->xsk); - xsk_umem__delete(ifdict_rx->umem->umem); - ifdict_tx->umem = ifdict_tx->umem_arr[1]; - ifdict_tx->xsk = ifdict_tx->xsk_arr[1]; - ifdict_rx->umem = ifdict_rx->umem_arr[1]; - ifdict_rx->xsk = ifdict_rx->xsk_arr[1]; + xsk_socket__delete(ifobj_tx->xsk->xsk); + xsk_umem__delete(ifobj_tx->umem->umem); + xsk_socket__delete(ifobj_rx->xsk->xsk); + xsk_umem__delete(ifobj_rx->umem->umem); + ifobj_tx->umem = &ifobj_tx->umem_arr[1]; + ifobj_tx->xsk = &ifobj_tx->xsk_arr[1]; + ifobj_rx->umem = &ifobj_rx->umem_arr[1]; + ifobj_rx->xsk = &ifobj_rx->xsk_arr[1]; } -static void testapp_bpf_res(void) +static void testapp_bpf_res(struct test_spec *test) { - int i; + test_spec_set_name(test, "BPF_RES"); + test->total_steps = 2; + test->nb_sockets = 2; + testapp_validate_traffic(test); - for (i = 0; i < MAX_BPF_ITER; i++) { - print_verbose("Creating socket\n"); - testapp_validate(); - if (!second_step) - swap_xsk_res(); - second_step = true; - } + swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); + testapp_validate_traffic(test); +} - print_ksft_result(); +static void testapp_headroom(struct test_spec *test) +{ + test_spec_set_name(test, "UMEM_HEADROOM"); + test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; + testapp_validate_traffic(test); } -static void testapp_stats(void) +static void testapp_stats(struct test_spec *test) { - for (int i = 0; i < STAT_TEST_TYPE_MAX; i++) { - stat_test_type = i; + int i; - /* reset defaults */ - rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; - frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; + for (i = 0; i < STAT_TEST_TYPE_MAX; i++) { + test_spec_reset(test); + stat_test_type = i; + /* No or few packets will be received so cannot pace packets */ + test->ifobj_tx->pacing_on = false; switch (stat_test_type) { case STAT_TEST_RX_DROPPED: - frame_headroom = XSK_UMEM__DEFAULT_FRAME_SIZE - - XDP_PACKET_HEADROOM - 1; + test_spec_set_name(test, "STAT_RX_DROPPED"); + test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - + XDP_PACKET_HEADROOM - 1; + testapp_validate_traffic(test); break; case STAT_TEST_RX_FULL: - rxqsize = RX_FULL_RXQSIZE; + test_spec_set_name(test, "STAT_RX_FULL"); + test->ifobj_rx->xsk->rxqsize = RX_FULL_RXQSIZE; + testapp_validate_traffic(test); break; case STAT_TEST_TX_INVALID: - continue; + test_spec_set_name(test, "STAT_TX_INVALID"); + pkt_stream_replace(test, DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE); + 
testapp_validate_traffic(test); + + pkt_stream_restore_default(test); + break; + case STAT_TEST_RX_FILL_EMPTY: + test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); + test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 0, + MIN_PKT_SIZE); + if (!test->ifobj_rx->pkt_stream) + exit_with_error(ENOMEM); + test->ifobj_rx->pkt_stream->use_addr_for_fill = true; + testapp_validate_traffic(test); + + pkt_stream_restore_default(test); + break; default: break; } - testapp_validate(); } - print_ksft_result(); + /* To only see the whole stat set being completed unless an individual test fails. */ + test_spec_set_name(test, "STATS"); +} + +/* Simple test */ +static bool hugepages_present(struct ifobject *ifobject) +{ + const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; + void *bufs; + + bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB, -1, 0); + if (bufs == MAP_FAILED) + return false; + + munmap(bufs, mmap_sz); + return true; +} + +static bool testapp_unaligned(struct test_spec *test) +{ + if (!hugepages_present(test->ifobj_tx)) { + ksft_test_result_skip("No 2M huge pages present.\n"); + return false; + } + + test_spec_set_name(test, "UNALIGNED_MODE"); + test->ifobj_tx->umem->unaligned_mode = true; + test->ifobj_rx->umem->unaligned_mode = true; + /* Let half of the packets straddle a buffer boundrary */ + pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2); + test->ifobj_rx->pkt_stream->use_addr_for_fill = true; + testapp_validate_traffic(test); + + pkt_stream_restore_default(test); + return true; +} + +static void testapp_single_pkt(struct test_spec *test) +{ + struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}}; + + pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); + testapp_validate_traffic(test); + pkt_stream_restore_default(test); } -static void init_iface(struct ifobject *ifobj, const char *dst_mac, - const char *src_mac, const char *dst_ip, - const char *src_ip, const u16 dst_port, - const u16 src_port, enum fvector vector) +static void testapp_invalid_desc(struct test_spec *test) +{ + struct pkt pkts[] = { + /* Zero packet length at address zero allowed */ + {0, 0, 0, true}, + /* Zero packet length allowed */ + {0x1000, 0, 0, true}, + /* Straddling the start of umem */ + {-2, PKT_SIZE, 0, false}, + /* Packet too large */ + {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, + /* After umem ends */ + {UMEM_SIZE, PKT_SIZE, 0, false}, + /* Straddle the end of umem */ + {UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false}, + /* Straddle a page boundrary */ + {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, + /* Straddle a 2K boundrary */ + {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true}, + /* Valid packet for synch so that something is received */ + {0x4000, PKT_SIZE, 0, true}}; + + if (test->ifobj_tx->umem->unaligned_mode) { + /* Crossing a page boundrary allowed */ + pkts[6].valid = true; + } + if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { + /* Crossing a 2K frame size boundrary not allowed */ + pkts[7].valid = false; + } + + pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); + testapp_validate_traffic(test); + pkt_stream_restore_default(test); +} + +static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, + const char *dst_ip, const char *src_ip, const u16 dst_port, + const u16 src_port, thread_func_t func_ptr) { struct in_addr ip; @@ -1009,58 +1309,80 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, 
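The valid flags in the descriptor table of testapp_invalid_desc() above follow directly from where each address lands relative to the frame size and the end of the umem (UMEM_SIZE is DEFAULT_UMEM_BUFFERS, i.e. 1024 buffers, times the default 4096-byte frame, so 4 MiB, which is why {UMEM_SIZE, PKT_SIZE, ...} starts past the end). A small worked check of the straddling cases, assuming PKT_SIZE is an ordinary even packet size well below 4096 as elsewhere in this test:

    /* Illustrative helper, not part of the patch: in aligned mode a
     * descriptor is invalid whenever it crosses a frame boundary.
     */
    static bool pkt_crosses_boundary(u64 addr, u32 len, u32 frame_size)
    {
            return addr / frame_size != (addr + len - 1) / frame_size;
    }

    /* pkt_crosses_boundary(0x3000 - PKT_SIZE / 2, PKT_SIZE, 4096) -> true:
     *   crosses the page/frame boundary at 0x3000, so invalid unless
     *   unaligned_mode is set.
     * pkt_crosses_boundary(0x3800 - PKT_SIZE / 2, PKT_SIZE, 4096) -> false:
     *   stays inside the frame starting at 0x3000, so valid.
     * pkt_crosses_boundary(0x3800 - PKT_SIZE / 2, PKT_SIZE, 2048) -> true:
     *   with 2K frames 0x3800 is itself a boundary, so invalid.
     */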
ifobj->dst_port = dst_port; ifobj->src_port = src_port; - if (vector == tx) { - ifobj->fv.vector = tx; - ifobj->func_ptr = worker_testapp_validate_tx; - ifdict_tx = ifobj; - } else { - ifobj->fv.vector = rx; - ifobj->func_ptr = worker_testapp_validate_rx; - ifdict_rx = ifobj; - } + ifobj->func_ptr = func_ptr; } -static void run_pkt_test(int mode, int type) +static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) { test_type = type; /* reset defaults after potential previous test */ - xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; - second_step = 0; stat_test_type = -1; - rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; - frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; - - configured_mode = mode; - - switch (mode) { - case (TEST_MODE_SKB): - xdp_flags |= XDP_FLAGS_SKB_MODE; - break; - case (TEST_MODE_DRV): - xdp_flags |= XDP_FLAGS_DRV_MODE; - break; - default: - break; - } switch (test_type) { case TEST_TYPE_STATS: - testapp_stats(); + testapp_stats(test); break; case TEST_TYPE_TEARDOWN: - testapp_teardown(); + testapp_teardown(test); break; case TEST_TYPE_BIDI: - testapp_bidi(); + testapp_bidi(test); break; case TEST_TYPE_BPF_RES: - testapp_bpf_res(); + testapp_bpf_res(test); + break; + case TEST_TYPE_RUN_TO_COMPLETION: + test_spec_set_name(test, "RUN_TO_COMPLETION"); + testapp_validate_traffic(test); + break; + case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: + test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); + testapp_single_pkt(test); + break; + case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: + test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); + test->ifobj_tx->umem->frame_size = 2048; + test->ifobj_rx->umem->frame_size = 2048; + pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); + testapp_validate_traffic(test); + + pkt_stream_restore_default(test); + break; + case TEST_TYPE_POLL: + test->ifobj_tx->use_poll = true; + test->ifobj_rx->use_poll = true; + test_spec_set_name(test, "POLL"); + testapp_validate_traffic(test); + break; + case TEST_TYPE_ALIGNED_INV_DESC: + test_spec_set_name(test, "ALIGNED_INV_DESC"); + testapp_invalid_desc(test); + break; + case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: + test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); + test->ifobj_tx->umem->frame_size = 2048; + test->ifobj_rx->umem->frame_size = 2048; + testapp_invalid_desc(test); + break; + case TEST_TYPE_UNALIGNED_INV_DESC: + test_spec_set_name(test, "UNALIGNED_INV_DESC"); + test->ifobj_tx->umem->unaligned_mode = true; + test->ifobj_rx->umem->unaligned_mode = true; + testapp_invalid_desc(test); + break; + case TEST_TYPE_UNALIGNED: + if (!testapp_unaligned(test)) + return; + break; + case TEST_TYPE_HEADROOM: + testapp_headroom(test); break; default: - testapp_validate(); break; } + + print_ksft_result(test); } static struct ifobject *ifobject_create(void) @@ -1071,11 +1393,11 @@ static struct ifobject *ifobject_create(void) if (!ifobj) return NULL; - ifobj->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *)); + ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr)); if (!ifobj->xsk_arr) goto out_xsk_arr; - ifobj->umem_arr = calloc(2, sizeof(struct xsk_umem_info *)); + ifobj->umem_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->umem_arr)); if (!ifobj->umem_arr) goto out_umem_arr; @@ -1098,34 +1420,53 @@ static void ifobject_delete(struct ifobject *ifobj) int main(int argc, char **argv) { struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY }; - int i, j; + struct pkt_stream *pkt_stream_default; + struct ifobject *ifobj_tx, *ifobj_rx; + struct test_spec 
test; + u32 i, j; if (setrlimit(RLIMIT_MEMLOCK, &_rlim)) exit_with_error(errno); - for (i = 0; i < MAX_INTERFACES; i++) { - ifdict[i] = ifobject_create(); - if (!ifdict[i]) - exit_with_error(ENOMEM); - } + ifobj_tx = ifobject_create(); + if (!ifobj_tx) + exit_with_error(ENOMEM); + ifobj_rx = ifobject_create(); + if (!ifobj_rx) + exit_with_error(ENOMEM); setlocale(LC_ALL, ""); - parse_command_line(argc, argv); + parse_command_line(ifobj_tx, ifobj_rx, argc, argv); - init_iface(ifdict[tx], MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, tx); - init_iface(ifdict[rx], MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, rx); + if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { + usage(basename(argv[0])); + ksft_exit_xfail(); + } + + init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, + worker_testapp_validate_tx); + init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, + worker_testapp_validate_rx); + + test_spec_init(&test, ifobj_tx, ifobj_rx, 0); + pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + if (!pkt_stream_default) + exit_with_error(ENOMEM); + test.pkt_stream_default = pkt_stream_default; ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX); for (i = 0; i < TEST_MODE_MAX; i++) for (j = 0; j < TEST_TYPE_MAX; j++) { - run_pkt_test(i, j); + test_spec_init(&test, ifobj_tx, ifobj_rx, i); + run_pkt_test(&test, i, j); usleep(USLEEP_MAX); } - for (i = 0; i < MAX_INTERFACES; i++) - ifobject_delete(ifdict[i]); + pkt_stream_delete(pkt_stream_default); + ifobject_delete(ifobj_tx); + ifobject_delete(ifobj_rx); ksft_exit_pass(); return 0; diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h index 7e49b9fbe25e10..2f705f44b7483c 100644 --- a/tools/testing/selftests/bpf/xdpxceiver.h +++ b/tools/testing/selftests/bpf/xdpxceiver.h @@ -20,10 +20,9 @@ #define MAX_INTERFACES 2 #define MAX_INTERFACE_NAME_CHARS 7 #define MAX_INTERFACES_NAMESPACE_CHARS 10 -#define MAX_SOCKS 1 +#define MAX_SOCKETS 2 +#define MAX_TEST_NAME_SIZE 32 #define MAX_TEARDOWN_ITER 10 -#define MAX_BIDI_ITER 2 -#define MAX_BPF_ITER 2 #define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ sizeof(struct udphdr)) #define MIN_PKT_SIZE 64 @@ -36,10 +35,13 @@ #define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr)) #define USLEEP_MAX 10000 #define SOCK_RECONF_CTR 10 -#define BATCH_SIZE 8 +#define BATCH_SIZE 64 #define POLL_TMOUT 1000 #define DEFAULT_PKT_CNT (4 * 1024) +#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4) +#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE) #define RX_FULL_RXQSIZE 32 +#define UMEM_HEADROOM_TEST_SIZE 128 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) #define print_verbose(x...) 
do { if (opt_verbose) ksft_print_msg(x); } while (0) @@ -51,8 +53,15 @@ enum test_mode { }; enum test_type { - TEST_TYPE_NOPOLL, + TEST_TYPE_RUN_TO_COMPLETION, + TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME, + TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT, TEST_TYPE_POLL, + TEST_TYPE_UNALIGNED, + TEST_TYPE_ALIGNED_INV_DESC, + TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME, + TEST_TYPE_UNALIGNED_INV_DESC, + TEST_TYPE_HEADROOM, TEST_TYPE_TEARDOWN, TEST_TYPE_BIDI, TEST_TYPE_STATS, @@ -68,25 +77,21 @@ enum stat_test_type { STAT_TEST_TYPE_MAX }; -static int configured_mode; static bool opt_pkt_dump; -static u32 num_frames = DEFAULT_PKT_CNT / 4; -static bool second_step; static int test_type; static bool opt_verbose; - -static u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; -static u32 xdp_bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; static int stat_test_type; -static u32 rxqsize; -static u32 frame_headroom; struct xsk_umem_info { struct xsk_ring_prod fq; struct xsk_ring_cons cq; struct xsk_umem *umem; + u32 num_frames; + u32 frame_headroom; void *buffer; + u32 frame_size; + bool unaligned_mode; }; struct xsk_socket_info { @@ -95,51 +100,63 @@ struct xsk_socket_info { struct xsk_umem_info *umem; struct xsk_socket *xsk; u32 outstanding_tx; -}; - -struct flow_vector { - enum fvector { - tx, - rx, - } vector; + u32 rxqsize; }; struct pkt { u64 addr; u32 len; u32 payload; + bool valid; }; struct pkt_stream { u32 nb_pkts; + u32 rx_pkt_nb; struct pkt *pkts; + bool use_addr_for_fill; }; +typedef void *(*thread_func_t)(void *arg); + struct ifobject { char ifname[MAX_INTERFACE_NAME_CHARS]; char nsname[MAX_INTERFACES_NAMESPACE_CHARS]; struct xsk_socket_info *xsk; - struct xsk_socket_info **xsk_arr; - struct xsk_umem_info **umem_arr; + struct xsk_socket_info *xsk_arr; struct xsk_umem_info *umem; - void *(*func_ptr)(void *arg); - struct flow_vector fv; + struct xsk_umem_info *umem_arr; + thread_func_t func_ptr; struct pkt_stream *pkt_stream; int ns_fd; u32 dst_ip; u32 src_ip; + u32 xdp_flags; + u32 bind_flags; u16 src_port; u16 dst_port; + bool tx_on; + bool rx_on; + bool use_poll; + bool pacing_on; u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; }; -static struct ifobject *ifdict[MAX_INTERFACES]; -static struct ifobject *ifdict_rx; -static struct ifobject *ifdict_tx; +struct test_spec { + struct ifobject *ifobj_tx; + struct ifobject *ifobj_rx; + struct pkt_stream *pkt_stream_default; + u16 total_steps; + u16 current_step; + u16 nb_sockets; + char name[MAX_TEST_NAME_SIZE]; +}; -/*threads*/ pthread_barrier_t barr; -pthread_t t0, t1; +pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER; + +u32 pkts_in_flight; #endif /* XDPXCEIVER_H */ diff --git a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh new file mode 100755 index 00000000000000..dca8be6092b926 --- /dev/null +++ b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Bridge FDB entries can be offloaded to DSA switches without holding the +# rtnl_mutex. Traditionally this mutex has conferred drivers implicit +# serialization, which means their code paths are not well tested in the +# presence of concurrency. +# This test creates a background task that stresses the FDB by adding and +# deleting an entry many times in a row without the rtnl_mutex held. 
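The pacing_mutex/pacing_cond pair and the pkts_in_flight counter added at the end of xdpxceiver.h above let the Tx and Rx workers throttle each other; the send and receive paths that actually use them appear earlier in this patch. Purely as a sketch of the producer/consumer pattern these primitives support (the threshold, function names and their placement are assumptions, not the patch's code), which also explains why testapp_stats() turns pacing_on off when little or no traffic will be received:

    /* Illustrative sketch only, not the patch's pacing code. */
    static void example_tx_pace(u32 just_sent)
    {
            pthread_mutex_lock(&pacing_mutex);
            pkts_in_flight += just_sent;
            /* stall the producer while too many packets are outstanding;
             * the BATCH_SIZE threshold is an assumption
             */
            while (pkts_in_flight >= BATCH_SIZE)
                    pthread_cond_wait(&pacing_cond, &pacing_mutex);
            pthread_mutex_unlock(&pacing_mutex);
    }

    static void example_rx_drain(u32 just_received)
    {
            pthread_mutex_lock(&pacing_mutex);
            pkts_in_flight -= just_received;
            /* wake the producer as the receiver catches up */
            pthread_cond_signal(&pacing_cond);
            pthread_mutex_unlock(&pacing_mutex);
    }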
+# It then tests the driver resistance to concurrency by calling .ndo_fdb_dump +# (with rtnl_mutex held) from a foreground task. +# Since either the FDB dump or the additions/removals can fail, but the +# additions and removals are performed in deferred as opposed to process +# context, we cannot simply check for user space error codes. + +WAIT_TIME=1 +NUM_NETIFS=1 +REQUIRE_JQ="no" +REQUIRE_MZ="no" +NETIF_CREATE="no" +lib_dir=$(dirname $0)/../../../net/forwarding +source $lib_dir/lib.sh + +cleanup() { + echo "Cleaning up" + kill $pid && wait $pid &> /dev/null + ip link del br0 + echo "Please check kernel log for errors" +} +trap 'cleanup' EXIT + +eth=${NETIFS[p1]} + +ip link del br0 2&>1 >/dev/null || : +ip link add br0 type bridge && ip link set $eth master br0 + +(while :; do + bridge fdb add 00:01:02:03:04:05 dev $eth master static + bridge fdb del 00:01:02:03:04:05 dev $eth master static +done) & +pid=$! + +for i in $(seq 1 50); do + bridge fdb show > /dev/null + sleep 3 + echo "$((${i} * 2))% complete..." +done diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh index a37273473c1b55..d3a891d421abb6 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh @@ -87,6 +87,7 @@ ALL_TESTS=" NUM_NETIFS=4 source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh +source mlxsw_lib.sh h1_create() { @@ -626,8 +627,7 @@ ipv6_redirect_test() ptp_event_test() { - # PTP is only supported on Spectrum-1, for now. - [[ "$DEVLINK_VIDDID" != "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 1 || return # PTP Sync (0) devlink_trap_stats_test "PTP Time-Critical Event Message" "ptp_event" \ @@ -638,8 +638,7 @@ ptp_event_test() ptp_general_test() { - # PTP is only supported on Spectrum-1, for now. 
- [[ "$DEVLINK_VIDDID" != "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 1 || return # PTP Announce (b) devlink_trap_stats_test "PTP General Message" "ptp_general" \ diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh index 508a702f002153..0bd5ffc218acb8 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh @@ -272,13 +272,17 @@ __rate_test() rate_test() { - local id + local last_policer=$(devlink -j -p trap policer show | + jq '[.[]["'$DEVLINK_DEV'"][].policer] | max') - for id in $(devlink_trap_policer_ids_get); do - echo - log_info "Running rate test for policer $id" - __rate_test $id - done + log_info "Running rate test for policer 1" + __rate_test 1 + + log_info "Running rate test for policer $((last_policer / 2))" + __rate_test $((last_policer / 2)) + + log_info "Running rate test for policer $last_policer" + __rate_test $last_policer } __burst_test() @@ -342,13 +346,17 @@ __burst_test() burst_test() { - local id + local last_policer=$(devlink -j -p trap policer show | + jq '[.[]["'$DEVLINK_DEV'"][].policer] | max') + + log_info "Running burst test for policer 1" + __burst_test 1 + + log_info "Running burst test for policer $((last_policer / 2))" + __burst_test $((last_policer / 2)) - for id in $(devlink_trap_policer_ids_get); do - echo - log_info "Running burst size test for policer $id" - __burst_test $id - done + log_info "Running burst test for policer $last_policer" + __burst_test $last_policer } trap cleanup EXIT diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh index 8817851da7a940..e9a82cae8c9a59 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh @@ -13,7 +13,7 @@ # | # +-------------------|-----+ # | SW1 | | -# | $swp1 + | +# | $swp1 + | # | 192.0.2.2/28 | # | | # | + g1a (gre) | @@ -27,8 +27,8 @@ # | # +--|----------------------+ # | | VRF2 | -# | + $rp2 | -# | 198.51.100.2/28 | +# | + $rp2 | +# | 198.51.100.2/28 | # +-------------------------+ lib_dir=$(dirname $0)/../../../net/forwarding @@ -116,12 +116,16 @@ cleanup() forwarding_restore } -ecn_payload_get() +ipip_payload_get() { + local flags=$1; shift + local key=$1; shift + p=$(: - )"0"$( : GRE flags + )"$flags"$( : GRE flags )"0:00:"$( : Reserved + version )"08:00:"$( : ETH protocol type + )"$key"$( : Key )"4"$( : IP version )"5:"$( : IHL )"00:"$( : IP TOS @@ -137,6 +141,11 @@ ecn_payload_get() echo $p } +ecn_payload_get() +{ + echo $(ipip_payload_get "0") +} + ecn_decap_test() { local trap_name="decap_error" @@ -171,31 +180,6 @@ ecn_decap_test() tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower } -ipip_payload_get() -{ - local flags=$1; shift - local key=$1; shift - - p=$(: - )"$flags"$( : GRE flags - )"0:00:"$( : Reserved + version - )"08:00:"$( : ETH protocol type - )"$key"$( : Key - )"4"$( : IP version - )"5:"$( : IHL - )"00:"$( : IP TOS - )"00:14:"$( : IP total length - )"00:00:"$( : IP identification - )"20:00:"$( : IP flags + frag off - )"30:"$( : IP TTL - )"01:"$( : IP proto - )"E7:E6:"$( : IP header csum - )"C0:00:01:01:"$( : IP saddr : 192.0.1.1 - )"C0:00:02:01:"$( : IP daddr : 192.0.2.1 - ) - echo $p -} - no_matching_tunnel_test() { local trap_name="decap_error" @@ -239,7 +223,8 @@ 
decap_error_test() no_matching_tunnel_test "Decap error: Source IP check failed" \ 192.0.2.68 "0" no_matching_tunnel_test \ - "Decap error: Key exists but was not expected" $sip "2" ":E9:" + "Decap error: Key exists but was not expected" $sip "2" \ + "00:00:00:E9:" # Destroy the tunnel and create new one with key __addr_add_del g1 del 192.0.2.65/32 @@ -251,7 +236,8 @@ decap_error_test() no_matching_tunnel_test \ "Decap error: Key does not exist but was expected" $sip "0" no_matching_tunnel_test \ - "Decap error: Packet has a wrong key field" $sip "2" "E8:" + "Decap error: Packet has a wrong key field" $sip "2" \ + "00:00:00:E8:" } trap cleanup EXIT diff --git a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh index cbe50f260a4051..a95856aafd2aba 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh @@ -11,3 +11,53 @@ if [[ ! -v MLXSW_CHIP ]]; then exit 1 fi fi + +MLXSW_SPECTRUM_REV=$(case $MLXSW_CHIP in + mlxsw_spectrum) + echo 1 ;; + mlxsw_spectrum*) + echo ${MLXSW_CHIP#mlxsw_spectrum} ;; + *) + echo "Couldn't determine Spectrum chip revision." \ + > /dev/stderr ;; + esac) + +mlxsw_on_spectrum() +{ + local rev=$1; shift + local op="==" + local rev2=${rev%+} + + if [[ $rev2 != $rev ]]; then + op=">=" + fi + + ((MLXSW_SPECTRUM_REV $op rev2)) +} + +__mlxsw_only_on_spectrum() +{ + local rev=$1; shift + local caller=$1; shift + local src=$1; shift + + if ! mlxsw_on_spectrum "$rev"; then + log_test_skip $src:$caller "(Spectrum-$rev only)" + return 1 + fi +} + +mlxsw_only_on_spectrum() +{ + local caller=${FUNCNAME[1]} + local src=${BASH_SOURCE[1]} + local rev + + for rev in "$@"; do + if __mlxsw_only_on_spectrum "$rev" "$caller" "$src"; then + return 0 + fi + done + + return 1 +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profile_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profile_scale.sh new file mode 100644 index 00000000000000..71e7681f15f659 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profile_scale.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for RIF MAC profiles resource. The test adds VLAN netdevices according to +# the maximum number of RIF MAC profiles, sets each of them with a random +# MAC address, and checks that eventually the number of occupied RIF MAC +# profiles equals the maximum number of RIF MAC profiles. + + +RIF_MAC_PROFILE_NUM_NETIFS=2 + +rif_mac_profiles_create() +{ + local count=$1; shift + local should_fail=$1; shift + local batch_file="$(mktemp)" + + for ((i = 1; i <= count; i++)); do + vlan=$(( i*10 )) + m=$(( i*11 )) + + cat >> $batch_file <<-EOF + link add link $h1 name $h1.$vlan \ + address 00:$m:$m:$m:$m:$m type vlan id $vlan + address add 192.0.$m.1/24 dev $h1.$vlan + EOF + done + + ip -b $batch_file &> /dev/null + check_err_fail $should_fail $? "RIF creation" + + rm -f $batch_file +} + +rif_mac_profile_test() +{ + local count=$1; shift + local should_fail=$1; shift + + rif_mac_profiles_create $count $should_fail + + occ=$(devlink -j resource show $DEVLINK_DEV \ + | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]') + + [[ $occ -eq $count ]] + check_err_fail $should_fail $? "Attempt to use $count profiles (actual result $occ)" +} + +rif_mac_profile_setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + # Disable IPv6 on the two interfaces to avoid IPv6 link-local addresses + # being generated and RIFs being created. 
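The new mlxsw_lib.sh helpers above replace the open-coded PCI VID:DID checks: mlxsw_on_spectrum compares against the Spectrum generation parsed from MLXSW_CHIP, and a trailing "+" means that generation or newer, with mlxsw_only_on_spectrum logging a skip when the check fails. A hedged sketch of how a test would typically gate itself (the helper names are the ones defined above, the test body is made up):

    example_spectrum2_only_test()
    {
            # skip (and log why) on Spectrum-1 platforms
            mlxsw_only_on_spectrum 2+ || return

            RET=0
            # ... exercise a Spectrum-2+ only feature here ...
            log_test "example Spectrum-2+ only feature"
    }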
+ sysctl_set net.ipv6.conf.$h1.disable_ipv6 1 + sysctl_set net.ipv6.conf.$h2.disable_ipv6 1 + + ip link set $h1 up + ip link set $h2 up +} + +rif_mac_profile_cleanup() +{ + pre_cleanup + + ip link set $h2 down + ip link set $h1 down + + sysctl_restore net.ipv6.conf.$h2.disable_ipv6 + sysctl_restore net.ipv6.conf.$h1.disable_ipv6 +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles.sh new file mode 100755 index 00000000000000..c18340cee55d58 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles.sh @@ -0,0 +1,213 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +lib_dir=$(dirname $0)/../../../net/forwarding + +ALL_TESTS=" + mac_profile_test +" +NUM_NETIFS=4 +source $lib_dir/lib.sh +source $lib_dir/tc_common.sh +source $lib_dir/devlink_lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 + ip route add 198.51.100.0/24 vrf v$h1 nexthop via 192.0.2.2 + + tc qdisc add dev $h1 ingress +} + +h1_destroy() +{ + tc qdisc del dev $h1 ingress + + ip route del 198.51.100.0/24 vrf v$h1 + simple_if_fini $h1 192.0.2.1/24 +} + +h2_create() +{ + simple_if_init $h2 198.51.100.1/24 + ip route add 192.0.2.0/24 vrf v$h2 nexthop via 198.51.100.2 + + tc qdisc add dev $h2 ingress +} + +h2_destroy() +{ + tc qdisc del dev $h2 ingress + + ip route del 192.0.2.0/24 vrf v$h2 + simple_if_fini $h2 198.51.100.1/24 +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + + tc qdisc add dev $rp1 clsact + tc qdisc add dev $rp2 clsact + ip address add 192.0.2.2/24 dev $rp1 + ip address add 198.51.100.2/24 dev $rp2 +} + +router_destroy() +{ + ip address del 198.51.100.2/24 dev $rp2 + ip address del 192.0.2.2/24 dev $rp1 + tc qdisc del dev $rp2 clsact + tc qdisc del dev $rp1 clsact + + ip link set dev $rp2 down + ip link set dev $rp1 down +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +h1_to_h2() +{ + local test_name=$@; shift + local smac=$(mac_get $rp2) + + RET=0 + + # Replace neighbour to avoid first packet being forwarded in software + ip neigh replace dev $rp2 198.51.100.1 lladdr $(mac_get $h2) + + # Add a filter to ensure that packets are forwarded in hardware. Cannot + # match on source MAC because it is not set in eACL after routing + tc filter add dev $rp2 egress proto ip pref 1 handle 101 \ + flower skip_sw ip_proto udp src_port 12345 dst_port 54321 \ + action pass + + # Add a filter to ensure that packets are received with the correct + # source MAC + tc filter add dev $h2 ingress proto ip pref 1 handle 101 \ + flower skip_sw src_mac $smac ip_proto udp src_port 12345 \ + dst_port 54321 action pass + + $MZ $h1 -a own -b $(mac_get $rp1) -t udp "sp=12345,dp=54321" \ + -A 192.0.2.1 -B 198.51.100.1 -c 10 -p 100 -d 1msec -q + + tc_check_packets "dev $rp2 egress" 101 10 + check_err $? "packets not forwarded in hardware" + + tc_check_packets "dev $h2 ingress" 101 10 + check_err $? 
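As a worked example of what rif_mac_profiles_create() above generates (the near-identical create_max_rif_mac_profiles() appears later in this patch): for count=2, the heredoc produces a batch file along the following lines, with $h1 expanded to the actual port name, the VLAN ID being i*10 and the MAC/subnet byte being i*11, and ip -b then executes it in a single invocation:

    link add link $h1 name $h1.10 \
            address 00:11:11:11:11:11 type vlan id 10
    address add 192.0.11.1/24 dev $h1.10
    link add link $h1 name $h1.20 \
            address 00:22:22:22:22:22 type vlan id 20
    address add 192.0.22.1/24 dev $h1.20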
"packets not forwarded with correct source mac" + + log_test "h1->h2: $test_name" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower + ip neigh del dev $rp2 198.51.100.1 lladdr $(mac_get $h2) +} + +h2_to_h1() +{ + local test_name=$@; shift + local rp1_mac=$(mac_get $rp1) + + RET=0 + + ip neigh replace dev $rp1 192.0.2.1 lladdr $(mac_get $h1) + + tc filter add dev $rp1 egress proto ip pref 1 handle 101 \ + flower skip_sw ip_proto udp src_port 54321 dst_port 12345 \ + action pass + + tc filter add dev $h1 ingress proto ip pref 1 handle 101 \ + flower skip_sw src_mac $rp1_mac ip_proto udp src_port 54321 \ + dst_port 12345 action pass + + $MZ $h2 -a own -b $(mac_get $rp2) -t udp "sp=54321,dp=12345" \ + -A 198.51.100.1 -B 192.0.2.1 -c 10 -p 100 -d 1msec -q + + tc_check_packets "dev $rp1 egress" 101 10 + check_err $? "packets not forwarded in hardware" + + tc_check_packets "dev $h1 ingress" 101 10 + check_err $? "packets not forwarded with correct source mac" + + log_test "h2->h1: $test_name" + + tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $rp1 egress protocol ip pref 1 handle 101 flower + ip neigh del dev $rp1 192.0.2.1 lladdr $(mac_get $h1) +} + +smac_test() +{ + local test_name=$@; shift + + # Test that packets forwarded to $h2 via $rp2 are forwarded with the + # current source MAC of $rp2 + h1_to_h2 $test_name + + # Test that packets forwarded to $h1 via $rp1 are forwarded with the + # current source MAC of $rp1. This MAC is never changed during the test, + # but given the shared nature of MAC profile, the point is to see that + # changes to the MAC of $rp2 do not affect that of $rp1 + h2_to_h1 $test_name +} + +mac_profile_test() +{ + local rp2_mac=$(mac_get $rp2) + + # Test behavior when the RIF backing $rp2 is transitioned to use + # a new MAC profile + ip link set dev $rp2 addr 00:11:22:33:44:55 + smac_test "new mac profile" + + # Test behavior when the MAC profile used by the RIF is edited + ip link set dev $rp2 address 00:22:22:22:22:22 + smac_test "edit mac profile" + + # Restore original MAC + ip link set dev $rp2 addr $rp2_mac +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +mac_profiles=$(devlink_resource_size_get rif_mac_profiles) +if [[ $mac_profiles -ne 1 ]]; then + tests_run +fi + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh new file mode 100755 index 00000000000000..b513f64d9092d1 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +lib_dir=$(dirname $0)/../../../net/forwarding + +ALL_TESTS=" + rif_mac_profile_edit_test +" +NUM_NETIFS=2 +source $lib_dir/lib.sh +source $lib_dir/devlink_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + # Disable IPv6 on the two interfaces to avoid IPv6 link-local addresses + # being generated and RIFs being created + sysctl_set net.ipv6.conf.$h1.disable_ipv6 1 + sysctl_set net.ipv6.conf.$h2.disable_ipv6 1 + + ip link set $h1 up + ip link set $h2 up +} + +cleanup() +{ + pre_cleanup + + ip link set $h2 down + ip link set $h1 down + + sysctl_restore net.ipv6.conf.$h2.disable_ipv6 + sysctl_restore net.ipv6.conf.$h1.disable_ipv6 + + # Reload in order to clean all the RIFs and RIF MAC profiles created + devlink_reload +} + +create_max_rif_mac_profiles() +{ + local count=$1; 
shift + local batch_file="$(mktemp)" + + for ((i = 1; i <= count; i++)); do + vlan=$(( i*10 )) + m=$(( i*11 )) + + cat >> $batch_file <<-EOF + link add link $h1 name $h1.$vlan \ + address 00:$m:$m:$m:$m:$m type vlan id $vlan + address add 192.0.$m.1/24 dev $h1.$vlan + EOF + done + + ip -b $batch_file &> /dev/null + rm -f $batch_file +} + +rif_mac_profile_replacement_test() +{ + local h1_10_mac=$(mac_get $h1.10) + + RET=0 + + ip link set $h1.10 address 00:12:34:56:78:99 + check_err $? + + log_test "RIF MAC profile replacement" + + ip link set $h1.10 address $h1_10_mac +} + +rif_mac_profile_shared_replacement_test() +{ + local count=$1; shift + local i=$((count + 1)) + local vlan=$(( i*10 )) + local m=11 + + RET=0 + + # Create a VLAN netdevice that has the same MAC as the first one. + ip link add link $h1 name $h1.$vlan address 00:$m:$m:$m:$m:$m \ + type vlan id $vlan + ip address add 192.0.$m.1/24 dev $h1.$vlan + + # MAC replacement should fail because all the MAC profiles are in use + # and the profile is shared between multiple RIFs + m=$(( i*11 )) + ip link set $h1.$vlan address 00:$m:$m:$m:$m:$m &> /dev/null + check_fail $? + + log_test "RIF MAC profile shared replacement" + + ip link del dev $h1.$vlan +} + +rif_mac_profile_edit_test() +{ + local count=$(devlink_resource_size_get rif_mac_profiles) + + create_max_rif_mac_profiles $count + + rif_mac_profile_replacement_test + rif_mac_profile_shared_replacement_test $count +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index a217f9f6775bc4..04f03ae9d8fbe5 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh @@ -10,9 +10,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding ALL_TESTS=" - rif_set_addr_test rif_vrf_set_addr_test - rif_inherit_bridge_addr_test rif_non_inherit_bridge_addr_test vlan_interface_deletion_test bridge_deletion_test @@ -60,55 +58,6 @@ cleanup() ip link set dev $swp1 down } -rif_set_addr_test() -{ - local swp1_mac=$(mac_get $swp1) - local swp2_mac=$(mac_get $swp2) - - RET=0 - - # $swp1 and $swp2 likely got their IPv6 local addresses already, but - # here we need to test the transition to RIF. - ip addr flush dev $swp1 - ip addr flush dev $swp2 - sleep .1 - - ip addr add dev $swp1 192.0.2.1/28 - check_err $? - - ip link set dev $swp1 addr 00:11:22:33:44:55 - check_err $? - - # IP address enablement should be rejected if the MAC address prefix - # doesn't match other RIFs. - ip addr add dev $swp2 192.0.2.2/28 &>/dev/null - check_fail $? "IP address addition passed for a device with a wrong MAC" - ip addr add dev $swp2 192.0.2.2/28 2>&1 >/dev/null \ - | grep -q mlxsw_spectrum - check_err $? "no extack for IP address addition" - - ip link set dev $swp2 addr 00:11:22:33:44:66 - check_err $? - ip addr add dev $swp2 192.0.2.2/28 &>/dev/null - check_err $? - - # Change of MAC address of a RIF should be forbidden if the new MAC - # doesn't share the prefix with other MAC addresses. - ip link set dev $swp2 addr 00:11:22:33:00:66 &>/dev/null - check_fail $? "change of MAC address passed for a wrong MAC" - ip link set dev $swp2 addr 00:11:22:33:00:66 2>&1 >/dev/null \ - | grep -q mlxsw_spectrum - check_err $? 
"no extack for MAC address change" - - log_test "RIF - bad MAC change" - - ip addr del dev $swp2 192.0.2.2/28 - ip addr del dev $swp1 192.0.2.1/28 - - ip link set dev $swp2 addr $swp2_mac - ip link set dev $swp1 addr $swp1_mac -} - rif_vrf_set_addr_test() { # Test that it is possible to set an IP address on a VRF upper despite @@ -128,45 +77,6 @@ rif_vrf_set_addr_test() ip link del dev vrf-test } -rif_inherit_bridge_addr_test() -{ - RET=0 - - # Create first RIF - ip addr add dev $swp1 192.0.2.1/28 - check_err $? - - # Create a FID RIF - ip link add name br1 up type bridge vlan_filtering 0 - ip link set dev $swp2 master br1 - ip addr add dev br1 192.0.2.17/28 - check_err $? - - # Prepare a device with a low MAC address - ip link add name d up type dummy - ip link set dev d addr 00:11:22:33:44:55 - - # Attach the device to br1. That prompts bridge address change, which - # should be vetoed, thus preventing the attachment. - ip link set dev d master br1 &>/dev/null - check_fail $? "Device with low MAC was permitted to attach a bridge with RIF" - ip link set dev d master br1 2>&1 >/dev/null \ - | grep -q mlxsw_spectrum - check_err $? "no extack for bridge attach rejection" - - ip link set dev $swp2 addr 00:11:22:33:44:55 &>/dev/null - check_fail $? "Changing swp2's MAC address permitted" - ip link set dev $swp2 addr 00:11:22:33:44:55 2>&1 >/dev/null \ - | grep -q mlxsw_spectrum - check_err $? "no extack for bridge port MAC address change rejection" - - log_test "RIF - attach port with bad MAC to bridge" - - ip link del dev d - ip link del dev br1 - ip addr del dev $swp1 192.0.2.1/28 -} - rif_non_inherit_bridge_addr_test() { local swp2_mac=$(mac_get $swp2) @@ -779,7 +689,7 @@ nexthop_obj_offload_test() setup_wait ip nexthop add id 1 via 192.0.2.2 dev $swp1 - ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 busywait "$TIMEOUT" wait_for_offload \ @@ -791,7 +701,7 @@ nexthop_obj_offload_test() ip nexthop show id 1 check_err $? 
"nexthop marked as offloaded after setting neigh to failed state" - ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 busywait "$TIMEOUT" wait_for_offload \ ip nexthop show id 1 @@ -828,11 +738,11 @@ nexthop_obj_group_offload_test() ip nexthop add id 1 via 192.0.2.2 dev $swp1 ip nexthop add id 2 via 2001:db8:1::2 dev $swp1 ip nexthop add id 10 group 1/2 - ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 - ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 - ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 busywait "$TIMEOUT" wait_for_offload \ @@ -888,11 +798,11 @@ nexthop_obj_bucket_offload_test() ip nexthop add id 1 via 192.0.2.2 dev $swp1 ip nexthop add id 2 via 2001:db8:1::2 dev $swp1 ip nexthop add id 10 group 1/2 type resilient buckets 32 idle_timer 0 - ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 - ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 - ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 busywait "$TIMEOUT" wait_for_offload \ @@ -921,7 +831,7 @@ nexthop_obj_bucket_offload_test() check_err $? "nexthop bucket not marked as offloaded after revalidating nexthop" # Revalidate nexthop id 2 by changing its neighbour - ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 2001:db8:1::2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 busywait "$TIMEOUT" wait_for_offload \ ip nexthop bucket show nhid 2 @@ -971,9 +881,9 @@ nexthop_obj_route_offload_test() setup_wait ip nexthop add id 1 via 192.0.2.2 dev $swp1 - ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.2 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 - ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud reachable \ + ip neigh replace 192.0.2.3 lladdr 00:11:22:33:44:55 nud perm \ dev $swp1 ip route replace 198.51.100.0/24 nhid 1 diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh new file mode 100755 index 00000000000000..071a33d10c20b6 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_offload.sh @@ -0,0 +1,290 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test qdisc offload indication + + +ALL_TESTS=" + test_root + test_port_tbf + test_etsprio + test_etsprio_port_tbf +" +NUM_NETIFS=1 +lib_dir=$(dirname $0)/../../../net/forwarding +source $lib_dir/lib.sh + +check_not_offloaded() +{ + local handle=$1; shift + local h + local offloaded + + h=$(qdisc_stats_get $h1 "$handle" .handle) + [[ $h == '"'$handle'"' ]] + check_err $? "Qdisc with handle $handle does not exist" + + offloaded=$(qdisc_stats_get $h1 "$handle" .offloaded) + [[ $offloaded == true ]] + check_fail $? "Qdisc with handle $handle offloaded, but should not be" +} + +check_all_offloaded() +{ + local handle=$1; shift + + if [[ ! 
-z $handle ]]; then + local offloaded=$(qdisc_stats_get $h1 "$handle" .offloaded) + [[ $offloaded == true ]] + check_err $? "Qdisc with handle $handle not offloaded" + fi + + local unoffloaded=$(tc q sh dev $h1 invisible | + grep -v offloaded | + sed s/root/parent\ root/ | + cut -d' ' -f 5) + [[ -z $unoffloaded ]] + check_err $? "Qdiscs with following parents not offloaded: $unoffloaded" + + pre_cleanup +} + +with_ets() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle \ + ets bands 8 priomap 7 6 5 4 3 2 1 0 + "$@" + tc qdisc del dev $h1 $locus +} + +with_prio() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle \ + prio bands 8 priomap 7 6 5 4 3 2 1 0 + "$@" + tc qdisc del dev $h1 $locus +} + +with_red() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle \ + red limit 1000000 min 200000 max 300000 probability 0.5 avpkt 1500 + "$@" + tc qdisc del dev $h1 $locus +} + +with_tbf() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle \ + tbf rate 400Mbit burst 128K limit 1M + "$@" + tc qdisc del dev $h1 $locus +} + +with_pfifo() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle pfifo limit 100K + "$@" + tc qdisc del dev $h1 $locus +} + +with_bfifo() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle bfifo limit 100K + "$@" + tc qdisc del dev $h1 $locus +} + +with_drr() +{ + local handle=$1; shift + local locus=$1; shift + + tc qdisc add dev $h1 $locus handle $handle drr + "$@" + tc qdisc del dev $h1 $locus +} + +with_qdiscs() +{ + local handle=$1; shift + local parent=$1; shift + local kind=$1; shift + local next_handle=$((handle * 2)) + local locus; + + if [[ $kind == "--" ]]; then + local cmd=$1; shift + $cmd $(printf %x: $parent) "$@" + else + if ((parent == 0)); then + locus=root + else + locus=$(printf "parent %x:1" $parent) + fi + + with_$kind $(printf %x: $handle) "$locus" \ + with_qdiscs $next_handle $handle "$@" + fi +} + +get_name() +{ + local parent=$1; shift + local name=$(echo "" "${@^^}" | tr ' ' -) + + if ((parent != 0)); then + kind=$(qdisc_stats_get $h1 $parent: .kind) + kind=${kind%\"} + kind=${kind#\"} + name="-${kind^^}$name" + fi + + echo root$name +} + +do_test_offloaded() +{ + local handle=$1; shift + local parent=$1; shift + + RET=0 + with_qdiscs $handle $parent "$@" -- check_all_offloaded + log_test $(get_name $parent "$@")" offloaded" +} + +do_test_nooffload() +{ + local handle=$1; shift + local parent=$1; shift + + local name=$(echo "${@^^}" | tr ' ' -) + local kind + + RET=0 + with_qdiscs $handle $parent "$@" -- check_not_offloaded + log_test $(get_name $parent "$@")" not offloaded" +} + +do_test_combinations() +{ + local handle=$1; shift + local parent=$1; shift + + local cont + local leaf + local fifo + + for cont in "" ets prio; do + for leaf in "" red tbf "red tbf" "tbf red"; do + for fifo in "" pfifo bfifo; do + if [[ -z "$cont$leaf$fifo" ]]; then + continue + fi + do_test_offloaded $handle $parent \ + $cont $leaf $fifo + done + done + done + + for cont in ets prio; do + for leaf in red tbf; do + do_test_nooffload $handle $parent $cont red tbf $leaf + do_test_nooffload $handle $parent $cont tbf red $leaf + done + for leaf in "red red" "tbf tbf"; do + do_test_nooffload $handle $parent $cont $leaf + done + done + + do_test_nooffload $handle $parent drr +} + +test_root() +{ + 
do_test_combinations 1 0 +} + +test_port_tbf() +{ + with_tbf 1: root \ + do_test_combinations 8 1 +} + +do_test_etsprio() +{ + local parent=$1; shift + local tbfpfx=$1; shift + local cont + + for cont in ets prio; do + RET=0 + with_$cont 8: "$parent" \ + with_red 11: "parent 8:1" \ + with_red 12: "parent 8:2" \ + with_tbf 13: "parent 8:3" \ + with_tbf 14: "parent 8:4" \ + check_all_offloaded + log_test "root$tbfpfx-ETS-{RED,TBF} offloaded" + + RET=0 + with_$cont 8: "$parent" \ + with_red 81: "parent 8:1" \ + with_tbf 811: "parent 81:1" \ + with_tbf 84: "parent 8:4" \ + with_red 841: "parent 84:1" \ + check_all_offloaded + log_test "root$tbfpfx-ETS-{RED-TBF,TBF-RED} offloaded" + + RET=0 + with_$cont 8: "$parent" \ + with_red 81: "parent 8:1" \ + with_tbf 811: "parent 81:1" \ + with_bfifo 8111: "parent 811:1" \ + with_tbf 82: "parent 8:2" \ + with_red 821: "parent 82:1" \ + with_bfifo 8211: "parent 821:1" \ + check_all_offloaded + log_test "root$tbfpfx-ETS-{RED-TBF-bFIFO,TBF-RED-bFIFO} offloaded" + done +} + +test_etsprio() +{ + do_test_etsprio root "" +} + +test_etsprio_port_tbf() +{ + with_tbf 1: root \ + do_test_etsprio "parent 1:1" "-TBF" +} + +cleanup() +{ + tc qdisc del dev $h1 root &>/dev/null +} + +trap cleanup EXIT +h1=${NETIFS[p1]} +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 33ddd01689bee9..f260f01db0e807 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -73,6 +73,7 @@ CHECK_TC="yes" lib_dir=$(dirname $0)/../../../net/forwarding source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh +source mlxsw_lib.sh source qos_lib.sh ipaddr() @@ -331,6 +332,14 @@ get_nmarked() ethtool_stats_get $swp3 ecn_marked } +get_qdisc_nmarked() +{ + local vlan=$1; shift + + busywait_for_counter 1100 +1 \ + qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .marked +} + get_qdisc_npackets() { local vlan=$1; shift @@ -384,14 +393,15 @@ build_backlog() check_marking() { + local get_nmarked=$1; shift local vlan=$1; shift local cond=$1; shift local npackets_0=$(get_qdisc_npackets $vlan) - local nmarked_0=$(get_nmarked $vlan) + local nmarked_0=$($get_nmarked $vlan) sleep 5 local npackets_1=$(get_qdisc_npackets $vlan) - local nmarked_1=$(get_nmarked $vlan) + local nmarked_1=$($get_nmarked $vlan) local nmarked_d=$((nmarked_1 - nmarked_0)) local npackets_d=$((npackets_1 - npackets_0)) @@ -404,6 +414,7 @@ check_marking() ecn_test_common() { local name=$1; shift + local get_nmarked=$1; shift local vlan=$1; shift local limit=$1; shift local backlog @@ -416,7 +427,7 @@ ecn_test_common() RET=0 backlog=$(build_backlog $vlan $((2 * limit / 3)) udp) check_err $? "Could not build the requested backlog" - pct=$(check_marking $vlan "== 0") + pct=$(check_marking "$get_nmarked" $vlan "== 0") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." log_test "TC $((vlan - 10)): $name backlog < limit" @@ -426,22 +437,23 @@ ecn_test_common() RET=0 backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01) check_err $? "Could not build the requested backlog" - pct=$(check_marking $vlan ">= 95") + pct=$(check_marking "$get_nmarked" $vlan ">= 95") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected >= 95." 
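The with_*() wrappers and the recursive with_qdiscs() helper in sch_offload.sh above compose arbitrary qdisc stacks: each wrapper adds its qdisc, runs the remaining argument list, and deletes the qdisc on the way out, while handles double at each recursion level and are printed in hex. As a worked illustration, do_test_offloaded 8 1 ets red pfifo (the port-TBF variant, so the parent is 1:1) expands to roughly this sequence:

    tc qdisc add dev $h1 parent 1:1 handle 8: \
            ets bands 8 priomap 7 6 5 4 3 2 1 0
    tc qdisc add dev $h1 parent 8:1 handle 10: \
            red limit 1000000 min 200000 max 300000 probability 0.5 avpkt 1500
    tc qdisc add dev $h1 parent 10:1 handle 20: pfifo limit 100K
    check_all_offloaded 20:
    # ...followed by the matching "tc qdisc del" calls in reverse order as
    # the recursion unwinds; log_test reports "root-TBF-ETS-RED-PFIFO offloaded".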
log_test "TC $((vlan - 10)): $name backlog > limit" } -do_ecn_test() +__do_ecn_test() { + local get_nmarked=$1; shift local vlan=$1; shift local limit=$1; shift - local name=ECN + local name=${1-ECN}; shift start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ $h3_mac tos=0x01 sleep 1 - ecn_test_common "$name" $vlan $limit + ecn_test_common "$name" "$get_nmarked" $vlan $limit # Up there we saw that UDP gets accepted when backlog is below the # limit. Now that it is above, it should all get dropped, and backlog @@ -455,6 +467,23 @@ do_ecn_test() sleep 1 } +do_ecn_test() +{ + local vlan=$1; shift + local limit=$1; shift + + __do_ecn_test get_nmarked "$vlan" "$limit" +} + +do_ecn_test_perband() +{ + local vlan=$1; shift + local limit=$1; shift + + mlxsw_only_on_spectrum 3+ || return + __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN" +} + do_ecn_nodrop_test() { local vlan=$1; shift @@ -465,7 +494,7 @@ do_ecn_nodrop_test() $h3_mac tos=0x01 sleep 1 - ecn_test_common "$name" $vlan $limit + ecn_test_common "$name" get_nmarked $vlan $limit # Up there we saw that UDP gets accepted when backlog is below the # limit. Now that it is above, in nodrop mode, make sure it goes to @@ -495,7 +524,7 @@ do_red_test() RET=0 backlog=$(build_backlog $vlan $((2 * limit / 3)) tcp tos=0x01) check_err $? "Could not build the requested backlog" - pct=$(check_marking $vlan "== 0") + pct=$(check_marking get_nmarked $vlan "== 0") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." log_test "TC $((vlan - 10)): RED backlog < limit" @@ -503,7 +532,7 @@ do_red_test() RET=0 backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01) check_fail $? "Traffic went into backlog instead of being early-dropped" - pct=$(check_marking $vlan "== 0") + pct=$(check_marking get_nmarked $vlan "== 0") check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." local diff=$((limit - backlog)) pct=$((100 * diff / limit)) @@ -544,6 +573,55 @@ do_mc_backlog_test() log_test "TC $((vlan - 10)): Qdisc reports MC backlog" } +do_mark_test() +{ + local vlan=$1; shift + local limit=$1; shift + local subtest=$1; shift + local fetch_counter=$1; shift + local should_fail=$1; shift + local base + + mlxsw_only_on_spectrum 2+ || return + + RET=0 + + start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \ + $h3_mac tos=0x01 + + # Create a bit of a backlog and observe no mirroring due to marks. + qevent_rule_install_$subtest + + build_backlog $vlan $((2 * limit / 3)) tcp tos=0x01 >/dev/null + + base=$($fetch_counter) + count=$(busywait 1100 until_counter_is ">= $((base + 1))" \ + $fetch_counter) + check_fail $? "Spurious packets ($base -> $count) observed without buffer pressure" + + # Above limit, everything should be mirrored, we should see lots of + # packets. + build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01 >/dev/null + busywait_for_counter 1100 +10000 \ + $fetch_counter > /dev/null + check_err_fail "$should_fail" $? "ECN-marked packets $subtest'd" + + # When the rule is uninstalled, there should be no mirroring. + qevent_rule_uninstall_$subtest + busywait_for_counter 1100 +10 \ + $fetch_counter > /dev/null + check_fail $? 
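The new do_ecn_test_perband() above validates ECN marking against the per-band "marked" counter of the offloaded RED qdisc (hence the Spectrum-3+ gate), instead of the port-wide ethtool ecn_marked statistic used by the existing do_ecn_test(). Roughly, the counter fetch boils down to reading the qdisc's JSON statistics, e.g. for the TC0 RED qdisc that sch_red_ets.sh installs with handle 108: (the exact handle comes from get_qdisc_handle):

    # rough equivalent of qdisc_stats_get $swp3 108: .marked
    tc -j -s qdisc show dev $swp3 \
            | jq '.[] | select(.handle == "108:") | .marked'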
"Spurious packets observed after uninstall" + + if ((should_fail)); then + log_test "TC $((vlan - 10)): marked packets not $subtest'd" + else + log_test "TC $((vlan - 10)): marked packets $subtest'd" + fi + + stop_traffic + sleep 1 +} + do_drop_test() { local vlan=$1; shift @@ -551,10 +629,10 @@ do_drop_test() local trigger=$1; shift local subtest=$1; shift local fetch_counter=$1; shift - local backlog local base local now - local pct + + mlxsw_only_on_spectrum 2+ || return RET=0 @@ -628,6 +706,22 @@ do_drop_mirror_test() tc filter del dev $h2 ingress pref 1 handle 101 flower } +do_mark_mirror_test() +{ + local vlan=$1; shift + local limit=$1; shift + + tc filter add dev $h2 ingress pref 1 handle 101 prot ip \ + flower skip_sw ip_proto tcp \ + action drop + + do_mark_test "$vlan" "$limit" mirror \ + qevent_counter_fetch_mirror \ + $(: should_fail=)0 + + tc filter del dev $h2 ingress pref 1 handle 101 flower +} + qevent_rule_install_trap() { tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ @@ -655,3 +749,14 @@ do_drop_trap_test() do_drop_test "$vlan" "$limit" "$trap_name" trap \ "qevent_counter_fetch_trap $trap_name" } + +qevent_rule_install_trap_fwd() +{ + tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ + action trap_fwd hw_stats disabled +} + +qevent_rule_uninstall_trap_fwd() +{ + tc filter del block 10 pref 1234 handle 102 matchall +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index f3ef3274f9b3de..1e5ad320943664 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -4,11 +4,13 @@ ALL_TESTS=" ping_ipv4 ecn_test + ecn_test_perband ecn_nodrop_test red_test mc_backlog_test red_mirror_test red_trap_test + ecn_mirror_test " : ${QDISC:=ets} source sch_red_core.sh @@ -21,28 +23,60 @@ source sch_red_core.sh BACKLOG1=200000 BACKLOG2=500000 -install_qdisc() +install_root_qdisc() { - local -a args=("$@") - tc qdisc add dev $swp3 root handle 10: $QDISC \ bands 8 priomap 7 6 5 4 3 2 1 0 +} + +install_qdisc_tc0() +{ + local -a args=("$@") + tc qdisc add dev $swp3 parent 10:8 handle 108: red \ limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \ probability 1.0 avpkt 8000 burst 38 "${args[@]}" +} + +install_qdisc_tc1() +{ + local -a args=("$@") + tc qdisc add dev $swp3 parent 10:7 handle 107: red \ limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \ probability 1.0 avpkt 8000 burst 63 "${args[@]}" +} + +install_qdisc() +{ + install_root_qdisc + install_qdisc_tc0 "$@" + install_qdisc_tc1 "$@" sleep 1 } -uninstall_qdisc() +uninstall_qdisc_tc0() { - tc qdisc del dev $swp3 parent 10:7 tc qdisc del dev $swp3 parent 10:8 +} + +uninstall_qdisc_tc1() +{ + tc qdisc del dev $swp3 parent 10:7 +} + +uninstall_root_qdisc() +{ tc qdisc del dev $swp3 root } +uninstall_qdisc() +{ + uninstall_qdisc_tc0 + uninstall_qdisc_tc1 + uninstall_root_qdisc +} + ecn_test() { install_qdisc ecn @@ -53,6 +87,16 @@ ecn_test() uninstall_qdisc } +ecn_test_perband() +{ + install_qdisc ecn + + do_ecn_test_perband 10 $BACKLOG1 + do_ecn_test_perband 11 $BACKLOG2 + + uninstall_qdisc +} + ecn_nodrop_test() { install_qdisc ecn nodrop @@ -112,6 +156,16 @@ red_trap_test() uninstall_qdisc } +ecn_mirror_test() +{ + install_qdisc ecn qevent mark block 10 + + do_mark_mirror_test 10 $BACKLOG1 + do_mark_mirror_test 11 $BACKLOG2 + + uninstall_qdisc +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh 
b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh index ede9c38d3effec..d79a82f317d297 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh @@ -4,6 +4,7 @@ ALL_TESTS=" ping_ipv4 ecn_test + ecn_test_perband ecn_nodrop_test red_test mc_backlog_test @@ -35,6 +36,13 @@ ecn_test() uninstall_qdisc } +ecn_test_perband() +{ + install_qdisc ecn + do_ecn_test_perband 10 $BACKLOG + uninstall_qdisc +} + ecn_nodrop_test() { install_qdisc ecn nodrop diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh new file mode 100755 index 00000000000000..f62ce479c266eb --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/devlink_trap_tunnel_ipip6.sh @@ -0,0 +1,250 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test devlink-trap tunnel exceptions functionality over mlxsw. +# Check all exception traps to make sure they are triggered under the right +# conditions. + +# +-------------------------+ +# | H1 | +# | $h1 + | +# | 2001:db8:1::1/64 | | +# +-------------------|-----+ +# | +# +-------------------|-----+ +# | SW1 | | +# | $swp1 + | +# | 2001:db8:1::2/64 | +# | | +# | + g1 (ip6gre) | +# | loc=2001:db8:3::1 | +# | rem=2001:db8:3::2 | +# | tos=inherit | +# | | +# | + $rp1 | +# | | 2001:db8:10::1/64 | +# +--|----------------------+ +# | +# +--|----------------------+ +# | | VRF2 | +# | + $rp2 | +# | 2001:db8:10::2/64 | +# +-------------------------+ + +lib_dir=$(dirname $0)/../../../../net/forwarding + +ALL_TESTS=" + decap_error_test +" + +NUM_NETIFS=4 +source $lib_dir/lib.sh +source $lib_dir/tc_common.sh +source $lib_dir/devlink_lib.sh + +h1_create() +{ + simple_if_init $h1 2001:db8:1::1/64 +} + +h1_destroy() +{ + simple_if_fini $h1 2001:db8:1::1/64 +} + +vrf2_create() +{ + simple_if_init $rp2 2001:db8:10::2/64 +} + +vrf2_destroy() +{ + simple_if_fini $rp2 2001:db8:10::2/64 +} + +switch_create() +{ + ip link set dev $swp1 up + __addr_add_del $swp1 add 2001:db8:1::2/64 + tc qdisc add dev $swp1 clsact + + tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \ + ttl inherit + ip link set dev g1 up + __addr_add_del g1 add 2001:db8:3::1/128 + + ip link set dev $rp1 up + __addr_add_del $rp1 add 2001:db8:10::1/64 +} + +switch_destroy() +{ + __addr_add_del $rp1 del 2001:db8:10::1/64 + ip link set dev $rp1 down + + __addr_add_del g1 del 2001:db8:3::1/128 + ip link set dev g1 down + tunnel_destroy g1 + + tc qdisc del dev $swp1 clsact + __addr_add_del $swp1 del 2001:db8:1::2/64 + ip link set dev $swp1 down +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + rp1=${NETIFS[p3]} + rp2=${NETIFS[p4]} + + forwarding_enable + vrf_prepare + h1_create + switch_create + vrf2_create +} + +cleanup() +{ + pre_cleanup + + vrf2_destroy + switch_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +ipip_payload_get() +{ + local saddr="20:01:0d:b8:00:02:00:00:00:00:00:00:00:00:00:01" + local daddr="20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:01" + local flags=$1; shift + local key=$1; shift + + p=$(: + )"$flags"$( : GRE flags + )"0:00:"$( : Reserved + version + )"86:dd:"$( : ETH protocol type + )"$key"$( : Key + )"6"$( : IP version + )"0:0"$( : Traffic class + )"0:00:00:"$( : Flow label + )"00:00:"$( : Payload length + )"3a:"$( : Next header + )"04:"$( : Hop limit + )"$saddr:"$( : IP saddr + )"$daddr:"$( : IP daddr + ) + echo $p +} + +ecn_payload_get() +{ + echo 
$(ipip_payload_get "0") +} + +ecn_decap_test() +{ + local trap_name="decap_error" + local desc=$1; shift + local ecn_desc=$1; shift + local outer_tos=$1; shift + local mz_pid + + RET=0 + + tc filter add dev $swp1 egress protocol ipv6 pref 1 handle 101 \ + flower src_ip 2001:db8:2::1 dst_ip 2001:db8:1::1 skip_sw \ + action pass + + rp1_mac=$(mac_get $rp1) + rp2_mac=$(mac_get $rp2) + payload=$(ecn_payload_get) + + ip vrf exec v$rp2 $MZ -6 $rp2 -c 0 -d 1msec -a $rp2_mac -b $rp1_mac \ + -A 2001:db8:3::2 -B 2001:db8:3::1 -t ip \ + tos=$outer_tos,next=47,p=$payload -q & + mz_pid=$! + + devlink_trap_exception_test $trap_name + + tc_check_packets "dev $swp1 egress" 101 0 + check_err $? "Packets were not dropped" + + log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc" + + kill $mz_pid && wait $mz_pid &> /dev/null + tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower +} + +no_matching_tunnel_test() +{ + local trap_name="decap_error" + local desc=$1; shift + local sip=$1; shift + local mz_pid + + RET=0 + + tc filter add dev $swp1 egress protocol ipv6 pref 1 handle 101 \ + flower src_ip 2001:db8:2::1 dst_ip 2001:db8:1::1 action pass + + rp1_mac=$(mac_get $rp1) + rp2_mac=$(mac_get $rp2) + payload=$(ipip_payload_get "$@") + + ip vrf exec v$rp2 $MZ -6 $rp2 -c 0 -d 1msec -a $rp2_mac -b $rp1_mac \ + -A $sip -B 2001:db8:3::1 -t ip next=47,p=$payload -q & + mz_pid=$! + + devlink_trap_exception_test $trap_name + + tc_check_packets "dev $swp1 egress" 101 0 + check_err $? "Packets were not dropped" + + log_test "$desc" + + kill $mz_pid && wait $mz_pid &> /dev/null + tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower +} + +decap_error_test() +{ + # Correct source IP - the remote address + local sip=2001:db8:3::2 + + ecn_decap_test "Decap error" "ECT(1)" 01 + ecn_decap_test "Decap error" "ECT(0)" 02 + ecn_decap_test "Decap error" "CE" 03 + + no_matching_tunnel_test "Decap error: Source IP check failed" \ + 2001:db8:4::2 "0" + no_matching_tunnel_test \ + "Decap error: Key exists but was not expected" $sip "2" \ + "00:00:00:E9:" + + # Destroy the tunnel and create new one with key + __addr_add_del g1 del 2001:db8:3::1/128 + tunnel_destroy g1 + + tunnel_create g1 ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \ + ttl inherit key 233 + __addr_add_del g1 add 2001:db8:3::1/128 + + no_matching_tunnel_test \ + "Decap error: Key does not exist but was expected" $sip "0" + no_matching_tunnel_test \ + "Decap error: Packet has a wrong key field" $sip "2" \ + "00:00:00:E8:" +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh index 50654f8a8c3739..e9f65bd2e29914 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh @@ -7,12 +7,9 @@ NUM_NETIFS=6 source $lib_dir/lib.sh source $lib_dir/tc_common.sh source $lib_dir/devlink_lib.sh +source ../mlxsw_lib.sh -if [[ "$DEVLINK_VIDDID" != "15b3:cf6c" && \ - "$DEVLINK_VIDDID" != "15b3:cf70" ]]; then - echo "SKIP: test is tailored for Mellanox Spectrum-2 and Spectrum-3" - exit 1 -fi +mlxsw_only_on_spectrum 2+ || exit 1 current_test="" @@ -28,7 +25,7 @@ cleanup() trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre tc_police port" +ALL_TESTS="router tc_flower mirror_gre tc_police port rif_mac_profile" for current_test in ${TESTS:-$ALL_TESTS}; do 
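A note on the GRE header bytes used by ipip_payload_get() in the ip6gre decap test above (and by its IPv4 counterpart earlier in this patch): the leading flags nibble maps to the C/R/K/S bits of the GRE header, so "2" sets the Key Present bit and "0" leaves it clear, and the optional 4-byte key field carries the key value in hex:

    # flags "2", key "00:00:00:E9:"  ->  key present, 0xE9 = 233, matches "key 233"
    # flags "2", key "00:00:00:E8:"  ->  key present, 0xE8 = 232, deliberately wrong
    # flags "0", no key bytes        ->  key absent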
RET_FIN=0 source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_mac_profile_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_mac_profile_scale.sh new file mode 100644 index 00000000000000..303d7cbe3c45fa --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_mac_profile_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../rif_mac_profile_scale.sh + +rif_mac_profile_get_target() +{ + local should_fail=$1 + local target + + target=$(devlink_resource_size_get rif_mac_profiles) + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh index 73035e25085d7f..06a80f40daa4ca 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh @@ -2,11 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 source "../../../../net/forwarding/devlink_lib.sh" +source ../mlxsw_lib.sh -if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then - echo "SKIP: test is tailored for Mellanox Spectrum" - exit 1 -fi +mlxsw_only_on_spectrum 1 || exit 1 # Needed for returning to default declare -A KVD_DEFAULTS diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh index 685dfb3478b3ed..bcb110e830ceca 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh @@ -22,7 +22,7 @@ cleanup() devlink_sp_read_kvd_defaults trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre tc_police port" +ALL_TESTS="router tc_flower mirror_gre tc_police port rif_mac_profile" for current_test in ${TESTS:-$ALL_TESTS}; do RET_FIN=0 source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_mac_profile_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_mac_profile_scale.sh new file mode 100644 index 00000000000000..303d7cbe3c45fa --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_mac_profile_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../rif_mac_profile_scale.sh + +rif_mac_profile_get_target() +{ + local should_fail=$1 + local target + + target=$(devlink_resource_size_get rif_mac_profiles) + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh index 5ec3beb637c82e..0441a18f098b10 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh @@ -20,6 +20,7 @@ NUM_NETIFS=2 source $lib_dir/tc_common.sh source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh +source mlxsw_lib.sh switch_create() { @@ -169,7 +170,7 @@ matchall_sample_egress_test() # It is forbidden in mlxsw driver to have matchall with sample action # bound on egress. 
Spectrum-1 specific restriction - [[ "$DEVLINK_VIDDID" != "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 1 || return tc qdisc add dev $swp1 clsact diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh index 373d5f2a846eb2..83a0210e754496 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh @@ -51,6 +51,7 @@ NUM_NETIFS=8 CAPTURE_FILE=$(mktemp) source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh +source mlxsw_lib.sh # Available at https://github.com/Mellanox/libpsample require_command psample @@ -431,7 +432,7 @@ tc_sample_md_out_tc_test() RET=0 # Output traffic class is not supported on Spectrum-1. - [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \ skip_sw action sample rate 5 group 1 @@ -477,7 +478,7 @@ tc_sample_md_out_tc_occ_test() RET=0 # Output traffic class occupancy is not supported on Spectrum-1. - [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $rp1 ingress protocol all pref 1 handle 101 matchall \ skip_sw action sample rate 1024 group 1 @@ -521,7 +522,7 @@ tc_sample_md_latency_test() RET=0 # Egress sampling not supported on Spectrum-1. - [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $rp2 egress protocol all pref 1 handle 101 matchall \ skip_sw action sample rate 5 group 1 @@ -550,7 +551,7 @@ tc_sample_acl_group_conflict_test() # port with different groups. # Policy-based sampling is not supported on Spectrum-1. - [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ skip_sw action sample rate 1024 group 1 @@ -579,7 +580,7 @@ __tc_sample_acl_rate_test() RET=0 # Policy-based sampling is not supported on Spectrum-1. - [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $port $bind protocol ip pref 1 handle 101 flower \ skip_sw dst_ip 198.51.100.1 action sample rate 32 group 1 @@ -631,7 +632,7 @@ tc_sample_acl_max_rate_test() RET=0 # Policy-based sampling is not supported on Spectrum-1. 
- [[ "$DEVLINK_VIDDID" == "15b3:cb84" ]] && return + mlxsw_only_on_spectrum 2+ || return tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ skip_sw action sample rate $((2 ** 24 - 1)) group 1 diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh index 7ca1f030d20990..922744059aaa24 100644 --- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh @@ -50,7 +50,7 @@ function make_netdev { modprobe netdevsim fi - echo $NSIM_ID > /sys/bus/netdevsim/new_device + echo $NSIM_ID $@ > /sys/bus/netdevsim/new_device # get new device name ls /sys/bus/netdevsim/devices/netdevsim${NSIM_ID}/net/ } diff --git a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh new file mode 100755 index 00000000000000..fd13c8cfb7a87a --- /dev/null +++ b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only + +source ethtool-common.sh + +set -o pipefail + +n_children() { + n=$(tc qdisc show dev $NDEV | grep '^qdisc' | wc -l) + echo $((n - 1)) +} + +tcq() { + tc qdisc $1 dev $NDEV ${@:2} +} + +n_child_assert() { + n=$(n_children) + if [ $n -ne $1 ]; then + echo "ERROR ($root): ${@:2}, expected $1 have $n" + ((num_errors++)) + else + ((num_passes++)) + fi +} + + +for root in mq mqprio; do + NDEV=$(make_netdev 1 4) + + opts= + [ $root == "mqprio" ] && opts='hw 0 num_tc 1 map 0 0 0 0 queues 1@0' + + tcq add root handle 100: $root $opts + n_child_assert 4 'Init' + + # All defaults + + for n in 3 2 1 2 3 4 1 4; do + ethtool -L $NDEV combined $n + n_child_assert $n "Change queues to $n while down" + done + + ip link set dev $NDEV up + + for n in 3 2 1 2 3 4 1 4; do + ethtool -L $NDEV combined $n + n_child_assert $n "Change queues to $n while up" + done + + # One real one + tcq replace parent 100:4 handle 204: pfifo_fast + n_child_assert 4 "One real queue" + + ethtool -L $NDEV combined 1 + n_child_assert 2 "One real queue, one default" + + ethtool -L $NDEV combined 4 + n_child_assert 4 "One real queue, rest default" + + # Graft some + tcq replace parent 100:1 handle 204: + n_child_assert 3 "Grafted" + + ethtool -L $NDEV combined 1 + n_child_assert 1 "Grafted, one" + + cleanup_nsim +done + +if [ $num_errors -eq 0 ]; then + echo "PASSED all $((num_passes)) checks" + exit 0 +else + echo "FAILED $num_errors/$((num_errors+num_passes)) checks" + exit 1 +fi diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh index f7d84549cc3e35..eaf8a04a7ca5f1 100755 --- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh +++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh @@ -156,6 +156,11 @@ create_tcam_skeleton() setup_prepare() { + ip link set $eth0 up + ip link set $eth1 up + ip link set $eth2 up + ip link set $eth3 up + create_tcam_skeleton $eth0 ip link add br0 type bridge @@ -242,9 +247,9 @@ test_vlan_push() tcpdump_cleanup } -test_vlan_modify() +test_vlan_ingress_modify() { - printf "Testing VLAN modification.. " + printf "Testing ingress VLAN modification.. 
" ip link set br0 type bridge vlan_filtering 1 bridge vlan add dev $eth0 vid 200 @@ -280,6 +285,44 @@ test_vlan_modify() ip link set br0 type bridge vlan_filtering 0 } +test_vlan_egress_modify() +{ + printf "Testing egress VLAN modification.. " + + tc qdisc add dev $eth1 clsact + + ip link set br0 type bridge vlan_filtering 1 + bridge vlan add dev $eth0 vid 200 + bridge vlan add dev $eth1 vid 200 + + tc filter add dev $eth1 egress chain $(ES0) pref 3 \ + protocol 802.1Q flower skip_sw vlan_id 200 vlan_prio 0 \ + action vlan modify id 300 priority 7 + + tcpdump_start $eth2 + + $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip + + sleep 1 + + tcpdump_stop + + if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then + echo "OK" + else + echo "FAIL" + fi + + tcpdump_cleanup + + tc filter del dev $eth1 egress chain $(ES0) pref 3 + tc qdisc del dev $eth1 clsact + + bridge vlan del dev $eth0 vid 200 + bridge vlan del dev $eth1 vid 200 + ip link set br0 type bridge vlan_filtering 0 +} + test_skbedit_priority() { local num_pkts=100 @@ -304,7 +347,8 @@ trap cleanup EXIT ALL_TESTS=" test_vlan_pop test_vlan_push - test_vlan_modify + test_vlan_ingress_modify + test_vlan_egress_modify test_skbedit_priority " diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 19deb9cdf72f20..7581a7348e1bcc 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -31,3 +31,8 @@ rxtimestamp timestamping txtimestamp so_netns_cookie +test_unix_oob +gro +ioam6_parser +toeplitz +cmsg_so_mark diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 492b273743b4e7..aee76d1bb9da1b 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -22,12 +22,14 @@ TEST_PROGS += devlink_port_split.py TEST_PROGS += drop_monitor_tests.sh TEST_PROGS += vrf_route_leaking.sh TEST_PROGS += bareudp.sh +TEST_PROGS += amt.sh TEST_PROGS += unicast_extensions.sh TEST_PROGS += udpgro_fwd.sh TEST_PROGS += veth.sh TEST_PROGS += ioam6.sh TEST_PROGS += gro.sh TEST_PROGS += gre_gso.sh +TEST_PROGS += cmsg_so_mark.sh TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any @@ -44,6 +46,7 @@ TEST_GEN_FILES += gro TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls TEST_GEN_FILES += toeplitz +TEST_GEN_FILES += cmsg_so_mark TEST_FILES := settings diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh new file mode 100644 index 00000000000000..75528788cb95e5 --- /dev/null +++ b/tools/testing/selftests/net/amt.sh @@ -0,0 +1,284 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# Author: Taehee Yoo +# +# This script evaluates the AMT driver. +# There are four network-namespaces, LISTENER, SOURCE, GATEWAY, RELAY. +# The role of LISTENER is to listen multicast traffic. +# In order to do that, it send IGMP group join message. +# The role of SOURCE is to send multicast traffic to listener. +# The role of GATEWAY is to work Gateway role of AMT interface. +# The role of RELAY is to work Relay role of AMT interface. +# +# +# +------------------------+ +# | LISTENER netns | +# | | +# | +------------------+ | +# | | l_gw | | +# | | 192.168.0.2/24 | | +# | | 2001:db8::2/64 | | +# | +------------------+ | +# | . | +# +------------------------+ +# . +# . 
+# +-----------------------------------------------------+ +# | . GATEWAY netns | +# | . | +# |+---------------------------------------------------+| +# || . br0 || +# || +------------------+ +------------------+ || +# || | gw_l | | amtg | || +# || | 192.168.0.1/24 | +--------+---------+ || +# || | 2001:db8::1/64 | | || +# || +------------------+ | || +# |+-------------------------------------|-------------+| +# | | | +# | +--------+---------+ | +# | | gw_relay | | +# | | 10.0.0.1/24 | | +# | +------------------+ | +# | . | +# +-----------------------------------------------------+ +# . +# . +# +-----------------------------------------------------+ +# | RELAY netns . | +# | +------------------+ | +# | | relay_gw | | +# | | 10.0.0.2/24 | | +# | +--------+---------+ | +# | | | +# | | | +# | +------------------+ +--------+---------+ | +# | | relay_src | | amtr | | +# | | 172.17.0.1/24 | +------------------+ | +# | | 2001:db8:3::1/64 | | +# | +------------------+ | +# | . | +# | . | +# +-----------------------------------------------------+ +# . +# . +# +------------------------+ +# | . | +# | +------------------+ | +# | | src_relay | | +# | | 172.17.0.2/24 | | +# | | 2001:db8:3::2/64 | | +# | +------------------+ | +# | SOURCE netns | +# +------------------------+ +#============================================================================== + +readonly LISTENER=$(mktemp -u listener-XXXXXXXX) +readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX) +readonly RELAY=$(mktemp -u relay-XXXXXXXX) +readonly SOURCE=$(mktemp -u source-XXXXXXXX) +ERR=4 +err=0 + +exit_cleanup() +{ + for ns in "$@"; do + ip netns delete "${ns}" 2>/dev/null || true + done + + exit $ERR +} + +create_namespaces() +{ + ip netns add "${LISTENER}" || exit_cleanup + ip netns add "${GATEWAY}" || exit_cleanup "${LISTENER}" + ip netns add "${RELAY}" || exit_cleanup "${LISTENER}" "${GATEWAY}" + ip netns add "${SOURCE}" || exit_cleanup "${LISTENER}" "${GATEWAY}" \ + "${RELAY}" +} + +# The trap function handler +# +exit_cleanup_all() +{ + exit_cleanup "${LISTENER}" "${GATEWAY}" "${RELAY}" "${SOURCE}" +} + +setup_interface() +{ + for ns in "${LISTENER}" "${GATEWAY}" "${RELAY}" "${SOURCE}"; do + ip -netns "${ns}" link set dev lo up + done; + + ip link add l_gw type veth peer name gw_l + ip link add gw_relay type veth peer name relay_gw + ip link add relay_src type veth peer name src_relay + + ip link set l_gw netns "${LISTENER}" up + ip link set gw_l netns "${GATEWAY}" up + ip link set gw_relay netns "${GATEWAY}" up + ip link set relay_gw netns "${RELAY}" up + ip link set relay_src netns "${RELAY}" up + ip link set src_relay netns "${SOURCE}" up mtu 1400 + + ip netns exec "${LISTENER}" ip a a 192.168.0.2/24 dev l_gw + ip netns exec "${LISTENER}" ip r a default via 192.168.0.1 dev l_gw + ip netns exec "${LISTENER}" ip a a 2001:db8::2/64 dev l_gw + ip netns exec "${LISTENER}" ip r a default via 2001:db8::1 dev l_gw + ip netns exec "${LISTENER}" ip a a 239.0.0.1/32 dev l_gw autojoin + ip netns exec "${LISTENER}" ip a a ff0e::5:6/128 dev l_gw autojoin + + ip netns exec "${GATEWAY}" ip a a 192.168.0.1/24 dev gw_l + ip netns exec "${GATEWAY}" ip a a 2001:db8::1/64 dev gw_l + ip netns exec "${GATEWAY}" ip a a 10.0.0.1/24 dev gw_relay + ip netns exec "${GATEWAY}" ip link add br0 type bridge + ip netns exec "${GATEWAY}" ip link set br0 up + ip netns exec "${GATEWAY}" ip link set gw_l master br0 + ip netns exec "${GATEWAY}" ip link set gw_l up + ip netns exec "${GATEWAY}" ip link add amtg master br0 type amt \ + mode gateway local 10.0.0.1 discovery 
10.0.0.2 dev gw_relay \ + gateway_port 2268 relay_port 2268 + ip netns exec "${RELAY}" ip a a 10.0.0.2/24 dev relay_gw + ip netns exec "${RELAY}" ip link add amtr type amt mode relay \ + local 10.0.0.2 dev relay_gw relay_port 2268 max_tunnels 4 + ip netns exec "${RELAY}" ip a a 172.17.0.1/24 dev relay_src + ip netns exec "${RELAY}" ip a a 2001:db8:3::1/64 dev relay_src + ip netns exec "${SOURCE}" ip a a 172.17.0.2/24 dev src_relay + ip netns exec "${SOURCE}" ip a a 2001:db8:3::2/64 dev src_relay + ip netns exec "${SOURCE}" ip r a default via 172.17.0.1 dev src_relay + ip netns exec "${SOURCE}" ip r a default via 2001:db8:3::1 dev src_relay + ip netns exec "${RELAY}" ip link set amtr up + ip netns exec "${GATEWAY}" ip link set amtg up +} + +setup_sysctl() +{ + ip netns exec "${RELAY}" sysctl net.ipv4.ip_forward=1 -w -q +} + +setup_iptables() +{ + ip netns exec "${RELAY}" iptables -t mangle -I PREROUTING \ + -d 239.0.0.1 -j TTL --ttl-set 2 + ip netns exec "${RELAY}" ip6tables -t mangle -I PREROUTING \ + -j HL --hl-set 2 +} + +setup_mcast_routing() +{ + ip netns exec "${RELAY}" smcrouted + ip netns exec "${RELAY}" smcroutectl a relay_src \ + 172.17.0.2 239.0.0.1 amtr + ip netns exec "${RELAY}" smcroutectl a relay_src \ + 2001:db8:3::2 ff0e::5:6 amtr +} + +test_remote_ip() +{ + REMOTE=$(ip netns exec "${GATEWAY}" \ + ip -d -j link show amtg | jq .[0].linkinfo.info_data.remote) + if [ $REMOTE == "\"10.0.0.2\"" ]; then + printf "TEST: %-60s [ OK ]\n" "amt discovery" + else + printf "TEST: %-60s [FAIL]\n" "amt discovery" + ERR=1 + fi +} + +send_mcast_torture4() +{ + ip netns exec "${SOURCE}" bash -c \ + 'cat /dev/urandom | head -c 1G | nc -w 1 -u 239.0.0.1 4001' +} + + +send_mcast_torture6() +{ + ip netns exec "${SOURCE}" bash -c \ + 'cat /dev/urandom | head -c 1G | nc -w 1 -u ff0e::5:6 6001' +} + +check_features() +{ + ip link help 2>&1 | grep -q amt + if [ $? -ne 0 ]; then + echo "Missing amt support in iproute2" >&2 + exit_cleanup + fi +} + +test_ipv4_forward() +{ + RESULT4=$(ip netns exec "${LISTENER}" nc -w 1 -l -u 239.0.0.1 4000) + if [ "$RESULT4" == "172.17.0.2" ]; then + printf "TEST: %-60s [ OK ]\n" "IPv4 amt multicast forwarding" + exit 0 + else + printf "TEST: %-60s [FAIL]\n" "IPv4 amt multicast forwarding" + exit 1 + fi +} + +test_ipv6_forward() +{ + RESULT6=$(ip netns exec "${LISTENER}" nc -w 1 -l -u ff0e::5:6 6000) + if [ "$RESULT6" == "2001:db8:3::2" ]; then + printf "TEST: %-60s [ OK ]\n" "IPv6 amt multicast forwarding" + exit 0 + else + printf "TEST: %-60s [FAIL]\n" "IPv6 amt multicast forwarding" + exit 1 + fi +} + +send_mcast4() +{ + sleep 2 + ip netns exec "${SOURCE}" bash -c \ + 'echo 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' & +} + +send_mcast6() +{ + sleep 2 + ip netns exec "${SOURCE}" bash -c \ + 'echo 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' & +} + +check_features + +create_namespaces + +set -e +trap exit_cleanup_all EXIT + +setup_interface +setup_sysctl +setup_iptables +setup_mcast_routing +test_remote_ip +test_ipv4_forward & +pid=$! +send_mcast4 +wait $pid || err=$? +if [ $err -eq 1 ]; then + ERR=1 +fi +test_ipv6_forward & +pid=$! +send_mcast6 +wait $pid || err=$? +if [ $err -eq 1 ]; then + ERR=1 +fi +send_mcast_torture4 +printf "TEST: %-60s [ OK ]\n" "IPv4 amt traffic forwarding torture" +send_mcast_torture6 +printf "TEST: %-60s [ OK ]\n" "IPv6 amt traffic forwarding torture" +sleep 5 +if [ "${ERR}" -eq 1 ]; then + echo "Some tests failed." 
>&2 +else + ERR=0 +fi diff --git a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh new file mode 100755 index 00000000000000..b5af08af855959 --- /dev/null +++ b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Tests sysctl options {arp,ndisc}_evict_nocarrier={0,1} +# +# Create a veth pair and set IPs/routes on both. Then ping to establish +# an entry in the ARP/ND table. Depending on the test set sysctl option to +# 1 or 0. Set remote veth down which will cause local veth to go into a no +# carrier state. Depending on the test check the ARP/ND table: +# +# {arp,ndisc}_evict_nocarrier=1 should contain no ARP/ND after no carrier +# {arp,ndisc}_evict_nocarrer=0 should still contain the single ARP/ND entry +# + +readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" +readonly V4_ADDR0=10.0.10.1 +readonly V4_ADDR1=10.0.10.2 +readonly V6_ADDR0=2001:db8:91::1 +readonly V6_ADDR1=2001:db8:91::2 +nsid=100 + +cleanup_v6() +{ + ip netns del me + ip netns del peer + + sysctl -w net.ipv4.conf.veth0.ndisc_evict_nocarrier=1 >/dev/null 2>&1 + sysctl -w net.ipv4.conf.all.ndisc_evict_nocarrier=1 >/dev/null 2>&1 +} + +create_ns() +{ + local n=${1} + + ip netns del ${n} 2>/dev/null + + ip netns add ${n} + ip netns set ${n} $((nsid++)) + ip -netns ${n} link set lo up +} + + +setup_v6() { + create_ns me + create_ns peer + + IP="ip -netns me" + + $IP li add veth1 type veth peer name veth2 + $IP li set veth1 up + $IP -6 addr add $V6_ADDR0/64 dev veth1 nodad + $IP li set veth2 netns peer up + ip -netns peer -6 addr add $V6_ADDR1/64 dev veth2 nodad + + ip netns exec me sysctl -w $1 >/dev/null 2>&1 + + # Establish an ND cache entry + ip netns exec me ping -6 -c1 -Iveth1 $V6_ADDR1 >/dev/null 2>&1 + # Should have the veth1 entry in ND table + ip netns exec me ip -6 neigh get $V6_ADDR1 dev veth1 >/dev/null 2>&1 + if [ $? -ne 0 ]; then + cleanup_v6 + echo "failed" + exit + fi + + # Set veth2 down, which will put veth1 in NOCARRIER state + ip netns exec peer ip link set veth2 down +} + +setup_v4() { + ip netns add "${PEER_NS}" + ip link add name veth0 type veth peer name veth1 + ip link set dev veth0 up + ip link set dev veth1 netns "${PEER_NS}" + ip netns exec "${PEER_NS}" ip link set dev veth1 up + ip addr add $V4_ADDR0/24 dev veth0 + ip netns exec "${PEER_NS}" ip addr add $V4_ADDR1/24 dev veth1 + ip netns exec ${PEER_NS} ip route add default via $V4_ADDR1 dev veth1 + ip route add default via $V4_ADDR0 dev veth0 + + sysctl -w "$1" >/dev/null 2>&1 + + # Establish an ARP cache entry + ping -c1 -I veth0 $V4_ADDR1 -q >/dev/null 2>&1 + # Should have the veth1 entry in ARP table + ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1 + if [ $? -ne 0 ]; then + cleanup_v4 + echo "failed" + exit + fi + + # Set veth1 down, which will put veth0 in NOCARRIER state + ip netns exec "${PEER_NS}" ip link set veth1 down +} + +cleanup_v4() { + ip neigh flush dev veth0 + ip link del veth0 + local -r ns="$(ip netns list|grep $PEER_NS)" + [ -n "$ns" ] && ip netns del $ns 2>/dev/null + + sysctl -w net.ipv4.conf.veth0.arp_evict_nocarrier=1 >/dev/null 2>&1 + sysctl -w net.ipv4.conf.all.arp_evict_nocarrier=1 >/dev/null 2>&1 +} + +# Run test when arp_evict_nocarrier = 1 (default). +run_arp_evict_nocarrier_enabled() { + echo "run arp_evict_nocarrier=1 test" + setup_v4 "net.ipv4.conf.veth0.arp_evict_nocarrier=1" + + # ARP table should be empty + ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1 + + if [ $? 
-eq 0 ];then + echo "failed" + else + echo "ok" + fi + + cleanup_v4 +} + +# Run test when arp_evict_nocarrier = 0 +run_arp_evict_nocarrier_disabled() { + echo "run arp_evict_nocarrier=0 test" + setup_v4 "net.ipv4.conf.veth0.arp_evict_nocarrier=0" + + # ARP table should still contain the entry + ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1 + + if [ $? -eq 0 ];then + echo "ok" + else + echo "failed" + fi + + cleanup_v4 +} + +run_arp_evict_nocarrier_disabled_all() { + echo "run all.arp_evict_nocarrier=0 test" + setup_v4 "net.ipv4.conf.all.arp_evict_nocarrier=0" + + # ARP table should still contain the entry + ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1 + + if [ $? -eq 0 ];then + echo "ok" + else + echo "failed" + fi + + cleanup_v4 +} + +run_ndisc_evict_nocarrier_enabled() { + echo "run ndisc_evict_nocarrier=1 test" + + setup_v6 "net.ipv6.conf.veth1.ndisc_evict_nocarrier=1" + + ip netns exec me ip -6 neigh get $V6_ADDR1 dev veth1 >/dev/null 2>&1 + + if [ $? -eq 0 ];then + echo "failed" + else + echo "ok" + fi + + cleanup_v6 +} + +run_ndisc_evict_nocarrier_disabled() { + echo "run ndisc_evict_nocarrier=0 test" + + setup_v6 "net.ipv6.conf.veth1.ndisc_evict_nocarrier=0" + + ip netns exec me ip -6 neigh get $V6_ADDR1 dev veth1 >/dev/null 2>&1 + + if [ $? -eq 0 ];then + echo "ok" + else + echo "failed" + fi + + cleanup_v6 +} + +run_ndisc_evict_nocarrier_disabled_all() { + echo "run all.ndisc_evict_nocarrier=0 test" + + setup_v6 "net.ipv6.conf.all.ndisc_evict_nocarrier=0" + + ip netns exec me ip -6 neigh get $V6_ADDR1 dev veth1 >/dev/null 2>&1 + + if [ $? -eq 0 ];then + echo "ok" + else + echo "failed" + fi + + cleanup_v6 +} + +run_all_tests() { + run_arp_evict_nocarrier_enabled + run_arp_evict_nocarrier_disabled + run_arp_evict_nocarrier_disabled_all + run_ndisc_evict_nocarrier_enabled + run_ndisc_evict_nocarrier_disabled + run_ndisc_evict_nocarrier_disabled_all +} + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit $ksft_skip; +fi + +run_all_tests diff --git a/tools/testing/selftests/net/cmsg_so_mark.c b/tools/testing/selftests/net/cmsg_so_mark.c new file mode 100644 index 00000000000000..27f2804892a7eb --- /dev/null +++ b/tools/testing/selftests/net/cmsg_so_mark.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include +#include +#include +#include +#include + +int main(int argc, const char **argv) +{ + char cbuf[CMSG_SPACE(sizeof(__u32))]; + struct addrinfo hints, *ai; + struct cmsghdr *cmsg; + struct iovec iov[1]; + struct msghdr msg; + int mark; + int err; + int fd; + + if (argc != 4) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + mark = atoi(argv[3]); + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_DGRAM; + + ai = NULL; + err = getaddrinfo(argv[1], argv[2], &hints, &ai); + if (err) { + fprintf(stderr, "Can't resolve address: %s\n", strerror(errno)); + return 1; + } + + fd = socket(ai->ai_family, SOCK_DGRAM, IPPROTO_UDP); + if (fd < 0) { + fprintf(stderr, "Can't open socket: %s\n", strerror(errno)); + freeaddrinfo(ai); + return 1; + } + + iov[0].iov_base = "bla"; + iov[0].iov_len = 4; + + msg.msg_name = ai->ai_addr; + msg.msg_namelen = ai->ai_addrlen; + msg.msg_iov = iov; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_MARK; + cmsg->cmsg_len = CMSG_LEN(sizeof(__u32)); + *(__u32 *)CMSG_DATA(cmsg) = mark; + + err = sendmsg(fd, 
&msg, 0); + + close(fd); + freeaddrinfo(ai); + return err != 4; +} diff --git a/tools/testing/selftests/net/cmsg_so_mark.sh b/tools/testing/selftests/net/cmsg_so_mark.sh new file mode 100755 index 00000000000000..19c6aab8d0e94d --- /dev/null +++ b/tools/testing/selftests/net/cmsg_so_mark.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +NS=ns +IP4=172.16.0.1/24 +TGT4=172.16.0.2 +IP6=2001:db8:1::1/64 +TGT6=2001:db8:1::2 +MARK=1000 + +cleanup() +{ + ip netns del $NS +} + +trap cleanup EXIT + +# Namespaces +ip netns add $NS + +# Connectivity +ip -netns $NS link add type dummy +ip -netns $NS link set dev dummy0 up +ip -netns $NS addr add $IP4 dev dummy0 +ip -netns $NS addr add $IP6 dev dummy0 + +ip -netns $NS rule add fwmark $MARK lookup 300 +ip -6 -netns $NS rule add fwmark $MARK lookup 300 +ip -netns $NS route add prohibit any table 300 +ip -6 -netns $NS route add prohibit any table 300 + +# Test +BAD=0 +TOTAL=0 + +check_result() { + ((TOTAL++)) + if [ $1 -ne $2 ]; then + echo " Case $3 returned $1, expected $2" + ((BAD++)) + fi +} + +ip netns exec $NS ./cmsg_so_mark $TGT4 1234 $((MARK + 1)) +check_result $? 0 "IPv4 pass" +ip netns exec $NS ./cmsg_so_mark $TGT6 1234 $((MARK + 1)) +check_result $? 0 "IPv6 pass" + +ip netns exec $NS ./cmsg_so_mark $TGT4 1234 $MARK +check_result $? 1 "IPv4 rejection" +ip netns exec $NS ./cmsg_so_mark $TGT6 1234 $MARK +check_result $? 1 "IPv6 rejection" + +# Summary +if [ $BAD -ne 0 ]; then + echo "FAIL - $BAD/$TOTAL cases failed" + exit 1 +else + echo "OK" + exit 0 +fi diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index 86ab429fe7f3bf..ead7963b9bf0bc 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -44,3 +44,4 @@ CONFIG_NET_ACT_MIRRED=m CONFIG_BAREUDP=m CONFIG_IPV6_IOAM6_LWTUNNEL=y CONFIG_CRYPTO_SM4=y +CONFIG_AMT=m diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 0d293391e9a449..b5a69ad191b078 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -2078,6 +2078,7 @@ basic_res() "id 101 index 0 nhid 2 id 101 index 1 nhid 2 id 101 index 2 nhid 1 id 101 index 3 nhid 1" log_test $? 
0 "Dump all nexthop buckets in a group" + sleep 0.1 (( $($IP -j nexthop bucket list id 101 | jq '[.[] | select(.bucket.idle_time > 0 and .bucket.idle_time < 2)] | length') == 4 )) diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh index 675eff45b03717..1162836f8f3293 100755 --- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh +++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh @@ -482,10 +482,15 @@ v3exc_timeout_test() local X=("192.0.2.20" "192.0.2.30") # GMI should be 3 seconds - ip link set dev br0 type bridge mcast_query_interval 100 mcast_query_response_interval 100 + ip link set dev br0 type bridge mcast_query_interval 100 \ + mcast_query_response_interval 100 \ + mcast_membership_interval 300 v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP - ip link set dev br0 type bridge mcast_query_interval 500 mcast_query_response_interval 500 + ip link set dev br0 type bridge mcast_query_interval 500 \ + mcast_query_response_interval 500 \ + mcast_membership_interval 1500 + $MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q sleep 3 bridge -j -d -s mdb show dev br0 \ @@ -517,7 +522,8 @@ v3exc_timeout_test() log_test "IGMPv3 group $TEST_GROUP exclude timeout" ip link set dev br0 type bridge mcast_query_interval 12500 \ - mcast_query_response_interval 1000 + mcast_query_response_interval 1000 \ + mcast_membership_interval 26000 v3cleanup $swp1 $TEST_GROUP } diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh index ffdcfa87ca2bae..e2b9ff773c6b60 100755 --- a/tools/testing/selftests/net/forwarding/bridge_mld.sh +++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh @@ -479,10 +479,15 @@ mldv2exc_timeout_test() local X=("2001:db8:1::20" "2001:db8:1::30") # GMI should be 3 seconds - ip link set dev br0 type bridge mcast_query_interval 100 mcast_query_response_interval 100 + ip link set dev br0 type bridge mcast_query_interval 100 \ + mcast_query_response_interval 100 \ + mcast_membership_interval 300 mldv2exclude_prepare $h1 - ip link set dev br0 type bridge mcast_query_interval 500 mcast_query_response_interval 500 + ip link set dev br0 type bridge mcast_query_interval 500 \ + mcast_query_response_interval 500 \ + mcast_membership_interval 1500 + $MZ $h1 -c 1 $MZPKT_ALLOW2 -q sleep 3 bridge -j -d -s mdb show dev br0 \ @@ -514,7 +519,8 @@ mldv2exc_timeout_test() log_test "MLDv2 group $TEST_GROUP exclude timeout" ip link set dev br0 type bridge mcast_query_interval 12500 \ - mcast_query_response_interval 1000 + mcast_query_response_interval 1000 \ + mcast_membership_interval 26000 mldv2cleanup $swp1 } diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh index 2c14a86adaaae4..de9944d42027c4 100644 --- a/tools/testing/selftests/net/forwarding/devlink_lib.sh +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -563,12 +563,6 @@ devlink_trap_group_policer_get() | jq '.[][][]["policer"]' } -devlink_trap_policer_ids_get() -{ - devlink -j -p trap policer show \ - | jq '.[]["'$DEVLINK_DEV'"][]["policer"]' -} - devlink_port_by_netdev() { local if_name=$1 diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample index e5e2fbeca22ec6..bf17e485684f0d 100644 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample +++ 
b/tools/testing/selftests/net/forwarding/forwarding.config.sample @@ -39,5 +39,9 @@ NETIF_CREATE=yes # Timeout (in seconds) before ping exits regardless of how many packets have # been sent or received PING_TIMEOUT=5 +# Flag for tc match, supposed to be skip_sw/skip_hw which means do not process +# filter by software/hardware +TC_FLAG=skip_hw # IPv6 traceroute utility name. TROUTE6=traceroute6 + diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh new file mode 100755 index 00000000000000..96c97064f2d33c --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_flat.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnel without key. +# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for +# more details. + +ALL_TESTS=" + gre_flat + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_flat_create $ol1 $ul1 + sw2_flat_create $ol2 $ul2 +} + +gre_flat() +{ + test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6" + test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6" +} + +gre_mtu_change() +{ + test_mtu_change +} + +cleanup() +{ + pre_cleanup + + sw2_flat_destroy $ol2 $ul2 + sw1_flat_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh new file mode 100755 index 00000000000000..ff9fb0db9bd128 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnel with key. +# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for +# more details. + +ALL_TESTS=" + gre_flat + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_flat_create $ol1 $ul1 key 233 + sw2_flat_create $ol2 $ul2 key 233 +} + +gre_flat() +{ + test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with key" + test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with key" +} + +gre_mtu_change() +{ + test_mtu_change +} + +cleanup() +{ + pre_cleanup + + sw2_flat_destroy $ol2 $ul2 + sw1_flat_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh new file mode 100755 index 00000000000000..12c1387852421f --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnel with keys. +# This test uses flat topology for IP tunneling tests. See ip6gre_lib.sh for +# more details. 
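+#
+# The ikey/okey pair is deliberately asymmetric: the key a tunnel transmits
+# with (okey) must match the key its peer accepts on ingress (ikey). Roughly,
+# the sw1_flat_create/sw2_flat_create calls below end up creating devices
+# equivalent to this sketch:
+#
+#   ip link add g1a type ip6gre local 2001:db8:3::1 remote 2001:db8:3::2 \
+#           tos inherit ttl inherit ikey 111 okey 222
+#   ip link add g2a type ip6gre local 2001:db8:3::2 remote 2001:db8:3::1 \
+#           tos inherit ttl inherit ikey 222 okey 111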
+ +ALL_TESTS=" + gre_flat + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_flat_create $ol1 $ul1 ikey 111 okey 222 + sw2_flat_create $ol2 $ul2 ikey 222 okey 111 +} + +gre_flat() +{ + test_traffic_ip4ip6 "GRE flat IPv4-in-IPv6 with ikey/okey" + test_traffic_ip6ip6 "GRE flat IPv6-in-IPv6 with ikey/okey" +} + +gre_mtu_change() +{ + test_mtu_change gre +} + +cleanup() +{ + pre_cleanup + + sw2_flat_destroy $ol2 $ul2 + sw1_flat_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh new file mode 100755 index 00000000000000..83b55c30a5c359 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_hier.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnels without key. +# This test uses hierarchical topology for IP tunneling tests. See +# ip6gre_lib.sh for more details. + +ALL_TESTS=" + gre_hier + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_hierarchical_create $ol1 $ul1 + sw2_hierarchical_create $ol2 $ul2 +} + +gre_hier() +{ + test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6" + test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6" +} + +gre_mtu_change() +{ + test_mtu_change gre +} + +cleanup() +{ + pre_cleanup + + sw2_hierarchical_destroy $ol2 $ul2 + sw1_hierarchical_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh new file mode 100755 index 00000000000000..256607916d92f4 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnels without key. +# This test uses hierarchical topology for IP tunneling tests. See +# ip6gre_lib.sh for more details. 
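+#
+# When a single "key" value is given, iproute2 treats it as shorthand for
+# "ikey N okey N"; here both endpoints pass key 22, so no asymmetric setup is
+# needed. Roughly, the helpers below create a device equivalent to this
+# sketch:
+#
+#   ip link add g1a type ip6gre local 2001:db8:3::1 remote 2001:db8:3::2 \
+#           tos inherit ttl inherit dev dummy1 key 22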
+ +ALL_TESTS=" + gre_hier + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_hierarchical_create $ol1 $ul1 key 22 + sw2_hierarchical_create $ol2 $ul2 key 22 +} + +gre_hier() +{ + test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with key" + test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with key" +} + +gre_mtu_change() +{ + test_mtu_change gre +} + +cleanup() +{ + pre_cleanup + + sw2_hierarchical_destroy $ol2 $ul2 + sw1_hierarchical_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh new file mode 100755 index 00000000000000..ad1bcd6334a80b --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test IP-in-IP GRE tunnels without key. +# This test uses hierarchical topology for IP tunneling tests. See +# ip6gre_lib.sh for more details. + +ALL_TESTS=" + gre_hier + gre_mtu_change +" + +NUM_NETIFS=6 +source lib.sh +source ip6gre_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + forwarding_enable + vrf_prepare + h1_create + h2_create + sw1_hierarchical_create $ol1 $ul1 ikey 111 okey 222 + sw2_hierarchical_create $ol2 $ul2 ikey 222 okey 111 +} + +gre_hier() +{ + test_traffic_ip4ip6 "GRE hierarchical IPv4-in-IPv6 with ikey/okey" + test_traffic_ip6ip6 "GRE hierarchical IPv6-in-IPv6 with ikey/okey" +} + +gre_mtu_change() +{ + test_mtu_change gre +} + +cleanup() +{ + pre_cleanup + + sw2_hierarchical_destroy $ol2 $ul2 + sw1_hierarchical_destroy $ol1 $ul1 + h2_destroy + h1_destroy + vrf_cleanup + forwarding_restore +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh new file mode 100644 index 00000000000000..58a3597037b134 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh @@ -0,0 +1,438 @@ +# SPDX-License-Identifier: GPL-2.0 +#!/bin/bash + +# Handles creation and destruction of IP-in-IP or GRE tunnels over the given +# topology. Supports both flat and hierarchical models. +# +# Flat Model: +# Overlay and underlay share the same VRF. +# SW1 uses default VRF so tunnel has no bound dev. +# SW2 uses non-default VRF tunnel has a bound dev. +# +--------------------------------+ +# | H1 | +# | $h1 + | +# | 198.51.100.1/24 | | +# | 2001:db8:1::1/64 | | +# +-------------------------|------+ +# | +# +-------------------------|-------------------+ +# | SW1 | | +# | $ol1 + | +# | 198.51.100.2/24 | +# | 2001:db8:1::2/64 | +# | | +# | + g1a (ip6gre) | +# | loc=2001:db8:3::1 | +# | rem=2001:db8:3::2 --. | +# | tos=inherit | | +# | . 
| +# | .--------------------- | +# | | | +# | v | +# | + $ul1.111 (vlan) | +# | | 2001:db8:10::1/64 | +# | \ | +# | \____________ | +# | | | +# | VRF default + $ul1 | +# +---------------------|-----------------------+ +# | +# +---------------------|-----------------------+ +# | SW2 | | +# | $ul2 + | +# | ___________| | +# | / | +# | / | +# | + $ul2.111 (vlan) | +# | ^ 2001:db8:10::2/64 | +# | | | +# | | | +# | '----------------------. | +# | + g2a (ip6gre) | | +# | loc=2001:db8:3::2 | | +# | rem=2001:db8:3::1 --' | +# | tos=inherit | +# | | +# | + $ol2 | +# | | 203.0.113.2/24 | +# | VRF v$ol2 | 2001:db8:2::2/64 | +# +---------------------|-----------------------+ +# +---------------------|----------+ +# | H2 | | +# | $h2 + | +# | 203.0.113.1/24 | +# | 2001:db8:2::1/64 | +# +--------------------------------+ +# +# Hierarchical model: +# The tunnel is bound to a device in a different VRF +# +# +--------------------------------+ +# | H1 | +# | $h1 + | +# | 198.51.100.1/24 | | +# | 2001:db8:1::1/64 | | +# +-------------------------|------+ +# | +# +-------------------------|-------------------+ +# | SW1 | | +# | +-----------------------|-----------------+ | +# | | $ol1 + | | +# | | 198.51.100.2/24 | | +# | | 2001:db8:1::2/64 | | +# | | | | +# | | + g1a (ip6gre) | | +# | | loc=2001:db8:3::1 | | +# | | rem=2001:db8:3::2 | | +# | | tos=inherit | | +# | | ^ | | +# | | VRF v$ol1 | | | +# | +--------------------|--------------------+ | +# | | | +# | +--------------------|--------------------+ | +# | | VRF v$ul1 | | | +# | | | | | +# | | v | | +# | | dummy1 + | | +# | | 2001:db8:3::1/64 | | +# | | .-----------' | | +# | | | | | +# | | v | | +# | | + $ul1.111 (vlan) | | +# | | | 2001:db8:10::1/64 | | +# | | \ | | +# | | \__________ | | +# | | | | | +# | | + $ul1 | | +# | +---------------------|-------------------+ | +# +-----------------------|---------------------+ +# | +# +-----------------------|---------------------+ +# | SW2 | | +# | +---------------------|-------------------+ | +# | | + $ul2 | | +# | | _____| | | +# | | / | | +# | | / | | +# | | | $ul2.111 (vlan) | | +# | | + 2001:db8:10::2/64 | | +# | | ^ | | +# | | | | | +# | | '------. 
| | +# | | dummy2 + | | +# | | 2001:db8:3::2/64 | | +# | | ^ | | +# | | | | | +# | | | | | +# | | VRF v$ul2 | | | +# | +---------------------|-------------------+ | +# | | | +# | +---------------------|-------------------+ | +# | | VRF v$ol2 | | | +# | | | | | +# | | v | | +# | | g2a (ip6gre) + | | +# | | loc=2001:db8:3::2 | | +# | | rem=2001:db8:3::1 | | +# | | tos=inherit | | +# | | | | +# | | $ol2 + | | +# | | 203.0.113.2/24 | | | +# | | 2001:db8:2::2/64 | | | +# | +---------------------|-------------------+ | +# +-----------------------|---------------------+ +# | +# +-----------------------|--------+ +# | H2 | | +# | $h2 + | +# | 203.0.113.1/24 | +# | 2001:db8:2::1/64 | +# +--------------------------------+ + +source lib.sh +source tc_common.sh + +h1_create() +{ + simple_if_init $h1 198.51.100.1/24 2001:db8:1::1/64 + ip route add vrf v$h1 203.0.113.0/24 via 198.51.100.2 + ip -6 route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 + ip route del vrf v$h1 203.0.113.0/24 via 198.51.100.2 + simple_if_fini $h1 198.51.100.1/24 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 203.0.113.1/24 2001:db8:2::1/64 + ip route add vrf v$h2 198.51.100.0/24 via 203.0.113.2 + ip -6 route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::2 +} + +h2_destroy() +{ + ip -6 route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::2 + ip route del vrf v$h2 198.51.100.0/24 via 203.0.113.2 + simple_if_fini $h2 203.0.113.1/24 2001:db8:2::1/64 +} + +sw1_flat_create() +{ + local ol1=$1; shift + local ul1=$1; shift + + ip link set dev $ol1 up + __addr_add_del $ol1 add 198.51.100.2/24 2001:db8:1::2/64 + + ip link set dev $ul1 up + vlan_create $ul1 111 "" 2001:db8:10::1/64 + + tunnel_create g1a ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \ + ttl inherit "$@" + ip link set dev g1a up + __addr_add_del g1a add "2001:db8:3::1/128" + + ip -6 route add 2001:db8:3::2/128 via 2001:db8:10::2 + ip route add 203.0.113.0/24 dev g1a + ip -6 route add 2001:db8:2::/64 dev g1a +} + +sw1_flat_destroy() +{ + local ol1=$1; shift + local ul1=$1; shift + + ip -6 route del 2001:db8:2::/64 + ip route del 203.0.113.0/24 + ip -6 route del 2001:db8:3::2/128 via 2001:db8:10::2 + + __simple_if_fini g1a 2001:db8:3::1/128 + tunnel_destroy g1a + + vlan_destroy $ul1 111 + __simple_if_fini $ul1 + __simple_if_fini $ol1 198.51.100.2/24 2001:db8:1::2/64 +} + +sw2_flat_create() +{ + local ol2=$1; shift + local ul2=$1; shift + + simple_if_init $ol2 203.0.113.2/24 2001:db8:2::2/64 + __simple_if_init $ul2 v$ol2 + vlan_create $ul2 111 v$ol2 2001:db8:10::2/64 + + tunnel_create g2a ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \ + ttl inherit dev v$ol2 "$@" + __simple_if_init g2a v$ol2 2001:db8:3::2/128 + + # Replace neighbor to avoid 1 dropped packet due to "unresolved neigh" + ip neigh replace dev $ol2 203.0.113.1 lladdr $(mac_get $h2) + ip -6 neigh replace dev $ol2 2001:db8:2::1 lladdr $(mac_get $h2) + + ip -6 route add vrf v$ol2 2001:db8:3::1/128 via 2001:db8:10::1 + ip route add vrf v$ol2 198.51.100.0/24 dev g2a + ip -6 route add vrf v$ol2 2001:db8:1::/64 dev g2a +} + +sw2_flat_destroy() +{ + local ol2=$1; shift + local ul2=$1; shift + + ip -6 route del vrf v$ol2 2001:db8:2::/64 + ip route del vrf v$ol2 198.51.100.0/24 + ip -6 route del vrf v$ol2 2001:db8:3::1/128 via 2001:db8:10::1 + + __simple_if_fini g2a 2001:db8:3::2/128 + tunnel_destroy g2a + + vlan_destroy $ul2 111 + __simple_if_fini $ul2 + simple_if_fini $ol2 203.0.113.2/24 2001:db8:2::2/64 +} + 
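+# Hierarchical-model helpers. The pattern they follow, as a rough sketch: the
+# tunnel local address lives on a dummy device in the underlay VRF, the
+# ip6gre netdev is bound to that dummy via "dev", and the tunnel itself is
+# then enslaved to the overlay VRF:
+#
+#   ip link add name dummy1 type dummy                  # in VRF v$ul1
+#   ip link add name g1a type ip6gre local 2001:db8:3::1 \
+#           remote 2001:db8:3::2 tos inherit ttl inherit dev dummy1
+#   ip link set dev g1a master v$ol1                    # overlay VRF
+#
+# Encapsulated packets are therefore routed in the underlay VRF while the
+# overlay routes point at the tunnel device.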
+sw1_hierarchical_create() +{ + local ol1=$1; shift + local ul1=$1; shift + + simple_if_init $ol1 198.51.100.2/24 2001:db8:1::2/64 + simple_if_init $ul1 + ip link add name dummy1 type dummy + __simple_if_init dummy1 v$ul1 2001:db8:3::1/64 + + vlan_create $ul1 111 v$ul1 2001:db8:10::1/64 + tunnel_create g1a ip6gre 2001:db8:3::1 2001:db8:3::2 tos inherit \ + ttl inherit dev dummy1 "$@" + ip link set dev g1a master v$ol1 + + ip -6 route add vrf v$ul1 2001:db8:3::2/128 via 2001:db8:10::2 + ip route add vrf v$ol1 203.0.113.0/24 dev g1a + ip -6 route add vrf v$ol1 2001:db8:2::/64 dev g1a +} + +sw1_hierarchical_destroy() +{ + local ol1=$1; shift + local ul1=$1; shift + + ip -6 route del vrf v$ol1 2001:db8:2::/64 + ip route del vrf v$ol1 203.0.113.0/24 + ip -6 route del vrf v$ul1 2001:db8:3::2/128 + + tunnel_destroy g1a + vlan_destroy $ul1 111 + + __simple_if_fini dummy1 2001:db8:3::1/64 + ip link del dev dummy1 + + simple_if_fini $ul1 + simple_if_fini $ol1 198.51.100.2/24 2001:db8:1::2/64 +} + +sw2_hierarchical_create() +{ + local ol2=$1; shift + local ul2=$1; shift + + simple_if_init $ol2 203.0.113.2/24 2001:db8:2::2/64 + simple_if_init $ul2 + + ip link add name dummy2 type dummy + __simple_if_init dummy2 v$ul2 2001:db8:3::2/64 + + vlan_create $ul2 111 v$ul2 2001:db8:10::2/64 + tunnel_create g2a ip6gre 2001:db8:3::2 2001:db8:3::1 tos inherit \ + ttl inherit dev dummy2 "$@" + ip link set dev g2a master v$ol2 + + # Replace neighbor to avoid 1 dropped packet due to "unresolved neigh" + ip neigh replace dev $ol2 203.0.113.1 lladdr $(mac_get $h2) + ip -6 neigh replace dev $ol2 2001:db8:2::1 lladdr $(mac_get $h2) + + ip -6 route add vrf v$ul2 2001:db8:3::1/128 via 2001:db8:10::1 + ip route add vrf v$ol2 198.51.100.0/24 dev g2a + ip -6 route add vrf v$ol2 2001:db8:1::/64 dev g2a +} + +sw2_hierarchical_destroy() +{ + local ol2=$1; shift + local ul2=$1; shift + + ip -6 route del vrf v$ol2 2001:db8:2::/64 + ip route del vrf v$ol2 198.51.100.0/24 + ip -6 route del vrf v$ul2 2001:db8:3::1/128 + + tunnel_destroy g2a + vlan_destroy $ul2 111 + + __simple_if_fini dummy2 2001:db8:3::2/64 + ip link del dev dummy2 + + simple_if_fini $ul2 + simple_if_fini $ol2 203.0.113.2/24 2001:db8:2::2/64 +} + +test_traffic_ip4ip6() +{ + RET=0 + + h1mac=$(mac_get $h1) + ol1mac=$(mac_get $ol1) + + tc qdisc add dev $ul1 clsact + tc filter add dev $ul1 egress proto all pref 1 handle 101 \ + flower $TC_FLAG action pass + + tc qdisc add dev $ol2 clsact + tc filter add dev $ol2 egress protocol ipv4 pref 1 handle 101 \ + flower $TC_FLAG dst_ip 203.0.113.1 action pass + + $MZ $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 198.51.100.1 \ + -B 203.0.113.1 -t ip -q -d 1msec + + # Check ports after encap and after decap. + tc_check_at_least_x_packets "dev $ul1 egress" 101 1000 + check_err $? "Packets did not go through $ul1, tc_flag = $TC_FLAG" + + tc_check_at_least_x_packets "dev $ol2 egress" 101 1000 + check_err $? 
"Packets did not go through $ol2, tc_flag = $TC_FLAG" + + log_test "$@" + + tc filter del dev $ol2 egress protocol ipv4 pref 1 handle 101 flower + tc qdisc del dev $ol2 clsact + tc filter del dev $ul1 egress proto all pref 1 handle 101 flower + tc qdisc del dev $ul1 clsact +} + +test_traffic_ip6ip6() +{ + RET=0 + + h1mac=$(mac_get $h1) + ol1mac=$(mac_get $ol1) + + tc qdisc add dev $ul1 clsact + tc filter add dev $ul1 egress proto all pref 1 handle 101 \ + flower $TC_FLAG action pass + + tc qdisc add dev $ol2 clsact + tc filter add dev $ol2 egress protocol ipv6 pref 1 handle 101 \ + flower $TC_FLAG dst_ip 2001:db8:2::1 action pass + + $MZ -6 $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 2001:db8:1::1 \ + -B 2001:db8:2::1 -t ip -q -d 1msec + + # Check ports after encap and after decap. + tc_check_at_least_x_packets "dev $ul1 egress" 101 1000 + check_err $? "Packets did not go through $ul1, tc_flag = $TC_FLAG" + + tc_check_at_least_x_packets "dev $ol2 egress" 101 1000 + check_err $? "Packets did not go through $ol2, tc_flag = $TC_FLAG" + + log_test "$@" + + tc filter del dev $ol2 egress protocol ipv6 pref 1 handle 101 flower + tc qdisc del dev $ol2 clsact + tc filter del dev $ul1 egress proto all pref 1 handle 101 flower + tc qdisc del dev $ul1 clsact +} + +topo_mtu_change() +{ + local mtu=$1 + + ip link set mtu $mtu dev $h1 + ip link set mtu $mtu dev $ol1 + ip link set mtu $mtu dev g1a + ip link set mtu $mtu dev $ul1 + ip link set mtu $mtu dev $ul1.111 + ip link set mtu $mtu dev $h2 + ip link set mtu $mtu dev $ol2 + ip link set mtu $mtu dev g2a + ip link set mtu $mtu dev $ul2 + ip link set mtu $mtu dev $ul2.111 +} + +test_mtu_change() +{ + RET=0 + + ping6_do $h1 2001:db8:2::1 "-s 1800 -w 3" + check_fail $? "ping GRE IPv6 should not pass with packet size 1800" + + RET=0 + + topo_mtu_change 2000 + ping6_do $h1 2001:db8:2::1 "-s 1800 -w 3" + check_err $? + log_test "ping GRE IPv6, packet size 1800 after MTU change" +} diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 92087d423bcf1d..dfd827b7a9f910 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -23,6 +23,8 @@ MC_CLI=${MC_CLI:=smcroutectl} PING_TIMEOUT=${PING_TIMEOUT:=5} WAIT_TIMEOUT=${WAIT_TIMEOUT:=20} INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600} +REQUIRE_JQ=${REQUIRE_JQ:=yes} +REQUIRE_MZ=${REQUIRE_MZ:=yes} relative_path="${BASH_SOURCE%/*}" if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then @@ -141,8 +143,12 @@ require_command() fi } -require_command jq -require_command $MZ +if [[ "$REQUIRE_JQ" = "yes" ]]; then + require_command jq +fi +if [[ "$REQUIRE_MZ" = "yes" ]]; then + require_command $MZ +fi if [[ ! 
-v NUM_NETIFS ]]; then echo "SKIP: importer does not define \"NUM_NETIFS\"" @@ -280,6 +286,15 @@ log_test() return 0 } +log_test_skip() +{ + local test_name=$1 + local opt_str=$2 + + printf "TEST: %-60s [SKIP]\n" "$test_name $opt_str" + return 0 +} + log_info() { local msg=$1 diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh index 8bd85da1905ad6..75a37c189ef355 100644 --- a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh +++ b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh @@ -4,9 +4,12 @@ ALL_TESTS=" ping_ipv4 tbf_test + tbf_root_test " source $lib_dir/sch_tbf_core.sh +QDISC_TYPE=${QDISC% *} + tbf_test_one() { local bs=$1; shift @@ -22,6 +25,8 @@ tbf_test_one() tbf_test() { + log_info "Testing root-$QDISC_TYPE-tbf" + # This test is used for both ETS and PRIO. Even though we only need two # bands, PRIO demands a minimum of three. tc qdisc add dev $swp2 root handle 10: $QDISC 3 priomap 2 1 0 @@ -29,6 +34,29 @@ tbf_test() tc qdisc del dev $swp2 root } +tbf_root_test() +{ + local bs=128K + + log_info "Testing root-tbf-$QDISC_TYPE" + + tc qdisc replace dev $swp2 root handle 1: \ + tbf rate 400Mbit burst $bs limit 1M + tc qdisc replace dev $swp2 parent 1:1 handle 10: \ + $QDISC 3 priomap 2 1 0 + tc qdisc replace dev $swp2 parent 10:3 handle 103: \ + bfifo limit 1M + tc qdisc replace dev $swp2 parent 10:2 handle 102: \ + bfifo limit 1M + tc qdisc replace dev $swp2 parent 10:1 handle 101: \ + bfifo limit 1M + + do_tbf_test 10 400 $bs + do_tbf_test 11 400 $bs + + tc qdisc del dev $swp2 root +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh index 0e18e8be6e2a90..bce8bb8d2b6f3c 100644 --- a/tools/testing/selftests/net/forwarding/tc_common.sh +++ b/tools/testing/selftests/net/forwarding/tc_common.sh @@ -16,6 +16,16 @@ tc_check_packets() tc_rule_handle_stats_get "$id" "$handle" > /dev/null } +tc_check_at_least_x_packets() +{ + local id=$1 + local handle=$2 + local count=$3 + + busywait "$TC_HIT_TIMEOUT" until_counter_is ">= $count" \ + tc_rule_handle_stats_get "$id" "$handle" > /dev/null +} + tc_check_packets_hitting() { local id=$1 diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh index a2489ec398fe01..a2b9fad5a9a652 100755 --- a/tools/testing/selftests/net/ioam6.sh +++ b/tools/testing/selftests/net/ioam6.sh @@ -6,7 +6,7 @@ # This script evaluates the IOAM insertion for IPv6 by checking the IOAM data # consistency directly inside packets on the receiver side. Tests are divided # into three categories: OUTPUT (evaluates the IOAM processing by the sender), -# INPUT (evaluates the IOAM processing by the receiver) and GLOBAL (evaluates +# INPUT (evaluates the IOAM processing by a receiver) and GLOBAL (evaluates # wider use cases that do not fall into the other two categories). Both OUTPUT # and INPUT tests only use a two-node topology (alpha and beta), while GLOBAL # tests use the entire three-node topology (alpha, beta, gamma). Each test is @@ -200,7 +200,7 @@ check_kernel_compatibility() ip -netns ioam-tmp-node link set veth0 up ip -netns ioam-tmp-node link set veth1 up - ip -netns ioam-tmp-node ioam namespace add 0 &>/dev/null + ip -netns ioam-tmp-node ioam namespace add 0 ns_ad=$? 
ip -netns ioam-tmp-node ioam namespace show | grep -q "namespace 0" @@ -214,11 +214,11 @@ check_kernel_compatibility() exit 1 fi - ip -netns ioam-tmp-node route add db02::/64 encap ioam6 trace prealloc \ - type 0x800000 ns 0 size 4 dev veth0 &>/dev/null + ip -netns ioam-tmp-node route add db02::/64 encap ioam6 mode inline \ + trace prealloc type 0x800000 ns 0 size 4 dev veth0 tr_ad=$? - ip -netns ioam-tmp-node -6 route | grep -q "encap ioam6 trace" + ip -netns ioam-tmp-node -6 route | grep -q "encap ioam6" tr_sh=$? if [[ $tr_ad != 0 || $tr_sh != 0 ]] @@ -232,6 +232,30 @@ check_kernel_compatibility() ip link del veth0 2>/dev/null || true ip netns del ioam-tmp-node || true + + lsmod | grep -q "ip6_tunnel" + ip6tnl_loaded=$? + + if [ $ip6tnl_loaded = 0 ] + then + encap_tests=0 + else + modprobe ip6_tunnel &>/dev/null + lsmod | grep -q "ip6_tunnel" + encap_tests=$? + + if [ $encap_tests != 0 ] + then + ip a | grep -q "ip6tnl0" + encap_tests=$? + + if [ $encap_tests != 0 ] + then + echo "Note: ip6_tunnel not found neither as a module nor inside the" \ + "kernel, tests that require it (encap mode) will be omitted" + fi + fi + fi } cleanup() @@ -242,6 +266,11 @@ cleanup() ip netns del ioam-node-alpha || true ip netns del ioam-node-beta || true ip netns del ioam-node-gamma || true + + if [ $ip6tnl_loaded != 0 ] + then + modprobe -r ip6_tunnel 2>/dev/null || true + fi } setup() @@ -329,6 +358,12 @@ log_test_failed() printf "TEST: %-60s [FAIL]\n" "${desc}" } +log_results() +{ + echo "- Tests passed: ${npassed}" + echo "- Tests failed: ${nfailed}" +} + run_test() { local name=$1 @@ -349,16 +384,26 @@ run_test() ip netns exec $node_src ping6 -t 64 -c 1 -W 1 $ip6_dst &>/dev/null if [ $? != 0 ] then + nfailed=$((nfailed+1)) log_test_failed "${desc}" kill -2 $spid &>/dev/null else wait $spid - [ $? = 0 ] && log_test_passed "${desc}" || log_test_failed "${desc}" + if [ $? 
= 0 ] + then + npassed=$((npassed+1)) + log_test_passed "${desc}" + else + nfailed=$((nfailed+1)) + log_test_failed "${desc}" + fi fi } run() { + echo + printf "%0.s-" {1..74} echo echo "OUTPUT tests" printf "%0.s-" {1..74} @@ -369,7 +414,8 @@ run() for t in $TESTS_OUTPUT do - $t + $t "inline" + [ $encap_tests = 0 ] && $t "encap" done # clean OUTPUT settings @@ -377,6 +423,8 @@ run() ip -netns ioam-node-alpha route change db01::/64 dev veth0 + echo + printf "%0.s-" {1..74} echo echo "INPUT tests" printf "%0.s-" {1..74} @@ -387,7 +435,8 @@ run() for t in $TESTS_INPUT do - $t + $t "inline" + [ $encap_tests = 0 ] && $t "encap" done # clean INPUT settings @@ -396,7 +445,8 @@ run() ip -netns ioam-node-alpha ioam namespace set 123 schema ${ALPHA[8]} ip -netns ioam-node-alpha route change db01::/64 dev veth0 - + echo + printf "%0.s-" {1..74} echo echo "GLOBAL tests" printf "%0.s-" {1..74} @@ -404,8 +454,12 @@ run() for t in $TESTS_GLOBAL do - $t + $t "inline" + [ $encap_tests = 0 ] && $t "encap" done + + echo + log_results } bit2type=( @@ -431,11 +485,16 @@ out_undef_ns() ############################################################################## local desc="Unknown IOAM namespace" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0x800000 ns 0 size 4 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0x800000 0 + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0x800000 ns 0 size 4 dev veth0 + + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0x800000 0 + + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } out_no_room() @@ -446,11 +505,16 @@ out_no_room() ############################################################################## local desc="Missing trace room" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0xc00000 ns 123 size 4 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0xc00000 ns 123 size 4 dev veth0 - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0xc00000 123 + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0xc00000 123 + + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } out_bits() @@ -465,10 +529,13 @@ out_bits() local tmp=${bit2size[22]} bit2size[22]=$(( $tmp + ${#ALPHA[9]} + ((4 - (${#ALPHA[9]} % 4)) % 4) )) + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + for i in {0..22} do - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \ - prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ dev veth0 &>/dev/null local cmd_res=$? 
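For context on the new "inline"/"encap" arguments that every ioam6.sh test now takes: in encap mode the IOAM trace rides in an outer IPv6 header addressed to the tunnel destination (tundst), so the receiving namespace has to bring ip6tnl0 up to decapsulate it, which is also why check_kernel_compatibility() probes for ip6_tunnel. A rough manual equivalent of one encap-mode run, reusing the addresses and namespaces of this script (a sketch, not part of the patch):

	ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode encap \
		tundst db01::1 trace prealloc type 0xfff002 ns 123 size 100 dev veth0
	ip -netns ioam-node-beta link set ip6tnl0 up	# receiver decapsulates
	ip netns exec ioam-node-alpha ping6 -t 64 -c 1 -W 1 db01::1
	ip -netns ioam-node-beta link set ip6tnl0 down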
@@ -485,11 +552,13 @@ out_bits() log_test_failed "$descr" fi else - run_test "out_bit$i" "$descr" ioam-node-alpha ioam-node-beta \ - db01::2 db01::1 veth0 ${bit2type[$i]} 123 + run_test "out_bit$i" "$descr ($1 mode)" ioam-node-alpha \ + ioam-node-beta db01::2 db01::1 veth0 ${bit2type[$i]} 123 fi done + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down + bit2size[22]=$tmp } @@ -501,11 +570,16 @@ out_full_supp_trace() ############################################################################## local desc="Full supported trace" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0xfff002 ns 123 size 100 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0xfff002 123 + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0xfff002 ns 123 size 100 dev veth0 + + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0xfff002 123 + + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } @@ -526,11 +600,16 @@ in_undef_ns() ############################################################################## local desc="Unknown IOAM namespace" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0x800000 ns 0 size 4 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0x800000 ns 0 size 4 dev veth0 - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0x800000 0 + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0x800000 0 + + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } in_no_room() @@ -541,11 +620,16 @@ in_no_room() ############################################################################## local desc="Missing trace room" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0xc00000 ns 123 size 4 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0xc00000 ns 123 size 4 dev veth0 + + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0xc00000 123 - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0xc00000 123 + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } in_bits() @@ -560,15 +644,21 @@ in_bits() local tmp=${bit2size[22]} bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) )) + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + for i in {0..11} {22..22} do - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \ - prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0 + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ + dev veth0 - run_test "in_bit$i" "${desc//$i}" ioam-node-alpha 
ioam-node-beta \ - db01::2 db01::1 veth0 ${bit2type[$i]} 123 + run_test "in_bit$i" "${desc//$i} ($1 mode)" ioam-node-alpha \ + ioam-node-beta db01::2 db01::1 veth0 ${bit2type[$i]} 123 done + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down + bit2size[22]=$tmp } @@ -585,11 +675,16 @@ in_oflag() # back the IOAM namespace that was previously configured on the sender. ip -netns ioam-node-alpha ioam namespace add 123 - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0xc00000 ns 123 size 4 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0xc00000 ns 123 size 4 dev veth0 - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0xc00000 123 + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0xc00000 123 + + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down # And we clean the exception for this test to get things back to normal for # other INPUT tests @@ -604,11 +699,16 @@ in_full_supp_trace() ############################################################################## local desc="Full supported trace" - ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace prealloc \ - type 0xfff002 ns 123 size 80 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db01::1" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 up + + ip -netns ioam-node-alpha route change db01::/64 encap ioam6 mode $mode \ + trace prealloc type 0xfff002 ns 123 size 80 dev veth0 + + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-beta \ + db01::2 db01::1 veth0 0xfff002 123 - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-beta db01::2 \ - db01::1 veth0 0xfff002 123 + [ "$1" = "encap" ] && ip -netns ioam-node-beta link set ip6tnl0 down } @@ -627,11 +727,16 @@ fwd_full_supp_trace() ############################################################################## local desc="Forward - Full supported trace" - ip -netns ioam-node-alpha route change db02::/64 encap ioam6 trace prealloc \ - type 0xfff002 ns 123 size 244 via db01::1 dev veth0 + [ "$1" = "encap" ] && mode="$1 tundst db02::2" || mode="$1" + [ "$1" = "encap" ] && ip -netns ioam-node-gamma link set ip6tnl0 up - run_test ${FUNCNAME[0]} "${desc}" ioam-node-alpha ioam-node-gamma db01::2 \ - db02::2 veth0 0xfff002 123 + ip -netns ioam-node-alpha route change db02::/64 encap ioam6 mode $mode \ + trace prealloc type 0xfff002 ns 123 size 244 via db01::1 dev veth0 + + run_test ${FUNCNAME[0]} "${desc} ($1 mode)" ioam-node-alpha ioam-node-gamma \ + db01::2 db02::2 veth0 0xfff002 123 + + [ "$1" = "encap" ] && ip -netns ioam-node-gamma link set ip6tnl0 down } @@ -641,6 +746,9 @@ fwd_full_supp_trace() # # ################################################################################ +npassed=0 +nfailed=0 + if [ "$(id -u)" -ne 0 ] then echo "SKIP: Need root privileges" diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore index 260336d5f0b13f..7569d892967a99 100644 --- a/tools/testing/selftests/net/mptcp/.gitignore +++ b/tools/testing/selftests/net/mptcp/.gitignore @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only mptcp_connect +mptcp_sockopt pm_nl_ctl *.pcap diff --git a/tools/testing/selftests/net/mptcp/Makefile 
b/tools/testing/selftests/net/mptcp/Makefile index f1464f09b080fa..bbf4e448bad997 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -8,7 +8,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ simult_flows.sh mptcp_sockopt.sh -TEST_GEN_FILES = mptcp_connect pm_nl_ctl +TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt TEST_FILES := settings diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index 89c4753c2760cd..95e81d557b0887 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,7 @@ static int cfg_sndbuf; static int cfg_rcvbuf; static bool cfg_join; static bool cfg_remove; +static unsigned int cfg_time; static unsigned int cfg_do_w; static int cfg_wait; static uint32_t cfg_mark; @@ -78,9 +80,10 @@ static struct cfg_cmsg_types cfg_cmsg_types; static void die_usage(void) { fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]" - "[-l] [-w sec] connect_address\n"); + "[-l] [-w sec] [-t num] [-T num] connect_address\n"); fprintf(stderr, "\t-6 use ipv6\n"); fprintf(stderr, "\t-t num -- set poll timeout to num\n"); + fprintf(stderr, "\t-T num -- set expected runtime to num ms\n"); fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n"); fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n"); fprintf(stderr, "\t-p num -- use port num\n"); @@ -448,7 +451,7 @@ static void set_nonblock(int fd) fcntl(fd, F_SETFL, flags | O_NONBLOCK); } -static int copyfd_io_poll(int infd, int peerfd, int outfd) +static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after_out) { struct pollfd fds = { .fd = peerfd, @@ -487,9 +490,11 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd) */ fds.events &= ~POLLIN; - if ((fds.events & POLLOUT) == 0) + if ((fds.events & POLLOUT) == 0) { + *in_closed_after_out = true; /* and nothing more to send */ break; + } /* Else, still have data to transmit */ } else if (len < 0) { @@ -547,7 +552,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd) } /* leave some time for late join/announce */ - if (cfg_join || cfg_remove) + if (cfg_remove) usleep(cfg_wait); close(peerfd); @@ -646,7 +651,7 @@ static int do_sendfile(int infd, int outfd, unsigned int count) } static int copyfd_io_mmap(int infd, int peerfd, int outfd, - unsigned int size) + unsigned int size, bool *in_closed_after_out) { int err; @@ -664,13 +669,14 @@ static int copyfd_io_mmap(int infd, int peerfd, int outfd, shutdown(peerfd, SHUT_WR); err = do_recvfile(peerfd, outfd); + *in_closed_after_out = true; } return err; } static int copyfd_io_sendfile(int infd, int peerfd, int outfd, - unsigned int size) + unsigned int size, bool *in_closed_after_out) { int err; @@ -685,6 +691,7 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd, if (err) return err; err = do_recvfile(peerfd, outfd); + *in_closed_after_out = true; } return err; @@ -692,27 +699,62 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd, static int copyfd_io(int infd, int peerfd, int outfd) { + bool in_closed_after_out = false; + struct timespec start, end; int file_size; + int ret; + + if (cfg_time && (clock_gettime(CLOCK_MONOTONIC, &start) < 0)) + xerror("can not fetch start time %d", errno); switch (cfg_mode) { 
case CFG_MODE_POLL: - return copyfd_io_poll(infd, peerfd, outfd); + ret = copyfd_io_poll(infd, peerfd, outfd, &in_closed_after_out); + break; + case CFG_MODE_MMAP: file_size = get_infd_size(infd); if (file_size < 0) return file_size; - return copyfd_io_mmap(infd, peerfd, outfd, file_size); + ret = copyfd_io_mmap(infd, peerfd, outfd, file_size, &in_closed_after_out); + break; + case CFG_MODE_SENDFILE: file_size = get_infd_size(infd); if (file_size < 0) return file_size; - return copyfd_io_sendfile(infd, peerfd, outfd, file_size); + ret = copyfd_io_sendfile(infd, peerfd, outfd, file_size, &in_closed_after_out); + break; + + default: + fprintf(stderr, "Invalid mode %d\n", cfg_mode); + + die_usage(); + return 1; } - fprintf(stderr, "Invalid mode %d\n", cfg_mode); + if (ret) + return ret; - die_usage(); - return 1; + if (cfg_time) { + unsigned int delta_ms; + + if (clock_gettime(CLOCK_MONOTONIC, &end) < 0) + xerror("can not fetch end time %d", errno); + delta_ms = (end.tv_sec - start.tv_sec) * 1000 + (end.tv_nsec - start.tv_nsec) / 1000000; + if (delta_ms > cfg_time) { + xerror("transfer slower than expected! runtime %d ms, expected %d ms", + delta_ms, cfg_time); + } + + /* show the runtime only if this end shutdown(wr) before receiving the EOF, + * (that is, if this end got the longer runtime) + */ + if (in_closed_after_out) + fprintf(stderr, "%d", delta_ms); + } + + return 0; } static void check_sockaddr(int pf, struct sockaddr_storage *ss, @@ -1005,12 +1047,11 @@ static void parse_opts(int argc, char **argv) { int c; - while ((c = getopt(argc, argv, "6jr:lp:s:hut:m:S:R:w:M:P:c:")) != -1) { + while ((c = getopt(argc, argv, "6jr:lp:s:hut:T:m:S:R:w:M:P:c:")) != -1) { switch (c) { case 'j': cfg_join = true; cfg_mode = CFG_MODE_POLL; - cfg_wait = 400000; break; case 'r': cfg_remove = true; @@ -1043,6 +1084,9 @@ static void parse_opts(int argc, char **argv) if (poll_timeout <= 0) poll_timeout = -1; break; + case 'T': + cfg_time = atoi(optarg); + break; case 'm': cfg_mode = parse_mode(optarg); break; diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 255793c5ac4ffe..7ef639a9d4a6fd 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -297,7 +297,7 @@ do_transfer() if [ "$test_link_fail" -eq 2 ];then timeout ${timeout_test} \ ip netns exec ${listener_ns} \ - $mptcp_connect -t ${timeout_poll} -l -p $port -s ${cl_proto} \ + $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \ ${local_addr} < "$sinfail" > "$sout" & else timeout ${timeout_test} \ @@ -945,12 +945,15 @@ subflows_tests() # subflow limited by client reset + ip netns exec $ns1 ./pm_nl_ctl limits 0 0 + ip netns exec $ns2 ./pm_nl_ctl limits 0 0 ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 chk_join_nr "single subflow, limited by client" 0 0 0 # subflow limited by server reset + ip netns exec $ns1 ./pm_nl_ctl limits 0 0 ip netns exec $ns2 ./pm_nl_ctl limits 0 1 ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 @@ -973,7 +976,7 @@ subflows_tests() run_tests $ns1 $ns2 10.0.1.1 chk_join_nr "multiple subflows" 2 2 2 - # multiple subflows limited by serverf + # multiple subflows limited by server reset ip netns exec $ns1 ./pm_nl_ctl limits 0 1 ip netns exec $ns2 ./pm_nl_ctl limits 0 2 diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c new file mode 100644 index 
00000000000000..417b11cafafe7a --- /dev/null +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c @@ -0,0 +1,683 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +static int pf = AF_INET; + +#ifndef IPPROTO_MPTCP +#define IPPROTO_MPTCP 262 +#endif +#ifndef SOL_MPTCP +#define SOL_MPTCP 284 +#endif + +#ifndef MPTCP_INFO +struct mptcp_info { + __u8 mptcpi_subflows; + __u8 mptcpi_add_addr_signal; + __u8 mptcpi_add_addr_accepted; + __u8 mptcpi_subflows_max; + __u8 mptcpi_add_addr_signal_max; + __u8 mptcpi_add_addr_accepted_max; + __u32 mptcpi_flags; + __u32 mptcpi_token; + __u64 mptcpi_write_seq; + __u64 mptcpi_snd_una; + __u64 mptcpi_rcv_nxt; + __u8 mptcpi_local_addr_used; + __u8 mptcpi_local_addr_max; + __u8 mptcpi_csum_enabled; +}; + +struct mptcp_subflow_data { + __u32 size_subflow_data; /* size of this structure in userspace */ + __u32 num_subflows; /* must be 0, set by kernel */ + __u32 size_kernel; /* must be 0, set by kernel */ + __u32 size_user; /* size of one element in data[] */ +} __attribute__((aligned(8))); + +struct mptcp_subflow_addrs { + union { + __kernel_sa_family_t sa_family; + struct sockaddr sa_local; + struct sockaddr_in sin_local; + struct sockaddr_in6 sin6_local; + struct __kernel_sockaddr_storage ss_local; + }; + union { + struct sockaddr sa_remote; + struct sockaddr_in sin_remote; + struct sockaddr_in6 sin6_remote; + struct __kernel_sockaddr_storage ss_remote; + }; +}; + +#define MPTCP_INFO 1 +#define MPTCP_TCPINFO 2 +#define MPTCP_SUBFLOW_ADDRS 3 +#endif + +struct so_state { + struct mptcp_info mi; + uint64_t mptcpi_rcv_delta; + uint64_t tcpi_rcv_delta; +}; + +static void die_perror(const char *msg) +{ + perror(msg); + exit(1); +} + +static void die_usage(int r) +{ + fprintf(stderr, "Usage: mptcp_sockopt [-6]\n"); + exit(r); +} + +static void xerror(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fputc('\n', stderr); + exit(1); +} + +static const char *getxinfo_strerr(int err) +{ + if (err == EAI_SYSTEM) + return strerror(errno); + + return gai_strerror(err); +} + +static void xgetaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + int err = getaddrinfo(node, service, hints, res); + + if (err) { + const char *errstr = getxinfo_strerr(err); + + fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n", + node ? node : "", service ? 
service : "", errstr); + exit(1); + } +} + +static int sock_listen_mptcp(const char * const listenaddr, + const char * const port) +{ + int sock; + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + .ai_flags = AI_PASSIVE | AI_NUMERICHOST + }; + + hints.ai_family = pf; + + struct addrinfo *a, *addr; + int one = 1; + + xgetaddrinfo(listenaddr, port, &hints, &addr); + hints.ai_family = pf; + + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, IPPROTO_MPTCP); + if (sock < 0) + continue; + + if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))) + perror("setsockopt"); + + if (bind(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + perror("bind"); + close(sock); + sock = -1; + } + + freeaddrinfo(addr); + + if (sock < 0) + xerror("could not create listen socket"); + + if (listen(sock, 20)) + die_perror("listen"); + + return sock; +} + +static int sock_connect_mptcp(const char * const remoteaddr, + const char * const port, int proto) +{ + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + }; + struct addrinfo *a, *addr; + int sock = -1; + + hints.ai_family = pf; + + xgetaddrinfo(remoteaddr, port, &hints, &addr); + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, proto); + if (sock < 0) + continue; + + if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + die_perror("connect"); + } + + if (sock < 0) + xerror("could not create connect socket"); + + freeaddrinfo(addr); + return sock; +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "h6")) != -1) { + switch (c) { + case 'h': + die_usage(0); + break; + case '6': + pf = AF_INET6; + break; + default: + die_usage(1); + break; + } + } +} + +static void do_getsockopt_bogus_sf_data(int fd, int optname) +{ + struct mptcp_subflow_data good_data; + struct bogus_data { + struct mptcp_subflow_data d; + char buf[2]; + } bd; + socklen_t olen, _olen; + int ret; + + memset(&bd, 0, sizeof(bd)); + memset(&good_data, 0, sizeof(good_data)); + + olen = sizeof(good_data); + good_data.size_subflow_data = olen; + + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret < 0); /* 0 size_subflow_data */ + assert(olen == sizeof(good_data)); + + bd.d = good_data; + + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret == 0); + assert(olen == sizeof(good_data)); + assert(bd.d.num_subflows == 1); + assert(bd.d.size_kernel > 0); + assert(bd.d.size_user == 0); + + bd.d = good_data; + _olen = rand() % olen; + olen = _olen; + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret < 0); /* bogus olen */ + assert(olen == _olen); /* must be unchanged */ + + bd.d = good_data; + olen = sizeof(good_data); + bd.d.size_kernel = 1; + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret < 0); /* size_kernel not 0 */ + + bd.d = good_data; + olen = sizeof(good_data); + bd.d.num_subflows = 1; + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret < 0); /* num_subflows not 0 */ + + /* forward compat check: larger struct mptcp_subflow_data on 'old' kernel */ + bd.d = good_data; + olen = sizeof(bd); + bd.d.size_subflow_data = sizeof(bd); + + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &olen); + assert(ret == 0); + + /* olen must be truncated to real data size filled by kernel: */ + assert(olen == sizeof(good_data)); + + assert(bd.d.size_subflow_data == sizeof(bd)); + + bd.d = good_data; + 
bd.d.size_subflow_data += 1; + bd.d.size_user = 1; + olen = bd.d.size_subflow_data + 1; + _olen = olen; + + ret = getsockopt(fd, SOL_MPTCP, optname, &bd, &_olen); + assert(ret == 0); + + /* no truncation, kernel should have filled 1 byte of optname payload in buf[1]: */ + assert(olen == _olen); + + assert(bd.d.size_subflow_data == sizeof(good_data) + 1); + assert(bd.buf[0] == 0); +} + +static void do_getsockopt_mptcp_info(struct so_state *s, int fd, size_t w) +{ + struct mptcp_info i; + socklen_t olen; + int ret; + + olen = sizeof(i); + ret = getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &i, &olen); + + if (ret < 0) + die_perror("getsockopt MPTCP_INFO"); + + assert(olen == sizeof(i)); + + if (s->mi.mptcpi_write_seq == 0) + s->mi = i; + + assert(s->mi.mptcpi_write_seq + w == i.mptcpi_write_seq); + + s->mptcpi_rcv_delta = i.mptcpi_rcv_nxt - s->mi.mptcpi_rcv_nxt; +} + +static void do_getsockopt_tcp_info(struct so_state *s, int fd, size_t r, size_t w) +{ + struct my_tcp_info { + struct mptcp_subflow_data d; + struct tcp_info ti[2]; + } ti; + int ret, tries = 5; + socklen_t olen; + + do { + memset(&ti, 0, sizeof(ti)); + + ti.d.size_subflow_data = sizeof(struct mptcp_subflow_data); + ti.d.size_user = sizeof(struct tcp_info); + olen = sizeof(ti); + + ret = getsockopt(fd, SOL_MPTCP, MPTCP_TCPINFO, &ti, &olen); + if (ret < 0) + xerror("getsockopt MPTCP_TCPINFO (tries %d, %m)"); + + assert(olen <= sizeof(ti)); + assert(ti.d.size_user == ti.d.size_kernel); + assert(ti.d.size_user == sizeof(struct tcp_info)); + assert(ti.d.num_subflows == 1); + + assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data)); + olen -= sizeof(struct mptcp_subflow_data); + assert(olen == sizeof(struct tcp_info)); + + if (ti.ti[0].tcpi_bytes_sent == w && + ti.ti[0].tcpi_bytes_received == r) + goto done; + + if (r == 0 && ti.ti[0].tcpi_bytes_sent == w && + ti.ti[0].tcpi_bytes_received) { + s->tcpi_rcv_delta = ti.ti[0].tcpi_bytes_received; + goto done; + } + + /* wait and repeat, might be that tx is still ongoing */ + sleep(1); + } while (tries-- > 0); + + xerror("tcpi_bytes_sent %" PRIu64 ", want %zu. 
tcpi_bytes_received %" PRIu64 ", want %zu", + ti.ti[0].tcpi_bytes_sent, w, ti.ti[0].tcpi_bytes_received, r); + +done: + do_getsockopt_bogus_sf_data(fd, MPTCP_TCPINFO); +} + +static void do_getsockopt_subflow_addrs(int fd) +{ + struct sockaddr_storage remote, local; + socklen_t olen, rlen, llen; + int ret; + struct my_addrs { + struct mptcp_subflow_data d; + struct mptcp_subflow_addrs addr[2]; + } addrs; + + memset(&addrs, 0, sizeof(addrs)); + memset(&local, 0, sizeof(local)); + memset(&remote, 0, sizeof(remote)); + + addrs.d.size_subflow_data = sizeof(struct mptcp_subflow_data); + addrs.d.size_user = sizeof(struct mptcp_subflow_addrs); + olen = sizeof(addrs); + + ret = getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &addrs, &olen); + if (ret < 0) + die_perror("getsockopt MPTCP_SUBFLOW_ADDRS"); + + assert(olen <= sizeof(addrs)); + assert(addrs.d.size_user == addrs.d.size_kernel); + assert(addrs.d.size_user == sizeof(struct mptcp_subflow_addrs)); + assert(addrs.d.num_subflows == 1); + + assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data)); + olen -= sizeof(struct mptcp_subflow_data); + assert(olen == sizeof(struct mptcp_subflow_addrs)); + + llen = sizeof(local); + ret = getsockname(fd, (struct sockaddr *)&local, &llen); + if (ret < 0) + die_perror("getsockname"); + rlen = sizeof(remote); + ret = getpeername(fd, (struct sockaddr *)&remote, &rlen); + if (ret < 0) + die_perror("getpeername"); + + assert(rlen > 0); + assert(rlen == llen); + + assert(remote.ss_family == local.ss_family); + + assert(memcmp(&local, &addrs.addr[0].ss_local, sizeof(local)) == 0); + assert(memcmp(&remote, &addrs.addr[0].ss_remote, sizeof(remote)) == 0); + + memset(&addrs, 0, sizeof(addrs)); + + addrs.d.size_subflow_data = sizeof(struct mptcp_subflow_data); + addrs.d.size_user = sizeof(sa_family_t); + olen = sizeof(addrs.d) + sizeof(sa_family_t); + + ret = getsockopt(fd, SOL_MPTCP, MPTCP_SUBFLOW_ADDRS, &addrs, &olen); + assert(ret == 0); + assert(olen == sizeof(addrs.d) + sizeof(sa_family_t)); + + assert(addrs.addr[0].sa_family == pf); + assert(addrs.addr[0].sa_family == local.ss_family); + + assert(memcmp(&local, &addrs.addr[0].ss_local, sizeof(local)) != 0); + assert(memcmp(&remote, &addrs.addr[0].ss_remote, sizeof(remote)) != 0); + + do_getsockopt_bogus_sf_data(fd, MPTCP_SUBFLOW_ADDRS); +} + +static void do_getsockopts(struct so_state *s, int fd, size_t r, size_t w) +{ + do_getsockopt_mptcp_info(s, fd, w); + + do_getsockopt_tcp_info(s, fd, r, w); + + do_getsockopt_subflow_addrs(fd); +} + +static void connect_one_server(int fd, int pipefd) +{ + char buf[4096], buf2[4096]; + size_t len, i, total; + struct so_state s; + bool eof = false; + ssize_t ret; + + memset(&s, 0, sizeof(s)); + + len = rand() % (sizeof(buf) - 1); + + if (len < 128) + len = 128; + + for (i = 0; i < len ; i++) { + buf[i] = rand() % 26; + buf[i] += 'A'; + } + + buf[i] = '\n'; + + do_getsockopts(&s, fd, 0, 0); + + /* un-block server */ + ret = read(pipefd, buf2, 4); + assert(ret == 4); + close(pipefd); + + assert(strncmp(buf2, "xmit", 4) == 0); + + ret = write(fd, buf, len); + if (ret < 0) + die_perror("write"); + + if (ret != (ssize_t)len) + xerror("short write"); + + total = 0; + do { + ret = read(fd, buf2 + total, sizeof(buf2) - total); + if (ret < 0) + die_perror("read"); + if (ret == 0) { + eof = true; + break; + } + + total += ret; + } while (total < len); + + if (total != len) + xerror("total %lu, len %lu eof %d\n", total, len, eof); + + if (memcmp(buf, buf2, len)) + xerror("data corruption"); + + if (s.tcpi_rcv_delta) + 
assert(s.tcpi_rcv_delta <= total); + + do_getsockopts(&s, fd, ret, ret); + + if (eof) + total += 1; /* sequence advances due to FIN */ + + assert(s.mptcpi_rcv_delta == (uint64_t)total); + close(fd); +} + +static void process_one_client(int fd, int pipefd) +{ + ssize_t ret, ret2, ret3; + struct so_state s; + char buf[4096]; + + memset(&s, 0, sizeof(s)); + do_getsockopts(&s, fd, 0, 0); + + ret = write(pipefd, "xmit", 4); + assert(ret == 4); + + ret = read(fd, buf, sizeof(buf)); + if (ret < 0) + die_perror("read"); + + assert(s.mptcpi_rcv_delta <= (uint64_t)ret); + + if (s.tcpi_rcv_delta) + assert(s.tcpi_rcv_delta == (uint64_t)ret); + + ret2 = write(fd, buf, ret); + if (ret2 < 0) + die_perror("write"); + + /* wait for hangup */ + ret3 = read(fd, buf, 1); + if (ret3 != 0) + xerror("expected EOF, got %lu", ret3); + + do_getsockopts(&s, fd, ret, ret2); + if (s.mptcpi_rcv_delta != (uint64_t)ret + 1) + xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret); + close(fd); +} + +static int xaccept(int s) +{ + int fd = accept(s, NULL, 0); + + if (fd < 0) + die_perror("accept"); + + return fd; +} + +static int server(int pipefd) +{ + int fd = -1, r; + + switch (pf) { + case AF_INET: + fd = sock_listen_mptcp("127.0.0.1", "15432"); + break; + case AF_INET6: + fd = sock_listen_mptcp("::1", "15432"); + break; + default: + xerror("Unknown pf %d\n", pf); + break; + } + + r = write(pipefd, "conn", 4); + assert(r == 4); + + alarm(15); + r = xaccept(fd); + + process_one_client(r, pipefd); + + return 0; +} + +static int client(int pipefd) +{ + int fd = -1; + + alarm(15); + + switch (pf) { + case AF_INET: + fd = sock_connect_mptcp("127.0.0.1", "15432", IPPROTO_MPTCP); + break; + case AF_INET6: + fd = sock_connect_mptcp("::1", "15432", IPPROTO_MPTCP); + break; + default: + xerror("Unknown pf %d\n", pf); + } + + connect_one_server(fd, pipefd); + + return 0; +} + +static pid_t xfork(void) +{ + pid_t p = fork(); + + if (p < 0) + die_perror("fork"); + + return p; +} + +static int rcheck(int wstatus, const char *what) +{ + if (WIFEXITED(wstatus)) { + if (WEXITSTATUS(wstatus) == 0) + return 0; + fprintf(stderr, "%s exited, status=%d\n", what, WEXITSTATUS(wstatus)); + return WEXITSTATUS(wstatus); + } else if (WIFSIGNALED(wstatus)) { + xerror("%s killed by signal %d\n", what, WTERMSIG(wstatus)); + } else if (WIFSTOPPED(wstatus)) { + xerror("%s stopped by signal %d\n", what, WSTOPSIG(wstatus)); + } + + return 111; +} + +int main(int argc, char *argv[]) +{ + int e1, e2, wstatus; + pid_t s, c, ret; + int pipefds[2]; + + parse_opts(argc, argv); + + e1 = pipe(pipefds); + if (e1 < 0) + die_perror("pipe"); + + s = xfork(); + if (s == 0) + return server(pipefds[1]); + + close(pipefds[1]); + + /* wait until server bound a socket */ + e1 = read(pipefds[0], &e1, 4); + assert(e1 == 4); + + c = xfork(); + if (c == 0) + return client(pipefds[0]); + + close(pipefds[0]); + + ret = waitpid(s, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e1 = rcheck(wstatus, "server"); + ret = waitpid(c, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e2 = rcheck(wstatus, "client"); + + return e1 ? 
e1 : e2; +} diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh index 1579e471a5e7b8..41de643788b80e 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -239,12 +239,35 @@ make_file() echo "Created $name (size $size KB) containing data sent by $who" } +do_mptcp_sockopt_tests() +{ + local lret=0 + + ./mptcp_sockopt + lret=$? + + if [ $lret -ne 0 ]; then + echo "FAIL: SOL_MPTCP getsockopt" 1>&2 + ret=$lret + return + fi + + ./mptcp_sockopt -6 + lret=$? + + if [ $lret -ne 0 ]; then + echo "FAIL: SOL_MPTCP getsockopt (ipv6)" 1>&2 + ret=$lret + return + fi +} + run_tests() { listener_ns="$1" connector_ns="$2" connect_addr="$3" - lret=0 + local lret=0 do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} @@ -268,9 +291,13 @@ trap cleanup EXIT run_tests $ns1 $ns2 10.0.1.1 run_tests $ns1 $ns2 dead:beef:1::1 - if [ $ret -eq 0 ];then echo "PASS: all packets had packet mark set" fi +do_mptcp_sockopt_tests +if [ $ret -eq 0 ];then + echo "PASS: SOL_MPTCP getsockopt has expected information" +fi + exit $ret diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh index 3c741abe034ef5..cbacf9f6538bd2 100755 --- a/tools/testing/selftests/net/mptcp/pm_netlink.sh +++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh @@ -70,7 +70,7 @@ check() check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "defaults addr list" check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0 -subflows 0" "defaults limits" +subflows 2" "defaults limits" ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.1 ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.2 flags subflow dev lo @@ -118,11 +118,11 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "flush addrs" ip netns exec $ns1 ./pm_nl_ctl limits 9 1 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0 -subflows 0" "rcv addrs above hard limit" +subflows 2" "rcv addrs above hard limit" ip netns exec $ns1 ./pm_nl_ctl limits 1 9 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0 -subflows 0" "subflows above hard limit" +subflows 2" "subflows above hard limit" ip netns exec $ns1 ./pm_nl_ctl limits 8 8 check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8 diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh index 910d8126af8f29..f441ff7904fcf8 100755 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh @@ -51,7 +51,7 @@ setup() sout=$(mktemp) cout=$(mktemp) capout=$(mktemp) - size=$((2048 * 4096)) + size=$((2 * 2048 * 4096)) dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1 dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1 @@ -161,17 +161,15 @@ do_transfer() timeout ${timeout_test} \ ip netns exec ${ns3} \ - ./mptcp_connect -jt ${timeout_poll} -l -p $port \ + ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $time \ 0.0.0.0 < "$sin" > "$sout" & local spid=$! wait_local_port_listen "${ns3}" "${port}" - local start - start=$(date +%s%3N) timeout ${timeout_test} \ ip netns exec ${ns1} \ - ./mptcp_connect -jt ${timeout_poll} -p $port \ + ./mptcp_connect -jt ${timeout_poll} -p $port -T $time \ 10.0.3.3 < "$cin" > "$cout" & local cpid=$! @@ -180,27 +178,20 @@ do_transfer() wait $spid local rets=$? 
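The simult_flows.sh hunk above drops the shell-side timing (date +%s%3N around the transfer) and instead hands the budget to mptcp_connect through the new -T flag, which takes the expected maximum runtime in milliseconds and makes the tool fail the run if the measured transfer takes longer. A hedged standalone sketch, with port, address and budget chosen only for illustration:

	./mptcp_connect -t 30 -l -p 10001 -T 5000 0.0.0.0 < "$sin" > "$sout" &
	./mptcp_connect -t 30 -p 10001 -T 5000 10.0.3.3 < "$cin" > "$cout"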
- local stop - stop=$(date +%s%3N) - if $capture; then sleep 1 kill ${cappid_listener} kill ${cappid_connector} fi - local duration - duration=$((stop-start)) - cmp $sin $cout > /dev/null 2>&1 local cmps=$? cmp $cin $sout > /dev/null 2>&1 local cmpc=$? - printf "%16s" "$duration max $max_time " + printf "%-16s" " max $max_time " if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \ - [ $cmpc -eq 0 ] && [ $cmps -eq 0 ] && \ - [ $duration -lt $max_time ]; then + [ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then echo "[ OK ]" cat "$capout" return 0 @@ -244,23 +235,24 @@ run_test() tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1 tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2 - # time is measure in ms - local time=$((size * 8 * 1000 / (( $rate1 + $rate2) * 1024 *1024) )) + # time is measured in ms, account for transfer size, affegated link speed + # and header overhead (10%) + local time=$((size * 8 * 1000 * 10 / (( $rate1 + $rate2) * 1024 *1024 * 9) )) # mptcp_connect will do some sleeps to allow the mp_join handshake - # completion - time=$((time + 1350)) + # completion (see mptcp_connect): 200ms on each side, add some slack + time=$((time + 450)) - printf "%-50s" "$msg" - do_transfer $small $large $((time * 11 / 10)) + printf "%-60s" "$msg" + do_transfer $small $large $time lret=$? if [ $lret -ne 0 ]; then ret=$lret [ $bail -eq 0 ] || exit $ret fi - printf "%-50s" "$msg - reverse direction" - do_transfer $large $small $((time * 11 / 10)) + printf "%-60s" "$msg - reverse direction" + do_transfer $large $small $time lret=$? if [ $lret -ne 0 ]; then ret=$lret diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 97fceb9be9ed36..d3047e251fe938 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -29,6 +29,8 @@ struct tls_crypto_info_keys { union { struct tls12_crypto_info_aes_gcm_128 aes128; struct tls12_crypto_info_chacha20_poly1305 chacha20; + struct tls12_crypto_info_sm4_gcm sm4gcm; + struct tls12_crypto_info_sm4_ccm sm4ccm; }; size_t len; }; @@ -49,6 +51,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, tls12->aes128.info.version = tls_version; tls12->aes128.info.cipher_type = cipher_type; break; + case TLS_CIPHER_SM4_GCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_gcm); + tls12->sm4gcm.info.version = tls_version; + tls12->sm4gcm.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_SM4_CCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_ccm); + tls12->sm4ccm.info.version = tls_version; + tls12->sm4ccm.info.cipher_type = cipher_type; + break; default: break; } @@ -148,13 +160,13 @@ FIXTURE_VARIANT(tls) uint16_t cipher_type; }; -FIXTURE_VARIANT_ADD(tls, 12_gcm) +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm) { .tls_version = TLS_1_2_VERSION, .cipher_type = TLS_CIPHER_AES_GCM_128, }; -FIXTURE_VARIANT_ADD(tls, 13_gcm) +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm) { .tls_version = TLS_1_3_VERSION, .cipher_type = TLS_CIPHER_AES_GCM_128, @@ -172,6 +184,18 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha) .cipher_type = TLS_CIPHER_CHACHA20_POLY1305, }; +FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_GCM, +}; + +FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_CCM, +}; + FIXTURE_SETUP(tls) { struct tls_crypto_info_keys tls12; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 1d64891e649237..d425688cf59c15 
100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -276,12 +276,12 @@ int seccomp(unsigned int op, unsigned int flags, void *args) } #endif -#if __BYTE_ORDER == __LITTLE_ENDIAN +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) -#elif __BYTE_ORDER == __BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32)) #else -#error "wut? Unknown __BYTE_ORDER?!" +#error "wut? Unknown __BYTE_ORDER__?!" #endif #define SIBLING_EXIT_UNKILLED 0xbadbeef
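Returning to the transfer-time budget computed in simult_flows.sh above, a worked instance of the formula may help; the transfer size matches the script, while the link rates are assumed values for this example only:

	size=$((2 * 2048 * 4096))	# bytes, as set in setup()
	rate1=10; rate2=10		# Mbit/s, illustrative values
	time=$((size * 8 * 1000 * 10 / ((rate1 + rate2) * 1024 * 1024 * 9)))
	echo $time			# ~7111 ms: payload time plus 10% header overhead
	echo $((time + 450))		# ~7561 ms once the mp_join handshake slack is added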